Diffstat (limited to 'drivers/net/ethernet/ti')
-rw-r--r--   drivers/net/ethernet/ti/cpmac.c         |   2
-rw-r--r--   drivers/net/ethernet/ti/cpsw.c          | 171
-rw-r--r--   drivers/net/ethernet/ti/davinci_emac.c  |   4
-rw-r--r--   drivers/net/ethernet/ti/netcp_core.c    |  90
-rw-r--r--   drivers/net/ethernet/ti/netcp_ethss.c   | 456
5 files changed, 519 insertions, 204 deletions
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index dd9430043..cba3d9fcb 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -41,6 +41,8 @@ #include <linux/gpio.h> #include <linux/atomic.h> +#include <asm/mach-ar7/ar7.h> + MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index d155bf257..874fb297e 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -30,6 +30,7 @@ #include <linux/delay.h> #include <linux/pm_runtime.h> #include <linux/of.h> +#include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/of_device.h> #include <linux/if_vlan.h> @@ -365,7 +366,9 @@ struct cpsw_priv { spinlock_t lock; struct platform_device *pdev; struct net_device *ndev; - struct napi_struct napi; + struct device_node *phy_node; + struct napi_struct napi_rx; + struct napi_struct napi_tx; struct device *dev; struct cpsw_platform_data data; struct cpsw_ss_regs __iomem *regs; @@ -386,10 +389,12 @@ struct cpsw_priv { struct cpsw_ale *ale; bool rx_pause; bool tx_pause; + bool quirk_irq; + bool rx_irq_disabled; + bool tx_irq_disabled; /* snapshot of IRQ numbers */ u32 irqs_table[4]; u32 num_irqs; - bool irq_enabled; struct cpts *cpts; u32 emac_port; }; @@ -752,13 +757,15 @@ static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id) { struct cpsw_priv *priv = dev_id; + writel(0, &priv->wr_regs->tx_en); cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); - cpdma_chan_process(priv->txch, 128); - priv = cpsw_get_slave_priv(priv, 1); - if (priv) - cpdma_chan_process(priv->txch, 128); + if (priv->quirk_irq) { + disable_irq_nosync(priv->irqs_table[1]); + priv->tx_irq_disabled = true; + } + napi_schedule(&priv->napi_tx); return IRQ_HANDLED; } @@ -767,43 +774,49 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) struct cpsw_priv *priv = dev_id; cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); + writel(0, &priv->wr_regs->rx_en); - cpsw_intr_disable(priv); - if (priv->irq_enabled == true) { + if (priv->quirk_irq) { disable_irq_nosync(priv->irqs_table[0]); - priv->irq_enabled = false; + priv->rx_irq_disabled = true; } - if (netif_running(priv->ndev)) { - napi_schedule(&priv->napi); - return IRQ_HANDLED; - } + napi_schedule(&priv->napi_rx); + return IRQ_HANDLED; +} - priv = cpsw_get_slave_priv(priv, 1); - if (!priv) - return IRQ_NONE; +static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget) +{ + struct cpsw_priv *priv = napi_to_priv(napi_tx); + int num_tx; - if (netif_running(priv->ndev)) { - napi_schedule(&priv->napi); - return IRQ_HANDLED; + num_tx = cpdma_chan_process(priv->txch, budget); + if (num_tx < budget) { + napi_complete(napi_tx); + writel(0xff, &priv->wr_regs->tx_en); + if (priv->quirk_irq && priv->tx_irq_disabled) { + priv->tx_irq_disabled = false; + enable_irq(priv->irqs_table[1]); + } } - return IRQ_NONE; + + if (num_tx) + cpsw_dbg(priv, intr, "poll %d tx pkts\n", num_tx); + + return num_tx; } -static int cpsw_poll(struct napi_struct *napi, int budget) +static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget) { - struct cpsw_priv *priv = napi_to_priv(napi); + struct cpsw_priv *priv = napi_to_priv(napi_rx); int num_rx; num_rx = cpdma_chan_process(priv->rxch, budget); if (num_rx < budget) { - struct cpsw_priv *prim_cpsw; - - napi_complete(napi); - cpsw_intr_enable(priv); - prim_cpsw = cpsw_get_slave_priv(priv, 0); - if (prim_cpsw->irq_enabled == 
false) { - prim_cpsw->irq_enabled = true; + napi_complete(napi_rx); + writel(0xff, &priv->wr_regs->rx_en); + if (priv->quirk_irq && priv->rx_irq_disabled) { + priv->rx_irq_disabled = false; enable_irq(priv->irqs_table[0]); } } @@ -1134,7 +1147,11 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); - slave->phy = phy_connect(priv->ndev, slave->data->phy_id, + if (priv->phy_node) + slave->phy = of_phy_connect(priv->ndev, priv->phy_node, + &cpsw_adjust_link, 0, slave->data->phy_if); + else + slave->phy = phy_connect(priv->ndev, slave->data->phy_id, &cpsw_adjust_link, slave->data->phy_if); if (IS_ERR(slave->phy)) { dev_err(priv->dev, "phy %s not found on slave %d\n", @@ -1230,7 +1247,6 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) static int cpsw_ndo_open(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_priv *prim_cpsw; int i, ret; u32 reg; @@ -1260,6 +1276,8 @@ static int cpsw_ndo_open(struct net_device *ndev) ALE_ALL_PORTS << priv->host_port, 0, 0); if (!cpsw_common_res_usage_state(priv)) { + struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0); + /* setup tx dma to fixed prio and zero offset */ cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1); cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0); @@ -1273,6 +1291,19 @@ static int cpsw_ndo_open(struct net_device *ndev) /* Enable internal fifo flow control */ writel(0x7, &priv->regs->flow_control); + napi_enable(&priv_sl0->napi_rx); + napi_enable(&priv_sl0->napi_tx); + + if (priv_sl0->tx_irq_disabled) { + priv_sl0->tx_irq_disabled = false; + enable_irq(priv->irqs_table[1]); + } + + if (priv_sl0->rx_irq_disabled) { + priv_sl0->rx_irq_disabled = false; + enable_irq(priv->irqs_table[0]); + } + if (WARN_ON(!priv->data.rx_descs)) priv->data.rx_descs = 128; @@ -1311,18 +1342,9 @@ static int cpsw_ndo_open(struct net_device *ndev) cpsw_set_coalesce(ndev, &coal); } - napi_enable(&priv->napi); cpdma_ctlr_start(priv->dma); cpsw_intr_enable(priv); - prim_cpsw = cpsw_get_slave_priv(priv, 0); - if (prim_cpsw->irq_enabled == false) { - if ((priv == prim_cpsw) || !netif_running(prim_cpsw->ndev)) { - prim_cpsw->irq_enabled = true; - enable_irq(prim_cpsw->irqs_table[0]); - } - } - if (priv->data.dual_emac) priv->slaves[priv->emac_port].open_stat = true; return 0; @@ -1341,10 +1363,13 @@ static int cpsw_ndo_stop(struct net_device *ndev) cpsw_info(priv, ifdown, "shutting down cpsw device\n"); netif_stop_queue(priv->ndev); - napi_disable(&priv->napi); netif_carrier_off(priv->ndev); if (cpsw_common_res_usage_state(priv) <= 1) { + struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0); + + napi_disable(&priv_sl0->napi_rx); + napi_disable(&priv_sl0->napi_tx); cpts_unregister(priv->cpts); cpsw_intr_disable(priv); cpdma_ctlr_stop(priv->dma); @@ -1915,11 +1940,12 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, slave->port_vlan = data->dual_emac_res_vlan; } -static int cpsw_probe_dt(struct cpsw_platform_data *data, +static int cpsw_probe_dt(struct cpsw_priv *priv, struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct device_node *slave_node; + struct cpsw_platform_data *data = &priv->data; int i = 0, ret; u32 prop; @@ -2010,6 +2036,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, if (strcmp(slave_node->name, "slave")) continue; + priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 
0); parp = of_get_property(slave_node, "phy_id", &lenp); if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i); @@ -2025,7 +2052,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), PHY_ID_FMT, mdio->name, phyid); - slave_data->phy_if = of_get_phy_mode(slave_node); if (slave_data->phy_if < 0) { dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n", @@ -2127,7 +2153,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev, ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; - netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT); /* register the network device */ SET_NETDEV_DEV(ndev, &pdev->dev); @@ -2141,6 +2166,44 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev, return ret; } +#define CPSW_QUIRK_IRQ BIT(0) + +static struct platform_device_id cpsw_devtype[] = { + { + /* keep it for existing comaptibles */ + .name = "cpsw", + .driver_data = CPSW_QUIRK_IRQ, + }, { + .name = "am335x-cpsw", + .driver_data = CPSW_QUIRK_IRQ, + }, { + .name = "am4372-cpsw", + .driver_data = 0, + }, { + .name = "dra7-cpsw", + .driver_data = 0, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(platform, cpsw_devtype); + +enum ti_cpsw_type { + CPSW = 0, + AM335X_CPSW, + AM4372_CPSW, + DRA7_CPSW, +}; + +static const struct of_device_id cpsw_of_mtable[] = { + { .compatible = "ti,cpsw", .data = &cpsw_devtype[CPSW], }, + { .compatible = "ti,am335x-cpsw", .data = &cpsw_devtype[AM335X_CPSW], }, + { .compatible = "ti,am4372-cpsw", .data = &cpsw_devtype[AM4372_CPSW], }, + { .compatible = "ti,dra7-cpsw", .data = &cpsw_devtype[DRA7_CPSW], }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, cpsw_of_mtable); + static int cpsw_probe(struct platform_device *pdev) { struct cpsw_platform_data *data; @@ -2150,6 +2213,7 @@ static int cpsw_probe(struct platform_device *pdev) struct cpsw_ale_params ale_params; void __iomem *ss_regs; struct resource *res, *ss_res; + const struct of_device_id *of_id; u32 slave_offset, sliver_offset, slave_size; int ret = 0, i; int irq; @@ -2169,7 +2233,6 @@ static int cpsw_probe(struct platform_device *pdev) priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); priv->rx_packet_max = max(rx_packet_max, 128); priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL); - priv->irq_enabled = true; if (!priv->cpts) { dev_err(&pdev->dev, "error allocating cpts\n"); ret = -ENOMEM; @@ -2184,7 +2247,7 @@ static int cpsw_probe(struct platform_device *pdev) /* Select default pin state */ pinctrl_pm_select_default_state(&pdev->dev); - if (cpsw_probe_dt(&priv->data, pdev)) { + if (cpsw_probe_dt(priv, pdev)) { dev_err(&pdev->dev, "cpsw: platform data missing\n"); ret = -ENODEV; goto clean_runtime_disable_ret; @@ -2341,6 +2404,13 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_ale_ret; } + of_id = of_match_device(cpsw_of_mtable, &pdev->dev); + if (of_id) { + pdev->id_entry = of_id->data; + if (pdev->id_entry->driver_data) + priv->quirk_irq = true; + } + /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and * MISC IRQs which are always kept disabled with this driver so * we will not request them. 
@@ -2380,7 +2450,8 @@ static int cpsw_probe(struct platform_device *pdev) ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; - netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT); + netif_napi_add(ndev, &priv->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT); + netif_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT); /* register the network device */ SET_NETDEV_DEV(ndev, &pdev->dev); @@ -2504,12 +2575,6 @@ static int cpsw_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume); -static const struct of_device_id cpsw_of_mtable[] = { - { .compatible = "ti,cpsw", }, - { /* sentinel */ }, -}; -MODULE_DEVICE_TABLE(of, cpsw_of_mtable); - static struct platform_driver cpsw_driver = { .driver = { .name = "cpsw", diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index aeebc0a7b..a21c77bc1 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -2004,8 +2004,10 @@ static int davinci_emac_probe(struct platform_device *pdev) if (res_ctrl) { priv->ctrl_base = devm_ioremap_resource(&pdev->dev, res_ctrl); - if (IS_ERR(priv->ctrl_base)) + if (IS_ERR(priv->ctrl_base)) { + rc = PTR_ERR(priv->ctrl_base); goto no_pdata; + } } else { priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset; } diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 4755838c6..9f9832f0d 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -52,6 +52,8 @@ NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \ NETIF_MSG_RX_STATUS) +#define NETCP_EFUSE_ADDR_SWAP 2 + #define knav_queue_get_id(q) knav_queue_device_control(q, \ KNAV_QUEUE_GET_ID, (unsigned long)NULL) @@ -173,13 +175,22 @@ static void set_words(u32 *words, int num_words, u32 *desc) } /* Read the e-fuse value as 32 bit values to be endian independent */ -static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac) +static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap) { unsigned int addr0, addr1; addr1 = readl(efuse_mac + 4); addr0 = readl(efuse_mac); + switch (swap) { + case NETCP_EFUSE_ADDR_SWAP: + addr0 = addr1; + addr1 = readl(efuse_mac); + break; + default: + break; + } + x[0] = (addr1 & 0x0000ff00) >> 8; x[1] = addr1 & 0x000000ff; x[2] = (addr0 & 0xff000000) >> 24; @@ -280,13 +291,6 @@ static int netcp_module_probe(struct netcp_device *netcp_device, interface_list) { struct netcp_intf_modpriv *intf_modpriv; - /* If interface not registered then register now */ - if (!netcp_intf->netdev_registered) - ret = netcp_register_interface(netcp_intf); - - if (ret) - return -ENODEV; - intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv), GFP_KERNEL); if (!intf_modpriv) @@ -295,6 +299,11 @@ static int netcp_module_probe(struct netcp_device *netcp_device, interface = of_parse_phandle(netcp_intf->node_interface, module->name, 0); + if (!interface) { + devm_kfree(dev, intf_modpriv); + continue; + } + intf_modpriv->netcp_priv = netcp_intf; intf_modpriv->netcp_module = module; list_add_tail(&intf_modpriv->intf_list, @@ -312,6 +321,18 @@ static int netcp_module_probe(struct netcp_device *netcp_device, continue; } } + + /* Now register the interface with netdev */ + list_for_each_entry(netcp_intf, + &netcp_device->interface_head, + interface_list) { + /* If interface not registered then register now */ + if (!netcp_intf->netdev_registered) { + ret = netcp_register_interface(netcp_intf); + if (ret) + return 
-ENODEV; + } + } return 0; } @@ -346,7 +367,6 @@ int netcp_register_module(struct netcp_module *module) if (ret < 0) goto fail; } - mutex_unlock(&netcp_modules_lock); return 0; @@ -785,7 +805,7 @@ static void netcp_rxpool_free(struct netcp_intf *netcp) netcp->rx_pool = NULL; } -static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) +static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) { struct knav_dma_desc *hwdesc; unsigned int buf_len, dma_sz; @@ -799,7 +819,7 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) hwdesc = knav_pool_desc_get(netcp->rx_pool); if (IS_ERR_OR_NULL(hwdesc)) { dev_dbg(netcp->ndev_dev, "out of rx pool desc\n"); - return; + return -ENOMEM; } if (likely(fdq == 0)) { @@ -851,25 +871,26 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq) knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma, &dma_sz); knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0); - return; + return 0; fail: knav_pool_desc_put(netcp->rx_pool, hwdesc); + return -ENOMEM; } /* Refill Rx FDQ with descriptors & attached buffers */ static void netcp_rxpool_refill(struct netcp_intf *netcp) { u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0}; - int i; + int i, ret = 0; /* Calculate the FDQ deficit and refill */ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) { fdq_deficit[i] = netcp->rx_queue_depths[i] - knav_queue_get_count(netcp->rx_fdq[i]); - while (fdq_deficit[i]--) - netcp_allocate_rx_buf(netcp, i); + while (fdq_deficit[i]-- && !ret) + ret = netcp_allocate_rx_buf(netcp, i); } /* end for fdqs */ } @@ -882,12 +903,12 @@ static int netcp_rx_poll(struct napi_struct *napi, int budget) packets = netcp_process_rx_packets(netcp, budget); + netcp_rxpool_refill(netcp); if (packets < budget) { napi_complete(&netcp->rx_napi); knav_queue_enable_notify(netcp->rx_queue); } - netcp_rxpool_refill(netcp); return packets; } @@ -1373,7 +1394,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp) continue; dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n", naddr->addr, naddr->type); - mutex_lock(&netcp_modules_lock); for_each_module(netcp, priv) { module = priv->netcp_module; if (!module->del_addr) @@ -1382,7 +1402,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp) naddr); WARN_ON(error); } - mutex_unlock(&netcp_modules_lock); netcp_addr_del(netcp, naddr); } } @@ -1399,7 +1418,7 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp) continue; dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n", naddr->addr, naddr->type); - mutex_lock(&netcp_modules_lock); + for_each_module(netcp, priv) { module = priv->netcp_module; if (!module->add_addr) @@ -1407,7 +1426,6 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp) error = module->add_addr(priv->module_priv, naddr); WARN_ON(error); } - mutex_unlock(&netcp_modules_lock); } } @@ -1421,6 +1439,7 @@ static void netcp_set_rx_mode(struct net_device *ndev) ndev->flags & IFF_ALLMULTI || netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR); + spin_lock(&netcp->lock); /* first clear all marks */ netcp_addr_clear_mark(netcp); @@ -1439,6 +1458,7 @@ static void netcp_set_rx_mode(struct net_device *ndev) /* finally sweep and callout into modules */ netcp_addr_sweep_del(netcp); netcp_addr_sweep_add(netcp); + spin_unlock(&netcp->lock); } static void netcp_free_navigator_resources(struct netcp_intf *netcp) @@ -1603,7 +1623,6 @@ static int netcp_ndo_open(struct net_device *ndev) goto fail; } - mutex_lock(&netcp_modules_lock); for_each_module(netcp, 
intf_modpriv) { module = intf_modpriv->netcp_module; if (module->open) { @@ -1614,7 +1633,6 @@ static int netcp_ndo_open(struct net_device *ndev) } } } - mutex_unlock(&netcp_modules_lock); napi_enable(&netcp->rx_napi); napi_enable(&netcp->tx_napi); @@ -1631,7 +1649,6 @@ fail_open: if (module->close) module->close(intf_modpriv->module_priv, ndev); } - mutex_unlock(&netcp_modules_lock); fail: netcp_free_navigator_resources(netcp); @@ -1655,7 +1672,6 @@ static int netcp_ndo_stop(struct net_device *ndev) napi_disable(&netcp->rx_napi); napi_disable(&netcp->tx_napi); - mutex_lock(&netcp_modules_lock); for_each_module(netcp, intf_modpriv) { module = intf_modpriv->netcp_module; if (module->close) { @@ -1664,7 +1680,6 @@ static int netcp_ndo_stop(struct net_device *ndev) dev_err(netcp->ndev_dev, "Close failed\n"); } } - mutex_unlock(&netcp_modules_lock); /* Recycle Rx descriptors from completion queue */ netcp_empty_rx_queue(netcp); @@ -1692,7 +1707,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev, if (!netif_running(ndev)) return -EINVAL; - mutex_lock(&netcp_modules_lock); for_each_module(netcp, intf_modpriv) { module = intf_modpriv->netcp_module; if (!module->ioctl) @@ -1708,7 +1722,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev, } out: - mutex_unlock(&netcp_modules_lock); return (ret == 0) ? 0 : err; } @@ -1743,11 +1756,12 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) struct netcp_intf *netcp = netdev_priv(ndev); struct netcp_intf_modpriv *intf_modpriv; struct netcp_module *module; + unsigned long flags; int err = 0; dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid); - mutex_lock(&netcp_modules_lock); + spin_lock_irqsave(&netcp->lock, flags); for_each_module(netcp, intf_modpriv) { module = intf_modpriv->netcp_module; if ((module->add_vid) && (vid != 0)) { @@ -1759,7 +1773,8 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) } } } - mutex_unlock(&netcp_modules_lock); + spin_unlock_irqrestore(&netcp->lock, flags); + return err; } @@ -1768,11 +1783,12 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) struct netcp_intf *netcp = netdev_priv(ndev); struct netcp_intf_modpriv *intf_modpriv; struct netcp_module *module; + unsigned long flags; int err = 0; dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid); - mutex_lock(&netcp_modules_lock); + spin_lock_irqsave(&netcp->lock, flags); for_each_module(netcp, intf_modpriv) { module = intf_modpriv->netcp_module; if (module->del_vid) { @@ -1784,7 +1800,7 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) } } } - mutex_unlock(&netcp_modules_lock); + spin_unlock_irqrestore(&netcp->lock, flags); return err; } @@ -1901,7 +1917,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device, goto quit; } - emac_arch_get_mac_addr(efuse_mac_addr, efuse); + emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac); if (is_valid_ether_addr(efuse_mac_addr)) ether_addr_copy(ndev->dev_addr, efuse_mac_addr); else @@ -2029,7 +2045,6 @@ static int netcp_probe(struct platform_device *pdev) struct device_node *child, *interfaces; struct netcp_device *netcp_device; struct device *dev = &pdev->dev; - struct netcp_module *module; int ret; if (!node) { @@ -2076,14 +2091,6 @@ static int netcp_probe(struct platform_device *pdev) /* Add the device instance to the list */ list_add_tail(&netcp_device->device_list, &netcp_devices); - /* Probe & attach any modules already registered */ - mutex_lock(&netcp_modules_lock); - 
for_each_netcp_module(module) { - ret = netcp_module_probe(netcp_device, module); - if (ret < 0) - dev_err(dev, "module(%s) probe failed\n", module->name); - } - mutex_unlock(&netcp_modules_lock); return 0; probe_quit_interface: @@ -2141,7 +2148,6 @@ MODULE_DEVICE_TABLE(of, of_match); static struct platform_driver netcp_driver = { .driver = { .name = "netcp-1.0", - .owner = THIS_MODULE, .of_match_table = of_match, }, .probe = netcp_probe, diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 1974a8ae7..4e70e7586 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -77,6 +77,7 @@ #define GBENU_ALE_OFFSET 0x1e000 #define GBENU_HOST_PORT_NUM 0 #define GBENU_NUM_ALE_ENTRIES 1024 +#define GBENU_SGMII_MODULE_SIZE 0x100 /* 10G Ethernet SS defines */ #define XGBE_MODULE_NAME "netcp-xgbe" @@ -149,8 +150,8 @@ #define XGBE_STATS2_MODULE 2 /* s: 0-based slave_port */ -#define SGMII_BASE(s) \ - (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs) +#define SGMII_BASE(d, s) \ + (((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs) #define GBE_TX_QUEUE 648 #define GBE_TXHOOK_ORDER 0 @@ -295,8 +296,6 @@ struct xgbe_hw_stats { u32 rx_dma_overruns; }; -#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32)) - struct gbenu_ss_regs { u32 id_ver; u32 synce_count; /* NU */ @@ -480,7 +479,6 @@ struct gbenu_hw_stats { u32 tx_pri7_drop_bcnt; }; -#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32)) #define GBENU_HW_STATS_REG_MAP_SZ 0x200 struct gbe_ss_regs { @@ -615,7 +613,6 @@ struct gbe_hw_stats { u32 rx_dma_overruns; }; -#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32)) #define GBE_MAX_HW_STAT_MODS 9 #define GBE_HW_STATS_REG_MAP_SZ 0x100 @@ -646,6 +643,7 @@ struct gbe_priv { bool enable_ale; u8 max_num_slaves; u8 max_num_ports; /* max_num_slaves + 1 */ + u8 num_stats_mods; struct netcp_tx_pipe tx_pipe; int host_port; @@ -675,6 +673,7 @@ struct gbe_priv { struct net_device *dummy_ndev; u64 *hw_stats; + u32 *hw_stats_prev; const struct netcp_ethtool_stat *et_stats; int num_et_stats; /* Lock for updating the hwstats */ @@ -874,7 +873,7 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = { }; /* This is the size of entries in GBENU_STATS_HOST */ -#define GBENU_ET_STATS_HOST_SIZE 33 +#define GBENU_ET_STATS_HOST_SIZE 52 #define GBENU_STATS_HOST(field) \ { \ @@ -883,8 +882,8 @@ static const struct netcp_ethtool_stat gbe13_et_stats[] = { offsetof(struct gbenu_hw_stats, field) \ } -/* This is the size of entries in GBENU_STATS_HOST */ -#define GBENU_ET_STATS_PORT_SIZE 46 +/* This is the size of entries in GBENU_STATS_PORT */ +#define GBENU_ET_STATS_PORT_SIZE 65 #define GBENU_STATS_P1(field) \ { \ @@ -976,7 +975,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_HOST(ale_unknown_mcast_bytes), GBENU_STATS_HOST(ale_unknown_bcast), GBENU_STATS_HOST(ale_unknown_bcast_bytes), + GBENU_STATS_HOST(ale_pol_match), + GBENU_STATS_HOST(ale_pol_match_red), + GBENU_STATS_HOST(ale_pol_match_yellow), GBENU_STATS_HOST(tx_mem_protect_err), + GBENU_STATS_HOST(tx_pri0_drop), + GBENU_STATS_HOST(tx_pri1_drop), + GBENU_STATS_HOST(tx_pri2_drop), + GBENU_STATS_HOST(tx_pri3_drop), + GBENU_STATS_HOST(tx_pri4_drop), + GBENU_STATS_HOST(tx_pri5_drop), + GBENU_STATS_HOST(tx_pri6_drop), + GBENU_STATS_HOST(tx_pri7_drop), + GBENU_STATS_HOST(tx_pri0_drop_bcnt), + GBENU_STATS_HOST(tx_pri1_drop_bcnt), + GBENU_STATS_HOST(tx_pri2_drop_bcnt), + 
GBENU_STATS_HOST(tx_pri3_drop_bcnt), + GBENU_STATS_HOST(tx_pri4_drop_bcnt), + GBENU_STATS_HOST(tx_pri5_drop_bcnt), + GBENU_STATS_HOST(tx_pri6_drop_bcnt), + GBENU_STATS_HOST(tx_pri7_drop_bcnt), /* GBENU Module 1 */ GBENU_STATS_P1(rx_good_frames), GBENU_STATS_P1(rx_broadcast_frames), @@ -1023,7 +1041,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P1(ale_unknown_mcast_bytes), GBENU_STATS_P1(ale_unknown_bcast), GBENU_STATS_P1(ale_unknown_bcast_bytes), + GBENU_STATS_P1(ale_pol_match), + GBENU_STATS_P1(ale_pol_match_red), + GBENU_STATS_P1(ale_pol_match_yellow), GBENU_STATS_P1(tx_mem_protect_err), + GBENU_STATS_P1(tx_pri0_drop), + GBENU_STATS_P1(tx_pri1_drop), + GBENU_STATS_P1(tx_pri2_drop), + GBENU_STATS_P1(tx_pri3_drop), + GBENU_STATS_P1(tx_pri4_drop), + GBENU_STATS_P1(tx_pri5_drop), + GBENU_STATS_P1(tx_pri6_drop), + GBENU_STATS_P1(tx_pri7_drop), + GBENU_STATS_P1(tx_pri0_drop_bcnt), + GBENU_STATS_P1(tx_pri1_drop_bcnt), + GBENU_STATS_P1(tx_pri2_drop_bcnt), + GBENU_STATS_P1(tx_pri3_drop_bcnt), + GBENU_STATS_P1(tx_pri4_drop_bcnt), + GBENU_STATS_P1(tx_pri5_drop_bcnt), + GBENU_STATS_P1(tx_pri6_drop_bcnt), + GBENU_STATS_P1(tx_pri7_drop_bcnt), /* GBENU Module 2 */ GBENU_STATS_P2(rx_good_frames), GBENU_STATS_P2(rx_broadcast_frames), @@ -1070,7 +1107,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P2(ale_unknown_mcast_bytes), GBENU_STATS_P2(ale_unknown_bcast), GBENU_STATS_P2(ale_unknown_bcast_bytes), + GBENU_STATS_P2(ale_pol_match), + GBENU_STATS_P2(ale_pol_match_red), + GBENU_STATS_P2(ale_pol_match_yellow), GBENU_STATS_P2(tx_mem_protect_err), + GBENU_STATS_P2(tx_pri0_drop), + GBENU_STATS_P2(tx_pri1_drop), + GBENU_STATS_P2(tx_pri2_drop), + GBENU_STATS_P2(tx_pri3_drop), + GBENU_STATS_P2(tx_pri4_drop), + GBENU_STATS_P2(tx_pri5_drop), + GBENU_STATS_P2(tx_pri6_drop), + GBENU_STATS_P2(tx_pri7_drop), + GBENU_STATS_P2(tx_pri0_drop_bcnt), + GBENU_STATS_P2(tx_pri1_drop_bcnt), + GBENU_STATS_P2(tx_pri2_drop_bcnt), + GBENU_STATS_P2(tx_pri3_drop_bcnt), + GBENU_STATS_P2(tx_pri4_drop_bcnt), + GBENU_STATS_P2(tx_pri5_drop_bcnt), + GBENU_STATS_P2(tx_pri6_drop_bcnt), + GBENU_STATS_P2(tx_pri7_drop_bcnt), /* GBENU Module 3 */ GBENU_STATS_P3(rx_good_frames), GBENU_STATS_P3(rx_broadcast_frames), @@ -1117,7 +1173,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P3(ale_unknown_mcast_bytes), GBENU_STATS_P3(ale_unknown_bcast), GBENU_STATS_P3(ale_unknown_bcast_bytes), + GBENU_STATS_P3(ale_pol_match), + GBENU_STATS_P3(ale_pol_match_red), + GBENU_STATS_P3(ale_pol_match_yellow), GBENU_STATS_P3(tx_mem_protect_err), + GBENU_STATS_P3(tx_pri0_drop), + GBENU_STATS_P3(tx_pri1_drop), + GBENU_STATS_P3(tx_pri2_drop), + GBENU_STATS_P3(tx_pri3_drop), + GBENU_STATS_P3(tx_pri4_drop), + GBENU_STATS_P3(tx_pri5_drop), + GBENU_STATS_P3(tx_pri6_drop), + GBENU_STATS_P3(tx_pri7_drop), + GBENU_STATS_P3(tx_pri0_drop_bcnt), + GBENU_STATS_P3(tx_pri1_drop_bcnt), + GBENU_STATS_P3(tx_pri2_drop_bcnt), + GBENU_STATS_P3(tx_pri3_drop_bcnt), + GBENU_STATS_P3(tx_pri4_drop_bcnt), + GBENU_STATS_P3(tx_pri5_drop_bcnt), + GBENU_STATS_P3(tx_pri6_drop_bcnt), + GBENU_STATS_P3(tx_pri7_drop_bcnt), /* GBENU Module 4 */ GBENU_STATS_P4(rx_good_frames), GBENU_STATS_P4(rx_broadcast_frames), @@ -1164,7 +1239,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P4(ale_unknown_mcast_bytes), GBENU_STATS_P4(ale_unknown_bcast), GBENU_STATS_P4(ale_unknown_bcast_bytes), + GBENU_STATS_P4(ale_pol_match), + GBENU_STATS_P4(ale_pol_match_red), + 
GBENU_STATS_P4(ale_pol_match_yellow), GBENU_STATS_P4(tx_mem_protect_err), + GBENU_STATS_P4(tx_pri0_drop), + GBENU_STATS_P4(tx_pri1_drop), + GBENU_STATS_P4(tx_pri2_drop), + GBENU_STATS_P4(tx_pri3_drop), + GBENU_STATS_P4(tx_pri4_drop), + GBENU_STATS_P4(tx_pri5_drop), + GBENU_STATS_P4(tx_pri6_drop), + GBENU_STATS_P4(tx_pri7_drop), + GBENU_STATS_P4(tx_pri0_drop_bcnt), + GBENU_STATS_P4(tx_pri1_drop_bcnt), + GBENU_STATS_P4(tx_pri2_drop_bcnt), + GBENU_STATS_P4(tx_pri3_drop_bcnt), + GBENU_STATS_P4(tx_pri4_drop_bcnt), + GBENU_STATS_P4(tx_pri5_drop_bcnt), + GBENU_STATS_P4(tx_pri6_drop_bcnt), + GBENU_STATS_P4(tx_pri7_drop_bcnt), /* GBENU Module 5 */ GBENU_STATS_P5(rx_good_frames), GBENU_STATS_P5(rx_broadcast_frames), @@ -1211,7 +1305,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P5(ale_unknown_mcast_bytes), GBENU_STATS_P5(ale_unknown_bcast), GBENU_STATS_P5(ale_unknown_bcast_bytes), + GBENU_STATS_P5(ale_pol_match), + GBENU_STATS_P5(ale_pol_match_red), + GBENU_STATS_P5(ale_pol_match_yellow), GBENU_STATS_P5(tx_mem_protect_err), + GBENU_STATS_P5(tx_pri0_drop), + GBENU_STATS_P5(tx_pri1_drop), + GBENU_STATS_P5(tx_pri2_drop), + GBENU_STATS_P5(tx_pri3_drop), + GBENU_STATS_P5(tx_pri4_drop), + GBENU_STATS_P5(tx_pri5_drop), + GBENU_STATS_P5(tx_pri6_drop), + GBENU_STATS_P5(tx_pri7_drop), + GBENU_STATS_P5(tx_pri0_drop_bcnt), + GBENU_STATS_P5(tx_pri1_drop_bcnt), + GBENU_STATS_P5(tx_pri2_drop_bcnt), + GBENU_STATS_P5(tx_pri3_drop_bcnt), + GBENU_STATS_P5(tx_pri4_drop_bcnt), + GBENU_STATS_P5(tx_pri5_drop_bcnt), + GBENU_STATS_P5(tx_pri6_drop_bcnt), + GBENU_STATS_P5(tx_pri7_drop_bcnt), /* GBENU Module 6 */ GBENU_STATS_P6(rx_good_frames), GBENU_STATS_P6(rx_broadcast_frames), @@ -1258,7 +1371,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P6(ale_unknown_mcast_bytes), GBENU_STATS_P6(ale_unknown_bcast), GBENU_STATS_P6(ale_unknown_bcast_bytes), + GBENU_STATS_P6(ale_pol_match), + GBENU_STATS_P6(ale_pol_match_red), + GBENU_STATS_P6(ale_pol_match_yellow), GBENU_STATS_P6(tx_mem_protect_err), + GBENU_STATS_P6(tx_pri0_drop), + GBENU_STATS_P6(tx_pri1_drop), + GBENU_STATS_P6(tx_pri2_drop), + GBENU_STATS_P6(tx_pri3_drop), + GBENU_STATS_P6(tx_pri4_drop), + GBENU_STATS_P6(tx_pri5_drop), + GBENU_STATS_P6(tx_pri6_drop), + GBENU_STATS_P6(tx_pri7_drop), + GBENU_STATS_P6(tx_pri0_drop_bcnt), + GBENU_STATS_P6(tx_pri1_drop_bcnt), + GBENU_STATS_P6(tx_pri2_drop_bcnt), + GBENU_STATS_P6(tx_pri3_drop_bcnt), + GBENU_STATS_P6(tx_pri4_drop_bcnt), + GBENU_STATS_P6(tx_pri5_drop_bcnt), + GBENU_STATS_P6(tx_pri6_drop_bcnt), + GBENU_STATS_P6(tx_pri7_drop_bcnt), /* GBENU Module 7 */ GBENU_STATS_P7(rx_good_frames), GBENU_STATS_P7(rx_broadcast_frames), @@ -1305,7 +1437,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P7(ale_unknown_mcast_bytes), GBENU_STATS_P7(ale_unknown_bcast), GBENU_STATS_P7(ale_unknown_bcast_bytes), + GBENU_STATS_P7(ale_pol_match), + GBENU_STATS_P7(ale_pol_match_red), + GBENU_STATS_P7(ale_pol_match_yellow), GBENU_STATS_P7(tx_mem_protect_err), + GBENU_STATS_P7(tx_pri0_drop), + GBENU_STATS_P7(tx_pri1_drop), + GBENU_STATS_P7(tx_pri2_drop), + GBENU_STATS_P7(tx_pri3_drop), + GBENU_STATS_P7(tx_pri4_drop), + GBENU_STATS_P7(tx_pri5_drop), + GBENU_STATS_P7(tx_pri6_drop), + GBENU_STATS_P7(tx_pri7_drop), + GBENU_STATS_P7(tx_pri0_drop_bcnt), + GBENU_STATS_P7(tx_pri1_drop_bcnt), + GBENU_STATS_P7(tx_pri2_drop_bcnt), + GBENU_STATS_P7(tx_pri3_drop_bcnt), + GBENU_STATS_P7(tx_pri4_drop_bcnt), + GBENU_STATS_P7(tx_pri5_drop_bcnt), + GBENU_STATS_P7(tx_pri6_drop_bcnt), + 
GBENU_STATS_P7(tx_pri7_drop_bcnt), /* GBENU Module 8 */ GBENU_STATS_P8(rx_good_frames), GBENU_STATS_P8(rx_broadcast_frames), @@ -1352,7 +1503,26 @@ static const struct netcp_ethtool_stat gbenu_et_stats[] = { GBENU_STATS_P8(ale_unknown_mcast_bytes), GBENU_STATS_P8(ale_unknown_bcast), GBENU_STATS_P8(ale_unknown_bcast_bytes), + GBENU_STATS_P8(ale_pol_match), + GBENU_STATS_P8(ale_pol_match_red), + GBENU_STATS_P8(ale_pol_match_yellow), GBENU_STATS_P8(tx_mem_protect_err), + GBENU_STATS_P8(tx_pri0_drop), + GBENU_STATS_P8(tx_pri1_drop), + GBENU_STATS_P8(tx_pri2_drop), + GBENU_STATS_P8(tx_pri3_drop), + GBENU_STATS_P8(tx_pri4_drop), + GBENU_STATS_P8(tx_pri5_drop), + GBENU_STATS_P8(tx_pri6_drop), + GBENU_STATS_P8(tx_pri7_drop), + GBENU_STATS_P8(tx_pri0_drop_bcnt), + GBENU_STATS_P8(tx_pri1_drop_bcnt), + GBENU_STATS_P8(tx_pri2_drop_bcnt), + GBENU_STATS_P8(tx_pri3_drop_bcnt), + GBENU_STATS_P8(tx_pri4_drop_bcnt), + GBENU_STATS_P8(tx_pri5_drop_bcnt), + GBENU_STATS_P8(tx_pri6_drop_bcnt), + GBENU_STATS_P8(tx_pri7_drop_bcnt), }; #define XGBE_STATS0_INFO(field) \ @@ -1554,70 +1724,97 @@ static int keystone_get_sset_count(struct net_device *ndev, int stringset) } } -static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data) +static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod) +{ + void __iomem *base = gbe_dev->hw_stats_regs[stats_mod]; + u32 __iomem *p_stats_entry; + int i; + + for (i = 0; i < gbe_dev->num_et_stats; i++) { + if (gbe_dev->et_stats[i].type == stats_mod) { + p_stats_entry = base + gbe_dev->et_stats[i].offset; + gbe_dev->hw_stats[i] = 0; + gbe_dev->hw_stats_prev[i] = readl(p_stats_entry); + } + } +} + +static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev, + int et_stats_entry) { void __iomem *base = NULL; - u32 __iomem *p; - u32 tmp = 0; + u32 __iomem *p_stats_entry; + u32 curr, delta; + + /* The hw_stats_regs pointers are already + * properly set to point to the right base: + */ + base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type]; + p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset; + curr = readl(p_stats_entry); + delta = curr - gbe_dev->hw_stats_prev[et_stats_entry]; + gbe_dev->hw_stats_prev[et_stats_entry] = curr; + gbe_dev->hw_stats[et_stats_entry] += delta; +} + +static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data) +{ int i; for (i = 0; i < gbe_dev->num_et_stats; i++) { - base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type]; - p = base + gbe_dev->et_stats[i].offset; - tmp = readl(p); - gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp; + gbe_update_hw_stats_entry(gbe_dev, i); + if (data) data[i] = gbe_dev->hw_stats[i]; - /* write-to-decrement: - * new register value = old register value - write value - */ - writel(tmp, p); } } -static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data) +static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev, + int stats_mod) { - void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0]; - void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1]; - u64 *hw_stats = &gbe_dev->hw_stats[0]; - void __iomem *base = NULL; - u32 __iomem *p; - u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2); - int i, j, pair; + u32 val; - for (pair = 0; pair < 2; pair++) { - val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); + val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); - if (pair == 0) - val &= ~GBE_STATS_CD_SEL; - else - val |= GBE_STATS_CD_SEL; + switch (stats_mod) { + case GBE_STATSA_MODULE: + case 
GBE_STATSB_MODULE: + val &= ~GBE_STATS_CD_SEL; + break; + case GBE_STATSC_MODULE: + case GBE_STATSD_MODULE: + val |= GBE_STATS_CD_SEL; + break; + default: + return; + } - /* make the stat modules visible */ - writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); + /* make the stat module visible */ + writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en)); +} - for (i = 0; i < pair_size; i++) { - j = pair * pair_size + i; - switch (gbe_dev->et_stats[j].type) { - case GBE_STATSA_MODULE: - case GBE_STATSC_MODULE: - base = gbe_statsa; - break; - case GBE_STATSB_MODULE: - case GBE_STATSD_MODULE: - base = gbe_statsb; - break; - } +static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod) +{ + gbe_stats_mod_visible_ver14(gbe_dev, stats_mod); + gbe_reset_mod_stats(gbe_dev, stats_mod); +} + +static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data) +{ + u32 half_num_et_stats = (gbe_dev->num_et_stats / 2); + int et_entry, j, pair; + + for (pair = 0; pair < 2; pair++) { + gbe_stats_mod_visible_ver14(gbe_dev, (pair ? + GBE_STATSC_MODULE : + GBE_STATSA_MODULE)); + + for (j = 0; j < half_num_et_stats; j++) { + et_entry = pair * half_num_et_stats + j; + gbe_update_hw_stats_entry(gbe_dev, et_entry); - p = base + gbe_dev->et_stats[j].offset; - tmp = readl(p); - hw_stats[j] += tmp; if (data) - data[j] = hw_stats[j]; - /* write-to-decrement: - * new register value = old register value - write value - */ - writel(tmp, p); + data[et_entry] = gbe_dev->hw_stats[et_entry]; } } } @@ -1801,13 +1998,8 @@ static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev, return; if (!SLAVE_LINK_IS_XGMII(slave)) { - if (gbe_dev->ss_version == GBE_SS_VERSION_14) - sgmii_link_state = - netcp_sgmii_get_port_link(SGMII_BASE(sp), sp); - else - sgmii_link_state = - netcp_sgmii_get_port_link( - gbe_dev->sgmii_port_regs, sp); + sgmii_link_state = + netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp); } phy_link_state = gbe_phy_link_status(slave); @@ -1904,17 +2096,11 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave, static void gbe_sgmii_rtreset(struct gbe_priv *priv, struct gbe_slave *slave, bool set) { - void __iomem *sgmii_port_regs; - if (SLAVE_LINK_IS_XGMII(slave)) return; - if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) - sgmii_port_regs = priv->sgmii_port34_regs; - else - sgmii_port_regs = priv->sgmii_port_regs; - - netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set); + netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num), + slave->slave_num, set); } static void gbe_slave_stop(struct gbe_intf *intf) @@ -1940,17 +2126,12 @@ static void gbe_slave_stop(struct gbe_intf *intf) static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave) { - void __iomem *sgmii_port_regs; - - sgmii_port_regs = priv->sgmii_port_regs; - if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2)) - sgmii_port_regs = priv->sgmii_port34_regs; + if (SLAVE_LINK_IS_XGMII(slave)) + return; - if (!SLAVE_LINK_IS_XGMII(slave)) { - netcp_sgmii_reset(sgmii_port_regs, slave->slave_num); - netcp_sgmii_config(sgmii_port_regs, slave->slave_num, - slave->link_interface); - } + netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num); + netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num, + slave->link_interface); } static int gbe_slave_open(struct gbe_intf *gbe_intf) @@ -2207,14 +2388,15 @@ static void netcp_ethss_timer(unsigned long arg) 
netcp_ethss_update_link_state(gbe_dev, slave, NULL); } - spin_lock_bh(&gbe_dev->hw_stats_lock); + /* A timer runs as a BH, no need to block them */ + spin_lock(&gbe_dev->hw_stats_lock); if (gbe_dev->ss_version == GBE_SS_VERSION_14) gbe_update_stats_ver14(gbe_dev, NULL); else gbe_update_stats(gbe_dev, NULL); - spin_unlock_bh(&gbe_dev->hw_stats_lock); + spin_unlock(&gbe_dev->hw_stats_lock); gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL; add_timer(&gbe_dev->timer); @@ -2455,8 +2637,10 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, mac_phy_link = true; slave->open = true; - if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) + if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) { + of_node_put(port); break; + } } /* of_phy_connect() is needed only for MAC-PHY interface */ @@ -2571,15 +2755,28 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, } gbe_dev->xgbe_serdes_regs = regs; + gbe_dev->num_stats_mods = gbe_dev->max_num_ports; + gbe_dev->et_stats = xgbe10_et_stats; + gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats); + gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, - XGBE10_NUM_STAT_ENTRIES * - (gbe_dev->max_num_ports) * sizeof(u64), - GFP_KERNEL); + gbe_dev->num_et_stats * sizeof(u64), + GFP_KERNEL); if (!gbe_dev->hw_stats) { dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); return -ENOMEM; } + gbe_dev->hw_stats_prev = + devm_kzalloc(gbe_dev->dev, + gbe_dev->num_et_stats * sizeof(u32), + GFP_KERNEL); + if (!gbe_dev->hw_stats_prev) { + dev_err(gbe_dev->dev, + "hw_stats_prev memory allocation failed\n"); + return -ENOMEM; + } + gbe_dev->ss_version = XGBE_SS_VERSION_10; gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + XGBE10_SGMII_MODULE_OFFSET; @@ -2593,8 +2790,6 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, gbe_dev->ale_ports = gbe_dev->max_num_ports; gbe_dev->host_port = XGBE10_HOST_PORT_NUM; gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES; - gbe_dev->et_stats = xgbe10_et_stats; - gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats); gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1; /* Subsystem registers */ @@ -2679,30 +2874,45 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev, } gbe_dev->switch_regs = regs; + gbe_dev->num_stats_mods = gbe_dev->max_num_slaves; + gbe_dev->et_stats = gbe13_et_stats; + gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats); + gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, - GBE13_NUM_HW_STAT_ENTRIES * - gbe_dev->max_num_slaves * sizeof(u64), - GFP_KERNEL); + gbe_dev->num_et_stats * sizeof(u64), + GFP_KERNEL); if (!gbe_dev->hw_stats) { dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); return -ENOMEM; } + gbe_dev->hw_stats_prev = + devm_kzalloc(gbe_dev->dev, + gbe_dev->num_et_stats * sizeof(u32), + GFP_KERNEL); + if (!gbe_dev->hw_stats_prev) { + dev_err(gbe_dev->dev, + "hw_stats_prev memory allocation failed\n"); + return -ENOMEM; + } + gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET; gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET; + /* K2HK has only 2 hw stats modules visible at a time, so + * module 0 & 2 points to one base and + * module 1 & 3 points to the other base + */ for (i = 0; i < gbe_dev->max_num_slaves; i++) { gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET + - (GBE_HW_STATS_REG_MAP_SZ * i); + (GBE_HW_STATS_REG_MAP_SZ * (i & 0x1)); } gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET; gbe_dev->ale_ports = gbe_dev->max_num_ports; gbe_dev->host_port = GBE13_HOST_PORT_NUM; 
gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; - gbe_dev->et_stats = gbe13_et_stats; - gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats); gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL; /* Subsystem registers */ @@ -2729,15 +2939,34 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, void __iomem *regs; int i, ret; + gbe_dev->num_stats_mods = gbe_dev->max_num_ports; + gbe_dev->et_stats = gbenu_et_stats; + + if (IS_SS_ID_NU(gbe_dev)) + gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + + (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE); + else + gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + + GBENU_ET_STATS_PORT_SIZE; + gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev, - GBENU_NUM_HW_STAT_ENTRIES * - (gbe_dev->max_num_ports) * sizeof(u64), - GFP_KERNEL); + gbe_dev->num_et_stats * sizeof(u64), + GFP_KERNEL); if (!gbe_dev->hw_stats) { dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n"); return -ENOMEM; } + gbe_dev->hw_stats_prev = + devm_kzalloc(gbe_dev->dev, + gbe_dev->num_et_stats * sizeof(u32), + GFP_KERNEL); + if (!gbe_dev->hw_stats_prev) { + dev_err(gbe_dev->dev, + "hw_stats_prev memory allocation failed\n"); + return -ENOMEM; + } + ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, @@ -2755,6 +2984,14 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, gbe_dev->switch_regs = regs; gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET; + + /* Although sgmii modules are mem mapped to one contiguous + * region on GBENU devices, setting sgmii_port34_regs allows + * consistent code when accessing sgmii api + */ + gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs + + (2 * GBENU_SGMII_MODULE_SIZE); + gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET; for (i = 0; i < (gbe_dev->max_num_ports); i++) @@ -2765,16 +3002,8 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, gbe_dev->ale_ports = gbe_dev->max_num_ports; gbe_dev->host_port = GBENU_HOST_PORT_NUM; gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES; - gbe_dev->et_stats = gbenu_et_stats; gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1; - if (IS_SS_ID_NU(gbe_dev)) - gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + - (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE); - else - gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE + - GBENU_ET_STATS_PORT_SIZE; - /* Subsystem registers */ GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver); @@ -2804,7 +3033,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, struct cpsw_ale_params ale_params; struct gbe_priv *gbe_dev; u32 slave_num; - int ret = 0; + int i, ret = 0; if (!node) { dev_err(dev, "device tree info unavailable\n"); @@ -2910,8 +3139,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, continue; } gbe_dev->num_slaves++; - if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) + if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) { + of_node_put(interface); break; + } } of_node_put(interfaces); @@ -2951,6 +3182,15 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, /* initialize host port */ gbe_init_host_port(gbe_dev); + spin_lock_bh(&gbe_dev->hw_stats_lock); + for (i = 0; i < gbe_dev->num_stats_mods; i++) { + if (gbe_dev->ss_version == GBE_SS_VERSION_14) + gbe_reset_mod_stats_ver14(gbe_dev, i); + else + gbe_reset_mod_stats(gbe_dev, i); + } + spin_unlock_bh(&gbe_dev->hw_stats_lock); + init_timer(&gbe_dev->timer); gbe_dev->timer.data = (unsigned long)gbe_dev; 
gbe_dev->timer.function = netcp_ethss_timer;
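
Note on the netcp_ethss.c statistics rework shown above: the patch drops the old "write-to-decrement" scheme and instead has gbe_update_hw_stats_entry() read the raw 32-bit hardware counter, take the unsigned difference against a saved snapshot (hw_stats_prev), and add that delta to a 64-bit software counter (hw_stats). The following is a minimal, standalone sketch of that accumulation pattern only; the type and function names are illustrative and are not part of the driver.

/*
 * Sketch of the delta-accumulation scheme used by the new
 * gbe_update_hw_stats_entry(): keep the last raw 32-bit reading and add
 * the unsigned difference to a 64-bit running total, rather than writing
 * the value back to the hardware register.  Unsigned 32-bit subtraction
 * keeps the delta correct across a single counter wrap between polls.
 */
#include <stdint.h>

struct stat_acc {
	uint32_t prev;		/* last raw value read from the counter */
	uint64_t total;		/* monotonically growing software total */
};

/* 'curr' stands in for a readl() of the hardware statistics register. */
static inline void stat_accumulate(struct stat_acc *s, uint32_t curr)
{
	uint32_t delta = curr - s->prev;	/* wraps modulo 2^32 */

	s->prev = curr;
	s->total += delta;
}

This is also why the patch seeds hw_stats_prev from the registers in gbe_reset_mod_stats() during probe: the first delta is then measured from a known baseline instead of from zero.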