From b4b7ff4b08e691656c9d77c758fc355833128ac0 Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Wed, 20 Jan 2016 14:01:31 -0300
Subject: Linux-libre 4.4-gnu

---
 drivers/net/ethernet/marvell/Kconfig       |   1 +
 drivers/net/ethernet/marvell/mv643xx_eth.c |  44 +--
 drivers/net/ethernet/marvell/mvneta.c      | 458 ++++++++++++++++++++++------
 drivers/net/ethernet/marvell/mvpp2.c       |  52 ++--
 drivers/net/ethernet/marvell/sky2.c        |  12 +
 5 files changed, 420 insertions(+), 147 deletions(-)

diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 80af9ffce..a1c862b46 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -44,6 +44,7 @@ config MVNETA
 	tristate "Marvell Armada 370/38x/XP network interface support"
 	depends on PLAT_ORION
 	select MVMDIO
+	select FIXED_PHY
 	---help---
 	  This driver supports the network interface units in the Marvell
 	  ARMADA XP, ARMADA 370 and ARMADA 38x SoC family.
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index dfb6d5f79..4182290fd 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1618,7 +1618,6 @@ static void mv643xx_eth_get_drvinfo(struct net_device *dev,
 		sizeof(drvinfo->version));
 	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
 	strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
-	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
 }
 
 static int mv643xx_eth_nway_reset(struct net_device *dev)
@@ -1877,29 +1876,19 @@ static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
 	struct netdev_hw_addr *ha;
 	int i;
 
-	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
-		int port_num;
-		u32 accept;
+	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
+		goto promiscuous;
 
-oom:
-		port_num = mp->port_num;
-		accept = 0x01010101;
-		for (i = 0; i < 0x100; i += 4) {
-			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
-			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
-		}
-		return;
-	}
-
-	mc_spec = kzalloc(0x200, GFP_ATOMIC);
-	if (mc_spec == NULL)
-		goto oom;
-	mc_other = mc_spec + (0x100 >> 2);
+	/* Allocate both mc_spec and mc_other tables */
+	mc_spec = kcalloc(128, sizeof(u32), GFP_ATOMIC);
+	if (!mc_spec)
+		goto promiscuous;
+	mc_other = &mc_spec[64];
 
 	netdev_for_each_mc_addr(ha, dev) {
 		u8 *a = ha->addr;
 		u32 *table;
-		int entry;
+		u8 entry;
 
 		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
 			table = mc_spec;
@@ -1912,12 +1901,23 @@ oom:
 		table[entry >> 2] |= 1 << (8 * (entry & 3));
 	}
 
-	for (i = 0; i < 0x100; i += 4) {
-		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, mc_spec[i >> 2]);
-		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, mc_other[i >> 2]);
+	for (i = 0; i < 64; i++) {
+		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+		    mc_spec[i]);
+		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+		    mc_other[i]);
 	}
 
 	kfree(mc_spec);
+	return;
+
+promiscuous:
+	for (i = 0; i < 64; i++) {
+		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+		    0x01010101u);
+		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i * sizeof(u32),
+		    0x01010101u);
+	}
 }
 
 static void mv643xx_eth_set_rx_mode(struct net_device *dev)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 0e4924498..ed622fa29 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -32,10 +32,11 @@
 #include <linux/of_address.h>
 #include <linux/phy.h>
 #include <linux/clk.h>
+#include <linux/cpu.h>
 
 /* Registers */
 #define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
-#define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(1)
+#define      MVNETA_RXQ_HW_BUF_ALLOC            BIT(0)
 #define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK     (0xf    << 8)
 #define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)   ((offs) << 8)
 #define MVNETA_RXQ_THRESHOLD_REG(q)             (0x14c0 + ((q) << 2))
@@ -61,6 +62,7 @@
 #define MVNETA_WIN_SIZE(w)                      (0x2204 + ((w) << 3))
 #define MVNETA_WIN_REMAP(w)                     (0x2280 + ((w) << 2))
 #define MVNETA_BASE_ADDR_ENABLE                 0x2290
+#define MVNETA_ACCESS_PROTECT_ENABLE            0x2294
 #define MVNETA_PORT_CONFIG                      0x2400
 #define      MVNETA_UNI_PROMISC_MODE            BIT(0)
 #define      MVNETA_DEF_RXQ(q)                  ((q) << 1)
@@ -100,6 +102,8 @@
 #define MVNETA_TXQ_CMD                          0x2448
 #define      MVNETA_TXQ_DISABLE_SHIFT           8
 #define      MVNETA_TXQ_ENABLE_MASK             0x000000ff
+#define MVNETA_RX_DISCARD_FRAME_COUNT           0x2484
+#define MVNETA_OVERRUN_FRAME_COUNT              0x2488
 #define MVNETA_GMAC_CLOCK_DIVIDER               0x24f4
 #define      MVNETA_GMAC_1MS_CLOCK_ENABLE       BIT(31)
 #define MVNETA_ACC_MODE                         0x2500
@@ -156,7 +160,7 @@
 
 #define MVNETA_INTR_ENABLE                      0x25b8
 #define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK    0x0000ff00
-#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK    0xff000000  // note: neta says it's 0x000000FF
+#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK    0x000000ff
 
 #define MVNETA_RXQ_CMD                          0x2680
 #define      MVNETA_RXQ_DISABLE_SHIFT           8
@@ -191,7 +195,7 @@
 #define      MVNETA_GMAC_AN_FLOW_CTRL_EN        BIT(11)
 #define      MVNETA_GMAC_CONFIG_FULL_DUPLEX     BIT(12)
 #define      MVNETA_GMAC_AN_DUPLEX_EN           BIT(13)
-#define MVNETA_MIB_COUNTERS_BASE                0x3080
+#define MVNETA_MIB_COUNTERS_BASE                0x3000
 #define      MVNETA_MIB_LATE_COLLISION          0x7c
 #define MVNETA_DA_FILT_SPEC_MCAST               0x3400
 #define MVNETA_DA_FILT_OTH_MCAST                0x3500
@@ -239,6 +243,7 @@
 #define MVNETA_VLAN_TAG_LEN             4
 
 #define MVNETA_CPU_D_CACHE_LINE_SIZE    32
+#define MVNETA_TX_CSUM_DEF_SIZE         1600
 #define MVNETA_TX_CSUM_MAX_SIZE         9800
 #define MVNETA_ACC_MODE_EXT             1
@@ -277,6 +282,50 @@
 
 #define MVNETA_RX_BUF_SIZE(pkt_size)   ((pkt_size) + NET_SKB_PAD)
 
+struct mvneta_statistic {
+	unsigned short offset;
+	unsigned short type;
+	const char name[ETH_GSTRING_LEN];
+};
+
+#define T_REG_32	32
+#define T_REG_64	64
+
+static const struct mvneta_statistic mvneta_statistics[] = {
+	{ 0x3000, T_REG_64, "good_octets_received", },
+	{ 0x3010, T_REG_32, "good_frames_received", },
+	{ 0x3008, T_REG_32, "bad_octets_received", },
+	{ 0x3014, T_REG_32, "bad_frames_received", },
+	{ 0x3018, T_REG_32, "broadcast_frames_received", },
+	{ 0x301c, T_REG_32, "multicast_frames_received", },
+	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
+	{ 0x3058, T_REG_32, "good_fc_received", },
+	{ 0x305c, T_REG_32, "bad_fc_received", },
+	{ 0x3060, T_REG_32, "undersize_received", },
+	{ 0x3064, T_REG_32, "fragments_received", },
+	{ 0x3068, T_REG_32, "oversize_received", },
+	{ 0x306c, T_REG_32, "jabber_received", },
+	{ 0x3070, T_REG_32, "mac_receive_error", },
+	{ 0x3074, T_REG_32, "bad_crc_event", },
+	{ 0x3078, T_REG_32, "collision", },
+	{ 0x307c, T_REG_32, "late_collision", },
+	{ 0x2484, T_REG_32, "rx_discard", },
+	{ 0x2488, T_REG_32, "rx_overrun", },
+	{ 0x3020, T_REG_32, "frames_64_octets", },
+	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
+	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
+	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
+	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
+	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
+	{ 0x3038, T_REG_64, "good_octets_sent", },
+	{ 0x3040, T_REG_32, "good_frames_sent", },
+	{ 0x3044, T_REG_32, "excessive_collision", },
+	{ 0x3048, T_REG_32, "multicast_frames_sent", },
+	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
+	{ 0x3054, T_REG_32, "fc_sent", },
"fc_sent", }, + { 0x300c, T_REG_32, "internal_mac_transmit_err", }, +}; + struct mvneta_pcpu_stats { struct u64_stats_sync syncp; u64 rx_packets; @@ -285,23 +334,34 @@ struct mvneta_pcpu_stats { u64 tx_bytes; }; +struct mvneta_pcpu_port { + /* Pointer to the shared port */ + struct mvneta_port *pp; + + /* Pointer to the CPU-local NAPI struct */ + struct napi_struct napi; + + /* Cause of the previous interrupt */ + u32 cause_rx_tx; +}; + struct mvneta_port { + struct mvneta_pcpu_port __percpu *ports; + struct mvneta_pcpu_stats __percpu *stats; + int pkt_size; unsigned int frag_size; void __iomem *base; struct mvneta_rx_queue *rxqs; struct mvneta_tx_queue *txqs; struct net_device *dev; - - u32 cause_rx_tx; - struct napi_struct napi; + struct notifier_block cpu_notifier; /* Core clock */ struct clk *clk; u8 mcast_count[256]; u16 tx_ring_size; u16 rx_ring_size; - struct mvneta_pcpu_stats *stats; struct mii_bus *mii_bus; struct phy_device *phy_dev; @@ -312,6 +372,8 @@ struct mvneta_port { unsigned int speed; unsigned int tx_csum_limit; int use_inband_status:1; + + u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; }; /* The mvneta_tx_desc and mvneta_rx_desc structures describe the @@ -468,7 +530,7 @@ struct mvneta_rx_queue { /* The hardware supports eight (8) rx queues, but we are only allowing * the first one to be used. Therefore, let's just allocate one queue. */ -static int rxq_number = 1; +static int rxq_number = 8; static int txq_number = 8; static int rxq_def; @@ -518,6 +580,8 @@ static void mvneta_mib_counters_clear(struct mvneta_port *pp) /* Perform dummy reads from MIB counters */ for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4) dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i)); + dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT); + dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT); } /* Get System Network Statistics */ @@ -746,7 +810,6 @@ static void mvneta_port_up(struct mvneta_port *pp) u32 q_map; /* Enable all initialized TXs. */ - mvneta_mib_counters_clear(pp); q_map = 0; for (queue = 0; queue < txq_number; queue++) { struct mvneta_tx_queue *txq = &pp->txqs[queue]; @@ -756,14 +819,7 @@ static void mvneta_port_up(struct mvneta_port *pp) mvreg_write(pp, MVNETA_TXQ_CMD, q_map); /* Enable all initialized RXQs. */ - q_map = 0; - for (queue = 0; queue < rxq_number; queue++) { - struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; - if (rxq->descs != NULL) - q_map |= (1 << queue); - } - - mvreg_write(pp, MVNETA_RXQ_CMD, q_map); + mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def)); } /* Stop the Ethernet port activity */ @@ -1030,6 +1086,8 @@ static void mvneta_defaults_set(struct mvneta_port *pp) mvreg_write(pp, MVNETA_INTR_ENABLE, (MVNETA_RXQ_INTR_ENABLE_ALL_MASK | MVNETA_TXQ_INTR_ENABLE_ALL_MASK)); + + mvneta_mib_counters_clear(pp); } /* Set max sizes for tx queues */ @@ -1426,17 +1484,6 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) return MVNETA_TX_L4_CSUM_NOT; } -/* Returns rx queue pointer (find last set bit) according to causeRxTx - * value - */ -static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp, - u32 cause) -{ - int queue = fls(cause >> 8) - 1; - - return (queue < 0 || queue >= rxq_number) ? 
-}
-
 /* Drop packets received by the RXQ and free buffers */
 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 				 struct mvneta_rx_queue *rxq)
@@ -1448,9 +1495,9 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
 		void *data = (void *)rx_desc->buf_cookie;
 
-		mvneta_frag_free(pp, data);
 		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
 				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
+		mvneta_frag_free(pp, data);
 	}
 
 	if (rx_done)
@@ -1461,6 +1508,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 		     struct mvneta_rx_queue *rxq)
 {
+	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
 	struct net_device *dev = pp->dev;
 	int rx_done;
 	u32 rcvd_pkts = 0;
@@ -1515,7 +1563,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 
 			skb->protocol = eth_type_trans(skb, dev);
 			mvneta_rx_csum(pp, rx_status, skb);
-			napi_gro_receive(&pp->napi, skb);
+			napi_gro_receive(&port->napi, skb);
 
 			rcvd_pkts++;
 			rcvd_bytes += rx_bytes;
@@ -1554,7 +1602,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 
 		mvneta_rx_csum(pp, rx_status, skb);
 
-		napi_gro_receive(&pp->napi, skb);
+		napi_gro_receive(&port->napi, skb);
 	}
 
 	if (rcvd_pkts) {
@@ -2065,12 +2113,10 @@ static void mvneta_set_rx_mode(struct net_device *dev)
 /* Interrupt handling - the callback for request_irq() */
 static irqreturn_t mvneta_isr(int irq, void *dev_id)
 {
-	struct mvneta_port *pp = (struct mvneta_port *)dev_id;
-
-	/* Mask all interrupts */
-	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
 
-	napi_schedule(&pp->napi);
+	disable_percpu_irq(port->pp->dev->irq);
+	napi_schedule(&port->napi);
 
 	return IRQ_HANDLED;
 }
@@ -2108,11 +2154,11 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 {
 	int rx_done = 0;
 	u32 cause_rx_tx;
-	unsigned long flags;
 	struct mvneta_port *pp = netdev_priv(napi->dev);
+	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
 
 	if (!netif_running(pp->dev)) {
-		napi_complete(napi);
+		napi_complete(&port->napi);
 		return rx_done;
 	}
@@ -2139,47 +2185,17 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 	/* For the case where the last mvneta_poll did not process all
 	 * RX packets
 	 */
-	cause_rx_tx |= pp->cause_rx_tx;
-	if (rxq_number > 1) {
-		while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
-			int count;
-			struct mvneta_rx_queue *rxq;
-			/* get rx queue number from cause_rx_tx */
-			rxq = mvneta_rx_policy(pp, cause_rx_tx);
-			if (!rxq)
-				break;
-
-			/* process the packet in that rx queue */
-			count = mvneta_rx(pp, budget, rxq);
-			rx_done += count;
-			budget -= count;
-			if (budget > 0) {
-				/* set off the rx bit of the
-				 * corresponding bit in the cause rx
-				 * tx register, so that next iteration
-				 * will find the next rx queue where
-				 * packets are received on
-				 */
-				cause_rx_tx &= ~((1 << rxq->id) << 8);
-			}
-		}
-	} else {
-		rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
-		budget -= rx_done;
-	}
+	cause_rx_tx |= port->cause_rx_tx;
+	rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
+	budget -= rx_done;
 
 	if (budget > 0) {
 		cause_rx_tx = 0;
-		napi_complete(napi);
-		local_irq_save(flags);
-		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-			    MVNETA_RX_INTR_MASK(rxq_number) |
-			    MVNETA_TX_INTR_MASK(txq_number) |
-			    MVNETA_MISCINTR_INTR_MASK);
-		local_irq_restore(flags);
+		napi_complete(&port->napi);
+		enable_percpu_irq(pp->dev->irq, 0);
 	}
 
-	pp->cause_rx_tx = cause_rx_tx;
+	port->cause_rx_tx = cause_rx_tx;
 
 	return rx_done;
 }
@@ -2383,26 +2399,19 @@ static void mvneta_cleanup_txqs(struct mvneta_port *pp)
 /* Cleanup all Rx queues */
 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
 {
-	int queue;
-
-	for (queue = 0; queue < rxq_number; queue++)
-		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
+	mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]);
 }
 
 /* Init all Rx queues */
 static int mvneta_setup_rxqs(struct mvneta_port *pp)
 {
-	int queue;
-
-	for (queue = 0; queue < rxq_number; queue++) {
-		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
-		if (err) {
-			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
-				   __func__, queue);
-			mvneta_cleanup_rxqs(pp);
-			return err;
-		}
+	int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]);
+	if (err) {
+		netdev_err(pp->dev, "%s: can't create rxq=%d\n",
+			   __func__, rxq_def);
+		mvneta_cleanup_rxqs(pp);
+		return err;
 	}
 
 	return 0;
@@ -2428,6 +2437,8 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
 
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
+	unsigned int cpu;
+
 	mvneta_max_rx_size_set(pp, pp->pkt_size);
 	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
 
@@ -2435,7 +2446,11 @@
 	mvneta_port_enable(pp);
 
 	/* Enable polling on the port */
-	napi_enable(&pp->napi);
+	for_each_present_cpu(cpu) {
+		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+		napi_enable(&port->napi);
+	}
 
 	/* Unmask interrupts */
 	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
@@ -2453,9 +2468,15 @@
 
 static void mvneta_stop_dev(struct mvneta_port *pp)
 {
+	unsigned int cpu;
+
 	phy_stop(pp->phy_dev);
 
-	napi_disable(&pp->napi);
+	for_each_present_cpu(cpu) {
+		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+		napi_disable(&port->napi);
+	}
 
 	netif_carrier_off(pp->dev);
@@ -2695,6 +2716,125 @@ static void mvneta_mdio_remove(struct mvneta_port *pp)
 	pp->phy_dev = NULL;
 }
 
+static void mvneta_percpu_enable(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
+}
+
+static void mvneta_percpu_disable(void *arg)
+{
+	struct mvneta_port *pp = arg;
+
+	disable_percpu_irq(pp->dev->irq);
+}
+
+static void mvneta_percpu_elect(struct mvneta_port *pp)
+{
+	int online_cpu_idx, cpu, i = 0;
+
+	online_cpu_idx = rxq_def % num_online_cpus();
+
+	for_each_online_cpu(cpu) {
+		if (i == online_cpu_idx)
+			/* Enable per-CPU interrupt on the one CPU we
+			 * just elected
+			 */
+			smp_call_function_single(cpu, mvneta_percpu_enable,
+						 pp, true);
+		else
+			/* Disable per-CPU interrupt on all the other CPU */
+			smp_call_function_single(cpu, mvneta_percpu_disable,
+						 pp, true);
+		i++;
+	}
+};
+
+static int mvneta_percpu_notifier(struct notifier_block *nfb,
+				  unsigned long action, void *hcpu)
+{
+	struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
+					      cpu_notifier);
+	int cpu = (unsigned long)hcpu, other_cpu;
+	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		netif_tx_stop_all_queues(pp->dev);
+
+		/* We have to synchronise on tha napi of each CPU
+		 * except the one just being waked up
+		 */
+		for_each_online_cpu(other_cpu) {
+			if (other_cpu != cpu) {
+				struct mvneta_pcpu_port *other_port =
+					per_cpu_ptr(pp->ports, other_cpu);
+
+				napi_synchronize(&other_port->napi);
+			}
+		}
+
+		/* Mask all ethernet port interrupts */
+		mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+		mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+		mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+		napi_enable(&port->napi);
+
+		/* Enable per-CPU interrupt on the one CPU we care
+		 * about.
+		 */
+		mvneta_percpu_elect(pp);
+
+		/* Unmask all ethernet port interrupts */
+		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+			    MVNETA_RX_INTR_MASK(rxq_number) |
+			    MVNETA_TX_INTR_MASK(txq_number) |
+			    MVNETA_MISCINTR_INTR_MASK);
+		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+			    MVNETA_CAUSE_PHY_STATUS_CHANGE |
+			    MVNETA_CAUSE_LINK_CHANGE |
+			    MVNETA_CAUSE_PSC_SYNC_CHANGE);
+		netif_tx_start_all_queues(pp->dev);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		netif_tx_stop_all_queues(pp->dev);
+		/* Mask all ethernet port interrupts */
+		mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+		mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+		mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+
+		napi_synchronize(&port->napi);
+		napi_disable(&port->napi);
+		/* Disable per-CPU interrupts on the CPU that is
+		 * brought down.
+		 */
+		smp_call_function_single(cpu, mvneta_percpu_disable,
+					 pp, true);
+
+		break;
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		/* Check if a new CPU must be elected now this on is down */
+		mvneta_percpu_elect(pp);
+		/* Unmask all ethernet port interrupts */
+		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+			    MVNETA_RX_INTR_MASK(rxq_number) |
+			    MVNETA_TX_INTR_MASK(txq_number) |
+			    MVNETA_MISCINTR_INTR_MASK);
+		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+			    MVNETA_CAUSE_PHY_STATUS_CHANGE |
+			    MVNETA_CAUSE_LINK_CHANGE |
+			    MVNETA_CAUSE_PSC_SYNC_CHANGE);
+		netif_tx_start_all_queues(pp->dev);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
 static int mvneta_open(struct net_device *dev)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
@@ -2713,13 +2853,29 @@
 		goto err_cleanup_rxqs;
 
 	/* Connect to port interrupt line */
-	ret = request_irq(pp->dev->irq, mvneta_isr, 0,
-			  MVNETA_DRIVER_NAME, pp);
+	ret = request_percpu_irq(pp->dev->irq, mvneta_isr,
+				 MVNETA_DRIVER_NAME, pp->ports);
 	if (ret) {
 		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
 		goto err_cleanup_txqs;
 	}
 
+	/* Even though the documentation says that request_percpu_irq
+	 * doesn't enable the interrupts automatically, it actually
+	 * does so on the local CPU.
+	 *
+	 * Make sure it's disabled.
+	 */
+	mvneta_percpu_disable(pp);
+
+	/* Elect a CPU to handle our RX queue interrupt */
+	mvneta_percpu_elect(pp);
+
+	/* Register a CPU notifier to handle the case where our CPU
+	 * might be taken offline.
+	 */
+	register_cpu_notifier(&pp->cpu_notifier);
+
 	/* In default link is down */
 	netif_carrier_off(pp->dev);
 
@@ -2734,7 +2890,7 @@ static int mvneta_open(struct net_device *dev)
 	return 0;
 
 err_free_irq:
-	free_irq(pp->dev->irq, pp);
+	free_percpu_irq(pp->dev->irq, pp->ports);
 err_cleanup_txqs:
 	mvneta_cleanup_txqs(pp);
 err_cleanup_rxqs:
@@ -2746,10 +2902,14 @@ err_cleanup_rxqs:
 static int mvneta_stop(struct net_device *dev)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
+	int cpu;
 
 	mvneta_stop_dev(pp);
 	mvneta_mdio_remove(pp);
-	free_irq(dev->irq, pp);
+	unregister_cpu_notifier(&pp->cpu_notifier);
+	for_each_present_cpu(cpu)
+		smp_call_function_single(cpu, mvneta_percpu_disable, pp, true);
+	free_percpu_irq(dev->irq, pp->ports);
 	mvneta_cleanup_rxqs(pp);
 	mvneta_cleanup_txqs(pp);
@@ -2879,6 +3039,65 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
 	return 0;
 }
 
+static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
+				       u8 *data)
+{
+	if (sset == ETH_SS_STATS) {
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
+	}
+}
+
+static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
+{
+	const struct mvneta_statistic *s;
+	void __iomem *base = pp->base;
+	u32 high, low, val;
+	int i;
+
+	for (i = 0, s = mvneta_statistics;
+	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
+	     s++, i++) {
+		val = 0;
+
+		switch (s->type) {
+		case T_REG_32:
+			val = readl_relaxed(base + s->offset);
+			break;
+		case T_REG_64:
+			/* Docs say to read low 32-bit then high */
+			low = readl_relaxed(base + s->offset);
+			high = readl_relaxed(base + s->offset + 4);
+			val = (u64)high << 32 | low;
+			break;
+		}
+
+		pp->ethtool_stats[i] += val;
+	}
+}
+
+static void mvneta_ethtool_get_stats(struct net_device *dev,
+				     struct ethtool_stats *stats, u64 *data)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	int i;
+
+	mvneta_ethtool_update_stats(pp);
+
+	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
+		*data++ = pp->ethtool_stats[i];
+}
+
+static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+	if (sset == ETH_SS_STATS)
+		return ARRAY_SIZE(mvneta_statistics);
+	return -EOPNOTSUPP;
+}
+
 static const struct net_device_ops mvneta_netdev_ops = {
 	.ndo_open            = mvneta_open,
 	.ndo_stop            = mvneta_stop,
@@ -2900,6 +3119,9 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
 	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
 	.get_ringparam  = mvneta_ethtool_get_ringparam,
 	.set_ringparam	= mvneta_ethtool_set_ringparam,
+	.get_strings	= mvneta_ethtool_get_strings,
+	.get_ethtool_stats = mvneta_ethtool_get_stats,
+	.get_sset_count	= mvneta_ethtool_get_sset_count,
 };
 
 /* Initialize hw */
@@ -2975,6 +3197,7 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
 	}
 
 	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
+	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
 }
 
 /* Power up the port */
@@ -3034,16 +3257,10 @@ static int mvneta_probe(struct platform_device *pdev)
 	char hw_mac_addr[ETH_ALEN];
 	const char *mac_from;
 	const char *managed;
+	int tx_csum_limit;
 	int phy_mode;
 	int err;
-
-	/* Our multiqueue support is not complete, so for now, only
-	 * allow the usage of the first RX queue
-	 */
-	if (rxq_def != 0) {
-		dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
-		return -EINVAL;
-	}
+	int cpu;
 
 	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
 	if (!dev)
@@ -3095,6 +3312,7 @@ static int mvneta_probe(struct platform_device *pdev)
 	err = of_property_read_string(dn, "managed", &managed);
 	pp->use_inband_status = (err == 0 &&
 				 strcmp(managed, "in-band-status") == 0);
+	pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;
 
 	pp->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(pp->clk)) {
@@ -3111,11 +3329,18 @@
 		goto err_clk;
 	}
 
+	/* Alloc per-cpu port structure */
+	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
+	if (!pp->ports) {
+		err = -ENOMEM;
+		goto err_clk;
+	}
+
 	/* Alloc per-cpu stats */
 	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
 	if (!pp->stats) {
 		err = -ENOMEM;
-		goto err_clk;
+		goto err_free_ports;
 	}
 
 	dt_mac_addr = of_get_mac_address(dn);
@@ -3133,8 +3358,21 @@
 		}
 	}
 
-	if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
-		pp->tx_csum_limit = 1600;
+	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
+		if (tx_csum_limit < 0 ||
+		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
+			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
+			dev_info(&pdev->dev,
+				 "Wrong TX csum limit in DT, set to %dB\n",
+				 MVNETA_TX_CSUM_DEF_SIZE);
+		}
+	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
+		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
+	} else {
+		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
+	}
+
+	pp->tx_csum_limit = tx_csum_limit;
 
 	pp->tx_ring_size = MVNETA_MAX_TXD;
 	pp->rx_ring_size = MVNETA_MAX_RXD;
@@ -3156,7 +3394,12 @@
 	if (dram_target_info)
 		mvneta_conf_mbus_windows(pp, dram_target_info);
 
-	netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+	for_each_present_cpu(cpu) {
+		struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
+		netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+		port->pp = pp;
+	}
 
 	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
 	dev->hw_features |= dev->features;
@@ -3187,6 +3430,8 @@
 
 err_free_stats:
 	free_percpu(pp->stats);
+err_free_ports:
+	free_percpu(pp->ports);
 err_clk:
 	clk_disable_unprepare(pp->clk);
 err_put_phy_node:
@@ -3206,6 +3451,7 @@ static int mvneta_remove(struct platform_device *pdev)
 
 	unregister_netdev(dev);
 	clk_disable_unprepare(pp->clk);
+	free_percpu(pp->ports);
 	free_percpu(pp->stats);
 	irq_dispose_mapping(dev->irq);
 	of_node_put(pp->phy_node);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index d9884fd15..a4beccf1f 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3413,16 +3413,23 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
 }
 
 /* Free all buffers from the pool */
-static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
+static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
+			       struct mvpp2_bm_pool *bm_pool)
 {
 	int i;
 
 	for (i = 0; i < bm_pool->buf_num; i++) {
+		dma_addr_t buf_phys_addr;
 		u32 vaddr;
 
 		/* Get buffer virtual address (indirect access) */
-		mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
+		buf_phys_addr = mvpp2_read(priv,
+					   MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
 		vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
+
+		dma_unmap_single(dev, buf_phys_addr,
+				 bm_pool->buf_size, DMA_FROM_DEVICE);
+
 		if (!vaddr)
 			break;
 		dev_kfree_skb_any((struct sk_buff *)vaddr);
@@ -3439,7 +3446,7 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
 {
 	u32 val;
 
-	mvpp2_bm_bufs_free(priv, bm_pool);
+	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
 	if (bm_pool->buf_num) {
 		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
 		return 0;
 	}
@@ -3692,7 +3699,8 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
 			MVPP2_BM_LONG_BUF_NUM : MVPP2_BM_SHORT_BUF_NUM;
 		else
-			mvpp2_bm_bufs_free(port->priv, new_pool);
+			mvpp2_bm_bufs_free(port->dev->dev.parent,
+					   port->priv, new_pool);
 
 		new_pool->pkt_size = pkt_size;
@@ -3756,7 +3764,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
 	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
 
 	/* Update BM pool with new buffer size */
-	mvpp2_bm_bufs_free(port->priv, port_pool);
+	mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
 	if (port_pool->buf_num) {
 		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
 		return -EIO;
@@ -4401,11 +4409,10 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
 
 		mvpp2_txq_inc_get(txq_pcpu);
 
-		if (!skb)
-			continue;
-
 		dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
 				 skb_headlen(skb), DMA_TO_DEVICE);
+		if (!skb)
+			continue;
 		dev_kfree_skb_any(skb);
 	}
 }
@@ -5092,7 +5099,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
 		     struct mvpp2_rx_queue *rxq)
 {
 	struct net_device *dev = port->dev;
-	int rx_received, rx_filled, i;
+	int rx_received;
+	int rx_done = 0;
 	u32 rcvd_pkts = 0;
 	u32 rcvd_bytes = 0;
@@ -5101,17 +5109,18 @@
 	if (rx_todo > rx_received)
 		rx_todo = rx_received;
 
-	rx_filled = 0;
-	for (i = 0; i < rx_todo; i++) {
+	while (rx_done < rx_todo) {
 		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
 		struct mvpp2_bm_pool *bm_pool;
 		struct sk_buff *skb;
+		dma_addr_t phys_addr;
 		u32 bm, rx_status;
 		int pool, rx_bytes, err;
 
-		rx_filled++;
+		rx_done++;
 		rx_status = rx_desc->status;
 		rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
+		phys_addr = rx_desc->buf_phys_addr;
 
 		bm = mvpp2_bm_cookie_build(rx_desc);
 		pool = mvpp2_bm_cookie_pool_get(bm);
@@ -5128,8 +5137,10 @@
 		 * comprised by the RX descriptor.
 		 */
 		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
+err_drop_frame:
 			dev->stats.rx_errors++;
 			mvpp2_rx_error(port, rx_desc);
+			/* Return the buffer to the pool */
 			mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
 					  rx_desc->buf_cookie);
 			continue;
@@ -5137,6 +5148,15 @@
 
 		skb = (struct sk_buff *)rx_desc->buf_cookie;
 
+		err = mvpp2_rx_refill(port, bm_pool, bm, 0);
+		if (err) {
+			netdev_err(port->dev, "failed to refill BM pools\n");
+			goto err_drop_frame;
+		}
+
+		dma_unmap_single(dev->dev.parent, phys_addr,
+				 bm_pool->buf_size, DMA_FROM_DEVICE);
+
 		rcvd_pkts++;
 		rcvd_bytes += rx_bytes;
 		atomic_inc(&bm_pool->in_use);
@@ -5147,12 +5167,6 @@
 		mvpp2_rx_csum(port, rx_status, skb);
 
 		napi_gro_receive(&port->napi, skb);
-
-		err = mvpp2_rx_refill(port, bm_pool, bm, 0);
-		if (err) {
-			netdev_err(port->dev, "failed to refill BM pools\n");
-			rx_filled--;
-		}
 	}
 
 	if (rcvd_pkts) {
@@ -5166,7 +5180,7 @@
 
 	/* Update Rx queue management counters */
 	wmb();
-	mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled);
+	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
 
 	return rx_todo;
 }
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index d9f449883..5606a0430 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4819,6 +4819,18 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
 
 	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
 
+	/* if the address is invalid, use a random value */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		struct sockaddr sa = { AF_UNSPEC };
+
+		netdev_warn(dev,
+			    "Invalid MAC address, defaulting to random\n");
+		eth_hw_addr_random(dev);
+		memcpy(sa.sa_data, dev->dev_addr, ETH_ALEN);
+		if (sky2_set_mac_address(dev, &sa))
+			netdev_warn(dev, "Failed to set MAC address.\n");
+	}
+
 	return dev;
 }
 
-- 
cgit v1.2.3-54-g00ecf