author | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-09-08 01:01:14 -0300
committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-09-08 01:01:14 -0300
commit | e5fd91f1ef340da553f7a79da9540c3db711c937 (patch)
tree | b11842027dc6641da63f4bcc524f8678263304a3 /drivers/net/ethernet/amd/xgbe/xgbe-dev.c
parent | 2a9b0348e685a63d97486f6749622b61e9e3292f (diff)
Linux-libre 4.2-gnu
Diffstat (limited to 'drivers/net/ethernet/amd/xgbe/xgbe-dev.c')
-rw-r--r-- | drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 110
1 file changed, 57 insertions, 53 deletions
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 21d949751..a4473d8ff 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -710,7 +710,8 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
 	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
 		return 0;
 
-	DBGPR("  %s promiscuous mode\n", enable ? "entering" : "leaving");
+	netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
+		  enable ? "entering" : "leaving");
 
 	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
 	return 0;
@@ -724,7 +725,8 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
 	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
 		return 0;
 
-	DBGPR("  %s allmulti mode\n", enable ? "entering" : "leaving");
+	netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
+		  enable ? "entering" : "leaving");
 
 	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
 	return 0;
@@ -749,8 +751,9 @@ static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
 		mac_addr[0] = ha->addr[4];
 		mac_addr[1] = ha->addr[5];
 
-		DBGPR("  adding mac address %pM at 0x%04x\n", ha->addr,
-		      *mac_reg);
+		netif_dbg(pdata, drv, pdata->netdev,
+			  "adding mac address %pM at %#x\n",
+			  ha->addr, *mac_reg);
 
 		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
 	}
@@ -907,23 +910,6 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
 	else
 		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
 
-	/* If the PCS is changing modes, match the MAC speed to it */
-	if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
-	    ((mmd_address & 0xffff) == MDIO_CTRL2)) {
-		struct phy_device *phydev = pdata->phydev;
-
-		if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
-			/* KX mode */
-			if (phydev->supported & SUPPORTED_1000baseKX_Full)
-				xgbe_set_gmii_speed(pdata);
-			else
-				xgbe_set_gmii_2500_speed(pdata);
-		} else {
-			/* KR mode */
-			xgbe_set_xgmii_speed(pdata);
-		}
-	}
-
 	/* The PCS registers are accessed using mmio. The underlying APB3
 	 * management interface uses indirect addressing to access the MMD
 	 * register sets. This requires accessing of the PCS register in two
@@ -1124,6 +1110,7 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
 	unsigned int rx_usecs = pdata->rx_usecs;
 	unsigned int rx_frames = pdata->rx_frames;
 	unsigned int inte;
+	dma_addr_t hdr_dma, buf_dma;
 
 	if (!rx_usecs && !rx_frames) {
 		/* No coalescing, interrupt for every descriptor */
@@ -1143,10 +1130,12 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
 	 * Set buffer 2 (hi) address to buffer dma address (hi) and
 	 * set control bits OWN and INTE
 	 */
-	rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma));
-	rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma));
-	rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
-	rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
+	hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
+	buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
+	rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
+	rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
+	rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
+	rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
 
 	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
 
@@ -1322,7 +1311,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
 	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
 		switch (ets->tc_tsa[i]) {
 		case IEEE_8021QAZ_TSA_STRICT:
-			DBGPR("  TC%u using SP\n", i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TC%u using SP\n", i);
 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
 					       MTL_TSA_SP);
 			break;
@@ -1330,7 +1320,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
 			weight = total_weight * ets->tc_tx_bw[i] / 100;
 			weight = clamp(weight, min_weight, total_weight);
 
-			DBGPR("  TC%u using DWRR (weight %u)\n", i, weight);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TC%u using DWRR (weight %u)\n", i, weight);
 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
 					       MTL_TSA_ETS);
 			XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
@@ -1359,7 +1350,8 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
 		}
 
 		mask &= 0xff;
-		DBGPR("  TC%u PFC mask=%#x\n", tc, mask);
+		netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n",
+			  tc, mask);
 
 		reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
 		reg_val = XGMAC_IOREAD(pdata, reg);
@@ -1457,8 +1449,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 	/* Create a context descriptor if this is a TSO packet */
 	if (tso_context || vlan_context) {
 		if (tso_context) {
-			DBGPR("  TSO context descriptor, mss=%u\n",
-			      packet->mss);
+			netif_dbg(pdata, tx_queued, pdata->netdev,
+				  "TSO context descriptor, mss=%u\n",
+				  packet->mss);
 
 			/* Set the MSS size */
 			XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
@@ -1476,8 +1469,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 		}
 
 		if (vlan_context) {
-			DBGPR("  VLAN context descriptor, ctag=%u\n",
-			      packet->vlan_ctag);
+			netif_dbg(pdata, tx_queued, pdata->netdev,
+				  "VLAN context descriptor, ctag=%u\n",
+				  packet->vlan_ctag);
 
 			/* Mark it as a CONTEXT descriptor */
 			XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
@@ -1533,6 +1527,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 				  packet->tcp_payload_len);
 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
 				  packet->tcp_header_len / 4);
+
+		pdata->ext_stats.tx_tso_packets++;
 	} else {
 		/* Enable CRC and Pad Insertion */
 		XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
@@ -1594,9 +1590,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 	rdesc = rdata->rdesc;
 	XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
 
-#ifdef XGMAC_ENABLE_TX_DESC_DUMP
-	xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
-#endif
+	if (netif_msg_tx_queued(pdata))
+		xgbe_dump_tx_desc(pdata, ring, start_index,
+				  packet->rdesc_count, 1);
 
 	/* Make sure ownership is written to the descriptor */
 	dma_wmb();
@@ -1618,11 +1614,12 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 
 static int xgbe_dev_read(struct xgbe_channel *channel)
 {
+	struct xgbe_prv_data *pdata = channel->pdata;
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
 	struct xgbe_ring_desc *rdesc;
 	struct xgbe_packet_data *packet = &ring->packet_data;
-	struct net_device *netdev = channel->pdata->netdev;
+	struct net_device *netdev = pdata->netdev;
 	unsigned int err, etlt, l34t;
 
 	DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
@@ -1637,9 +1634,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 	/* Make sure descriptor fields are read after reading the OWN bit */
 	dma_rmb();
 
-#ifdef XGMAC_ENABLE_RX_DESC_DUMP
-	xgbe_dump_rx_desc(ring, rdesc, ring->cur);
-#endif
+	if (netif_msg_rx_status(pdata))
+		xgbe_dump_rx_desc(pdata, ring, ring->cur);
 
 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
 		/* Timestamp Context Descriptor */
@@ -1661,9 +1657,12 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 			       CONTEXT_NEXT, 1);
 
 	/* Get the header length */
-	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
 		rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
 						      RX_NORMAL_DESC2, HL);
+		if (rdata->rx.hdr_len)
+			pdata->ext_stats.rx_split_header_packets++;
+	}
 
 	/* Get the RSS hash */
 	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
@@ -1700,14 +1699,14 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 			       INCOMPLETE, 0);
 
 	/* Set checksum done indicator as appropriate */
-	if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
+	if (netdev->features & NETIF_F_RXCSUM)
 		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
 			       CSUM_DONE, 1);
 
 	/* Check for errors (only valid in last descriptor) */
 	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
 	etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
-	DBGPR("  err=%u, etlt=%#x\n", err, etlt);
+	netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
 
 	if (!err || !etlt) {
 		/* No error if err is 0 or etlt is 0 */
@@ -1718,7 +1717,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
 			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
 							      RX_NORMAL_DESC0,
 							      OVT);
-			DBGPR("  vlan-ctag=0x%04x\n", packet->vlan_ctag);
+			netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
+				  packet->vlan_ctag);
 		}
 	} else {
 		if ((etlt == 0x05) || (etlt == 0x06))
@@ -2026,9 +2026,9 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
 	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
 
-	netdev_notice(pdata->netdev,
-		      "%d Tx hardware queues, %d byte fifo per queue\n",
-		      pdata->tx_q_count, ((fifo_size + 1) * 256));
+	netif_info(pdata, drv, pdata->netdev,
+		   "%d Tx hardware queues, %d byte fifo per queue\n",
+		   pdata->tx_q_count, ((fifo_size + 1) * 256));
 }
 
 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
@@ -2042,9 +2042,9 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
 	for (i = 0; i < pdata->rx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
 
-	netdev_notice(pdata->netdev,
-		      "%d Rx hardware queues, %d byte fifo per queue\n",
-		      pdata->rx_q_count, ((fifo_size + 1) * 256));
+	netif_info(pdata, drv, pdata->netdev,
+		   "%d Rx hardware queues, %d byte fifo per queue\n",
+		   pdata->rx_q_count, ((fifo_size + 1) * 256));
 }
 
 static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
@@ -2063,14 +2063,16 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
 
 	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
 		for (j = 0; j < qptc; j++) {
-			DBGPR("  TXq%u mapped to TC%u\n", queue, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TXq%u mapped to TC%u\n", queue, i);
 			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
 					       Q2TCMAP, i);
 			pdata->q2tc_map[queue++] = i;
 		}
 
 		if (i < qptc_extra) {
-			DBGPR("  TXq%u mapped to TC%u\n", queue, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "TXq%u mapped to TC%u\n", queue, i);
 			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
 					       Q2TCMAP, i);
 			pdata->q2tc_map[queue++] = i;
@@ -2088,13 +2090,15 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
 	for (i = 0, prio = 0; i < prio_queues;) {
 		mask = 0;
 		for (j = 0; j < ppq; j++) {
-			DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "PRIO%u mapped to RXq%u\n", prio, i);
 			mask |= (1 << prio);
 			pdata->prio2q_map[prio++] = i;
 		}
 
 		if (i < ppq_extra) {
-			DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
+			netif_dbg(pdata, drv, pdata->netdev,
+				  "PRIO%u mapped to RXq%u\n", prio, i);
 			mask |= (1 << prio);
 			pdata->prio2q_map[prio++] = i;
 		}
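
The recurring change in this diff is the replacement of the driver's compile-time DBGPR() and XGMAC_ENABLE_*_DESC_DUMP debug hooks with the kernel's standard netif_* message helpers, which are gated at runtime by a per-device msg_enable bitmap. A minimal sketch of that pattern, assuming a hypothetical driver with a my_priv structure and a "debug" module parameter (netif_msg_init(), netif_dbg() and the NETIF_MSG_* bits are real kernel APIs; the rest is illustrative):

#include <linux/module.h>
#include <linux/netdevice.h>

static int debug = -1;		/* -1 selects the default bits below */
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "netif message level bitmap");

struct my_priv {
	struct net_device *netdev;
	u32 msg_enable;		/* bitmap tested by netif_dbg()/netif_info() */
};

static void my_msg_init(struct my_priv *priv)
{
	/* Resolve the module parameter into a NETIF_MSG_* bitmap; the
	 * value can be changed later with "ethtool -s <dev> msglvl ...".
	 */
	priv->msg_enable = netif_msg_init(debug, NETIF_MSG_DRV |
					  NETIF_MSG_PROBE | NETIF_MSG_LINK);
}

static void my_set_promiscuous(struct my_priv *priv, bool enable)
{
	/* Always compiled in, like the hunks above, but only emitted
	 * when the "drv" message bit is set, unlike the old DBGPR()
	 * macro that was either compiled out or printed unconditionally.
	 */
	netif_dbg(priv, drv, priv->netdev, "%s promiscuous mode\n",
		  enable ? "entering" : "leaving");
}

The same bitmap drives the descriptor dumps in the diff: the #ifdef'd xgbe_dump_tx_desc() and xgbe_dump_rx_desc() calls become plain runtime checks via netif_msg_tx_queued() and netif_msg_rx_status().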