Diffstat (limited to 'drivers/net/ethernet/amd')
-rw-r--r-- drivers/net/ethernet/amd/7990.c 666
-rw-r--r-- drivers/net/ethernet/amd/7990.h 250
-rw-r--r-- drivers/net/ethernet/amd/Kconfig 206
-rw-r--r-- drivers/net/ethernet/amd/Makefile 20
-rw-r--r-- drivers/net/ethernet/amd/a2065.c 783
-rw-r--r-- drivers/net/ethernet/amd/a2065.h 173
-rw-r--r-- drivers/net/ethernet/amd/am79c961a.c 768
-rw-r--r-- drivers/net/ethernet/amd/am79c961a.h 145
-rw-r--r-- drivers/net/ethernet/amd/amd8111e.c 1971
-rw-r--r-- drivers/net/ethernet/amd/amd8111e.h 813
-rw-r--r-- drivers/net/ethernet/amd/ariadne.c 792
-rw-r--r-- drivers/net/ethernet/amd/ariadne.h 415
-rw-r--r-- drivers/net/ethernet/amd/atarilance.c 1169
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c 1469
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.h 133
-rw-r--r-- drivers/net/ethernet/amd/declance.c 1376
-rw-r--r-- drivers/net/ethernet/amd/hplance.c 232
-rw-r--r-- drivers/net/ethernet/amd/hplance.h 26
-rw-r--r-- drivers/net/ethernet/amd/lance.c 1312
-rw-r--r-- drivers/net/ethernet/amd/mvme147.c 200
-rw-r--r-- drivers/net/ethernet/amd/ni65.c 1252
-rw-r--r-- drivers/net/ethernet/amd/ni65.h 121
-rw-r--r-- drivers/net/ethernet/amd/nmclan_cs.c 1512
-rw-r--r-- drivers/net/ethernet/amd/pcnet32.c 2989
-rw-r--r-- drivers/net/ethernet/amd/sun3lance.c 956
-rw-r--r-- drivers/net/ethernet/amd/sunlance.c 1533
-rw-r--r-- drivers/net/ethernet/amd/xgbe/Makefile 8
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-common.h 1145
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-dcb.c 269
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c 373
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-desc.c 636
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-dev.c 2943
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c 2227
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c 601
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-main.c 631
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c 312
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-ptp.c 279
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe.h 867
38 files changed, 31573 insertions, 0 deletions
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
new file mode 100644
index 000000000..98a10d555
--- /dev/null
+++ b/drivers/net/ethernet/amd/7990.c
@@ -0,0 +1,666 @@
+/*
+ * 7990.c -- LANCE ethernet IC generic routines.
+ * This is an attempt to separate out the bits of various ethernet
+ * drivers that are common because they all use the AMD 7990 LANCE
+ * (Local Area Network Controller for Ethernet) chip.
+ *
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ *
+ * Most of this stuff was obtained by looking at other LANCE drivers,
+ * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
+ * NB: this was made easy by the fact that Jes Sorensen had cleaned up
+ * most of a2065 and sunlance with the aim of merging them, so the
+ * common code was pretty obvious.
+ */
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/route.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <asm/irq.h>
+/* Used for the temporary inet entries and routing */
+#include <linux/socket.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/pgtable.h>
+#ifdef CONFIG_HP300
+#include <asm/blinken.h>
+#endif
+
+#include "7990.h"
+
+#define WRITERAP(lp, x) out_be16(lp->base + LANCE_RAP, (x))
+#define WRITERDP(lp, x) out_be16(lp->base + LANCE_RDP, (x))
+#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
+
+#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE)
+#include "hplance.h"
+
+#undef WRITERAP
+#undef WRITERDP
+#undef READRDP
+
+#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE)
+
+/* Lossage Factor Nine, Mr Sulu. */
+#define WRITERAP(lp, x) (lp->writerap(lp, x))
+#define WRITERDP(lp, x) (lp->writerdp(lp, x))
+#define READRDP(lp) (lp->readrdp(lp))
+
+#else
+
+/* These inlines can be used if only CONFIG_HPLANCE is defined */
+static inline void WRITERAP(struct lance_private *lp, __u16 value)
+{
+ do {
+ out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+}
+
+static inline void WRITERDP(struct lance_private *lp, __u16 value)
+{
+ do {
+ out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+}
+
+static inline __u16 READRDP(struct lance_private *lp)
+{
+ __u16 value;
+ do {
+ value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+ return value;
+}
+
+#endif
+#endif /* CONFIG_HPLANCE || CONFIG_HPLANCE_MODULE */
+
+/* debugging output macros, various flavours */
+/* #define TEST_HITS */
+#ifdef UNDEF
+#define PRINT_RINGS() \
+do { \
+ int t; \
+ for (t = 0; t < RX_RING_SIZE; t++) { \
+ printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n", \
+ t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0, \
+ ib->brx_ring[t].length, \
+ ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits); \
+ } \
+ for (t = 0; t < TX_RING_SIZE; t++) { \
+ printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n", \
+ t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0, \
+ ib->btx_ring[t].length, \
+ ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits); \
+ } \
+} while (0)
+#else
+#define PRINT_RINGS()
+#endif
+
+/* Load the CSR registers. The LANCE has to be STOPped when we do this! */
+static void load_csrs(struct lance_private *lp)
+{
+ volatile struct lance_init_block *aib = lp->lance_init_block;
+ int leptr;
+
+ leptr = LANCE_ADDR(aib);
+
+ WRITERAP(lp, LE_CSR1); /* load address of init block */
+ WRITERDP(lp, leptr & 0xFFFF);
+ WRITERAP(lp, LE_CSR2);
+ WRITERDP(lp, leptr >> 16);
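+ /* e.g. an init block at bus address 0x00fc0000 loads CSR1 = 0x0000
+ * and CSR2 = 0x00fc; the LANCE only decodes IADR[23:0]
+ * (address purely illustrative). */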
+ WRITERAP(lp, LE_CSR3);
+ WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */
+
+ /* Point back to csr0 */
+ WRITERAP(lp, LE_CSR0);
+}
+
+/* #define to 0 or 1 appropriately */
+#define DEBUG_IRING 0
+/* Set up the Lance Rx and Tx rings and the init block */
+static void lance_init_ring(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
+ int leptr;
+ int i;
+
+ aib = lp->lance_init_block;
+
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ ib->mode = LE_MO_PROM; /* start in promiscuous mode; lance_set_multicast() clears it unless IFF_PROMISC is set */
+
+ /* Copy the ethernet address to the lance init block
+ * Notice that we do a byteswap if we're big endian.
+ * [I think this is the right criterion; at least, sunlance,
+ * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
+ * However, the datasheet says that the BSWAP bit doesn't affect
+ * the init block, so surely it should be low byte first for
+ * everybody? Um.]
+ * We could define the ib->physaddr as three 16bit values and
+ * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
+ */
+#ifdef __BIG_ENDIAN
+ ib->phys_addr[0] = dev->dev_addr[1];
+ ib->phys_addr[1] = dev->dev_addr[0];
+ ib->phys_addr[2] = dev->dev_addr[3];
+ ib->phys_addr[3] = dev->dev_addr[2];
+ ib->phys_addr[4] = dev->dev_addr[5];
+ ib->phys_addr[5] = dev->dev_addr[4];
+#else
+ for (i = 0; i < 6; i++)
+ ib->phys_addr[i] = dev->dev_addr[i];
+#endif
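+ /* For illustration: the LANCE fetches the init block as 16-bit words,
+ * so on a big-endian host a MAC of 00:80:10:12:34:56 lands in memory
+ * as the byte sequence 80 00 12 10 56 34 (address purely illustrative).
+ */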
+
+ if (DEBUG_IRING)
+ printk("TX rings:\n");
+
+ lp->tx_full = 0;
+ /* Setup the Tx ring entries */
+ for (i = 0; i < (1 << lp->lance_log_tx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
+ ib->btx_ring[i].tmd0 = leptr;
+ ib->btx_ring[i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring[i].tmd1_bits = 0;
+ ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring[i].misc = 0;
+ if (DEBUG_IRING)
+ printk("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the Rx ring entries */
+ if (DEBUG_IRING)
+ printk("RX rings:\n");
+ for (i = 0; i < (1 << lp->lance_log_rx_bufs); i++) {
+ leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
+
+ ib->brx_ring[i].rmd0 = leptr;
+ ib->brx_ring[i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
+ /* 0xf000 == bits that must be one (reserved, presumably) */
+ ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
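+ /* e.g. RX_BUFF_SIZE = 1544 gives -1544 = 0xf9f8 as a 16-bit value;
+ * OR-ing with 0xf000 keeps the four must-be-one bits set for any
+ * buffer size up to 4 KB. */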
+ ib->brx_ring[i].mblength = 0;
+ if (DEBUG_IRING)
+ printk("%d: 0x%8.8x\n", i, leptr);
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->brx_ring);
+ ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
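+ /* e.g. lance_log_rx_bufs = 3 with leptr = 0x00fc0000 gives
+ * rx_len = (3 << 13) | 0x00fc = 0x60fc: ring length 2^3 = 8 encoded
+ * in the top three bits, IADR[23:16] of the ring base in the low
+ * byte (address illustrative). */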
+ if (DEBUG_IRING)
+ printk("RX ptr: %8.8x\n", leptr);
+
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->btx_ring);
+ ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+ if (DEBUG_IRING)
+ printk("TX ptr: %8.8x\n", leptr);
+
+ /* Clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+ PRINT_RINGS();
+}
+
+/* LANCE must be STOPped before we do this, too... */
+static int init_restart_lance(struct lance_private *lp)
+{
+ int i;
+
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_INIT);
+
+ /* Need a hook here for sunlance ledma stuff */
+
+ /* Wait for the lance to complete initialization */
+ for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
+ barrier();
+ if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
+ printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
+ return -1;
+ }
+
+ /* Clear IDON by writing a "1", enable interrupts and start lance */
+ WRITERDP(lp, LE_C0_IDON);
+ WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);
+
+ return 0;
+}
+
+static int lance_reset(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int status;
+
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+
+ load_csrs(lp);
+ lance_init_ring(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ status = init_restart_lance(lp);
+#ifdef DEBUG_DRIVER
+ printk("Lance restart=%d\n", status);
+#endif
+ return status;
+}
+
+static int lance_rx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_rx_desc *rd;
+ unsigned char bits;
+#ifdef TEST_HITS
+ int i;
+#endif
+
+#ifdef TEST_HITS
+ printk("[");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (i == lp->rx_new)
+ printk("%s",
+ ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "_" : "X");
+ else
+ printk("%s",
+ ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "." : "1");
+ }
+ printk("]");
+#endif
+#ifdef CONFIG_HP300
+ blinken_leds(0x40, 0);
+#endif
+ WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */
+ for (rd = &ib->brx_ring[lp->rx_new]; /* For each Rx ring we own... */
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring[lp->rx_new]) {
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ continue;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF)
+ dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC)
+ dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL)
+ dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA)
+ dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP)
+ dev->stats.rx_errors++;
+ } else {
+ int len = (rd->mblength & 0xfff) - 4;
+ struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
+
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ return 0;
+ }
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)&(ib->rx_buf[lp->rx_new][0]),
+ len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ }
+ return 0;
+}
+
+static int lance_tx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_tx_desc *td;
+ int i, j;
+ int status;
+
+#ifdef CONFIG_HP300
+ blinken_leds(0x80, 0);
+#endif
+ /* csr0 is 2f3 */
+ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+ /* csr0 is 73 */
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ td = &ib->btx_ring[i];
+
+ /* If we hit a packet not owned by us, stop */
+ if (td->tmd1_bits & LE_T1_OWN)
+ break;
+
+ if (td->tmd1_bits & LE_T1_ERR) {
+ status = td->misc;
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY)
+ dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL)
+ dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ printk("%s: Carrier Lost, trying %s\n",
+ dev->name,
+ lp->tpe ? "TPE" : "AUI");
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ }
+
+ /* buffer errors and underflows turn off the transmitter */
+ /* Restart the adapter */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ /* Stop the lance */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ td->tmd1_bits &= ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (td->tmd1_bits & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (td->tmd1_bits & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+
+ j = (j + 1) & lp->tx_ring_mod_mask;
+ }
+ lp->tx_old = j;
+ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
+ return 0;
+}
+
+static irqreturn_t
+lance_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ int csr0;
+
+ spin_lock(&lp->devlock);
+
+ WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */
+ csr0 = READRDP(lp);
+
+ PRINT_RINGS();
+
+ if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */
+ spin_unlock(&lp->devlock);
+ return IRQ_NONE; /* been generated by the Lance. */
+ }
+
+ /* Acknowledge all the interrupt sources ASAP */
+ WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));
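+ /* csr0 interrupt bits are cleared by writing them back as ones.
+ * TDMD/STOP/STRT/INIT are masked so no control command is issued by
+ * accident; INEA is masked too and re-enabled once the sources have
+ * been handled below. */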
+
+ if ((csr0 & LE_C0_ERR)) {
+ /* Clear the error condition */
+ WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
+ }
+
+ if (csr0 & LE_C0_RINT)
+ lance_rx(dev);
+
+ if (csr0 & LE_C0_TINT)
+ lance_tx(dev);
+
+ /* Log misc errors. */
+ if (csr0 & LE_C0_BABL)
+ dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & LE_C0_MISS)
+ dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & LE_C0_MERR) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ WRITERDP(lp, LE_C0_STRT);
+ }
+
+ if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
+ lp->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);
+
+ spin_unlock(&lp->devlock);
+ return IRQ_HANDLED;
+}
+
+int lance_open(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int res;
+
+ /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
+ if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
+ return -EAGAIN;
+
+ res = lance_reset(dev);
+ spin_lock_init(&lp->devlock);
+ netif_start_queue(dev);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(lance_open);
+
+int lance_close(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ /* Stop the LANCE */
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+
+ free_irq(lp->irq, dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lance_close);
+
+void lance_tx_timeout(struct net_device *dev)
+{
+ printk("lance_tx_timeout\n");
+ lance_reset(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+EXPORT_SYMBOL_GPL(lance_tx_timeout);
+
+int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ int entry, skblen, len;
+ static int outs;
+ unsigned long flags;
+
+ if (!TX_BUFFS_AVAIL)
+ return NETDEV_TX_LOCKED;
+
+ netif_stop_queue(dev);
+
+ skblen = skb->len;
+
+#ifdef DEBUG_DRIVER
+ /* dump the packet */
+ {
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ if ((i % 16) == 0)
+ printk("\n");
+ printk("%2.2x ", skb->data[i]);
+ }
+ }
+#endif
+ len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
+ entry = lp->tx_new & lp->tx_ring_mod_mask;
+ ib->btx_ring[entry].length = (-len) | 0xf000;
+ ib->btx_ring[entry].misc = 0;
+
+ if (skb->len < ETH_ZLEN)
+ memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
+ skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
+
+ /* Now, give the packet to the lance */
+ ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
+ lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask;
+
+ outs++;
+ /* Kick the lance: transmit now */
+ WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
+ dev_consume_skb_any(skb);
+
+ spin_lock_irqsave(&lp->devlock, flags);
+ if (TX_BUFFS_AVAIL)
+ netif_start_queue(dev);
+ else
+ lp->tx_full = 1;
+ spin_unlock_irqrestore(&lp->devlock, flags);
+
+ return NETDEV_TX_OK;
+}
+EXPORT_SYMBOL_GPL(lance_start_xmit);
+
+/* taken from the depca driver via a2065.c */
+static void lance_load_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile u16 *mcast_table = (u16 *)&ib->filter;
+ struct netdev_hw_addr *ha;
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ ib->filter[0] = 0xffffffff;
+ ib->filter[1] = 0xffffffff;
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+
+ /* Add addresses */
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc = crc >> 26;
+ mcast_table[crc >> 4] |= 1 << (crc & 0xf);
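+ /* The six high-order CRC bits select one of the 64 filter bits:
+ * crc >> 4 picks one of four 16-bit words, crc & 0xf the bit
+ * within it (e.g. crc = 42 sets bit 10 of word 2). */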
+ }
+}
+
+
+void lance_set_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ int stopped;
+
+ stopped = netif_queue_stopped(dev);
+ if (!stopped)
+ netif_stop_queue(dev);
+
+ while (lp->tx_old != lp->tx_new)
+ schedule();
+
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STOP);
+ lance_init_ring(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ ib->mode |= LE_MO_PROM;
+ } else {
+ ib->mode &= ~LE_MO_PROM;
+ lance_load_multicast(dev);
+ }
+ load_csrs(lp);
+ init_restart_lance(lp);
+
+ if (!stopped)
+ netif_start_queue(dev);
+}
+EXPORT_SYMBOL_GPL(lance_set_multicast);
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void lance_poll(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ spin_lock(&lp->devlock);
+ WRITERAP(lp, LE_CSR0);
+ WRITERDP(lp, LE_C0_STRT);
+ spin_unlock(&lp->devlock);
+ lance_interrupt(dev->irq, dev);
+}
+#endif
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
new file mode 100644
index 000000000..e9e0be313
--- /dev/null
+++ b/drivers/net/ethernet/amd/7990.h
@@ -0,0 +1,250 @@
+/*
+ * 7990.h -- LANCE ethernet IC generic routines.
+ * This is an attempt to separate out the bits of various ethernet
+ * drivers that are common because they all use the AMD 7990 LANCE
+ * (Local Area Network Controller for Ethernet) chip.
+ *
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ *
+ * Most of this stuff was obtained by looking at other LANCE drivers,
+ * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
+ */
+
+#ifndef _7990_H
+#define _7990_H
+
+/* The lance only has two register locations. We communicate mostly via memory. */
+#define LANCE_RDP 0 /* Register Data Port */
+#define LANCE_RAP 2 /* Register Address Port */
+
+/* Transmit/receive ring definitions.
+ * We allow the specific drivers to override these defaults if they want to.
+ * NB: according to lance.c, increasing the number of buffers is a waste
+ * of space and reduces the chance that an upper layer will be able to
+ * reorder queued Tx packets based on priority. [Clearly there is a minimum
+ * limit too: too small and we drop rx packets and can't tx at full speed.]
+ * 4+4 seems to be the usual setting; the atarilance driver uses 3 and 5.
+ */
+
+/* Blast! This won't work. The problem is that we can't specify a default
+ * setting because that would cause the lance_init_block struct to be
+ * too long (and overflow the RAM on shared-memory cards like the HP LANCE).
+ */
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 1
+#define LANCE_LOG_RX_BUFFERS 3
+#endif
+
+#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS)
+#define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS)
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
+#define PKT_BUFF_SIZE (1544)
+#define RX_BUFF_SIZE PKT_BUFF_SIZE
+#define TX_BUFF_SIZE PKT_BUFF_SIZE
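+
+/* With the defaults above, TX_RING_SIZE = 1 << 1 = 2 and
+ * RX_RING_SIZE = 1 << 3 = 8; the MOD_MASK values (1 and 7) let ring
+ * indices wrap with a cheap AND instead of a modulo.
+ */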
+
+/* Each receive buffer is described by a receive message descriptor (RMD) */
+struct lance_rx_desc {
+ volatile unsigned short rmd0; /* low address of packet */
+ volatile unsigned char rmd1_bits; /* descriptor bits */
+ volatile unsigned char rmd1_hadr; /* high address of packet */
+ volatile short length; /* This length is 2s complement (negative)!
+ * Buffer length */
+ volatile unsigned short mblength; /* Actual number of bytes received */
+};
+
+/* Ditto for TMD: */
+struct lance_tx_desc {
+ volatile unsigned short tmd0; /* low address of packet */
+ volatile unsigned char tmd1_bits; /* descriptor bits */
+ volatile unsigned char tmd1_hadr; /* high address of packet */
+ volatile short length; /* Length is 2s complement (negative)! */
+ volatile unsigned short misc;
+};
+
+/* There are three memory structures accessed by the LANCE:
+ * the initialization block, the receive and transmit descriptor rings,
+ * and the data buffers themselves. In fact we might as well put the
+ * init block, the Tx and Rx rings and the buffers together in memory:
+ */
+struct lance_init_block {
+ volatile unsigned short mode; /* Pre-set mode (reg. 15) */
+ volatile unsigned char phys_addr[6]; /* Physical ethernet address */
+ volatile unsigned filter[2]; /* Multicast filter (64 bits) */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ volatile unsigned short rx_ptr; /* receive descriptor addr */
+ volatile unsigned short rx_len; /* receive len and high addr */
+ volatile unsigned short tx_ptr; /* transmit descriptor addr */
+ volatile unsigned short tx_len; /* transmit len and high addr */
+
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
+ * This will be true if this whole struct is 8-byte aligned.
+ */
+ volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
+ volatile struct lance_rx_desc brx_ring[RX_RING_SIZE];
+
+ volatile char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
+ volatile char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
+ /* we use this just to make the struct big enough that we can move its start address
+ * in order to force alignment to an eight byte boundary.
+ */
+};
+
+/* This is where we keep all the stuff the driver needs to know about.
+ * I'm definitely unhappy about the mechanism for allowing specific
+ * drivers to add things...
+ */
+struct lance_private {
+ const char *name;
+ unsigned long base;
+ volatile struct lance_init_block *init_block; /* CPU address of RAM */
+ volatile struct lance_init_block *lance_init_block; /* LANCE address of RAM */
+
+ int rx_new, tx_new;
+ int rx_old, tx_old;
+
+ int lance_log_rx_bufs, lance_log_tx_bufs;
+ int rx_ring_mod_mask, tx_ring_mod_mask;
+
+ int tpe; /* TPE is selected */
+ int auto_select; /* cable-selection is by carrier */
+ unsigned short busmaster_regval;
+
+ unsigned int irq; /* IRQ to register */
+
+ /* This is because the HP LANCE is disgusting and you have to check
+ * a DIO-specific register every time you read/write the LANCE regs :-<
+ * [could we get away with making these some sort of macro?]
+ */
+ void (*writerap)(void *, unsigned short);
+ void (*writerdp)(void *, unsigned short);
+ unsigned short (*readrdp)(void *);
+ spinlock_t devlock;
+ char tx_full;
+};
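+
+/* For illustration only -- a board driver (cf. hplance.c or mvme147.c)
+ * typically fills this in as follows; every value below is hypothetical:
+ *
+ * struct lance_private *lp = netdev_priv(dev);
+ * lp->name = "example-lance";
+ * lp->base = board_mmio_base;
+ * lp->init_block = (struct lance_init_block *)cpu_view_of_ram;
+ * lp->lance_init_block = (struct lance_init_block *)lance_view_of_ram;
+ * lp->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+ * lp->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+ * lp->rx_ring_mod_mask = RX_RING_MOD_MASK;
+ * lp->tx_ring_mod_mask = TX_RING_MOD_MASK;
+ * lp->busmaster_regval = LE_C3_BSWP;
+ * lp->irq = board_irq;
+ *
+ * and then points dev->netdev_ops at lance_open/lance_close/
+ * lance_start_xmit/lance_set_multicast/lance_tx_timeout.
+ */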
+
+/*
+ * Am7990 Control and Status Registers
+ */
+#define LE_CSR0 0x0000 /* LANCE Controller Status */
+#define LE_CSR1 0x0001 /* IADR[15:0] (bit0==0 ie word aligned) */
+#define LE_CSR2 0x0002 /* IADR[23:16] (high bits reserved) */
+#define LE_CSR3 0x0003 /* Misc */
+
+/*
+ * Bit definitions for CSR0 (LANCE Controller Status)
+ */
+#define LE_C0_ERR 0x8000 /* Error = BABL | CERR | MISS | MERR */
+#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
+#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
+#define LE_C0_MISS 0x1000 /* Missed Frame (no rx buffer to put it in) */
+#define LE_C0_MERR 0x0800 /* Memory Error */
+#define LE_C0_RINT 0x0400 /* Receive Interrupt */
+#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
+#define LE_C0_IDON 0x0100 /* Initialization Done */
+#define LE_C0_INTR 0x0080 /* Interrupt Flag
+ = BABL | MISS | MERR | RINT | TINT | IDON */
+#define LE_C0_INEA 0x0040 /* Interrupt Enable */
+#define LE_C0_RXON 0x0020 /* Receive On */
+#define LE_C0_TXON 0x0010 /* Transmit On */
+#define LE_C0_TDMD 0x0008 /* Transmit Demand */
+#define LE_C0_STOP 0x0004 /* Stop */
+#define LE_C0_STRT 0x0002 /* Start */
+#define LE_C0_INIT 0x0001 /* Initialize */
+
+
+/*
+ * Bit definitions for CSR3
+ */
+#define LE_C3_BSWP 0x0004 /* Byte Swap (on for big endian byte order) */
+#define LE_C3_ACON 0x0002 /* ALE Control (on for active low ALE) */
+#define LE_C3_BCON 0x0001 /* Byte Control */
+
+
+/*
+ * Mode Flags
+ */
+#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
+/* these next ones 0x4000 -- 0x0080 are not available on the LANCE 7990,
+ * but they are in NetBSD's am7990.h, presumably for backwards-compatible chips
+ */
+#define LE_MO_DRCVBC 0x4000 /* disable receive broadcast */
+#define LE_MO_DRCVPA 0x2000 /* disable physical address detection */
+#define LE_MO_DLNKTST 0x1000 /* disable link status */
+#define LE_MO_DAPC 0x0800 /* disable automatic polarity correction */
+#define LE_MO_MENDECL 0x0400 /* MENDEC loopback mode */
+#define LE_MO_LRTTSEL 0x0200 /* lower RX threshold / TX mode selection */
+#define LE_MO_PSEL1 0x0100 /* port selection bit1 */
+#define LE_MO_PSEL0 0x0080 /* port selection bit0 */
+/* and this one is from the C-LANCE data sheet... */
+#define LE_MO_EMBA 0x0080 /* Enable Modified Backoff Algorithm
+ (C-LANCE, not original LANCE) */
+#define LE_MO_INTL 0x0040 /* Internal Loopback */
+#define LE_MO_DRTY 0x0020 /* Disable Retry */
+#define LE_MO_FCOLL 0x0010 /* Force Collision */
+#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
+#define LE_MO_LOOP 0x0004 /* Loopback Enable */
+#define LE_MO_DTX 0x0002 /* Disable Transmitter */
+#define LE_MO_DRX 0x0001 /* Disable Receiver */
+
+
+/*
+ * Receive Flags
+ */
+#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_R1_ERR 0x40 /* Error */
+#define LE_R1_FRA 0x20 /* Framing Error */
+#define LE_R1_OFL 0x10 /* Overflow Error */
+#define LE_R1_CRC 0x08 /* CRC Error */
+#define LE_R1_BUF 0x04 /* Buffer Error */
+#define LE_R1_SOP 0x02 /* Start of Packet */
+#define LE_R1_EOP 0x01 /* End of Packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+
+/*
+ * Transmit Flags
+ */
+#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_T1_ERR 0x40 /* Error */
+#define LE_T1_RES 0x20 /* Reserved, LANCE writes this with a zero */
+#define LE_T1_EMORE 0x10 /* More than one retry needed */
+#define LE_T1_EONE 0x08 /* One retry needed */
+#define LE_T1_EDEF 0x04 /* Deferred */
+#define LE_T1_SOP 0x02 /* Start of Packet */
+#define LE_T1_EOP 0x01 /* End of Packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+/*
+ * Error Flags
+ */
+#define LE_T3_BUF 0x8000 /* Buffer Error */
+#define LE_T3_UFL 0x4000 /* Underflow Error */
+#define LE_T3_LCOL 0x1000 /* Late Collision */
+#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
+#define LE_T3_RTY 0x0400 /* Retry Error */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
+
+/* Miscellaneous useful macros */
+
+#define TX_BUFFS_AVAIL ((lp->tx_old <= lp->tx_new) ? \
+ lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new : \
+ lp->tx_old - lp->tx_new - 1)
+
+/* The LANCE only uses 24 bit addresses. This does the obvious thing. */
+#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
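+
+/* Worked example (hypothetical values): with tx_ring_mod_mask = 1 (a
+ * two-entry ring), tx_old = 0 and tx_new = 1 give 0 + 1 - 1 = 0 free
+ * slots; after lance_tx() advances tx_old to 1, 1 + 1 - 1 = 1 slot is
+ * free again. One entry is always sacrificed so that a full ring and an
+ * empty ring remain distinguishable. Similarly,
+ * LANCE_ADDR(0xfec03000) = 0x00c03000: the top byte is masked off
+ * because the chip only drives 24 address lines.
+ */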
+
+/* Now the prototypes we export */
+int lance_open(struct net_device *dev);
+int lance_close(struct net_device *dev);
+int lance_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void lance_set_multicast(struct net_device *dev);
+void lance_tx_timeout(struct net_device *dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+void lance_poll(struct net_device *dev);
+#endif
+
+#endif /* ndef _7990_H */
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
new file mode 100644
index 000000000..426916036
--- /dev/null
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -0,0 +1,206 @@
+#
+# AMD network device configuration
+#
+
+config NET_VENDOR_AMD
+ bool "AMD devices"
+ default y
+ depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \
+ SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \
+ (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA || ARM64
+ ---help---
+ If you have a network (Ethernet) chipset belonging to this class,
+ say Y.
+
+ Note that the answer to this question does not directly affect
+ the kernel: saying N will just cause the configurator to skip all
+ the questions regarding AMD chipsets. If you say Y, you will be asked
+ for your specific chipset/driver in the following questions.
+
+if NET_VENDOR_AMD
+
+config A2065
+ tristate "A2065 support"
+ depends on ZORRO
+ select CRC32
+ ---help---
+ If you have a Commodore A2065 Ethernet adapter, say Y. Otherwise,
+ say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called a2065.
+
+config AMD8111_ETH
+ tristate "AMD 8111 (new PCI LANCE) support"
+ depends on PCI
+ select CRC32
+ select MII
+ ---help---
+ If you have an AMD 8111-based PCI LANCE ethernet card,
+ answer Y here and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called amd8111e.
+
+config LANCE
+ tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
+ depends on ISA && ISA_DMA_API && !ARM
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>. Some LinkSys cards are
+ of this type.
+
+ To compile this driver as a module, choose M here: the module
+ will be called lance. This is recommended.
+
+config PCNET32
+ tristate "AMD PCnet32 PCI support"
+ depends on PCI
+ select CRC32
+ select MII
+ ---help---
+ If you have a PCnet32 or PCnetPCI based network (Ethernet) card,
+ answer Y here and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called pcnet32.
+
+config ARIADNE
+ tristate "Ariadne support"
+ depends on ZORRO
+ ---help---
+ If you have a Village Tronic Ariadne Ethernet adapter, say Y.
+ Otherwise, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ariadne.
+
+config ARM_AM79C961A
+ bool "ARM EBSA110 AM79C961A support"
+ depends on ARM && ARCH_EBSA110
+ select CRC32
+ ---help---
+ If you wish to compile a kernel for the EBSA-110, then you should
+ always answer Y to this.
+
+config ATARILANCE
+ tristate "Atari LANCE support"
+ depends on ATARI
+ ---help---
+ Say Y to include support for several Atari Ethernet adapters based
+ on the AMD LANCE chipset: RieblCard (with or without battery), or
+ PAMCard VME (also the version by Rhotron, with different addresses).
+
+config DECLANCE
+ tristate "DEC LANCE ethernet controller support"
+ depends on MACH_DECSTATION
+ select CRC32
+ ---help---
+ This driver is for the series of Ethernet controllers produced by
+ DEC (now Compaq) based on the AMD LANCE chipset, including the
+ DEPCA series. (This chipset is better known via the NE2100 cards.)
+
+config HPLANCE
+ bool "HP on-board LANCE support"
+ depends on DIO
+ select CRC32
+ ---help---
+ If you want to use the builtin "LANCE" Ethernet controller on an
+ HP300 machine, say Y here.
+
+config MIPS_AU1X00_ENET
+ tristate "MIPS AU1000 Ethernet support"
+ depends on MIPS_ALCHEMY
+ select PHYLIB
+ select CRC32
+ ---help---
+ If you have an Alchemy Semi AU1X00 based system
+ say Y. Otherwise, say N.
+
+config MVME147_NET
+ tristate "MVME147 (LANCE) Ethernet support"
+ depends on MVME147
+ select CRC32
+ ---help---
+ Support for the on-board Ethernet interface on the Motorola MVME147
+ single-board computer. Say Y here to include the
+ driver for this chip in your kernel.
+ To compile this driver as a module, choose M here.
+
+config PCMCIA_NMCLAN
+ tristate "New Media PCMCIA support"
+ depends on PCMCIA
+ help
+ Say Y here if you intend to attach a New Media Ethernet or LiveWire
+ PCMCIA (PC-card) Ethernet card to your computer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called nmclan_cs. If unsure, say N.
+
+config NI65
+ tristate "NI6510 support"
+ depends on ISA && ISA_DMA_API && !ARM
+ ---help---
+ If you have a network (Ethernet) card of this type, say Y and read
+ the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ni65.
+
+config SUN3LANCE
+ tristate "Sun3/Sun3x on-board LANCE support"
+ depends on (SUN3 || SUN3X)
+ ---help---
+ Most Sun3 and Sun3x motherboards (including the 3/50, 3/60 and 3/80)
+ featured an AMD LANCE 10Mbit Ethernet controller on board; say Y
+ here to compile in the Linux driver for this and enable Ethernet.
+ General Linux information on the Sun 3 and 3x series (now
+ discontinued) is at
+ <http://www.angelfire.com/ca2/tech68k/sun3.html>.
+
+ If you're not building a kernel for a Sun 3, say N.
+
+config SUNLANCE
+ tristate "Sun LANCE support"
+ depends on SBUS
+ select CRC32
+ ---help---
+ This driver supports the "le" interface present on all 32-bit Sparc
+ systems, on some older Ultra systems and as an Sbus option. These
+ cards are based on the AMD LANCE chipset, which is better known
+ via the NE2100 cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sunlance.
+
+config AMD_XGBE
+ tristate "AMD 10GbE Ethernet driver"
+ depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA
+ depends on ARM64 || COMPILE_TEST
+ select PHYLIB
+ select AMD_XGBE_PHY
+ select BITREVERSE
+ select CRC32
+ select PTP_1588_CLOCK
+ ---help---
+ This driver supports the AMD 10GbE Ethernet device found on an
+ AMD SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called amd-xgbe.
+
+config AMD_XGBE_DCB
+ bool "Data Center Bridging (DCB) support"
+ default n
+ depends on AMD_XGBE && DCB
+ ---help---
+ Say Y here to enable Data Center Bridging (DCB) support in the
+ driver.
+
+ If unsure, say N.
+
+endif # NET_VENDOR_AMD
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
new file mode 100644
index 000000000..a38a2dce3
--- /dev/null
+++ b/drivers/net/ethernet/amd/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the AMD network device drivers.
+#
+
+obj-$(CONFIG_A2065) += a2065.o
+obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
+obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
+obj-$(CONFIG_ARIADNE) += ariadne.o
+obj-$(CONFIG_ATARILANCE) += atarilance.o
+obj-$(CONFIG_DECLANCE) += declance.o
+obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
+obj-$(CONFIG_LANCE) += lance.o
+obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
+obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
+obj-$(CONFIG_PCMCIA_NMCLAN) += nmclan_cs.o
+obj-$(CONFIG_NI65) += ni65.o
+obj-$(CONFIG_PCNET32) += pcnet32.o
+obj-$(CONFIG_SUN3LANCE) += sun3lance.o
+obj-$(CONFIG_SUNLANCE) += sunlance.o
+obj-$(CONFIG_AMD_XGBE) += xgbe/
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
new file mode 100644
index 000000000..56139184b
--- /dev/null
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -0,0 +1,783 @@
+/*
+ * Amiga Linux/68k A2065 Ethernet Driver
+ *
+ * (C) Copyright 1995-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
+ *
+ * Fixes and tips by:
+ * - Janos Farkas (CHEXUM@sparta.banki.hu)
+ * - Jes Degn Soerensen (jds@kom.auc.dk)
+ * - Matt Domsch (Matt_Domsch@dell.com)
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver
+ * (C) Copyright 1995 by Geert Uytterhoeven,
+ * Peter De Schrijver
+ *
+ * lance.c: An AMD LANCE ethernet driver for linux.
+ * Written 1993-94 by Donald Becker.
+ *
+ * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
+ * Advanced Micro Devices
+ * Publication #16907, Rev. B, Amendment/0, May 1994
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
+ *
+ * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with
+ * both 10BASE-2 (thin coax) and AUI (DB-15) connectors
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+/*#define DEBUG*/
+/*#define TEST_HITS*/
+
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/zorro.h>
+#include <linux/bitops.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/amigaints.h>
+#include <asm/amigahw.h>
+
+#include "a2065.h"
+
+/* Transmit/Receive Ring Definitions */
+
+#define LANCE_LOG_TX_BUFFERS (2)
+#define LANCE_LOG_RX_BUFFERS (4)
+
+#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS)
+#define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS)
+
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+
+#define PKT_BUF_SIZE (1544)
+#define RX_BUFF_SIZE PKT_BUF_SIZE
+#define TX_BUFF_SIZE PKT_BUF_SIZE
+
+/* Layout of the Lance's RAM Buffer */
+
+struct lance_init_block {
+ unsigned short mode; /* Pre-set mode (reg. 15) */
+ unsigned char phys_addr[6]; /* Physical ethernet address */
+ unsigned filter[2]; /* Multicast filter. */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ unsigned short rx_ptr; /* receive descriptor addr */
+ unsigned short rx_len; /* receive len and high addr */
+ unsigned short tx_ptr; /* transmit descriptor addr */
+ unsigned short tx_len; /* transmit len and high addr */
+
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
+ struct lance_rx_desc brx_ring[RX_RING_SIZE];
+ struct lance_tx_desc btx_ring[TX_RING_SIZE];
+
+ char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
+ char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
+};
+
+/* Private Device Data */
+
+struct lance_private {
+ char *name;
+ volatile struct lance_regs *ll;
+ volatile struct lance_init_block *init_block; /* Hosts view */
+ volatile struct lance_init_block *lance_init_block; /* Lance view */
+
+ int rx_new, tx_new;
+ int rx_old, tx_old;
+
+ int lance_log_rx_bufs, lance_log_tx_bufs;
+ int rx_ring_mod_mask, tx_ring_mod_mask;
+
+ int tpe; /* cable-selection is TPE */
+ int auto_select; /* cable-selection by carrier */
+ unsigned short busmaster_regval;
+
+#ifdef CONFIG_SUNLANCE
+ struct Linux_SBus_DMA *ledma; /* if set this points to ledma and arch=4m */
+ int burst_sizes; /* ledma SBus burst sizes */
+#endif
+ struct timer_list multicast_timer;
+};
+
+#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
+
+/* Load the CSR registers */
+static void load_csrs(struct lance_private *lp)
+{
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_init_block *aib = lp->lance_init_block;
+ int leptr = LANCE_ADDR(aib);
+
+ ll->rap = LE_CSR1;
+ ll->rdp = (leptr & 0xFFFF);
+ ll->rap = LE_CSR2;
+ ll->rdp = leptr >> 16;
+ ll->rap = LE_CSR3;
+ ll->rdp = lp->busmaster_regval;
+
+ /* Point back to csr0 */
+ ll->rap = LE_CSR0;
+}
+
+/* Setup the Lance Rx and Tx rings */
+static void lance_init_ring(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_init_block *aib = lp->lance_init_block;
+ /* for LANCE_ADDR computations */
+ int leptr;
+ int i;
+
+ /* Lock out other processes while setting up hardware */
+ netif_stop_queue(dev);
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ ib->mode = 0;
+
+ /* Copy the ethernet address to the lance init block
+ * Note that on the sparc you need to swap the ethernet address.
+ */
+ ib->phys_addr[0] = dev->dev_addr[1];
+ ib->phys_addr[1] = dev->dev_addr[0];
+ ib->phys_addr[2] = dev->dev_addr[3];
+ ib->phys_addr[3] = dev->dev_addr[2];
+ ib->phys_addr[4] = dev->dev_addr[5];
+ ib->phys_addr[5] = dev->dev_addr[4];
+
+ /* Setup the Tx ring entries */
+ netdev_dbg(dev, "TX rings:\n");
+ for (i = 0; i <= 1 << lp->lance_log_tx_bufs; i++) {
+ leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
+ ib->btx_ring[i].tmd0 = leptr;
+ ib->btx_ring[i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring[i].tmd1_bits = 0;
+ ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring[i].misc = 0;
+ if (i < 3)
+ netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
+ }
+
+ /* Setup the Rx ring entries */
+ netdev_dbg(dev, "RX rings:\n");
+ for (i = 0; i < 1 << lp->lance_log_rx_bufs; i++) {
+ leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
+
+ ib->brx_ring[i].rmd0 = leptr;
+ ib->brx_ring[i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
+ ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring[i].mblength = 0;
+ if (i < 3)
+ netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->brx_ring);
+ ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
+ netdev_dbg(dev, "RX ptr: %08x\n", leptr);
+
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(&aib->btx_ring);
+ ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+ netdev_dbg(dev, "TX ptr: %08x\n", leptr);
+
+ /* Clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+}
+
+static int init_restart_lance(struct lance_private *lp)
+{
+ volatile struct lance_regs *ll = lp->ll;
+ int i;
+
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_INIT;
+
+ /* Wait for the lance to complete initialization */
+ for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
+ barrier();
+ if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
+ pr_err("unopened after %d ticks, csr0=%04x\n", i, ll->rdp);
+ return -EIO;
+ }
+
+ /* Clear IDON by writing a "1", enable interrupts and start lance */
+ ll->rdp = LE_C0_IDON;
+ ll->rdp = LE_C0_INEA | LE_C0_STRT;
+
+ return 0;
+}
+
+static int lance_rx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_rx_desc *rd;
+ unsigned char bits;
+
+#ifdef TEST_HITS
+ int i;
+ char buf[RX_RING_SIZE + 1];
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ char r1_own = ib->brx_ring[i].rmd1_bits & LE_R1_OWN;
+ if (i == lp->rx_new)
+ buf[i] = r1_own ? '_' : 'X';
+ else
+ buf[i] = r1_own ? '.' : '1';
+ }
+ buf[RX_RING_SIZE] = 0;
+
+ pr_debug("RxRing TestHits: [%s]\n", buf);
+#endif
+
+ ll->rdp = LE_C0_RINT | LE_C0_INEA;
+ for (rd = &ib->brx_ring[lp->rx_new];
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring[lp->rx_new]) {
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ continue;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF)
+ dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC)
+ dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL)
+ dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA)
+ dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP)
+ dev->stats.rx_errors++;
+ } else {
+ int len = (rd->mblength & 0xfff) - 4;
+ struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
+
+ if (!skb) {
+ dev->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ return 0;
+ }
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)&ib->rx_buf[lp->rx_new][0],
+ len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
+ }
+ return 0;
+}
+
+static int lance_tx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_tx_desc *td;
+ int i, j;
+ int status;
+
+ /* csr0 is 2f3 */
+ ll->rdp = LE_C0_TINT | LE_C0_INEA;
+ /* csr0 is 73 */
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ td = &ib->btx_ring[i];
+
+ /* If we hit a packet not owned by us, stop */
+ if (td->tmd1_bits & LE_T1_OWN)
+ break;
+
+ if (td->tmd1_bits & LE_T1_ERR) {
+ status = td->misc;
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY)
+ dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL)
+ dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ netdev_err(dev, "Carrier Lost, trying %s\n",
+ lp->tpe ? "TPE" : "AUI");
+ /* Stop the lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ }
+
+ /* buffer errors and underflows turn off
+ * the transmitter, so restart the adapter
+ */
+ if (status & (LE_T3_BUF | LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ netdev_err(dev, "Tx: ERR_BUF|ERR_UFL, restarting\n");
+ /* Stop the lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ return 0;
+ }
+ } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
+ /* So we don't count the packet more than once. */
+ td->tmd1_bits &= ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (td->tmd1_bits & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (td->tmd1_bits & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+
+ j = (j + 1) & lp->tx_ring_mod_mask;
+ }
+ lp->tx_old = j;
+ ll->rdp = LE_C0_TINT | LE_C0_INEA;
+ return 0;
+}
+
+static int lance_tx_buffs_avail(struct lance_private *lp)
+{
+ if (lp->tx_old <= lp->tx_new)
+ return lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new;
+ return lp->tx_old - lp->tx_new - 1;
+}
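+
+/* e.g. with TX_RING_SIZE = 4 (tx_ring_mod_mask = 3), an idle ring
+ * (tx_old == tx_new) reports 3 free slots: one entry is kept unused so
+ * that "full" and "empty" stay distinguishable.
+ */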
+
+static irqreturn_t lance_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int csr0;
+
+ ll->rap = LE_CSR0; /* LANCE Controller Status */
+ csr0 = ll->rdp;
+
+ if (!(csr0 & LE_C0_INTR)) /* Check if any interrupt has */
+ return IRQ_NONE; /* been generated by the Lance. */
+
+ /* Acknowledge all the interrupt sources ASAP */
+ ll->rdp = csr0 & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT |
+ LE_C0_INIT);
+
+ if (csr0 & LE_C0_ERR) {
+ /* Clear the error condition */
+ ll->rdp = LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_INEA;
+ }
+
+ if (csr0 & LE_C0_RINT)
+ lance_rx(dev);
+
+ if (csr0 & LE_C0_TINT)
+ lance_tx(dev);
+
+ /* Log misc errors. */
+ if (csr0 & LE_C0_BABL)
+ dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & LE_C0_MISS)
+ dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & LE_C0_MERR) {
+ netdev_err(dev, "Bus master arbitration failure, status %04x\n",
+ csr0);
+ /* Restart the chip. */
+ ll->rdp = LE_C0_STRT;
+ }
+
+ if (netif_queue_stopped(dev) && lance_tx_buffs_avail(lp) > 0)
+ netif_wake_queue(dev);
+
+ ll->rap = LE_CSR0;
+ ll->rdp = (LE_C0_BABL | LE_C0_CERR | LE_C0_MISS | LE_C0_MERR |
+ LE_C0_IDON | LE_C0_INEA);
+ return IRQ_HANDLED;
+}
+
+static int lance_open(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int ret;
+
+ /* Stop the Lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+
+ /* Install the Interrupt handler */
+ ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (ret)
+ return ret;
+
+ load_csrs(lp);
+ lance_init_ring(dev);
+
+ netif_start_queue(dev);
+
+ return init_restart_lance(lp);
+}
+
+static int lance_close(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->multicast_timer);
+
+ /* Stop the card */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+
+ free_irq(IRQ_AMIGA_PORTS, dev);
+ return 0;
+}
+
+static inline int lance_reset(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int status;
+
+ /* Stop the lance */
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+
+ load_csrs(lp);
+
+ lance_init_ring(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_start_queue(dev);
+
+ status = init_restart_lance(lp);
+ netdev_dbg(dev, "Lance restart=%d\n", status);
+
+ return status;
+}
+
+static void lance_tx_timeout(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+
+ netdev_err(dev, "transmit timed out, status %04x, reset\n", ll->rdp);
+ lance_reset(dev);
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ volatile struct lance_init_block *ib = lp->init_block;
+ int entry, skblen;
+ int status = NETDEV_TX_OK;
+ unsigned long flags;
+
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ skblen = max_t(unsigned, skb->len, ETH_ZLEN);
+
+ local_irq_save(flags);
+
+ if (!lance_tx_buffs_avail(lp)) {
+ local_irq_restore(flags);
+ return NETDEV_TX_LOCKED;
+ }
+
+#ifdef DEBUG
+ /* dump the packet */
+ print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE,
+ 16, 1, skb->data, 64, true);
+#endif
+ entry = lp->tx_new & lp->tx_ring_mod_mask;
+ ib->btx_ring[entry].length = (-skblen) | 0xf000;
+ ib->btx_ring[entry].misc = 0;
+
+ skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
+
+ /* Now, give the packet to the lance */
+ ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
+ lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
+ dev->stats.tx_bytes += skblen;
+
+ if (lance_tx_buffs_avail(lp) <= 0)
+ netif_stop_queue(dev);
+
+ /* Kick the lance: transmit now */
+ ll->rdp = LE_C0_INEA | LE_C0_TDMD;
+ dev_kfree_skb(skb);
+
+ local_irq_restore(flags);
+
+ return status;
+}
+
+/* taken from the depca driver */
+static void lance_load_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile u16 *mcast_table = (u16 *)&ib->filter;
+ struct netdev_hw_addr *ha;
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ ib->filter[0] = 0xffffffff;
+ ib->filter[1] = 0xffffffff;
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+
+ /* Add addresses */
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc = crc >> 26;
+ mcast_table[crc >> 4] |= 1 << (crc & 0xf);
+ }
+}
+
+static void lance_set_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_init_block *ib = lp->init_block;
+ volatile struct lance_regs *ll = lp->ll;
+
+ if (!netif_running(dev))
+ return;
+
+ if (lp->tx_old != lp->tx_new) {
+ mod_timer(&lp->multicast_timer, jiffies + 4);
+ netif_wake_queue(dev);
+ return;
+ }
+
+ netif_stop_queue(dev);
+
+ ll->rap = LE_CSR0;
+ ll->rdp = LE_C0_STOP;
+ lance_init_ring(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ ib->mode |= LE_MO_PROM;
+ } else {
+ ib->mode &= ~LE_MO_PROM;
+ lance_load_multicast(dev);
+ }
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+}
+
+static int a2065_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent);
+static void a2065_remove_one(struct zorro_dev *z);
+
+
+static struct zorro_device_id a2065_zorro_tbl[] = {
+ { ZORRO_PROD_CBM_A2065_1 },
+ { ZORRO_PROD_CBM_A2065_2 },
+ { ZORRO_PROD_AMERISTAR_A2065 },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl);
+
+static struct zorro_driver a2065_driver = {
+ .name = "a2065",
+ .id_table = a2065_zorro_tbl,
+ .probe = a2065_init_one,
+ .remove = a2065_remove_one,
+};
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = lance_open,
+ .ndo_stop = lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_set_rx_mode = lance_set_multicast,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int a2065_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ struct net_device *dev;
+ struct lance_private *priv;
+ unsigned long board = z->resource.start;
+ unsigned long base_addr = board + A2065_LANCE;
+ unsigned long mem_start = board + A2065_RAM;
+ struct resource *r1, *r2;
+ u32 serial;
+ int err;
+
+ r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
+ "Am7990");
+ if (!r1)
+ return -EBUSY;
+ r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
+ if (!r2) {
+ release_mem_region(base_addr, sizeof(struct lance_regs));
+ return -EBUSY;
+ }
+
+ dev = alloc_etherdev(sizeof(struct lance_private));
+ if (dev == NULL) {
+ release_mem_region(base_addr, sizeof(struct lance_regs));
+ release_mem_region(mem_start, A2065_RAM_SIZE);
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(dev);
+
+ r1->name = dev->name;
+ r2->name = dev->name;
+
+ serial = be32_to_cpu(z->rom.er_SerialNumber);
+ dev->dev_addr[0] = 0x00;
+ if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */
+ dev->dev_addr[1] = 0x80;
+ dev->dev_addr[2] = 0x10;
+ } else { /* Ameristar */
+ dev->dev_addr[1] = 0x00;
+ dev->dev_addr[2] = 0x9f;
+ }
+ dev->dev_addr[3] = (serial >> 16) & 0xff;
+ dev->dev_addr[4] = (serial >> 8) & 0xff;
+ dev->dev_addr[5] = serial & 0xff;
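+ /* e.g. a Commodore board with serial number 0x123456 ends up with the
+ * address 00:80:10:12:34:56 (serial value illustrative). */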
+ dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
+ dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
+ dev->mem_end = dev->mem_start + A2065_RAM_SIZE;
+
+ priv->ll = (volatile struct lance_regs *)dev->base_addr;
+ priv->init_block = (struct lance_init_block *)dev->mem_start;
+ priv->lance_init_block = (struct lance_init_block *)A2065_RAM;
+ priv->auto_select = 0;
+ priv->busmaster_regval = LE_C3_BSWP;
+
+ priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+ priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+ priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
+ priv->tx_ring_mod_mask = TX_RING_MOD_MASK;
+
+ dev->netdev_ops = &lance_netdev_ops;
+ dev->watchdog_timeo = 5*HZ;
+ dev->dma = 0;
+
+ init_timer(&priv->multicast_timer);
+ priv->multicast_timer.data = (unsigned long) dev;
+ priv->multicast_timer.function =
+ (void (*)(unsigned long))lance_set_multicast;
+
+ err = register_netdev(dev);
+ if (err) {
+ release_mem_region(base_addr, sizeof(struct lance_regs));
+ release_mem_region(mem_start, A2065_RAM_SIZE);
+ free_netdev(dev);
+ return err;
+ }
+ zorro_set_drvdata(z, dev);
+
+ netdev_info(dev, "A2065 at 0x%08lx, Ethernet Address %pM\n",
+ board, dev->dev_addr);
+
+ return 0;
+}
+
+
+static void a2065_remove_one(struct zorro_dev *z)
+{
+ struct net_device *dev = zorro_get_drvdata(z);
+
+ unregister_netdev(dev);
+ release_mem_region(ZTWO_PADDR(dev->base_addr),
+ sizeof(struct lance_regs));
+ release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE);
+ free_netdev(dev);
+}
+
+static int __init a2065_init_module(void)
+{
+ return zorro_register_driver(&a2065_driver);
+}
+
+static void __exit a2065_cleanup_module(void)
+{
+ zorro_unregister_driver(&a2065_driver);
+}
+
+module_init(a2065_init_module);
+module_exit(a2065_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/a2065.h b/drivers/net/ethernet/amd/a2065.h
new file mode 100644
index 000000000..5117759d4
--- /dev/null
+++ b/drivers/net/ethernet/amd/a2065.h
@@ -0,0 +1,173 @@
+/*
+ * Amiga Linux/68k A2065 Ethernet Driver
+ *
+ * (C) Copyright 1995 by Geert Uytterhoeven <geert@linux-m68k.org>
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver
+ * (C) Copyright 1995 by Geert Uytterhoeven,
+ * Peter De Schrijver
+ *
+ * lance.c: An AMD LANCE ethernet driver for linux.
+ * Written 1993-94 by Donald Becker.
+ *
+ * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
+ * Advanced Micro Devices
+ * Publication #16907, Rev. B, Amendment/0, May 1994
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
+ *
+ * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with
+ * both 10BASE-2 (thin coax) and AUI (DB-15) connectors
+ */
+
+
+/*
+ * Am7990 Local Area Network Controller for Ethernet (LANCE)
+ */
+
+struct lance_regs {
+ unsigned short rdp; /* Register Data Port */
+ unsigned short rap; /* Register Address Port */
+};
+
+
+/*
+ * Am7990 Control and Status Registers
+ */
+
+#define LE_CSR0 0x0000 /* LANCE Controller Status */
+#define LE_CSR1 0x0001 /* IADR[15:0] */
+#define LE_CSR2 0x0002 /* IADR[23:16] */
+#define LE_CSR3 0x0003 /* Misc */
+
+
+/*
+ * Bit definitions for CSR0 (LANCE Controller Status)
+ */
+
+#define LE_C0_ERR 0x8000 /* Error */
+#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
+#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
+#define LE_C0_MISS 0x1000 /* Missed Frame */
+#define LE_C0_MERR 0x0800 /* Memory Error */
+#define LE_C0_RINT 0x0400 /* Receive Interrupt */
+#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
+#define LE_C0_IDON 0x0100 /* Initialization Done */
+#define LE_C0_INTR 0x0080 /* Interrupt Flag */
+#define LE_C0_INEA 0x0040 /* Interrupt Enable */
+#define LE_C0_RXON 0x0020 /* Receive On */
+#define LE_C0_TXON 0x0010 /* Transmit On */
+#define LE_C0_TDMD 0x0008 /* Transmit Demand */
+#define LE_C0_STOP 0x0004 /* Stop */
+#define LE_C0_STRT 0x0002 /* Start */
+#define LE_C0_INIT 0x0001 /* Initialize */
+
+
+/*
+ * Bit definitions for CSR3
+ */
+
+#define LE_C3_BSWP 0x0004 /* Byte Swap
+ (on for big endian byte order) */
+#define LE_C3_ACON 0x0002 /* ALE Control
+ (on for active low ALE) */
+#define LE_C3_BCON 0x0001 /* Byte Control */
+
+
+/*
+ * Mode Flags
+ */
+
+#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
+#define LE_MO_INTL 0x0040 /* Internal Loopback */
+#define LE_MO_DRTY 0x0020 /* Disable Retry */
+#define LE_MO_FCOLL 0x0010 /* Force Collision */
+#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
+#define LE_MO_LOOP 0x0004 /* Loopback Enable */
+#define LE_MO_DTX 0x0002 /* Disable Transmitter */
+#define LE_MO_DRX 0x0001 /* Disable Receiver */
+
+
+struct lance_rx_desc {
+ unsigned short rmd0; /* low address of packet */
+ unsigned char rmd1_bits; /* descriptor bits */
+ unsigned char rmd1_hadr; /* high address of packet */
+ short length; /* This length is 2s complement (negative)!
+ * Buffer length
+ */
+	unsigned short mblength;	/* Actual number of bytes received */
+};
+
+struct lance_tx_desc {
+ unsigned short tmd0; /* low address of packet */
+ unsigned char tmd1_bits; /* descriptor bits */
+ unsigned char tmd1_hadr; /* high address of packet */
+ short length; /* Length is 2s complement (negative)! */
+ unsigned short misc;
+};
+
+
+/*
+ * Receive Flags
+ */
+
+#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_R1_ERR 0x40 /* Error */
+#define LE_R1_FRA 0x20 /* Framing Error */
+#define LE_R1_OFL 0x10 /* Overflow Error */
+#define LE_R1_CRC 0x08 /* CRC Error */
+#define LE_R1_BUF 0x04 /* Buffer Error */
+#define LE_R1_SOP 0x02 /* Start of Packet */
+#define LE_R1_EOP 0x01 /* End of Packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+
+/*
+ * Transmit Flags
+ */
+
+#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
+#define LE_T1_ERR 0x40 /* Error */
+#define LE_T1_RES 0x20 /* Reserved,
+ LANCE writes this with a zero */
+#define LE_T1_EMORE 0x10 /* More than one retry needed */
+#define LE_T1_EONE 0x08 /* One retry needed */
+#define LE_T1_EDEF 0x04 /* Deferred */
+#define LE_T1_SOP 0x02 /* Start of Packet */
+#define LE_T1_EOP 0x01 /* End of Packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+
+/*
+ * Error Flags
+ */
+
+#define LE_T3_BUF 0x8000 /* Buffer Error */
+#define LE_T3_UFL 0x4000 /* Underflow Error */
+#define LE_T3_LCOL 0x1000 /* Late Collision */
+#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
+#define LE_T3_RTY 0x0400 /* Retry Error */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
+
+
+/*
+ * A2065 Expansion Board Structure
+ */
+
+#define A2065_LANCE 0x4000
+
+#define A2065_RAM 0x8000
+#define A2065_RAM_SIZE 0x8000
+
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
new file mode 100644
index 000000000..87e727b92
--- /dev/null
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -0,0 +1,768 @@
+/*
+ * linux/drivers/net/ethernet/amd/am79c961a.c
+ *
+ * by Russell King <rmk@arm.linux.org.uk> 1995-2001.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Derived from various things including skeleton.c
+ *
+ * This is a special driver for the am79c961A Lance chip used in the
+ * Intel (formerly Digital Equipment Corp) EBSA110 platform. Please
+ * note that this cannot be built as a module (it doesn't make sense).
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+#include <linux/bitops.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+
+#define TX_BUFFERS 15
+#define RX_BUFFERS 25
+
+#include "am79c961a.h"
+
+static irqreturn_t
+am79c961_interrupt (int irq, void *dev_id);
+
+static unsigned int net_debug = NET_DEBUG;
+
+static const char version[] =
+ "am79c961 ethernet driver (C) 1995-2001 Russell King v0.04\n";
+
+/* --------------------------------------------------------------------------- */
+
+#ifdef __arm__
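+/*
+ * Register access on this platform: the asm below selects a register by
+ * storing its number to RAP at ISAIO_BASE + 0x0464, then accesses the
+ * data port 4 bytes below it (RDP) or the ISACSR data port 8 bytes above
+ * it (IDP), using halfword accesses throughout.
+ */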
+static void write_rreg(u_long base, u_int reg, u_int val)
+{
+ asm volatile(
+ "str%?h %1, [%2] @ NET_RAP\n\t"
+ "str%?h %0, [%2, #-4] @ NET_RDP"
+ :
+ : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
+}
+
+static inline unsigned short read_rreg(u_long base_addr, u_int reg)
+{
+ unsigned short v;
+ asm volatile(
+ "str%?h %1, [%2] @ NET_RAP\n\t"
+ "ldr%?h %0, [%2, #-4] @ NET_RDP"
+ : "=r" (v)
+ : "r" (reg), "r" (ISAIO_BASE + 0x0464));
+ return v;
+}
+
+static inline void write_ireg(u_long base, u_int reg, u_int val)
+{
+ asm volatile(
+ "str%?h %1, [%2] @ NET_RAP\n\t"
+ "str%?h %0, [%2, #8] @ NET_IDP"
+ :
+ : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
+}
+
+static inline unsigned short read_ireg(u_long base_addr, u_int reg)
+{
+ u_short v;
+ asm volatile(
+		"str%?h	%1, [%2]	@ NET_RAP\n\t"
+ "ldr%?h %0, [%2, #8] @ NET_IDP\n\t"
+ : "=r" (v)
+ : "r" (reg), "r" (ISAIO_BASE + 0x0464));
+ return v;
+}
+
+#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1))
+#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1))
+
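+/*
+ * The board RAM is 16 bits wide and mapped sparsely into ISA memory
+ * space: each 16-bit word occupies a 32-bit slot, so offsets are doubled
+ * and the copy loops below step the bus address by 4 for every two bytes
+ * of packet data.
+ */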
+static void
+am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
+{
+ offset = ISAMEM_BASE + (offset << 1);
+ length = (length + 1) & ~1;
+ if ((int)buf & 2) {
+ asm volatile("str%?h %2, [%0], #4"
+ : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
+ buf += 2;
+ length -= 2;
+ }
+ while (length > 8) {
+ register unsigned int tmp asm("r2"), tmp2 asm("r3");
+ asm volatile(
+ "ldm%?ia %0!, {%1, %2}"
+ : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
+ length -= 8;
+ asm volatile(
+ "str%?h %1, [%0], #4\n\t"
+ "mov%? %1, %1, lsr #16\n\t"
+ "str%?h %1, [%0], #4\n\t"
+ "str%?h %2, [%0], #4\n\t"
+ "mov%? %2, %2, lsr #16\n\t"
+ "str%?h %2, [%0], #4"
+ : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
+ }
+ while (length > 0) {
+ asm volatile("str%?h %2, [%0], #4"
+ : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
+ buf += 2;
+ length -= 2;
+ }
+}
+
+static void
+am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
+{
+ offset = ISAMEM_BASE + (offset << 1);
+ length = (length + 1) & ~1;
+ if ((int)buf & 2) {
+ unsigned int tmp;
+ asm volatile(
+ "ldr%?h %2, [%0], #4\n\t"
+ "str%?b %2, [%1], #1\n\t"
+ "mov%? %2, %2, lsr #8\n\t"
+ "str%?b %2, [%1], #1"
+ : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf));
+ length -= 2;
+ }
+ while (length > 8) {
+ register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
+ asm volatile(
+ "ldr%?h %2, [%0], #4\n\t"
+ "ldr%?h %4, [%0], #4\n\t"
+ "ldr%?h %3, [%0], #4\n\t"
+ "orr%? %2, %2, %4, lsl #16\n\t"
+ "ldr%?h %4, [%0], #4\n\t"
+ "orr%? %3, %3, %4, lsl #16\n\t"
+ "stm%?ia %1!, {%2, %3}"
+ : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3)
+ : "0" (offset), "1" (buf));
+ length -= 8;
+ }
+ while (length > 0) {
+ unsigned int tmp;
+ asm volatile(
+ "ldr%?h %2, [%0], #4\n\t"
+ "str%?b %2, [%1], #1\n\t"
+ "mov%? %2, %2, lsr #8\n\t"
+ "str%?b %2, [%1], #1"
+ : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf));
+ length -= 2;
+ }
+}
+#else
+#error Not compatible
+#endif
+
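+/*
+ * Exercise the on-board RAM: fill it with a test pattern, pre-load the
+ * scratch buffer with the complement so a dead read-back is caught, then
+ * read the RAM back and report any mismatching ranges.
+ */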
+static int
+am79c961_ramtest(struct net_device *dev, unsigned int val)
+{
+ unsigned char *buffer = kmalloc (65536, GFP_KERNEL);
+ int i, error = 0, errorcount = 0;
+
+ if (!buffer)
+ return 0;
+ memset (buffer, val, 65536);
+ am_writebuffer(dev, 0, buffer, 65536);
+ memset (buffer, val ^ 255, 65536);
+ am_readbuffer(dev, 0, buffer, 65536);
+ for (i = 0; i < 65536; i++) {
+ if (buffer[i] != val && !error) {
+ printk ("%s: buffer error (%02X %02X) %05X - ", dev->name, val, buffer[i], i);
+ error = 1;
+ errorcount ++;
+ } else if (error && buffer[i] == val) {
+ printk ("%05X\n", i);
+ error = 0;
+ }
+ }
+ if (error)
+ printk ("10000\n");
+ kfree (buffer);
+ return errorcount;
+}
+
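+/*
+ * The LANCE logical-address filter hashes each multicast address with the
+ * little-endian Ethernet CRC-32: the top two bits of the CRC select one of
+ * the four 16-bit LADR registers and the next four bits pick a bit within
+ * it, so the top six CRC bits index a 64-bit hash table.
+ */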
+static void am79c961_mc_hash(char *addr, u16 *hash)
+{
+ int idx, bit;
+ u32 crc;
+
+ crc = ether_crc_le(ETH_ALEN, addr);
+
+ idx = crc >> 30;
+ bit = (crc >> 26) & 15;
+
+ hash[idx] |= 1 << bit;
+}
+
+static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash)
+{
+ unsigned int mode = MODE_PORT_10BT;
+
+ if (dev->flags & IFF_PROMISC) {
+ mode |= MODE_PROMISC;
+ memset(hash, 0xff, 4 * sizeof(*hash));
+ } else if (dev->flags & IFF_ALLMULTI) {
+ memset(hash, 0xff, 4 * sizeof(*hash));
+ } else {
+ struct netdev_hw_addr *ha;
+
+ memset(hash, 0, 4 * sizeof(*hash));
+
+ netdev_for_each_mc_addr(ha, dev)
+ am79c961_mc_hash(ha->addr, hash);
+ }
+
+ return mode;
+}
+
+static void
+am79c961_init_for_open(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ unsigned char *p;
+ u_int hdr_addr, first_free_addr;
+ u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
+ int i;
+
+ /*
+ * Stop the chip.
+ */
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+
+ write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */
+ write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */
+ write_ireg (dev->base_addr, 7, 0x0090); /* XMIT LED */
+ write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */
+
+ for (i = LADRL; i <= LADRH; i++)
+ write_rreg (dev->base_addr, i, multi_hash[i - LADRL]);
+
+ for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
+ write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
+
+ write_rreg (dev->base_addr, MODE, mode);
+ write_rreg (dev->base_addr, POLLINT, 0);
+ write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS);
+ write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS);
+
+ first_free_addr = RX_BUFFERS * 8 + TX_BUFFERS * 8 + 16;
+ hdr_addr = 0;
+
+ priv->rxhead = 0;
+ priv->rxtail = 0;
+ priv->rxhdr = hdr_addr;
+
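+	/*
+	 * Ring descriptors occupy 8 bytes apiece in board RAM (buffer
+	 * address, status flags, 2's-complement buffer length, message
+	 * length); the 1600-byte packet buffers are laid out after the
+	 * descriptor rings.
+	 */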
+ for (i = 0; i < RX_BUFFERS; i++) {
+ priv->rxbuffer[i] = first_free_addr;
+ am_writeword (dev, hdr_addr, first_free_addr);
+ am_writeword (dev, hdr_addr + 2, RMD_OWN);
+ am_writeword (dev, hdr_addr + 4, (-1600));
+ am_writeword (dev, hdr_addr + 6, 0);
+ first_free_addr += 1600;
+ hdr_addr += 8;
+ }
+ priv->txhead = 0;
+ priv->txtail = 0;
+ priv->txhdr = hdr_addr;
+ for (i = 0; i < TX_BUFFERS; i++) {
+ priv->txbuffer[i] = first_free_addr;
+ am_writeword (dev, hdr_addr, first_free_addr);
+ am_writeword (dev, hdr_addr + 2, TMD_STP|TMD_ENP);
+ am_writeword (dev, hdr_addr + 4, 0xf000);
+ am_writeword (dev, hdr_addr + 6, 0);
+ first_free_addr += 1600;
+ hdr_addr += 8;
+ }
+
+ write_rreg (dev->base_addr, BASERXL, priv->rxhdr);
+ write_rreg (dev->base_addr, BASERXH, 0);
+ write_rreg (dev->base_addr, BASETXL, priv->txhdr);
+	write_rreg (dev->base_addr, BASETXH, 0);
+ write_rreg (dev->base_addr, CSR0, CSR0_STOP);
+ write_rreg (dev->base_addr, CSR3, CSR3_IDONM|CSR3_BABLM|CSR3_DXSUFLO);
+ write_rreg (dev->base_addr, CSR4, CSR4_APAD_XMIT|CSR4_MFCOM|CSR4_RCVCCOM|CSR4_TXSTRTM|CSR4_JABM);
+ write_rreg (dev->base_addr, CSR0, CSR0_IENA|CSR0_STRT);
+}
+
+static void am79c961_timer(unsigned long data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned int lnkstat, carrier;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+ carrier = netif_carrier_ok(dev);
+
+ if (lnkstat && !carrier) {
+ netif_carrier_on(dev);
+ printk("%s: link up\n", dev->name);
+ } else if (!lnkstat && carrier) {
+ netif_carrier_off(dev);
+ printk("%s: link down\n", dev->name);
+ }
+
+ mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500));
+}
+
+/*
+ * Open/initialize the board.
+ */
+static int
+am79c961_open(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev);
+ if (ret)
+ return ret;
+
+ am79c961_init_for_open(dev);
+
+ netif_carrier_off(dev);
+
+ priv->timer.expires = jiffies;
+ add_timer(&priv->timer);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/*
+ * The inverse routine to am79c961_open().
+ */
+static int
+am79c961_close(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ del_timer_sync(&priv->timer);
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ write_rreg (dev->base_addr, CSR0, CSR0_STOP);
+ write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+
+ free_irq (dev->irq, dev);
+
+ return 0;
+}
+
+/*
+ * Set or clear promiscuous/multicast mode filter for this adapter.
+ */
+static void am79c961_setmulticastlist (struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+ u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
+ int i, stopped;
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+
+ stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
+
+ if (!stopped) {
+ /*
+ * Put the chip into suspend mode
+ */
+ write_rreg(dev->base_addr, CTRL1, CTRL1_SPND);
+
+ /*
+ * Spin waiting for chip to report suspend mode
+ */
+ while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) {
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+ nop();
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ }
+ }
+
+ /*
+ * Update the multicast hash table
+ */
+ for (i = 0; i < ARRAY_SIZE(multi_hash); i++)
+ write_rreg(dev->base_addr, i + LADRL, multi_hash[i]);
+
+ /*
+ * Write the mode register
+ */
+ write_rreg(dev->base_addr, MODE, mode);
+
+ if (!stopped) {
+ /*
+ * Put the chip back into running mode
+ */
+ write_rreg(dev->base_addr, CTRL1, 0);
+ }
+
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+}
+
+static void am79c961_timeout(struct net_device *dev)
+{
+ printk(KERN_WARNING "%s: transmit timed out, network cable problem?\n",
+ dev->name);
+
+ /*
+ * ought to do some setup of the tx side here
+ */
+
+ netif_wake_queue(dev);
+}
+
+/*
+ * Transmit a packet
+ */
+static int
+am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ unsigned int hdraddr, bufaddr;
+ unsigned int head;
+ unsigned long flags;
+
+ head = priv->txhead;
+ hdraddr = priv->txhdr + (head << 3);
+ bufaddr = priv->txbuffer[head];
+ head += 1;
+ if (head >= TX_BUFFERS)
+ head = 0;
+
+ am_writebuffer (dev, bufaddr, skb->data, skb->len);
+ am_writeword (dev, hdraddr + 4, -skb->len);
+ am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP);
+ priv->txhead = head;
+
+ spin_lock_irqsave(&priv->chip_lock, flags);
+ write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
+ spin_unlock_irqrestore(&priv->chip_lock, flags);
+
+ /*
+ * If the next packet is owned by the ethernet device,
+ * then the tx ring is full and we can't add another
+ * packet.
+ */
+ if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN)
+ netif_stop_queue(dev);
+
+ dev_consume_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/*
+ * If we have a good packet(s), get it/them out of the buffers.
+ */
+static void
+am79c961_rx(struct net_device *dev, struct dev_priv *priv)
+{
+ do {
+ struct sk_buff *skb;
+ u_int hdraddr;
+ u_int pktaddr;
+ u_int status;
+ int len;
+
+ hdraddr = priv->rxhdr + (priv->rxtail << 3);
+ pktaddr = priv->rxbuffer[priv->rxtail];
+
+ status = am_readword (dev, hdraddr + 2);
+ if (status & RMD_OWN) /* do we own it? */
+ break;
+
+ priv->rxtail ++;
+ if (priv->rxtail >= RX_BUFFERS)
+ priv->rxtail = 0;
+
+ if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) {
+ am_writeword (dev, hdraddr + 2, RMD_OWN);
+ dev->stats.rx_errors++;
+ if (status & RMD_ERR) {
+ if (status & RMD_FRAM)
+ dev->stats.rx_frame_errors++;
+ if (status & RMD_CRC)
+ dev->stats.rx_crc_errors++;
+ } else if (status & RMD_STP)
+ dev->stats.rx_length_errors++;
+ continue;
+ }
+
+ len = am_readword(dev, hdraddr + 6);
+ skb = netdev_alloc_skb(dev, len + 2);
+
+ if (skb) {
+ skb_reserve(skb, 2);
+
+ am_readbuffer(dev, pktaddr, skb_put(skb, len), len);
+ am_writeword(dev, hdraddr + 2, RMD_OWN);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_bytes += len;
+ dev->stats.rx_packets++;
+ } else {
+ am_writeword (dev, hdraddr + 2, RMD_OWN);
+ dev->stats.rx_dropped++;
+ break;
+ }
+ } while (1);
+}
+
+/*
+ * Update stats for the transmitted packet
+ */
+static void
+am79c961_tx(struct net_device *dev, struct dev_priv *priv)
+{
+ do {
+ short len;
+ u_int hdraddr;
+ u_int status;
+
+ hdraddr = priv->txhdr + (priv->txtail << 3);
+ status = am_readword (dev, hdraddr + 2);
+ if (status & TMD_OWN)
+ break;
+
+ priv->txtail ++;
+ if (priv->txtail >= TX_BUFFERS)
+ priv->txtail = 0;
+
+ if (status & TMD_ERR) {
+ u_int status2;
+
+ dev->stats.tx_errors++;
+
+ status2 = am_readword (dev, hdraddr + 6);
+
+ /*
+ * Clear the error byte
+ */
+ am_writeword (dev, hdraddr + 6, 0);
+
+ if (status2 & TST_RTRY)
+ dev->stats.collisions += 16;
+ if (status2 & TST_LCOL)
+ dev->stats.tx_window_errors++;
+ if (status2 & TST_LCAR)
+ dev->stats.tx_carrier_errors++;
+ if (status2 & TST_UFLO)
+ dev->stats.tx_fifo_errors++;
+ continue;
+ }
+ dev->stats.tx_packets++;
+ len = am_readword (dev, hdraddr + 4);
+ dev->stats.tx_bytes += -len;
+ } while (priv->txtail != priv->txhead);
+
+ netif_wake_queue(dev);
+}
+
+static irqreturn_t
+am79c961_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct dev_priv *priv = netdev_priv(dev);
+ u_int status, n = 100;
+ int handled = 0;
+
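+	/* Bound the service loop (n = 100) so a wedged chip cannot
+	 * livelock the CPU in interrupt context.
+	 */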
+ do {
+ status = read_rreg(dev->base_addr, CSR0);
+ write_rreg(dev->base_addr, CSR0, status &
+ (CSR0_IENA|CSR0_TINT|CSR0_RINT|
+ CSR0_MERR|CSR0_MISS|CSR0_CERR|CSR0_BABL));
+
+ if (status & CSR0_RINT) {
+ handled = 1;
+ am79c961_rx(dev, priv);
+ }
+ if (status & CSR0_TINT) {
+ handled = 1;
+ am79c961_tx(dev, priv);
+ }
+ if (status & CSR0_MISS) {
+ handled = 1;
+ dev->stats.rx_dropped++;
+ }
+ if (status & CSR0_CERR) {
+ handled = 1;
+ mod_timer(&priv->timer, jiffies);
+ }
+ } while (--n && status & (CSR0_RINT | CSR0_TINT));
+
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void am79c961_poll_controller(struct net_device *dev)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ am79c961_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+}
+#endif
+
+/*
+ * Initialise the chip. Note that we always expect
+ * to be entered with interrupts enabled.
+ */
+static int
+am79c961_hw_init(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ spin_lock_irq(&priv->chip_lock);
+ write_rreg (dev->base_addr, CSR0, CSR0_STOP);
+ write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
+ spin_unlock_irq(&priv->chip_lock);
+
+ am79c961_ramtest(dev, 0x66);
+ am79c961_ramtest(dev, 0x99);
+
+ return 0;
+}
+
+static void __init am79c961_banner(void)
+{
+ static unsigned version_printed;
+
+ if (net_debug && version_printed++ == 0)
+ printk(KERN_INFO "%s", version);
+}
+static const struct net_device_ops am79c961_netdev_ops = {
+ .ndo_open = am79c961_open,
+ .ndo_stop = am79c961_close,
+ .ndo_start_xmit = am79c961_sendpacket,
+ .ndo_set_rx_mode = am79c961_setmulticastlist,
+ .ndo_tx_timeout = am79c961_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = am79c961_poll_controller,
+#endif
+};
+
+static int am79c961_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct net_device *dev;
+ struct dev_priv *priv;
+ int i, ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res)
+ return -ENODEV;
+
+ dev = alloc_etherdev(sizeof(struct dev_priv));
+ ret = -ENOMEM;
+ if (!dev)
+ goto out;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ priv = netdev_priv(dev);
+
+ /*
+ * Fixed address and IRQ lines here.
+ * The PNP initialisation should have been
+ * done by the ether bootp loader.
+ */
+ dev->base_addr = res->start;
+ ret = platform_get_irq(pdev, 0);
+
+ if (ret < 0) {
+ ret = -ENODEV;
+ goto nodev;
+ }
+ dev->irq = ret;
+
+ ret = -ENODEV;
+ if (!request_region(dev->base_addr, 0x18, dev->name))
+ goto nodev;
+
+ /*
+ * Reset the device.
+ */
+ inb(dev->base_addr + NET_RESET);
+ udelay(5);
+
+ /*
+ * Check the manufacturer part of the
+ * ether address.
+ */
+ if (inb(dev->base_addr) != 0x08 ||
+ inb(dev->base_addr + 2) != 0x00 ||
+ inb(dev->base_addr + 4) != 0x2b)
+ goto release;
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff;
+
+ am79c961_banner();
+
+ spin_lock_init(&priv->chip_lock);
+ init_timer(&priv->timer);
+ priv->timer.data = (unsigned long)dev;
+ priv->timer.function = am79c961_timer;
+
+ if (am79c961_hw_init(dev))
+ goto release;
+
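+	/* The MAC address is synthesised from the manufacturer's OUI
+	 * (Commodore 00:80:10, Ameristar 00:00:9f) plus the low 24 bits
+	 * of the board's Zorro serial number.
+	 */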
+ dev->netdev_ops = &am79c961_netdev_ops;
+
+ ret = register_netdev(dev);
+ if (ret == 0) {
+ printk(KERN_INFO "%s: ether address %pM\n",
+ dev->name, dev->dev_addr);
+ return 0;
+ }
+
+release:
+ release_region(dev->base_addr, 0x18);
+nodev:
+ free_netdev(dev);
+out:
+ return ret;
+}
+
+static struct platform_driver am79c961_driver = {
+ .probe = am79c961_probe,
+ .driver = {
+ .name = "am79c961",
+ },
+};
+
+static int __init am79c961_init(void)
+{
+ return platform_driver_register(&am79c961_driver);
+}
+
+__initcall(am79c961_init);
diff --git a/drivers/net/ethernet/amd/am79c961a.h b/drivers/net/ethernet/amd/am79c961a.h
new file mode 100644
index 000000000..9f384b795
--- /dev/null
+++ b/drivers/net/ethernet/amd/am79c961a.h
@@ -0,0 +1,145 @@
+/*
+ * linux/drivers/net/ethernet/amd/am79c961a.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_am79c961a_H
+#define _LINUX_am79c961a_H
+
+/* use 0 for production, 1 for verification, >2 for debug. debug flags: */
+#define DEBUG_TX 2
+#define DEBUG_RX 4
+#define DEBUG_INT 8
+#define DEBUG_IC 16
+#ifndef NET_DEBUG
+#define NET_DEBUG 0
+#endif
+
+#define NET_UID 0
+#define NET_RDP 0x10
+#define NET_RAP 0x12
+#define NET_RESET 0x14
+#define NET_IDP 0x16
+
+/*
+ * RAP registers
+ */
+#define CSR0 0
+#define CSR0_INIT 0x0001
+#define CSR0_STRT 0x0002
+#define CSR0_STOP 0x0004
+#define CSR0_TDMD 0x0008
+#define CSR0_TXON 0x0010
+#define CSR0_RXON 0x0020
+#define CSR0_IENA 0x0040
+#define CSR0_INTR 0x0080
+#define CSR0_IDON 0x0100
+#define CSR0_TINT 0x0200
+#define CSR0_RINT 0x0400
+#define CSR0_MERR 0x0800
+#define CSR0_MISS 0x1000
+#define CSR0_CERR 0x2000
+#define CSR0_BABL 0x4000
+#define CSR0_ERR 0x8000
+
+#define CSR3 3
+#define CSR3_EMBA 0x0008
+#define CSR3_DXMT2PD 0x0010
+#define CSR3_LAPPEN 0x0020
+#define CSR3_DXSUFLO 0x0040
+#define CSR3_IDONM 0x0100
+#define CSR3_TINTM 0x0200
+#define CSR3_RINTM 0x0400
+#define CSR3_MERRM 0x0800
+#define CSR3_MISSM 0x1000
+#define CSR3_BABLM 0x4000
+#define CSR3_MASKALL 0x5F00
+
+#define CSR4 4
+#define CSR4_JABM 0x0001
+#define CSR4_JAB 0x0002
+#define CSR4_TXSTRTM 0x0004
+#define CSR4_TXSTRT 0x0008
+#define CSR4_RCVCCOM 0x0010
+#define CSR4_RCVCCO 0x0020
+#define CSR4_MFCOM 0x0100
+#define CSR4_MFCO 0x0200
+#define CSR4_ASTRP_RCV 0x0400
+#define CSR4_APAD_XMIT 0x0800
+
+#define CTRL1 5
+#define CTRL1_SPND 0x0001
+
+#define LADRL 8
+#define LADRM1 9
+#define LADRM2 10
+#define LADRH 11
+#define PADRL 12
+#define PADRM 13
+#define PADRH 14
+
+#define MODE 15
+#define MODE_DISRX 0x0001
+#define MODE_DISTX 0x0002
+#define MODE_LOOP 0x0004
+#define MODE_DTCRC 0x0008
+#define MODE_COLL 0x0010
+#define MODE_DRETRY 0x0020
+#define MODE_INTLOOP 0x0040
+#define MODE_PORT_AUI 0x0000
+#define MODE_PORT_10BT 0x0080
+#define MODE_DRXPA 0x2000
+#define MODE_DRXBA 0x4000
+#define MODE_PROMISC 0x8000
+
+#define BASERXL 24
+#define BASERXH 25
+#define BASETXL 30
+#define BASETXH 31
+
+#define POLLINT 47
+
+#define SIZERXR 76
+#define SIZETXR 78
+
+#define CSR_MFC 112
+
+#define RMD_ENP 0x0100
+#define RMD_STP 0x0200
+#define RMD_CRC 0x0800
+#define RMD_FRAM 0x2000
+#define RMD_ERR 0x4000
+#define RMD_OWN 0x8000
+
+#define TMD_ENP 0x0100
+#define TMD_STP 0x0200
+#define TMD_MORE 0x1000
+#define TMD_ERR 0x4000
+#define TMD_OWN 0x8000
+
+#define TST_RTRY 0x0400
+#define TST_LCAR 0x0800
+#define TST_LCOL 0x1000
+#define TST_UFLO 0x4000
+#define TST_BUFF 0x8000
+
+#define ISALED0 0x0004
+#define ISALED0_LNKST 0x8000
+
+struct dev_priv {
+ unsigned long rxbuffer[RX_BUFFERS];
+ unsigned long txbuffer[TX_BUFFERS];
+ unsigned char txhead;
+ unsigned char txtail;
+ unsigned char rxhead;
+ unsigned char rxtail;
+ unsigned long rxhdr;
+ unsigned long txhdr;
+ spinlock_t chip_lock;
+ struct timer_list timer;
+};
+
+#endif
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
new file mode 100644
index 000000000..94960055f
--- /dev/null
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -0,0 +1,1971 @@
+
+/* Advanced Micro Devices Inc. AMD8111E Linux Network Driver
+ * Copyright (C) 2004 Advanced Micro Devices
+ *
+ *
+ * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
+ * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
+ * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
+ * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.[ pcnet32.c ]
+ * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+Module Name:
+
+ amd8111e.c
+
+Abstract:
+
+ AMD8111 based 10/100 Ethernet Controller Driver.
+
+Environment:
+
+ Kernel Mode
+
+Revision History:
+ 3.0.0
+ Initial Revision.
+ 3.0.1
+ 1. Dynamic interrupt coalescing.
+ 2. Removed prev_stats.
+ 3. MII support.
+ 4. Dynamic IPG support
+ 3.0.2 05/29/2003
+ 1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
+ 2. Bug fix: Fixed VLAN support failure.
+ 3. Bug fix: Fixed receive interrupt coalescing bug.
+ 4. Dynamic IPG support is disabled by default.
+ 3.0.3 06/05/2003
+ 1. Bug fix: Fixed failure to close the interface if SMP is enabled.
+ 3.0.4 12/09/2003
+ 1. Added set_mac_address routine for bonding driver support.
+ 2. Tested the driver for bonding support
+	1. Bug fix: Fixed mismatch in actual receive buffer length and
+	   length indicated to the h/w.
+ 4. Modified amd8111e_rx() routine to receive all the received packets
+ in the first interrupt.
+ 5. Bug fix: Corrected rx_errors reported in get_stats() function.
+ 3.0.5 03/22/2004
+ 1. Added NAPI support
+
+*/
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/ctype.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define AMD8111E_VLAN_TAG_USED 1
+#else
+#define AMD8111E_VLAN_TAG_USED 0
+#endif
+
+#include "amd8111e.h"
+#define MODULE_NAME "amd8111e"
+#define MODULE_VERS "3.0.7"
+MODULE_AUTHOR("Advanced Micro Devices, Inc.");
+MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "MODULE_VERS);
+MODULE_LICENSE("GPL");
+module_param_array(speed_duplex, int, NULL, 0);
+MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
+module_param_array(coalesce, bool, NULL, 0);
+MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
+module_param_array(dynamic_ipg, bool, NULL, 0);
+MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
+
+/* This function will read the PHY registers. */
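+/* The PHY_ACCESS register encodes the PHY id in bits 25:21 and the
+ * register number in bits 20:16; completion is polled via PHY_CMD_ACTIVE
+ * and the result comes back in the low 16 bits.
+ */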
+static int amd8111e_read_phy(struct amd8111e_priv *lp,
+ int phy_id, int reg, u32 *val)
+{
+ void __iomem *mmio = lp->mmio;
+ unsigned int reg_val;
+ unsigned int repeat= REPEAT_CNT;
+
+ reg_val = readl(mmio + PHY_ACCESS);
+ while (reg_val & PHY_CMD_ACTIVE)
+ reg_val = readl( mmio + PHY_ACCESS );
+
+ writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
+ ((reg & 0x1f) << 16), mmio +PHY_ACCESS);
+ do{
+ reg_val = readl(mmio + PHY_ACCESS);
+ udelay(30); /* It takes 30 us to read/write data */
+ } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
+ if(reg_val & PHY_RD_ERR)
+ goto err_phy_read;
+
+ *val = reg_val & 0xffff;
+ return 0;
+err_phy_read:
+ *val = 0;
+ return -EINVAL;
+
+}
+
+/* This function will write into PHY registers. */
+static int amd8111e_write_phy(struct amd8111e_priv *lp,
+ int phy_id, int reg, u32 val)
+{
+ unsigned int repeat = REPEAT_CNT;
+ void __iomem *mmio = lp->mmio;
+ unsigned int reg_val;
+
+ reg_val = readl(mmio + PHY_ACCESS);
+ while (reg_val & PHY_CMD_ACTIVE)
+ reg_val = readl( mmio + PHY_ACCESS );
+
+ writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
+ ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
+
+ do{
+ reg_val = readl(mmio + PHY_ACCESS);
+ udelay(30); /* It takes 30 us to read/write the data */
+ } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
+
+ if(reg_val & PHY_RD_ERR)
+ goto err_phy_write;
+
+ return 0;
+
+err_phy_write:
+ return -EINVAL;
+
+}
+
+/* This is the mii register read function provided to the mii interface. */
+static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ unsigned int reg_val;
+
+ amd8111e_read_phy(lp,phy_id,reg_num,&reg_val);
+ return reg_val;
+
+}
+
+/* This is the mii register write function provided to the mii interface. */
+static void amd8111e_mdio_write(struct net_device *dev,
+ int phy_id, int reg_num, int val)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+
+ amd8111e_write_phy(lp, phy_id, reg_num, val);
+}
+
+/* This function will set the PHY speed. During initialization it sets
+ * the original speed to 100 full.
+ */
+static void amd8111e_set_ext_phy(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ u32 bmcr,advert,tmp;
+
+ /* Determine mii register values to set the speed */
+ advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
+ tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+ switch (lp->ext_phy_option){
+
+ default:
+ case SPEED_AUTONEG: /* advertise all values */
+ tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
+ ADVERTISE_100HALF|ADVERTISE_100FULL) ;
+ break;
+ case SPEED10_HALF:
+ tmp |= ADVERTISE_10HALF;
+ break;
+ case SPEED10_FULL:
+ tmp |= ADVERTISE_10FULL;
+ break;
+ case SPEED100_HALF:
+ tmp |= ADVERTISE_100HALF;
+ break;
+ case SPEED100_FULL:
+ tmp |= ADVERTISE_100FULL;
+ break;
+ }
+
+ if(advert != tmp)
+ amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
+ /* Restart auto negotiation */
+ bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
+
+}
+
+/* This function will unmap skb->data space and will free
+ * all transmit and receive skbuffs.
+ */
+static int amd8111e_free_skbs(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ struct sk_buff *rx_skbuff;
+ int i;
+
+ /* Freeing transmit skbs */
+ for(i = 0; i < NUM_TX_BUFFERS; i++){
+ if(lp->tx_skbuff[i]){
+ pci_unmap_single(lp->pci_dev,lp->tx_dma_addr[i], lp->tx_skbuff[i]->len,PCI_DMA_TODEVICE);
+ dev_kfree_skb (lp->tx_skbuff[i]);
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_dma_addr[i] = 0;
+ }
+ }
+ /* Freeing previously allocated receive buffers */
+ for (i = 0; i < NUM_RX_BUFFERS; i++){
+ rx_skbuff = lp->rx_skbuff[i];
+ if(rx_skbuff != NULL){
+ pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
+ lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(lp->rx_skbuff[i]);
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_dma_addr[i] = 0;
+ }
+ }
+
+ return 0;
+}
+
+/* This will set the receive buffer length corresponding
+ * to the MTU size of the network interface.
+ */
+static inline void amd8111e_set_rx_buff_len(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ unsigned int mtu = dev->mtu;
+
+ if (mtu > ETH_DATA_LEN){
+ /* MTU + ethernet header + FCS
+ * + optional VLAN tag + skb reserve space 2
+ */
+ lp->rx_buff_len = mtu + ETH_HLEN + 10;
+ lp->options |= OPTION_JUMBO_ENABLE;
+ } else{
+ lp->rx_buff_len = PKT_BUFF_SZ;
+ lp->options &= ~OPTION_JUMBO_ENABLE;
+ }
+}
+
+/* This function will free all the previously allocated buffers,
+ * determine new receive buffer length and will allocate new receive buffers.
+ * This function also allocates and initializes both the transmitter
+ * and receive hardware descriptors.
+ */
+static int amd8111e_init_ring(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int i;
+
+ lp->rx_idx = lp->tx_idx = 0;
+ lp->tx_complete_idx = 0;
+ lp->tx_ring_idx = 0;
+
+
+ if(lp->opened)
+ /* Free previously allocated transmit and receive skbs */
+ amd8111e_free_skbs(dev);
+
+ else{
+ /* allocate the tx and rx descriptors */
+ if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
+ &lp->tx_ring_dma_addr)) == NULL)
+
+ goto err_no_mem;
+
+ if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
+ &lp->rx_ring_dma_addr)) == NULL)
+
+ goto err_free_tx_ring;
+
+ }
+ /* Set new receive buff size */
+ amd8111e_set_rx_buff_len(dev);
+
+ /* Allocating receive skbs */
+ for (i = 0; i < NUM_RX_BUFFERS; i++) {
+
+ lp->rx_skbuff[i] = netdev_alloc_skb(dev, lp->rx_buff_len);
+ if (!lp->rx_skbuff[i]) {
+			/* Release previously allocated skbs */
+ for(--i; i >= 0 ;i--)
+ dev_kfree_skb(lp->rx_skbuff[i]);
+ goto err_free_rx_ring;
+ }
+ skb_reserve(lp->rx_skbuff[i],2);
+ }
+	/* Initializing receive descriptors */
+ for (i = 0; i < NUM_RX_BUFFERS; i++) {
+ lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
+ lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
+
+ lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
+ lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
+ wmb();
+ lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
+ }
+
+ /* Initializing transmit descriptors */
+ for (i = 0; i < NUM_TX_RING_DR; i++) {
+ lp->tx_ring[i].buff_phy_addr = 0;
+ lp->tx_ring[i].tx_flags = 0;
+ lp->tx_ring[i].buff_count = 0;
+ }
+
+ return 0;
+
+err_free_rx_ring:
+
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
+ lp->rx_ring_dma_addr);
+
+err_free_tx_ring:
+
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
+ lp->tx_ring_dma_addr);
+
+err_no_mem:
+ return -ENOMEM;
+}
+
+/* This function will set the interrupt coalescing according
+ * to the input arguments
+ */
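+/* The delay registers pack the event count into the upper half and the
+ * timeout (scaled by DELAY_TIMER_CONV) into the low bits of DLY_INT_A/B.
+ */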
+static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod)
+{
+ unsigned int timeout;
+ unsigned int event_count;
+
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ void __iomem *mmio = lp->mmio;
+ struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
+
+
+ switch(cmod)
+ {
+ case RX_INTR_COAL :
+ timeout = coal_conf->rx_timeout;
+ event_count = coal_conf->rx_event_count;
+ if( timeout > MAX_TIMEOUT ||
+ event_count > MAX_EVENT_COUNT )
+ return -EINVAL;
+
+ timeout = timeout * DELAY_TIMER_CONV;
+ writel(VAL0|STINTEN, mmio+INTEN0);
+ writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
+ mmio+DLY_INT_A);
+ break;
+
+ case TX_INTR_COAL :
+ timeout = coal_conf->tx_timeout;
+ event_count = coal_conf->tx_event_count;
+ if( timeout > MAX_TIMEOUT ||
+ event_count > MAX_EVENT_COUNT )
+ return -EINVAL;
+
+
+ timeout = timeout * DELAY_TIMER_CONV;
+ writel(VAL0|STINTEN,mmio+INTEN0);
+ writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
+ mmio+DLY_INT_B);
+ break;
+
+ case DISABLE_COAL:
+ writel(0,mmio+STVAL);
+ writel(STINTEN, mmio+INTEN0);
+ writel(0, mmio +DLY_INT_B);
+ writel(0, mmio+DLY_INT_A);
+ break;
+ case ENABLE_COAL:
+ /* Start the timer */
+ writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */
+ writel(VAL0|STINTEN, mmio+INTEN0);
+ break;
+ default:
+ break;
+
+ }
+ return 0;
+
+}
+
+/* This function initializes the device registers and starts the device. */
+static int amd8111e_restart(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ void __iomem *mmio = lp->mmio;
+ int i,reg_val;
+
+ /* stop the chip */
+ writel(RUN, mmio + CMD0);
+
+ if(amd8111e_init_ring(dev))
+ return -ENOMEM;
+
+ /* enable the port manager and set auto negotiation always */
+ writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
+ writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
+
+ amd8111e_set_ext_phy(dev);
+
+ /* set control registers */
+ reg_val = readl(mmio + CTRL1);
+ reg_val &= ~XMTSP_MASK;
+ writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
+
+ /* enable interrupt */
+ writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
+ APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
+ SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
+
+ writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
+
+ /* initialize tx and rx ring base addresses */
+ writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
+ writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
+
+ writew((u32)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
+ writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
+
+ /* set default IPG to 96 */
+ writew((u32)DEFAULT_IPG,mmio+IPG);
+ writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
+
+ if(lp->options & OPTION_JUMBO_ENABLE){
+ writel((u32)VAL2|JUMBO, mmio + CMD3);
+ /* Reset REX_UFLO */
+ writel( REX_UFLO, mmio + CMD2);
+ /* Should not set REX_UFLO for jumbo frames */
+ writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
+ }else{
+ writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
+ writel((u32)JUMBO, mmio + CMD3);
+ }
+
+#if AMD8111E_VLAN_TAG_USED
+ writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
+#endif
+ writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
+
+ /* Setting the MAC address to the device */
+ for (i = 0; i < ETH_ALEN; i++)
+ writeb( dev->dev_addr[i], mmio + PADR + i );
+
+ /* Enable interrupt coalesce */
+ if(lp->options & OPTION_INTR_COAL_ENABLE){
+ netdev_info(dev, "Interrupt Coalescing Enabled.\n");
+ amd8111e_set_coalesce(dev,ENABLE_COAL);
+ }
+
+ /* set RUN bit to start the chip */
+ writel(VAL2 | RDMD0, mmio + CMD0);
+ writel(VAL0 | INTREN | RUN, mmio + CMD0);
+
+ /* To avoid PCI posting bug */
+ readl(mmio+CMD0);
+ return 0;
+}
+
+/* This function clears the necessary device registers. */
+static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
+{
+ unsigned int reg_val;
+ unsigned int logic_filter[2] ={0,};
+ void __iomem *mmio = lp->mmio;
+
+
+ /* stop the chip */
+ writel(RUN, mmio + CMD0);
+
+	/* AUTOPOLL0 Register - TBD: default value is 8100 in FPS */
+ writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);
+
+ /* Clear RCV_RING_BASE_ADDR */
+ writel(0, mmio + RCV_RING_BASE_ADDR0);
+
+ /* Clear XMT_RING_BASE_ADDR */
+ writel(0, mmio + XMT_RING_BASE_ADDR0);
+ writel(0, mmio + XMT_RING_BASE_ADDR1);
+ writel(0, mmio + XMT_RING_BASE_ADDR2);
+ writel(0, mmio + XMT_RING_BASE_ADDR3);
+
+ /* Clear CMD0 */
+ writel(CMD0_CLEAR,mmio + CMD0);
+
+ /* Clear CMD2 */
+ writel(CMD2_CLEAR, mmio +CMD2);
+
+ /* Clear CMD7 */
+ writel(CMD7_CLEAR , mmio + CMD7);
+
+ /* Clear DLY_INT_A and DLY_INT_B */
+ writel(0x0, mmio + DLY_INT_A);
+ writel(0x0, mmio + DLY_INT_B);
+
+ /* Clear FLOW_CONTROL */
+ writel(0x0, mmio + FLOW_CONTROL);
+
+	/* Clear INT0 (write 1 to clear the register) */
+ reg_val = readl(mmio + INT0);
+ writel(reg_val, mmio + INT0);
+
+ /* Clear STVAL */
+ writel(0x0, mmio + STVAL);
+
+ /* Clear INTEN0 */
+ writel( INTEN0_CLEAR, mmio + INTEN0);
+
+ /* Clear LADRF */
+ writel(0x0 , mmio + LADRF);
+
+ /* Set SRAM_SIZE & SRAM_BOUNDARY registers */
+ writel( 0x80010,mmio + SRAM_SIZE);
+
+ /* Clear RCV_RING0_LEN */
+ writel(0x0, mmio + RCV_RING_LEN0);
+
+ /* Clear XMT_RING0/1/2/3_LEN */
+ writel(0x0, mmio + XMT_RING_LEN0);
+ writel(0x0, mmio + XMT_RING_LEN1);
+ writel(0x0, mmio + XMT_RING_LEN2);
+ writel(0x0, mmio + XMT_RING_LEN3);
+
+ /* Clear XMT_RING_LIMIT */
+ writel(0x0, mmio + XMT_RING_LIMIT);
+
+ /* Clear MIB */
+ writew(MIB_CLEAR, mmio + MIB_ADDR);
+
+ /* Clear LARF */
+ amd8111e_writeq(*(u64 *)logic_filter, mmio + LADRF);
+
+ /* SRAM_SIZE register */
+ reg_val = readl(mmio + SRAM_SIZE);
+
+ if(lp->options & OPTION_JUMBO_ENABLE)
+ writel( VAL2|JUMBO, mmio + CMD3);
+#if AMD8111E_VLAN_TAG_USED
+ writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
+#endif
+ /* Set default value to CTRL1 Register */
+ writel(CTRL1_DEFAULT, mmio + CTRL1);
+
+ /* To avoid PCI posting bug */
+ readl(mmio + CMD2);
+
+}
+
+/* This function disables the interrupt and clears all the pending
+ * interrupts in INT0
+ */
+static void amd8111e_disable_interrupt(struct amd8111e_priv *lp)
+{
+ u32 intr0;
+
+ /* Disable interrupt */
+ writel(INTREN, lp->mmio + CMD0);
+
+ /* Clear INT0 */
+ intr0 = readl(lp->mmio + INT0);
+ writel(intr0, lp->mmio + INT0);
+
+ /* To avoid PCI posting bug */
+ readl(lp->mmio + INT0);
+
+}
+
+/* This function stops the chip. */
+static void amd8111e_stop_chip(struct amd8111e_priv *lp)
+{
+ writel(RUN, lp->mmio + CMD0);
+
+ /* To avoid PCI posting bug */
+ readl(lp->mmio + CMD0);
+}
+
+/* This function frees the transmitter and receiver descriptor rings. */
+static void amd8111e_free_ring(struct amd8111e_priv *lp)
+{
+ /* Free transmit and receive descriptor rings */
+ if(lp->rx_ring){
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
+ lp->rx_ring, lp->rx_ring_dma_addr);
+ lp->rx_ring = NULL;
+ }
+
+ if(lp->tx_ring){
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
+ lp->tx_ring, lp->tx_ring_dma_addr);
+
+ lp->tx_ring = NULL;
+ }
+
+}
+
+/* This function will free all the transmit skbs that are actually
+ * transmitted by the device. It will check the ownership of the
+ * skb before freeing the skb.
+ */
+static int amd8111e_tx(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
+ int status;
+ /* Complete all the transmit packet */
+ while (lp->tx_complete_idx != lp->tx_idx){
+ tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
+ status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
+
+ if(status & OWN_BIT)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[tx_index].buff_phy_addr = 0;
+
+ /* We must free the original skb */
+ if (lp->tx_skbuff[tx_index]) {
+ pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
+ lp->tx_skbuff[tx_index]->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
+ lp->tx_skbuff[tx_index] = NULL;
+ lp->tx_dma_addr[tx_index] = 0;
+ }
+ lp->tx_complete_idx++;
+ /*COAL update tx coalescing parameters */
+ lp->coal_conf.tx_packets++;
+ lp->coal_conf.tx_bytes +=
+ le16_to_cpu(lp->tx_ring[tx_index].buff_count);
+
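+		/* Restart the queue once at least two descriptors are free */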
+ if (netif_queue_stopped(dev) &&
+ lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
+ /* The ring is no longer full, clear tbusy. */
+ /* lp->tx_full = 0; */
+ netif_wake_queue (dev);
+ }
+ }
+ return 0;
+}
+
+/* This function handles the driver receive operation in polling mode */
+static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
+ struct net_device *dev = lp->amd8111e_net_dev;
+ int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
+ void __iomem *mmio = lp->mmio;
+ struct sk_buff *skb,*new_skb;
+ int min_pkt_len, status;
+ unsigned int intr0;
+ int num_rx_pkt = 0;
+ short pkt_len;
+#if AMD8111E_VLAN_TAG_USED
+ short vtag;
+#endif
+ int rx_pkt_limit = budget;
+ unsigned long flags;
+
+ if (rx_pkt_limit <= 0)
+ goto rx_not_empty;
+
+ do{
+ /* process receive packets until we use the quota.
+ * If we own the next entry, it's a new packet. Send it up.
+ */
+ while(1) {
+ status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
+ if (status & OWN_BIT)
+ break;
+
+ /* There is a tricky error noted by John Murphy,
+ * <murf@perftech.com> to Russ Nelson: Even with
+			 * full-sized buffers it's possible for a
+ * jabber packet to use two buffers, with only
+ * the last correctly noting the error.
+ */
+ if(status & ERR_BIT) {
+ /* resetting flags */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ goto err_next_pkt;
+ }
+ /* check for STP and ENP */
+ if(!((status & STP_BIT) && (status & ENP_BIT))){
+ /* resetting flags */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ goto err_next_pkt;
+ }
+ pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
+
+#if AMD8111E_VLAN_TAG_USED
+ vtag = status & TT_MASK;
+ /*MAC will strip vlan tag*/
+ if (vtag != 0)
+ min_pkt_len =MIN_PKT_LEN - 4;
+ else
+#endif
+ min_pkt_len =MIN_PKT_LEN;
+
+ if (pkt_len < min_pkt_len) {
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ lp->drv_rx_errors++;
+ goto err_next_pkt;
+ }
+ if(--rx_pkt_limit < 0)
+ goto rx_not_empty;
+ new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
+ if (!new_skb) {
+ /* if allocation fail,
+ * ignore that pkt and go to next one
+ */
+ lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+ lp->drv_rx_errors++;
+ goto err_next_pkt;
+ }
+
+ skb_reserve(new_skb, 2);
+ skb = lp->rx_skbuff[rx_index];
+ pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
+ lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
+ skb_put(skb, pkt_len);
+ lp->rx_skbuff[rx_index] = new_skb;
+ lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
+ new_skb->data,
+ lp->rx_buff_len-2,
+ PCI_DMA_FROMDEVICE);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+#if AMD8111E_VLAN_TAG_USED
+ if (vtag == TT_VLAN_TAGGED){
+ u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+ }
+#endif
+ netif_receive_skb(skb);
+ /*COAL update rx coalescing parameters*/
+ lp->coal_conf.rx_packets++;
+ lp->coal_conf.rx_bytes += pkt_len;
+ num_rx_pkt++;
+
+ err_next_pkt:
+ lp->rx_ring[rx_index].buff_phy_addr
+ = cpu_to_le32(lp->rx_dma_addr[rx_index]);
+ lp->rx_ring[rx_index].buff_count =
+ cpu_to_le16(lp->rx_buff_len-2);
+ wmb();
+ lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
+ rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
+ }
+ /* Check the interrupt status register for more packets in the
+ * mean time. Process them since we have not used up our quota.
+ */
+ intr0 = readl(mmio + INT0);
+ /*Ack receive packets */
+ writel(intr0 & RINT0,mmio + INT0);
+
+ } while(intr0 & RINT0);
+
+ if (rx_pkt_limit > 0) {
+ /* Receive descriptor is empty now */
+ spin_lock_irqsave(&lp->lock, flags);
+ __napi_complete(napi);
+ writel(VAL0|RINTEN0, mmio + INTEN0);
+ writel(VAL2 | RDMD0, mmio + CMD0);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+
+rx_not_empty:
+ return num_rx_pkt;
+}
+
+/* This function will indicate the link status to the kernel. */
+static int amd8111e_link_change(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int status0,speed;
+
+ /* read the link change */
+ status0 = readl(lp->mmio + STAT0);
+
+ if(status0 & LINK_STATS){
+ if(status0 & AUTONEG_COMPLETE)
+ lp->link_config.autoneg = AUTONEG_ENABLE;
+ else
+ lp->link_config.autoneg = AUTONEG_DISABLE;
+
+ if(status0 & FULL_DPLX)
+ lp->link_config.duplex = DUPLEX_FULL;
+ else
+ lp->link_config.duplex = DUPLEX_HALF;
+ speed = (status0 & SPEED_MASK) >> 7;
+ if(speed == PHY_SPEED_10)
+ lp->link_config.speed = SPEED_10;
+ else if(speed == PHY_SPEED_100)
+ lp->link_config.speed = SPEED_100;
+
+ netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n",
+ (lp->link_config.speed == SPEED_100) ?
+ "100" : "10",
+ (lp->link_config.duplex == DUPLEX_FULL) ?
+ "Full" : "Half");
+
+ netif_carrier_on(dev);
+ }
+ else{
+ lp->link_config.speed = SPEED_INVALID;
+ lp->link_config.duplex = DUPLEX_INVALID;
+ lp->link_config.autoneg = AUTONEG_INVALID;
+ netdev_info(dev, "Link is Down.\n");
+ netif_carrier_off(dev);
+ }
+
+ return 0;
+}
+
+/* This function reads the mib counters. */
+static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
+{
+ unsigned int status;
+ unsigned int data;
+ unsigned int repeat = REPEAT_CNT;
+
+ writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
+ do {
+ status = readw(mmio + MIB_ADDR);
+ udelay(2); /* controller takes MAX 2 us to get mib data */
+	} while (--repeat && (status & MIB_CMD_ACTIVE));
+
+ data = readl(mmio + MIB_DATA);
+ return data;
+}
+
+/* This function reads the mib registers and returns the hardware statistics.
+ * It updates previous internal driver statistics with new values.
+ */
+static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ void __iomem *mmio = lp->mmio;
+ unsigned long flags;
+ struct net_device_stats *new_stats = &dev->stats;
+
+ if (!lp->opened)
+ return new_stats;
+ spin_lock_irqsave (&lp->lock, flags);
+
+ /* stats.rx_packets */
+ new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
+ amd8111e_read_mib(mmio, rcv_multicast_pkts)+
+ amd8111e_read_mib(mmio, rcv_unicast_pkts);
+
+ /* stats.tx_packets */
+ new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
+
+ /*stats.rx_bytes */
+ new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
+
+ /* stats.tx_bytes */
+ new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
+
+ /* stats.rx_errors */
+ /* hw errors + errors driver reported */
+ new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
+ amd8111e_read_mib(mmio, rcv_fragments)+
+ amd8111e_read_mib(mmio, rcv_jabbers)+
+ amd8111e_read_mib(mmio, rcv_alignment_errors)+
+ amd8111e_read_mib(mmio, rcv_fcs_errors)+
+ amd8111e_read_mib(mmio, rcv_miss_pkts)+
+ lp->drv_rx_errors;
+
+ /* stats.tx_errors */
+ new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
+
+ /* stats.rx_dropped*/
+ new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
+
+ /* stats.tx_dropped*/
+ new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts);
+
+ /* stats.multicast*/
+ new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
+
+ /* stats.collisions*/
+ new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
+
+ /* stats.rx_length_errors*/
+ new_stats->rx_length_errors =
+ amd8111e_read_mib(mmio, rcv_undersize_pkts)+
+ amd8111e_read_mib(mmio, rcv_oversize_pkts);
+
+ /* stats.rx_over_errors*/
+ new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
+
+ /* stats.rx_crc_errors*/
+ new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
+
+ /* stats.rx_frame_errors*/
+ new_stats->rx_frame_errors =
+ amd8111e_read_mib(mmio, rcv_alignment_errors);
+
+ /* stats.rx_fifo_errors */
+ new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
+
+ /* stats.rx_missed_errors */
+ new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
+
+ /* stats.tx_aborted_errors*/
+ new_stats->tx_aborted_errors =
+ amd8111e_read_mib(mmio, xmt_excessive_collision);
+
+ /* stats.tx_carrier_errors*/
+ new_stats->tx_carrier_errors =
+ amd8111e_read_mib(mmio, xmt_loss_carrier);
+
+ /* stats.tx_fifo_errors*/
+ new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
+
+ /* stats.tx_window_errors*/
+ new_stats->tx_window_errors =
+ amd8111e_read_mib(mmio, xmt_late_collision);
+
+ /* Reset the mibs for collecting new statistics */
+ /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
+
+ spin_unlock_irqrestore (&lp->lock, flags);
+
+ return new_stats;
+}
+
+/* This function recalculates the interrupt coalescing mode on every
+ * interrupt, according to the data rate and the packet rate.
+ */
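+/* The average packet size in each direction since the previous call
+ * (bytes/packets) picks one of the NO/LOW/MEDIUM/HIGH coalescing levels,
+ * each mapping to a fixed timeout/event-count pair.
+ */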
+static int amd8111e_calc_coalesce(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
+ int tx_pkt_rate;
+ int rx_pkt_rate;
+ int tx_data_rate;
+ int rx_data_rate;
+ int rx_pkt_size;
+ int tx_pkt_size;
+
+ tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
+ coal_conf->tx_prev_packets = coal_conf->tx_packets;
+
+ tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
+ coal_conf->tx_prev_bytes = coal_conf->tx_bytes;
+
+ rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
+ coal_conf->rx_prev_packets = coal_conf->rx_packets;
+
+ rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
+ coal_conf->rx_prev_bytes = coal_conf->rx_bytes;
+
+ if(rx_pkt_rate < 800){
+ if(coal_conf->rx_coal_type != NO_COALESCE){
+
+ coal_conf->rx_timeout = 0x0;
+ coal_conf->rx_event_count = 0;
+ amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ coal_conf->rx_coal_type = NO_COALESCE;
+ }
+ }
+ else{
+
+ rx_pkt_size = rx_data_rate/rx_pkt_rate;
+ if (rx_pkt_size < 128){
+ if(coal_conf->rx_coal_type != NO_COALESCE){
+
+ coal_conf->rx_timeout = 0;
+ coal_conf->rx_event_count = 0;
+ amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ coal_conf->rx_coal_type = NO_COALESCE;
+ }
+
+ }
+ else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
+
+ if(coal_conf->rx_coal_type != LOW_COALESCE){
+ coal_conf->rx_timeout = 1;
+ coal_conf->rx_event_count = 4;
+ amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ coal_conf->rx_coal_type = LOW_COALESCE;
+ }
+ }
+ else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
+
+ if(coal_conf->rx_coal_type != MEDIUM_COALESCE){
+ coal_conf->rx_timeout = 1;
+ coal_conf->rx_event_count = 4;
+ amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ coal_conf->rx_coal_type = MEDIUM_COALESCE;
+ }
+
+ }
+ else if(rx_pkt_size >= 1024){
+ if(coal_conf->rx_coal_type != HIGH_COALESCE){
+ coal_conf->rx_timeout = 2;
+ coal_conf->rx_event_count = 3;
+ amd8111e_set_coalesce(dev,RX_INTR_COAL);
+ coal_conf->rx_coal_type = HIGH_COALESCE;
+ }
+ }
+ }
+ /* NOW FOR TX INTR COALESC */
+ if(tx_pkt_rate < 800){
+ if(coal_conf->tx_coal_type != NO_COALESCE){
+
+ coal_conf->tx_timeout = 0x0;
+ coal_conf->tx_event_count = 0;
+ amd8111e_set_coalesce(dev,TX_INTR_COAL);
+ coal_conf->tx_coal_type = NO_COALESCE;
+ }
+ }
+ else{
+
+ tx_pkt_size = tx_data_rate/tx_pkt_rate;
+ if (tx_pkt_size < 128){
+
+ if(coal_conf->tx_coal_type != NO_COALESCE){
+
+ coal_conf->tx_timeout = 0;
+ coal_conf->tx_event_count = 0;
+ amd8111e_set_coalesce(dev,TX_INTR_COAL);
+ coal_conf->tx_coal_type = NO_COALESCE;
+ }
+
+ }
+ else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
+
+ if(coal_conf->tx_coal_type != LOW_COALESCE){
+ coal_conf->tx_timeout = 1;
+ coal_conf->tx_event_count = 2;
+ amd8111e_set_coalesce(dev,TX_INTR_COAL);
+ coal_conf->tx_coal_type = LOW_COALESCE;
+
+ }
+ }
+ else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
+
+ if(coal_conf->tx_coal_type != MEDIUM_COALESCE){
+ coal_conf->tx_timeout = 2;
+ coal_conf->tx_event_count = 5;
+ amd8111e_set_coalesce(dev,TX_INTR_COAL);
+ coal_conf->tx_coal_type = MEDIUM_COALESCE;
+ }
+
+ }
+		else if (tx_pkt_size >= 1024) {
+			if (coal_conf->tx_coal_type != HIGH_COALESCE) {
+				coal_conf->tx_timeout = 4;
+				coal_conf->tx_event_count = 8;
+				amd8111e_set_coalesce(dev, TX_INTR_COAL);
+				coal_conf->tx_coal_type = HIGH_COALESCE;
+			}
+		}
+ }
+ return 0;
+
+}
+
+/* This is the device interrupt handler. It handles transmit,
+ * receive, link change and hardware timer interrupts.
+ */
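+/* Note: CMD0 is a command-style register (see amd8111e.h). Writing
+ * INTREN with its VAL0 bit clear masks interrupts on entry; writing
+ * VAL0 | INTREN on the way out unmasks them again.
+ */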
+static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
+{
+
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct amd8111e_priv *lp;
+	void __iomem *mmio;
+	unsigned int intr0, intren0;
+	unsigned int handled = 1;
+
+	if (unlikely(dev == NULL))
+		return IRQ_NONE;
+
+	lp = netdev_priv(dev);
+	mmio = lp->mmio;
+
+ spin_lock(&lp->lock);
+
+ /* disabling interrupt */
+ writel(INTREN, mmio + CMD0);
+
+ /* Read interrupt status */
+ intr0 = readl(mmio + INT0);
+ intren0 = readl(mmio + INTEN0);
+
+ /* Process all the INT event until INTR bit is clear. */
+
+ if (!(intr0 & INTR)){
+ handled = 0;
+ goto err_no_interrupt;
+ }
+
+ /* Current driver processes 4 interrupts : RINT,TINT,LCINT,STINT */
+ writel(intr0, mmio + INT0);
+
+ /* Check if Receive Interrupt has occurred. */
+ if (intr0 & RINT0) {
+ if (napi_schedule_prep(&lp->napi)) {
+			/* Disable receive interrupts */
+ writel(RINTEN0, mmio + INTEN0);
+ /* Schedule a polling routine */
+ __napi_schedule(&lp->napi);
+ } else if (intren0 & RINTEN0) {
+ netdev_dbg(dev, "************Driver bug! interrupt while in poll\n");
+			/* Fix by disabling receive interrupts */
+ writel(RINTEN0, mmio + INTEN0);
+ }
+ }
+
+ /* Check if Transmit Interrupt has occurred. */
+ if (intr0 & TINT0)
+ amd8111e_tx(dev);
+
+ /* Check if Link Change Interrupt has occurred. */
+ if (intr0 & LCINT)
+ amd8111e_link_change(dev);
+
+ /* Check if Hardware Timer Interrupt has occurred. */
+ if (intr0 & STINT)
+ amd8111e_calc_coalesce(dev);
+
+err_no_interrupt:
+	writel(VAL0 | INTREN, mmio + CMD0);
+
+ spin_unlock(&lp->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void amd8111e_poll(struct net_device *dev)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ amd8111e_interrupt(0, dev);
+ local_irq_restore(flags);
+}
+#endif
+
+
+/* This function closes the network interface and updates
+ * the statistics so that the most recent statistics will be
+ * available after the interface is down.
+ */
+static int amd8111e_close(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ netif_stop_queue(dev);
+
+ napi_disable(&lp->napi);
+
+ spin_lock_irq(&lp->lock);
+
+ amd8111e_disable_interrupt(lp);
+ amd8111e_stop_chip(lp);
+
+ /* Free transmit and receive skbs */
+ amd8111e_free_skbs(lp->amd8111e_net_dev);
+
+ netif_carrier_off(lp->amd8111e_net_dev);
+
+ /* Delete ipg timer */
+ if(lp->options & OPTION_DYN_IPG_ENABLE)
+ del_timer_sync(&lp->ipg_data.ipg_timer);
+
+ spin_unlock_irq(&lp->lock);
+ free_irq(dev->irq, dev);
+ amd8111e_free_ring(lp);
+
+ /* Update the statistics before closing */
+ amd8111e_get_stats(dev);
+ lp->opened = 0;
+ return 0;
+}
+
+/* This function opens a new interface. It requests an irq for the
+ * device, initializes the device, buffers and descriptors, and starts
+ * the device.
+ */
+static int amd8111e_open(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+
+	if (dev->irq == 0 || request_irq(dev->irq, amd8111e_interrupt,
+					 IRQF_SHARED, dev->name, dev))
+		return -EAGAIN;
+
+ napi_enable(&lp->napi);
+
+ spin_lock_irq(&lp->lock);
+
+ amd8111e_init_hw_default(lp);
+
+ if(amd8111e_restart(dev)){
+ spin_unlock_irq(&lp->lock);
+ napi_disable(&lp->napi);
+ if (dev->irq)
+ free_irq(dev->irq, dev);
+ return -ENOMEM;
+ }
+ /* Start ipg timer */
+ if(lp->options & OPTION_DYN_IPG_ENABLE){
+ add_timer(&lp->ipg_data.ipg_timer);
+ netdev_info(dev, "Dynamic IPG Enabled\n");
+ }
+
+ lp->opened = 1;
+
+ spin_unlock_irq(&lp->lock);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/* This function checks whether any transmit descriptors
+ * are available to queue more packets.
+ */
+static int amd8111e_tx_queue_avail(struct amd8111e_priv *lp)
+{
+ int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
+ if (lp->tx_skbuff[tx_index])
+ return -1;
+ else
+ return 0;
+
+}
+
+/* This function will queue the transmit packets to the
+ * descriptors and will trigger the send operation. It also
+ * initializes the transmit descriptors with buffer physical address,
+ * byte count, ownership to hardware etc.
+ */
+static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int tx_index;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;
+
+ lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
+
+ lp->tx_skbuff[tx_index] = skb;
+ lp->tx_ring[tx_index].tx_flags = 0;
+
+#if AMD8111E_VLAN_TAG_USED
+ if (skb_vlan_tag_present(skb)) {
+ lp->tx_ring[tx_index].tag_ctrl_cmd |=
+ cpu_to_le16(TCC_VLAN_INSERT);
+ lp->tx_ring[tx_index].tag_ctrl_info =
+ cpu_to_le16(skb_vlan_tag_get(skb));
+
+ }
+#endif
+ lp->tx_dma_addr[tx_index] =
+ pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ lp->tx_ring[tx_index].buff_phy_addr =
+ cpu_to_le32(lp->tx_dma_addr[tx_index]);
+
+	/* Make sure the descriptor fields above are written before
+	 * handing ownership (OWN | STP | ENP | ADD_FCS | LTINT) to
+	 * the hardware.
+	 */
+	wmb();
+	lp->tx_ring[tx_index].tx_flags |=
+		cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT | ADD_FCS_BIT | LTINT_BIT);
+
+ lp->tx_idx++;
+
+ /* Trigger an immediate send poll. */
+	writel(VAL1 | TDMD0, lp->mmio + CMD0);
+	writel(VAL2 | RDMD0, lp->mmio + CMD0);
+
+	if (amd8111e_tx_queue_avail(lp) < 0)
+		netif_stop_queue(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return NETDEV_TX_OK;
+}
+/* This function returns all the memory mapped registers of the device. */
+static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
+{
+ void __iomem *mmio = lp->mmio;
+ /* Read only necessary registers */
+ buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
+ buf[1] = readl(mmio + XMT_RING_LEN0);
+ buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
+ buf[3] = readl(mmio + RCV_RING_LEN0);
+ buf[4] = readl(mmio + CMD0);
+ buf[5] = readl(mmio + CMD2);
+ buf[6] = readl(mmio + CMD3);
+ buf[7] = readl(mmio + CMD7);
+ buf[8] = readl(mmio + INT0);
+ buf[9] = readl(mmio + INTEN0);
+ buf[10] = readl(mmio + LADRF);
+ buf[11] = readl(mmio + LADRF+4);
+ buf[12] = readl(mmio + STAT0);
+}
+
+
+/* This function sets promiscuous mode, all-multi mode or the multicast
+ * address list to the device.
+ */
+static void amd8111e_set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ struct amd8111e_priv *lp = netdev_priv(dev);
+	u32 mc_filter[2];
+ int bit_num;
+
+	if (dev->flags & IFF_PROMISC) {
+		writel(VAL2 | PROM, lp->mmio + CMD2);
+		return;
+	}
+	writel(PROM, lp->mmio + CMD2);
+ if (dev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(dev) > MAX_FILTER_SIZE) {
+		/* receive all multicast packets */
+ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ lp->options |= OPTION_MULTICAST_ENABLE;
+ amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
+ return;
+ }
+ if (netdev_mc_empty(dev)) {
+ /* get only own packets */
+ mc_filter[1] = mc_filter[0] = 0;
+ lp->options &= ~OPTION_MULTICAST_ENABLE;
+ amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
+ /* disable promiscuous mode */
+ writel(PROM, lp->mmio + CMD2);
+ return;
+ }
+ /* load all the multicast addresses in the logic filter */
+ lp->options |= OPTION_MULTICAST_ENABLE;
+ mc_filter[1] = mc_filter[0] = 0;
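+	/* Hash each address: the top six bits of the little-endian CRC-32
+	 * of the MAC address pick one of the 64 logical address filter
+	 * bits (the upper bit selects the filter word, the lower five
+	 * the bit position within it).
+	 */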
+ netdev_for_each_mc_addr(ha, dev) {
+ bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
+ mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
+ }
+ amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
+
+ /* To eliminate PCI posting bug */
+ readl(lp->mmio + CMD2);
+
+}
+
+static void amd8111e_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ struct pci_dev *pci_dev = lp->pci_dev;
+ strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, MODULE_VERS, sizeof(info->version));
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%u", chip_version);
+ strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
+}
+
+static int amd8111e_get_regs_len(struct net_device *dev)
+{
+ return AMD8111E_REG_DUMP_LEN;
+}
+
+static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ regs->version = 0;
+ amd8111e_read_regs(lp, buf);
+}
+
+static int amd8111e_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ spin_lock_irq(&lp->lock);
+ mii_ethtool_gset(&lp->mii_if, ecmd);
+ spin_unlock_irq(&lp->lock);
+ return 0;
+}
+
+static int amd8111e_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int res;
+ spin_lock_irq(&lp->lock);
+ res = mii_ethtool_sset(&lp->mii_if, ecmd);
+ spin_unlock_irq(&lp->lock);
+ return res;
+}
+
+static int amd8111e_nway_reset(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ return mii_nway_restart(&lp->mii_if);
+}
+
+static u32 amd8111e_get_link(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ return mii_link_ok(&lp->mii_if);
+}
+
+static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ wol_info->supported = WAKE_MAGIC|WAKE_PHY;
+ if (lp->options & OPTION_WOL_ENABLE)
+ wol_info->wolopts = WAKE_MAGIC;
+}
+
+static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY))
+ return -EINVAL;
+ spin_lock_irq(&lp->lock);
+ if (wol_info->wolopts & WAKE_MAGIC)
+ lp->options |=
+ (OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
+ else if(wol_info->wolopts & WAKE_PHY)
+ lp->options |=
+ (OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
+ else
+ lp->options &= ~OPTION_WOL_ENABLE;
+ spin_unlock_irq(&lp->lock);
+ return 0;
+}
+
+static const struct ethtool_ops ops = {
+ .get_drvinfo = amd8111e_get_drvinfo,
+ .get_regs_len = amd8111e_get_regs_len,
+ .get_regs = amd8111e_get_regs,
+ .get_settings = amd8111e_get_settings,
+ .set_settings = amd8111e_set_settings,
+ .nway_reset = amd8111e_nway_reset,
+ .get_link = amd8111e_get_link,
+ .get_wol = amd8111e_get_wol,
+ .set_wol = amd8111e_set_wol,
+};
+
+/* This function handles the MII ioctls. It reports the PHY address and
+ * reads or writes PHY registers on behalf of userspace tools; driver
+ * info, link settings, register dumps, autonegotiation restart and WOL
+ * are handled through the ethtool operations above.
+ */
+static int amd8111e_ioctl(struct net_device *dev , struct ifreq *ifr, int cmd)
+{
+ struct mii_ioctl_data *data = if_mii(ifr);
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int err;
+ u32 mii_regval;
+
+ switch(cmd) {
+ case SIOCGMIIPHY:
+ data->phy_id = lp->ext_phy_addr;
+
+ /* fallthru */
+ case SIOCGMIIREG:
+
+ spin_lock_irq(&lp->lock);
+ err = amd8111e_read_phy(lp, data->phy_id,
+ data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
+ spin_unlock_irq(&lp->lock);
+
+ data->val_out = mii_regval;
+ return err;
+
+ case SIOCSMIIREG:
+
+ spin_lock_irq(&lp->lock);
+ err = amd8111e_write_phy(lp, data->phy_id,
+ data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
+ spin_unlock_irq(&lp->lock);
+
+ return err;
+
+ default:
+ /* do nothing */
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+static int amd8111e_set_mac_address(struct net_device *dev, void *p)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int i;
+ struct sockaddr *addr = p;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ spin_lock_irq(&lp->lock);
+ /* Setting the MAC address to the device */
+ for (i = 0; i < ETH_ALEN; i++)
+		writeb(dev->dev_addr[i], lp->mmio + PADR + i);
+
+ spin_unlock_irq(&lp->lock);
+
+ return 0;
+}
+
+/* This function changes the MTU of the device. It restarts the device to
+ * reinitialize the descriptors with new receive buffers.
+ */
+static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int err;
+
+ if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
+ return -EINVAL;
+
+ if (!netif_running(dev)) {
+		/* new_mtu will be used
+		 * when the device starts next time
+		 */
+ dev->mtu = new_mtu;
+ return 0;
+ }
+
+ spin_lock_irq(&lp->lock);
+
+ /* stop the chip */
+ writel(RUN, lp->mmio + CMD0);
+
+ dev->mtu = new_mtu;
+
+ err = amd8111e_restart(dev);
+ spin_unlock_irq(&lp->lock);
+ if(!err)
+ netif_start_queue(dev);
+ return err;
+}
+
+static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
+{
+	writel(VAL1 | MPPLBA, lp->mmio + CMD3);
+	writel(VAL0 | MPEN_SW, lp->mmio + CMD7);
+
+ /* To eliminate PCI posting bug */
+ readl(lp->mmio + CMD7);
+ return 0;
+}
+
+static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
+{
+
+	/* Adapter is already stopped/suspended/interrupt-disabled */
+	writel(VAL0 | LCMODE_SW, lp->mmio + CMD7);
+
+ /* To eliminate PCI posting bug */
+ readl(lp->mmio + CMD7);
+ return 0;
+}
+
+/* This function is called when a packet transmission fails to complete
+ * within a reasonable period, on the assumption that an interrupt has
+ * failed or the interface is locked up. This function will reinitialize
+ * the hardware.
+ */
+ */
+static void amd8111e_tx_timeout(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int err;
+
+ netdev_err(dev, "transmit timed out, resetting\n");
+
+ spin_lock_irq(&lp->lock);
+ err = amd8111e_restart(dev);
+ spin_unlock_irq(&lp->lock);
+ if(!err)
+ netif_wake_queue(dev);
+}
+static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct amd8111e_priv *lp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ /* disable the interrupt */
+ spin_lock_irq(&lp->lock);
+ amd8111e_disable_interrupt(lp);
+ spin_unlock_irq(&lp->lock);
+
+ netif_device_detach(dev);
+
+ /* stop chip */
+ spin_lock_irq(&lp->lock);
+ if(lp->options & OPTION_DYN_IPG_ENABLE)
+ del_timer_sync(&lp->ipg_data.ipg_timer);
+ amd8111e_stop_chip(lp);
+ spin_unlock_irq(&lp->lock);
+
+ if(lp->options & OPTION_WOL_ENABLE){
+ /* enable wol */
+ if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
+ amd8111e_enable_magicpkt(lp);
+ if(lp->options & OPTION_WAKE_PHY_ENABLE)
+ amd8111e_enable_link_change(lp);
+
+ pci_enable_wake(pci_dev, PCI_D3hot, 1);
+ pci_enable_wake(pci_dev, PCI_D3cold, 1);
+
+ }
+ else{
+ pci_enable_wake(pci_dev, PCI_D3hot, 0);
+ pci_enable_wake(pci_dev, PCI_D3cold, 0);
+ }
+
+ pci_save_state(pci_dev);
+ pci_set_power_state(pci_dev, PCI_D3hot);
+
+ return 0;
+}
+static int amd8111e_resume(struct pci_dev *pci_dev)
+{
+ struct net_device *dev = pci_get_drvdata(pci_dev);
+ struct amd8111e_priv *lp = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return 0;
+
+ pci_set_power_state(pci_dev, PCI_D0);
+ pci_restore_state(pci_dev);
+
+ pci_enable_wake(pci_dev, PCI_D3hot, 0);
+ pci_enable_wake(pci_dev, PCI_D3cold, 0); /* D3 cold */
+
+ netif_device_attach(dev);
+
+ spin_lock_irq(&lp->lock);
+ amd8111e_restart(dev);
+ /* Restart ipg timer */
+ if(lp->options & OPTION_DYN_IPG_ENABLE)
+ mod_timer(&lp->ipg_data.ipg_timer,
+ jiffies + IPG_CONVERGE_JIFFIES);
+ spin_unlock_irq(&lp->lock);
+
+ return 0;
+}
+
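+/* Dynamic IPG tuning (half-duplex links only; full duplex always uses
+ * DEFAULT_IPG): in CSTATE the inter-packet gap is swept from MIN_IPG
+ * towards MAX_IPG in IPG_STEP increments while the collision delta per
+ * tick is tracked, and the IPG that produced the fewest new collisions
+ * is remembered. The driver then holds that value in SSTATE for
+ * IPG_STABLE_TIME ticks before converging again.
+ */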
+static void amd8111e_config_ipg(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ struct ipg_info *ipg_data = &lp->ipg_data;
+ void __iomem *mmio = lp->mmio;
+ unsigned int prev_col_cnt = ipg_data->col_cnt;
+ unsigned int total_col_cnt;
+ unsigned int tmp_ipg;
+
+ if(lp->link_config.duplex == DUPLEX_FULL){
+ ipg_data->ipg = DEFAULT_IPG;
+ return;
+ }
+
+ if(ipg_data->ipg_state == SSTATE){
+
+ if(ipg_data->timer_tick == IPG_STABLE_TIME){
+
+ ipg_data->timer_tick = 0;
+ ipg_data->ipg = MIN_IPG - IPG_STEP;
+ ipg_data->current_ipg = MIN_IPG;
+ ipg_data->diff_col_cnt = 0xFFFFFFFF;
+ ipg_data->ipg_state = CSTATE;
+ }
+ else
+ ipg_data->timer_tick++;
+ }
+
+ if(ipg_data->ipg_state == CSTATE){
+
+ /* Get the current collision count */
+
+ total_col_cnt = ipg_data->col_cnt =
+ amd8111e_read_mib(mmio, xmt_collisions);
+
+ if ((total_col_cnt - prev_col_cnt) <
+ (ipg_data->diff_col_cnt)){
+
+ ipg_data->diff_col_cnt =
+ total_col_cnt - prev_col_cnt ;
+
+ ipg_data->ipg = ipg_data->current_ipg;
+ }
+
+ ipg_data->current_ipg += IPG_STEP;
+
+ if (ipg_data->current_ipg <= MAX_IPG)
+ tmp_ipg = ipg_data->current_ipg;
+ else{
+ tmp_ipg = ipg_data->ipg;
+ ipg_data->ipg_state = SSTATE;
+ }
+ writew((u32)tmp_ipg, mmio + IPG);
+ writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
+ }
+	mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
+}
+
+static void amd8111e_probe_ext_phy(struct net_device *dev)
+{
+ struct amd8111e_priv *lp = netdev_priv(dev);
+ int i;
+
+ for (i = 0x1e; i >= 0; i--) {
+ u32 id1, id2;
+
+ if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
+ continue;
+ if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
+ continue;
+ lp->ext_phy_id = (id1 << 16) | id2;
+ lp->ext_phy_addr = i;
+ return;
+ }
+ lp->ext_phy_id = 0;
+ lp->ext_phy_addr = 1;
+}
+
+static const struct net_device_ops amd8111e_netdev_ops = {
+ .ndo_open = amd8111e_open,
+ .ndo_stop = amd8111e_close,
+ .ndo_start_xmit = amd8111e_start_xmit,
+ .ndo_tx_timeout = amd8111e_tx_timeout,
+ .ndo_get_stats = amd8111e_get_stats,
+ .ndo_set_rx_mode = amd8111e_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = amd8111e_set_mac_address,
+ .ndo_do_ioctl = amd8111e_ioctl,
+ .ndo_change_mtu = amd8111e_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = amd8111e_poll,
+#endif
+};
+
+static int amd8111e_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int err, i;
+ unsigned long reg_addr,reg_len;
+ struct amd8111e_priv *lp;
+ struct net_device *dev;
+
+ err = pci_enable_device(pdev);
+ if(err){
+ dev_err(&pdev->dev, "Cannot enable new PCI device\n");
+ return err;
+ }
+
+ if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
+ dev_err(&pdev->dev, "Cannot find PCI base address\n");
+ err = -ENODEV;
+ goto err_disable_pdev;
+ }
+
+ err = pci_request_regions(pdev, MODULE_NAME);
+ if(err){
+ dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
+ goto err_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ /* Find power-management capability. */
+ if (!pdev->pm_cap) {
+ dev_err(&pdev->dev, "No Power Management capability\n");
+ err = -ENODEV;
+ goto err_free_reg;
+ }
+
+ /* Initialize DMA */
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
+ dev_err(&pdev->dev, "DMA not supported\n");
+ err = -ENODEV;
+ goto err_free_reg;
+ }
+
+ reg_addr = pci_resource_start(pdev, 0);
+ reg_len = pci_resource_len(pdev, 0);
+
+ dev = alloc_etherdev(sizeof(struct amd8111e_priv));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_free_reg;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+#if AMD8111E_VLAN_TAG_USED
+	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+#endif
+
+ lp = netdev_priv(dev);
+ lp->pci_dev = pdev;
+ lp->amd8111e_net_dev = dev;
+ lp->pm_cap = pdev->pm_cap;
+
+ spin_lock_init(&lp->lock);
+
+ lp->mmio = devm_ioremap(&pdev->dev, reg_addr, reg_len);
+ if (!lp->mmio) {
+ dev_err(&pdev->dev, "Cannot map device registers\n");
+ err = -ENOMEM;
+ goto err_free_dev;
+ }
+
+ /* Initializing MAC address */
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = readb(lp->mmio + PADR + i);
+
+	/* Setting user defined parameters */
+ lp->ext_phy_option = speed_duplex[card_idx];
+ if(coalesce[card_idx])
+ lp->options |= OPTION_INTR_COAL_ENABLE;
+ if(dynamic_ipg[card_idx++])
+ lp->options |= OPTION_DYN_IPG_ENABLE;
+
+
+ /* Initialize driver entry points */
+ dev->netdev_ops = &amd8111e_netdev_ops;
+ dev->ethtool_ops = &ops;
+	dev->irq = pdev->irq;
+ dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
+ netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
+
+ /* Probe the external PHY */
+ amd8111e_probe_ext_phy(dev);
+
+ /* setting mii default values */
+ lp->mii_if.dev = dev;
+ lp->mii_if.mdio_read = amd8111e_mdio_read;
+ lp->mii_if.mdio_write = amd8111e_mdio_write;
+ lp->mii_if.phy_id = lp->ext_phy_addr;
+
+ /* Set receive buffer length and set jumbo option*/
+ amd8111e_set_rx_buff_len(dev);
+
+
+ err = register_netdev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot register net device\n");
+ goto err_free_dev;
+ }
+
+ pci_set_drvdata(pdev, dev);
+
+ /* Initialize software ipg timer */
+ if(lp->options & OPTION_DYN_IPG_ENABLE){
+ init_timer(&lp->ipg_data.ipg_timer);
+ lp->ipg_data.ipg_timer.data = (unsigned long) dev;
+ lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
+ lp->ipg_data.ipg_timer.expires = jiffies +
+ IPG_CONVERGE_JIFFIES;
+ lp->ipg_data.ipg = DEFAULT_IPG;
+ lp->ipg_data.ipg_state = CSTATE;
+ }
+
+ /* display driver and device information */
+ chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
+ dev_info(&pdev->dev, "AMD-8111e Driver Version: %s\n", MODULE_VERS);
+ dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
+ chip_version, dev->dev_addr);
+ if (lp->ext_phy_id)
+ dev_info(&pdev->dev, "Found MII PHY ID 0x%08x at address 0x%02x\n",
+ lp->ext_phy_id, lp->ext_phy_addr);
+ else
+ dev_info(&pdev->dev, "Couldn't detect MII PHY, assuming address 0x01\n");
+
+ return 0;
+
+err_free_dev:
+ free_netdev(dev);
+
+err_free_reg:
+ pci_release_regions(pdev);
+
+err_disable_pdev:
+ pci_disable_device(pdev);
+ return err;
+
+}
+
+static void amd8111e_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ unregister_netdev(dev);
+ free_netdev(dev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ }
+}
+
+static const struct pci_device_id amd8111e_pci_tbl[] = {
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD8111E_7462,
+ },
+ {
+ .vendor = 0,
+ }
+};
+MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
+
+static struct pci_driver amd8111e_driver = {
+ .name = MODULE_NAME,
+ .id_table = amd8111e_pci_tbl,
+ .probe = amd8111e_probe_one,
+ .remove = amd8111e_remove_one,
+ .suspend = amd8111e_suspend,
+ .resume = amd8111e_resume
+};
+
+module_pci_driver(amd8111e_driver);
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
new file mode 100644
index 000000000..7cdb18512
--- /dev/null
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -0,0 +1,813 @@
+/*
+ * Advanced Micro Devices Inc. AMD8111E Linux Network Driver
+ * Copyright (C) 2003 Advanced Micro Devices
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+Module Name:
+
+ amd8111e.h
+
+Abstract:
+
+ AMD8111 based 10/100 Ethernet Controller driver definitions.
+
+Environment:
+
+ Kernel Mode
+
+Revision History:
+ 3.0.0
+ Initial Revision.
+ 3.0.1
+*/
+
+#ifndef _AMD8111E_H
+#define _AMD8111E_H
+
+/* Command style register access
+ *
+ * Registers CMD0, CMD2, CMD3, CMD7 and INTEN0 use a write access
+ * technique called command style access. It allows writing to selected
+ * bits of a register without altering the bits that are not selected.
+ * Command style registers are divided into 4 bytes that can be written
+ * independently. The high-order bit of each byte is the VAL bit; it
+ * specifies whether the selected bits of that byte are set or cleared.
+ *
+ * E.g., if the value 10011010b is written into the least significant
+ * byte of a command style register, bits 1, 3 and 4 of the register are
+ * set to 1 and the other bits are not altered. If the value 00011010b
+ * is written into the same byte, bits 1, 3 and 4 are cleared to 0 and
+ * the other bits are not altered.
+ */
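+/* Illustration (mirroring how the driver itself uses CMD0): writing a
+ * byte with its VAL bit set turns the selected bits on, writing the
+ * same byte with the VAL bit clear turns them off:
+ *
+ *	writel(VAL0 | INTREN, mmio + CMD0);	enables INTREN (bit 1)
+ *	writel(INTREN, mmio + CMD0);		disables INTREN
+ *
+ * Either way, the remaining bits of CMD0 are left untouched.
+ */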
+
+/* Offset for Memory Mapped Registers. */
+/* 32 bit registers */
+
+#define ASF_STAT 0x00 /* ASF status register */
+#define CHIPID 0x04 /* Chip ID register */
+#define MIB_DATA 0x10 /* MIB data register */
+#define MIB_ADDR 0x14 /* MIB address register */
+#define STAT0 0x30 /* Status0 register */
+#define INT0 0x38 /* Interrupt0 register */
+#define INTEN0 0x40 /* Interrupt0 enable register*/
+#define CMD0 0x48 /* Command0 register */
+#define CMD2 0x50 /* Command2 register */
+#define CMD3 0x54 /* Command3 register */
+#define CMD7 0x64 /* Command7 register */
+
+#define CTRL1 0x6C /* Control1 register */
+#define CTRL2 0x70 /* Control2 register */
+
+#define XMT_RING_LIMIT 0x7C /* Transmit ring limit register */
+
+#define AUTOPOLL0 0x88 /* Auto-poll0 register */
+#define AUTOPOLL1 0x8A /* Auto-poll1 register */
+#define AUTOPOLL2 0x8C /* Auto-poll2 register */
+#define AUTOPOLL3 0x8E /* Auto-poll3 register */
+#define AUTOPOLL4 0x90 /* Auto-poll4 register */
+#define AUTOPOLL5 0x92 /* Auto-poll5 register */
+
+#define AP_VALUE 0x98 /* Auto-poll value register */
+#define DLY_INT_A 0xA8 /* Group A delayed interrupt register */
+#define DLY_INT_B 0xAC /* Group B delayed interrupt register */
+
+#define FLOW_CONTROL 0xC8 /* Flow control register */
+#define PHY_ACCESS 0xD0 /* PHY access register */
+
+#define STVAL 0xD8 /* Software timer value register */
+
+#define XMT_RING_BASE_ADDR0 0x100 /* Transmit ring0 base addr register */
+#define XMT_RING_BASE_ADDR1 0x108 /* Transmit ring1 base addr register */
+#define XMT_RING_BASE_ADDR2 0x110 /* Transmit ring2 base addr register */
+#define XMT_RING_BASE_ADDR3 0x118 /* Transmit ring3 base addr register */
+
+#define RCV_RING_BASE_ADDR0 0x120 /* Receive ring0 base addr register */
+
+#define PMAT0 0x190 /* OnNow pattern register0 */
+#define PMAT1 0x194 /* OnNow pattern register1 */
+
+/* 16bit registers */
+
+#define XMT_RING_LEN0 0x140 /* Transmit Ring0 length register */
+#define XMT_RING_LEN1 0x144 /* Transmit Ring1 length register */
+#define XMT_RING_LEN2 0x148 /* Transmit Ring2 length register */
+#define XMT_RING_LEN3 0x14C /* Transmit Ring3 length register */
+
+#define RCV_RING_LEN0 0x150 /* Receive Ring0 length register */
+
+#define SRAM_SIZE 0x178 /* SRAM size register */
+#define SRAM_BOUNDARY 0x17A /* SRAM boundary register */
+
+/* 48bit register */
+
+#define PADR 0x160 /* Physical address register */
+
+#define IFS1 0x18C /* Inter-frame spacing Part1 register */
+#define IFS 0x18D /* Inter-frame spacing register */
+#define IPG 0x18E /* Inter-frame gap register */
+/* 64bit register */
+
+#define LADRF 0x168 /* Logical address filter register */
+
+
+/* Register Bit Definitions */
+typedef enum {
+
+ ASF_INIT_DONE = (1 << 1),
+ ASF_INIT_PRESENT = (1 << 0),
+
+}STAT_ASF_BITS;
+
+typedef enum {
+
+ MIB_CMD_ACTIVE = (1 << 15 ),
+ MIB_RD_CMD = (1 << 13 ),
+ MIB_CLEAR = (1 << 12 ),
+ MIB_ADDRESS = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)|
+ (1 << 4) | (1 << 5),
+}MIB_ADDR_BITS;
+
+
+typedef enum {
+
+ PMAT_DET = (1 << 12),
+ MP_DET = (1 << 11),
+ LC_DET = (1 << 10),
+ SPEED_MASK = (1 << 9)|(1 << 8)|(1 << 7),
+ FULL_DPLX = (1 << 6),
+ LINK_STATS = (1 << 5),
+ AUTONEG_COMPLETE = (1 << 4),
+ MIIPD = (1 << 3),
+ RX_SUSPENDED = (1 << 2),
+ TX_SUSPENDED = (1 << 1),
+ RUNNING = (1 << 0),
+
+}STAT0_BITS;
+
+#define PHY_SPEED_10 0x2
+#define PHY_SPEED_100 0x3
+
+/* INT0 0x38, 32bit register */
+typedef enum {
+
+ INTR = (1 << 31),
+ PCSINT = (1 << 28),
+ LCINT = (1 << 27),
+ APINT5 = (1 << 26),
+ APINT4 = (1 << 25),
+ APINT3 = (1 << 24),
+ TINT_SUM = (1 << 23),
+ APINT2 = (1 << 22),
+ APINT1 = (1 << 21),
+ APINT0 = (1 << 20),
+ MIIPDTINT = (1 << 19),
+ MCCINT = (1 << 17),
+ MREINT = (1 << 16),
+ RINT_SUM = (1 << 15),
+ SPNDINT = (1 << 14),
+ MPINT = (1 << 13),
+ SINT = (1 << 12),
+ TINT3 = (1 << 11),
+ TINT2 = (1 << 10),
+ TINT1 = (1 << 9),
+ TINT0 = (1 << 8),
+ UINT = (1 << 7),
+ STINT = (1 << 4),
+ RINT0 = (1 << 0),
+
+}INT0_BITS;
+
+typedef enum {
+
+ VAL3 = (1 << 31), /* VAL bit for byte 3 */
+ VAL2 = (1 << 23), /* VAL bit for byte 2 */
+ VAL1 = (1 << 15), /* VAL bit for byte 1 */
+ VAL0 = (1 << 7), /* VAL bit for byte 0 */
+
+}VAL_BITS;
+
+typedef enum {
+
+ /* VAL3 */
+ LCINTEN = (1 << 27),
+ APINT5EN = (1 << 26),
+ APINT4EN = (1 << 25),
+ APINT3EN = (1 << 24),
+ /* VAL2 */
+ APINT2EN = (1 << 22),
+ APINT1EN = (1 << 21),
+ APINT0EN = (1 << 20),
+ MIIPDTINTEN = (1 << 19),
+ MCCIINTEN = (1 << 18),
+ MCCINTEN = (1 << 17),
+ MREINTEN = (1 << 16),
+ /* VAL1 */
+ SPNDINTEN = (1 << 14),
+ MPINTEN = (1 << 13),
+ TINTEN3 = (1 << 11),
+ SINTEN = (1 << 12),
+ TINTEN2 = (1 << 10),
+ TINTEN1 = (1 << 9),
+ TINTEN0 = (1 << 8),
+ /* VAL0 */
+ STINTEN = (1 << 4),
+ RINTEN0 = (1 << 0),
+
+ INTEN0_CLEAR = 0x1F7F7F1F, /* Command style register */
+
+}INTEN0_BITS;
+
+typedef enum {
+ /* VAL2 */
+ RDMD0 = (1 << 16),
+ /* VAL1 */
+ TDMD3 = (1 << 11),
+ TDMD2 = (1 << 10),
+ TDMD1 = (1 << 9),
+ TDMD0 = (1 << 8),
+ /* VAL0 */
+ UINTCMD = (1 << 6),
+ RX_FAST_SPND = (1 << 5),
+ TX_FAST_SPND = (1 << 4),
+ RX_SPND = (1 << 3),
+ TX_SPND = (1 << 2),
+ INTREN = (1 << 1),
+ RUN = (1 << 0),
+
+ CMD0_CLEAR = 0x000F0F7F, /* Command style register */
+
+}CMD0_BITS;
+
+typedef enum {
+
+ /* VAL3 */
+ CONDUIT_MODE = (1 << 29),
+ /* VAL2 */
+ RPA = (1 << 19),
+ DRCVPA = (1 << 18),
+ DRCVBC = (1 << 17),
+ PROM = (1 << 16),
+ /* VAL1 */
+ ASTRP_RCV = (1 << 13),
+ RCV_DROP0 = (1 << 12),
+ EMBA = (1 << 11),
+ DXMT2PD = (1 << 10),
+ LTINTEN = (1 << 9),
+ DXMTFCS = (1 << 8),
+ /* VAL0 */
+ APAD_XMT = (1 << 6),
+ DRTY = (1 << 5),
+ INLOOP = (1 << 4),
+ EXLOOP = (1 << 3),
+ REX_RTRY = (1 << 2),
+ REX_UFLO = (1 << 1),
+ REX_LCOL = (1 << 0),
+
+ CMD2_CLEAR = 0x3F7F3F7F, /* Command style register */
+
+}CMD2_BITS;
+
+typedef enum {
+
+ /* VAL3 */
+ ASF_INIT_DONE_ALIAS = (1 << 29),
+ /* VAL2 */
+ JUMBO = (1 << 21),
+ VSIZE = (1 << 20),
+ VLONLY = (1 << 19),
+ VL_TAG_DEL = (1 << 18),
+ /* VAL1 */
+ EN_PMGR = (1 << 14),
+ INTLEVEL = (1 << 13),
+ FORCE_FULL_DUPLEX = (1 << 12),
+ FORCE_LINK_STATUS = (1 << 11),
+ APEP = (1 << 10),
+ MPPLBA = (1 << 9),
+ /* VAL0 */
+ RESET_PHY_PULSE = (1 << 2),
+ RESET_PHY = (1 << 1),
+ PHY_RST_POL = (1 << 0),
+
+}CMD3_BITS;
+
+
+typedef enum {
+
+ /* VAL0 */
+ PMAT_SAVE_MATCH = (1 << 4),
+ PMAT_MODE = (1 << 3),
+ MPEN_SW = (1 << 1),
+ LCMODE_SW = (1 << 0),
+
+ CMD7_CLEAR = 0x0000001B /* Command style register */
+
+}CMD7_BITS;
+
+
+typedef enum {
+
+ RESET_PHY_WIDTH = (0xF << 16) | (0xF<< 20), /* 0x00FF0000 */
+ XMTSP_MASK = (1 << 9) | (1 << 8), /* 9:8 */
+ XMTSP_128 = (1 << 9), /* 9 */
+ XMTSP_64 = (1 << 8),
+ CACHE_ALIGN = (1 << 4),
+ BURST_LIMIT_MASK = (0xF << 0 ),
+ CTRL1_DEFAULT = 0x00010111,
+
+}CTRL1_BITS;
+
+typedef enum {
+
+ FMDC_MASK = (1 << 9)|(1 << 8), /* 9:8 */
+ XPHYRST = (1 << 7),
+ XPHYANE = (1 << 6),
+ XPHYFD = (1 << 5),
+ XPHYSP = (1 << 4) | (1 << 3), /* 4:3 */
+ APDW_MASK = (1 << 2) | (1 << 1) | (1 << 0), /* 2:0 */
+
+}CTRL2_BITS;
+
+/* XMT_RING_LIMIT 0x7C, 32bit register */
+typedef enum {
+
+ XMT_RING2_LIMIT = (0xFF << 16), /* 23:16 */
+ XMT_RING1_LIMIT = (0xFF << 8), /* 15:8 */
+ XMT_RING0_LIMIT = (0xFF << 0), /* 7:0 */
+
+}XMT_RING_LIMIT_BITS;
+
+typedef enum {
+
+ AP_REG0_EN = (1 << 15),
+ AP_REG0_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PHY0_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL0_BITS;
+
+/* AUTOPOLL1 0x8A, 16bit register */
+typedef enum {
+
+ AP_REG1_EN = (1 << 15),
+ AP_REG1_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP1 = (1 << 6),
+ AP_PHY1_DFLT = (1 << 5),
+ AP_PHY1_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL1_BITS;
+
+
+typedef enum {
+
+ AP_REG2_EN = (1 << 15),
+ AP_REG2_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP2 = (1 << 6),
+ AP_PHY2_DFLT = (1 << 5),
+ AP_PHY2_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL2_BITS;
+
+typedef enum {
+
+ AP_REG3_EN = (1 << 15),
+ AP_REG3_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP3 = (1 << 6),
+ AP_PHY3_DFLT = (1 << 5),
+ AP_PHY3_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL3_BITS;
+
+
+typedef enum {
+
+ AP_REG4_EN = (1 << 15),
+ AP_REG4_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP4 = (1 << 6),
+ AP_PHY4_DFLT = (1 << 5),
+ AP_PHY4_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL4_BITS;
+
+
+typedef enum {
+
+ AP_REG5_EN = (1 << 15),
+ AP_REG5_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
+ AP_PRE_SUP5 = (1 << 6),
+ AP_PHY5_DFLT = (1 << 5),
+ AP_PHY5_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
+
+}AUTOPOLL5_BITS;
+
+
+
+
+/* AP_VALUE 0x98, 32bit register */
+typedef enum {
+
+ AP_VAL_ACTIVE = (1 << 31),
+ AP_VAL_RD_CMD = ( 1 << 29),
+ AP_ADDR = (1 << 18)|(1 << 17)|(1 << 16), /* 18:16 */
+ AP_VAL = (0xF << 0) | (0xF << 4) |( 0xF << 8) |
+ (0xF << 12), /* 15:0 */
+
+}AP_VALUE_BITS;
+
+typedef enum {
+
+ DLY_INT_A_R3 = (1 << 31),
+ DLY_INT_A_R2 = (1 << 30),
+ DLY_INT_A_R1 = (1 << 29),
+ DLY_INT_A_R0 = (1 << 28),
+ DLY_INT_A_T3 = (1 << 27),
+ DLY_INT_A_T2 = (1 << 26),
+ DLY_INT_A_T1 = (1 << 25),
+ DLY_INT_A_T0 = ( 1 << 24),
+ EVENT_COUNT_A = (0xF << 16) | (0x1 << 20),/* 20:16 */
+ MAX_DELAY_TIME_A = (0xF << 0) | (0xF << 4) | (1 << 8)|
+ (1 << 9) | (1 << 10), /* 10:0 */
+
+}DLY_INT_A_BITS;
+
+typedef enum {
+
+ DLY_INT_B_R3 = (1 << 31),
+ DLY_INT_B_R2 = (1 << 30),
+ DLY_INT_B_R1 = (1 << 29),
+ DLY_INT_B_R0 = (1 << 28),
+ DLY_INT_B_T3 = (1 << 27),
+ DLY_INT_B_T2 = (1 << 26),
+ DLY_INT_B_T1 = (1 << 25),
+ DLY_INT_B_T0 = ( 1 << 24),
+ EVENT_COUNT_B = (0xF << 16) | (0x1 << 20),/* 20:16 */
+ MAX_DELAY_TIME_B = (0xF << 0) | (0xF << 4) | (1 << 8)|
+ (1 << 9) | (1 << 10), /* 10:0 */
+}DLY_INT_B_BITS;
+
+
+/* FLOW_CONTROL 0xC8, 32bit register */
+typedef enum {
+
+ PAUSE_LEN_CHG = (1 << 30),
+ FTPE = (1 << 22),
+ FRPE = (1 << 21),
+ NAPA = (1 << 20),
+ NPA = (1 << 19),
+ FIXP = ( 1 << 18),
+ FCCMD = ( 1 << 16),
+ PAUSE_LEN = (0xF << 0) | (0xF << 4) |( 0xF << 8) | (0xF << 12), /* 15:0 */
+
+}FLOW_CONTROL_BITS;
+
+/* PHY_ ACCESS 0xD0, 32bit register */
+typedef enum {
+
+ PHY_CMD_ACTIVE = (1 << 31),
+ PHY_WR_CMD = (1 << 30),
+ PHY_RD_CMD = (1 << 29),
+ PHY_RD_ERR = (1 << 28),
+ PHY_PRE_SUP = (1 << 27),
+ PHY_ADDR = (1 << 21) | (1 << 22) | (1 << 23)|
+ (1 << 24) |(1 << 25),/* 25:21 */
+ PHY_REG_ADDR = (1 << 16) | (1 << 17) | (1 << 18)| (1 << 19) | (1 << 20),/* 20:16 */
+ PHY_DATA = (0xF << 0)|(0xF << 4) |(0xF << 8)|
+ (0xF << 12),/* 15:0 */
+
+}PHY_ACCESS_BITS;
+
+
+/* PMAT0 0x190, 32bit register */
+typedef enum {
+ PMR_ACTIVE = (1 << 31),
+ PMR_WR_CMD = (1 << 30),
+ PMR_RD_CMD = (1 << 29),
+ PMR_BANK = (1 <<28),
+ PMR_ADDR = (0xF << 16)|(1 << 20)|(1 << 21)|
+ (1 << 22),/* 22:16 */
+ PMR_B4 = (0xF << 0) | (0xF << 4),/* 15:0 */
+}PMAT0_BITS;
+
+
+/* PMAT1 0x194, 32bit register */
+typedef enum {
+ PMR_B3 = (0xF << 24) | (0xF <<28),/* 31:24 */
+ PMR_B2 = (0xF << 16) |(0xF << 20),/* 23:16 */
+ PMR_B1 = (0xF << 8) | (0xF <<12), /* 15:8 */
+ PMR_B0 = (0xF << 0)|(0xF << 4),/* 7:0 */
+}PMAT1_BITS;
+
+/************************************************************************/
+/* */
+/* MIB counter definitions */
+/* */
+/************************************************************************/
+
+#define rcv_miss_pkts 0x00
+#define rcv_octets 0x01
+#define rcv_broadcast_pkts 0x02
+#define rcv_multicast_pkts 0x03
+#define rcv_undersize_pkts 0x04
+#define rcv_oversize_pkts 0x05
+#define rcv_fragments 0x06
+#define rcv_jabbers 0x07
+#define rcv_unicast_pkts 0x08
+#define rcv_alignment_errors 0x09
+#define rcv_fcs_errors 0x0A
+#define rcv_good_octets 0x0B
+#define rcv_mac_ctrl 0x0C
+#define rcv_flow_ctrl 0x0D
+#define rcv_pkts_64_octets 0x0E
+#define rcv_pkts_65to127_octets 0x0F
+#define rcv_pkts_128to255_octets 0x10
+#define rcv_pkts_256to511_octets 0x11
+#define rcv_pkts_512to1023_octets 0x12
+#define rcv_pkts_1024to1518_octets 0x13
+#define rcv_unsupported_opcode 0x14
+#define rcv_symbol_errors 0x15
+#define rcv_drop_pkts_ring1 0x16
+#define rcv_drop_pkts_ring2 0x17
+#define rcv_drop_pkts_ring3 0x18
+#define rcv_drop_pkts_ring4 0x19
+#define rcv_jumbo_pkts 0x1A
+
+#define xmt_underrun_pkts 0x20
+#define xmt_octets 0x21
+#define xmt_packets 0x22
+#define xmt_broadcast_pkts 0x23
+#define xmt_multicast_pkts 0x24
+#define xmt_collisions 0x25
+#define xmt_unicast_pkts 0x26
+#define xmt_one_collision 0x27
+#define xmt_multiple_collision 0x28
+#define xmt_deferred_transmit 0x29
+#define xmt_late_collision 0x2A
+#define xmt_excessive_defer 0x2B
+#define xmt_loss_carrier 0x2C
+#define xmt_excessive_collision 0x2D
+#define xmt_back_pressure 0x2E
+#define xmt_flow_ctrl 0x2F
+#define xmt_pkts_64_octets 0x30
+#define xmt_pkts_65to127_octets 0x31
+#define xmt_pkts_128to255_octets 0x32
+#define xmt_pkts_256to511_octets 0x33
+#define xmt_pkts_512to1023_octets 0x34
+#define xmt_pkts_1024to1518_octet 0x35
+#define xmt_oversize_pkts 0x36
+#define xmt_jumbo_pkts 0x37
+
+
+/* Driver definitions */
+
+#define PCI_VENDOR_ID_AMD 0x1022
+#define PCI_DEVICE_ID_AMD8111E_7462 0x7462
+
+#define MAX_UNITS 8 /* Maximum number of devices possible */
+
+#define NUM_TX_BUFFERS 32 /* Number of transmit buffers */
+#define NUM_RX_BUFFERS 32 /* Number of receive buffers */
+
+#define TX_BUFF_MOD_MASK 31 /* (NUM_TX_BUFFERS -1) */
+#define RX_BUFF_MOD_MASK 31 /* (NUM_RX_BUFFERS -1) */
+
+#define NUM_TX_RING_DR 32
+#define NUM_RX_RING_DR 32
+
+#define TX_RING_DR_MOD_MASK 31 /* (NUM_TX_RING_DR -1) */
+#define RX_RING_DR_MOD_MASK 31 /* (NUM_RX_RING_DR -1) */
+
+#define MAX_FILTER_SIZE 64 /* Maximum multicast address */
+#define AMD8111E_MIN_MTU 60
+#define AMD8111E_MAX_MTU 9000
+
+#define PKT_BUFF_SZ 1536
+#define MIN_PKT_LEN 60
+
+#define AMD8111E_TX_TIMEOUT (3 * HZ)/* 3 sec */
+#define SOFT_TIMER_FREQ 0xBEBC /* 0.5 sec */
+#define DELAY_TIMER_CONV 50 /* msec to 10 usec conversion.
+ Only 500 usec resolution */
+#define OPTION_VLAN_ENABLE 0x0001
+#define OPTION_JUMBO_ENABLE 0x0002
+#define OPTION_MULTICAST_ENABLE 0x0004
+#define OPTION_WOL_ENABLE 0x0008
+#define OPTION_WAKE_MAGIC_ENABLE 0x0010
+#define OPTION_WAKE_PHY_ENABLE 0x0020
+#define OPTION_INTR_COAL_ENABLE 0x0040
+#define OPTION_DYN_IPG_ENABLE 0x0080
+
+#define PHY_REG_ADDR_MASK 0x1f
+
+/* ipg parameters */
+#define DEFAULT_IPG 0x60
+#define IFS1_DELTA 36
+#define IPG_CONVERGE_JIFFIES (HZ/2)
+#define IPG_STABLE_TIME 5
+#define MIN_IPG 96
+#define MAX_IPG 255
+#define IPG_STEP 16
+#define CSTATE 1
+#define SSTATE 2
+
+/* Assume the controller gets data 10 times the maximum processing time */
+#define REPEAT_CNT 10
+
+/* amd8111e descriptor flag definitions */
+typedef enum {
+
+ OWN_BIT = (1 << 15),
+ ADD_FCS_BIT = (1 << 13),
+ LTINT_BIT = (1 << 12),
+ STP_BIT = (1 << 9),
+ ENP_BIT = (1 << 8),
+ KILL_BIT = (1 << 6),
+ TCC_VLAN_INSERT = (1 << 1),
+ TCC_VLAN_REPLACE = (1 << 1) |( 1<< 0),
+
+}TX_FLAG_BITS;
+
+typedef enum {
+ ERR_BIT = (1 << 14),
+ FRAM_BIT = (1 << 13),
+ OFLO_BIT = (1 << 12),
+ CRC_BIT = (1 << 11),
+ PAM_BIT = (1 << 6),
+ LAFM_BIT = (1 << 5),
+ BAM_BIT = (1 << 4),
+	TT_VLAN_TAGGED = (1 << 3) | (1 << 2),/* 0x000c */
+ TT_PRTY_TAGGED = (1 << 3),/* 0x0008 */
+
+}RX_FLAG_BITS;
+
+#define RESET_RX_FLAGS 0x0000
+#define TT_MASK 0x000c
+#define TCC_MASK 0x0003
+
+/* driver ioctl parameters */
+#define AMD8111E_REG_DUMP_LEN (13 * sizeof(u32))
+
+/* amd8111e descriptor format */
+
+struct amd8111e_tx_dr{
+
+ __le16 buff_count; /* Size of the buffer pointed by this descriptor */
+
+ __le16 tx_flags;
+
+ __le16 tag_ctrl_info;
+
+ __le16 tag_ctrl_cmd;
+
+ __le32 buff_phy_addr;
+
+ __le32 reserved;
+};
+
+struct amd8111e_rx_dr{
+
+ __le32 reserved;
+
+ __le16 msg_count; /* Received message len */
+
+ __le16 tag_ctrl_info;
+
+ __le16 buff_count; /* Len of the buffer pointed by descriptor. */
+
+ __le16 rx_flags;
+
+ __le32 buff_phy_addr;
+
+};
+struct amd8111e_link_config{
+
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
+#define AUTONEG_INVALID 0xff
+
+ unsigned long orig_phy_option;
+ u16 speed;
+ u8 duplex;
+ u8 autoneg;
+ u8 reserved; /* 32bit alignment */
+};
+
+enum coal_type{
+
+ NO_COALESCE,
+ LOW_COALESCE,
+ MEDIUM_COALESCE,
+ HIGH_COALESCE,
+
+};
+
+enum coal_mode{
+ RX_INTR_COAL,
+ TX_INTR_COAL,
+ DISABLE_COAL,
+ ENABLE_COAL,
+
+};
+#define MAX_TIMEOUT 40
+#define MAX_EVENT_COUNT 31
+struct amd8111e_coalesce_conf{
+
+ unsigned int rx_timeout;
+ unsigned int rx_event_count;
+ unsigned long rx_packets;
+ unsigned long rx_prev_packets;
+ unsigned long rx_bytes;
+ unsigned long rx_prev_bytes;
+ unsigned int rx_coal_type;
+
+ unsigned int tx_timeout;
+ unsigned int tx_event_count;
+ unsigned long tx_packets;
+ unsigned long tx_prev_packets;
+ unsigned long tx_bytes;
+ unsigned long tx_prev_bytes;
+ unsigned int tx_coal_type;
+
+};
+struct ipg_info{
+
+ unsigned int ipg_state;
+ unsigned int ipg;
+ unsigned int current_ipg;
+ unsigned int col_cnt;
+ unsigned int diff_col_cnt;
+ unsigned int timer_tick;
+ unsigned int prev_ipg;
+ struct timer_list ipg_timer;
+};
+
+struct amd8111e_priv{
+
+ struct amd8111e_tx_dr* tx_ring;
+ struct amd8111e_rx_dr* rx_ring;
+ dma_addr_t tx_ring_dma_addr; /* tx descriptor ring base address */
+ dma_addr_t rx_ring_dma_addr; /* rx descriptor ring base address */
+ const char *name;
+ struct pci_dev *pci_dev; /* Ptr to the associated pci_dev */
+ struct net_device* amd8111e_net_dev; /* ptr to associated net_device */
+ /* Transmit and receive skbs */
+ struct sk_buff *tx_skbuff[NUM_TX_BUFFERS];
+ struct sk_buff *rx_skbuff[NUM_RX_BUFFERS];
+ /* Transmit and receive dma mapped addr */
+ dma_addr_t tx_dma_addr[NUM_TX_BUFFERS];
+ dma_addr_t rx_dma_addr[NUM_RX_BUFFERS];
+ /* Reg memory mapped address */
+ void __iomem *mmio;
+
+ struct napi_struct napi;
+
+ spinlock_t lock; /* Guard lock */
+ unsigned long rx_idx, tx_idx; /* The next free ring entry */
+ unsigned long tx_complete_idx;
+ unsigned long tx_ring_complete_idx;
+ unsigned long tx_ring_idx;
+ unsigned int rx_buff_len; /* Buffer length of rx buffers */
+ int options; /* Options enabled/disabled for the device */
+
+ unsigned long ext_phy_option;
+ int ext_phy_addr;
+ u32 ext_phy_id;
+
+ struct amd8111e_link_config link_config;
+ int pm_cap;
+
+ struct net_device *next;
+ int mii;
+ struct mii_if_info mii_if;
+ char opened;
+ unsigned int drv_rx_errors;
+ struct amd8111e_coalesce_conf coal_conf;
+
+ struct ipg_info ipg_data;
+
+};
+
+/* The kernel-provided writeq() does not write 64 bits into this device's
+ * registers; only the upper 32 bits of the data end up in the lower 32
+ * bits of the register. Work around it with two 32-bit writes.
+ */
+#define amd8111e_writeq(_UlData, _memMap)			\
+do {								\
+	writel(*(u32 *)(&_UlData), _memMap);			\
+	writel(*(u32 *)((u8 *)(&_UlData) + 4), (_memMap) + 4);	\
+} while (0)
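+/* Used, e.g., to program the 64-bit logical address filter:
+ *	amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
+ */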
+
+/* maps the external speed options to internal value */
+typedef enum {
+ SPEED_AUTONEG,
+ SPEED10_HALF,
+ SPEED10_FULL,
+ SPEED100_HALF,
+ SPEED100_FULL,
+}EXT_PHY_OPTION;
+
+static int card_idx;
+static int speed_duplex[MAX_UNITS] = { 0, };
+static bool coalesce[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = true };
+static bool dynamic_ipg[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = false };
+static unsigned int chip_version;
+
+#endif /* _AMD8111E_H */
+
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
new file mode 100644
index 000000000..968b7bfac
--- /dev/null
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -0,0 +1,792 @@
+/*
+ * Amiga Linux/m68k Ariadne Ethernet Driver
+ *
+ * © Copyright 1995-2003 by Geert Uytterhoeven (geert@linux-m68k.org)
+ * Peter De Schrijver (p2@mind.be)
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * lance.c: An AMD LANCE ethernet driver for linux.
+ * Written 1993-94 by Donald Becker.
+ *
+ * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
+ * Advanced Micro Devices
+ * Publication #16907, Rev. B, Amendment/0, May 1994
+ *
+ * MC68230: Parallel Interface/Timer (PI/T)
+ * Motorola Semiconductors, December, 1983
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * The Ariadne is a Zorro-II board made by Village Tronic. It contains:
+ *
+ * - an Am79C960 PCnet-ISA Single-Chip Ethernet Controller with both
+ * 10BASE-2 (thin coax) and 10BASE-T (UTP) connectors
+ *
+ * - an MC68230 Parallel Interface/Timer configured as 2 parallel ports
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+/*#define DEBUG*/
+
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/zorro.h>
+#include <linux/bitops.h>
+
+#include <asm/byteorder.h>
+#include <asm/amigaints.h>
+#include <asm/amigahw.h>
+#include <asm/irq.h>
+
+#include "ariadne.h"
+
+#ifdef ARIADNE_DEBUG
+int ariadne_debug = ARIADNE_DEBUG;
+#else
+int ariadne_debug = 1;
+#endif
+
+/* Macros to Fix Endianness problems */
+
+/* Swap the Bytes in a WORD */
+#define swapw(x) ((((x) >> 8) & 0x00ff) | (((x) << 8) & 0xff00))
+/* Get the Low BYTE in a WORD */
+#define lowb(x) ((x) & 0xff)
+/* Get the Swapped High WORD in a LONG */
+#define swhighw(x) ((((x) >> 8) & 0xff00) | (((x) >> 24) & 0x00ff))
+/* Get the Swapped Low WORD in a LONG */
+#define swloww(x) ((((x) << 8) & 0xff00) | (((x) >> 8) & 0x00ff))
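+/* The m68k host is big-endian while the Am79C960's registers are
+ * little-endian, hence the swapping. For example:
+ *	swapw(0x1234)		== 0x3412
+ *	swhighw(0x12345678)	== 0x3412
+ *	swloww(0x12345678)	== 0x7856
+ */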
+
+/* Transmit/Receive Ring Definitions */
+
+#define TX_RING_SIZE 5
+#define RX_RING_SIZE 16
+
+#define PKT_BUF_SIZE 1520
+
+/* Private Device Data */
+
+struct ariadne_private {
+ volatile struct TDRE *tx_ring[TX_RING_SIZE];
+ volatile struct RDRE *rx_ring[RX_RING_SIZE];
+ volatile u_short *tx_buff[TX_RING_SIZE];
+ volatile u_short *rx_buff[RX_RING_SIZE];
+ int cur_tx, cur_rx; /* The next free ring entry */
+ int dirty_tx; /* The ring entries to be free()ed */
+ char tx_full;
+};
+
+/* Structure Created in the Ariadne's RAM Buffer */
+
+struct lancedata {
+ struct TDRE tx_ring[TX_RING_SIZE];
+ struct RDRE rx_ring[RX_RING_SIZE];
+ u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
+ u_short rx_buff[RX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
+};
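+/* This structure is laid out directly in the Ariadne's on-board RAM at
+ * ARIADNE_RAM; the descriptor rings store the board-side addresses of
+ * the buffers, byte-swapped with swloww()/swhighw() above.
+ */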
+
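+/* Copy a buffer into word-wide board memory; an odd trailing byte is
+ * placed in the high byte of the final word.
+ */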
+static void memcpyw(volatile u_short *dest, u_short *src, int len)
+{
+ while (len >= 2) {
+ *(dest++) = *(src++);
+ len -= 2;
+ }
+ if (len == 1)
+ *dest = (*(u_char *)src) << 8;
+}
+
+static void ariadne_init_ring(struct net_device *dev)
+{
+ struct ariadne_private *priv = netdev_priv(dev);
+ volatile struct lancedata *lancedata = (struct lancedata *)dev->mem_start;
+ int i;
+
+ netif_stop_queue(dev);
+
+ priv->tx_full = 0;
+ priv->cur_rx = priv->cur_tx = 0;
+ priv->dirty_tx = 0;
+
+ /* Set up TX Ring */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ volatile struct TDRE *t = &lancedata->tx_ring[i];
+ t->TMD0 = swloww(ARIADNE_RAM +
+ offsetof(struct lancedata, tx_buff[i]));
+ t->TMD1 = swhighw(ARIADNE_RAM +
+ offsetof(struct lancedata, tx_buff[i])) |
+ TF_STP | TF_ENP;
+ t->TMD2 = swapw((u_short)-PKT_BUF_SIZE);
+ t->TMD3 = 0;
+ priv->tx_ring[i] = &lancedata->tx_ring[i];
+ priv->tx_buff[i] = lancedata->tx_buff[i];
+ netdev_dbg(dev, "TX Entry %2d at %p, Buf at %p\n",
+ i, &lancedata->tx_ring[i], lancedata->tx_buff[i]);
+ }
+
+ /* Set up RX Ring */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ volatile struct RDRE *r = &lancedata->rx_ring[i];
+ r->RMD0 = swloww(ARIADNE_RAM +
+ offsetof(struct lancedata, rx_buff[i]));
+ r->RMD1 = swhighw(ARIADNE_RAM +
+ offsetof(struct lancedata, rx_buff[i])) |
+ RF_OWN;
+ r->RMD2 = swapw((u_short)-PKT_BUF_SIZE);
+ r->RMD3 = 0x0000;
+ priv->rx_ring[i] = &lancedata->rx_ring[i];
+ priv->rx_buff[i] = lancedata->rx_buff[i];
+ netdev_dbg(dev, "RX Entry %2d at %p, Buf at %p\n",
+ i, &lancedata->rx_ring[i], lancedata->rx_buff[i]);
+ }
+}
+
+static int ariadne_rx(struct net_device *dev)
+{
+ struct ariadne_private *priv = netdev_priv(dev);
+ int entry = priv->cur_rx % RX_RING_SIZE;
+ int i;
+
+ /* If we own the next entry, it's a new packet. Send it up */
+ while (!(lowb(priv->rx_ring[entry]->RMD1) & RF_OWN)) {
+ int status = lowb(priv->rx_ring[entry]->RMD1);
+
+ if (status != (RF_STP | RF_ENP)) { /* There was an error */
+ /* There is a tricky error noted by
+ * John Murphy <murf@perftech.com> to Russ Nelson:
+ * Even with full-sized buffers it's possible for a
+ * jabber packet to use two buffers, with only the
+ * last correctly noting the error
+ */
+ /* Only count a general error at the end of a packet */
+ if (status & RF_ENP)
+ dev->stats.rx_errors++;
+ if (status & RF_FRAM)
+ dev->stats.rx_frame_errors++;
+ if (status & RF_OFLO)
+ dev->stats.rx_over_errors++;
+ if (status & RF_CRC)
+ dev->stats.rx_crc_errors++;
+ if (status & RF_BUFF)
+ dev->stats.rx_fifo_errors++;
+ priv->rx_ring[entry]->RMD1 &= 0xff00 | RF_STP | RF_ENP;
+ } else {
+ /* Malloc up new buffer, compatible with net-3 */
+ short pkt_len = swapw(priv->rx_ring[entry]->RMD3);
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
+ if (skb == NULL) {
+ for (i = 0; i < RX_RING_SIZE; i++)
+ if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
+ break;
+
+ if (i > RX_RING_SIZE - 2) {
+ dev->stats.rx_dropped++;
+ priv->rx_ring[entry]->RMD1 |= RF_OWN;
+ priv->cur_rx++;
+ }
+ break;
+ }
+
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, pkt_len); /* Make room */
+ skb_copy_to_linear_data(skb,
+ (const void *)priv->rx_buff[entry],
+ pkt_len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netdev_dbg(dev, "RX pkt type 0x%04x from %pM to %pM data %p len %u\n",
+ ((u_short *)skb->data)[6],
+ skb->data + 6, skb->data,
+ skb->data, skb->len);
+
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+
+ priv->rx_ring[entry]->RMD1 |= RF_OWN;
+ entry = (++priv->cur_rx) % RX_RING_SIZE;
+ }
+
+ priv->cur_rx = priv->cur_rx % RX_RING_SIZE;
+
+ /* We should check that at least two ring entries are free.
+ * If not, we should free one and mark stats->rx_dropped++
+ */
+
+ return 0;
+}
+
+static irqreturn_t ariadne_interrupt(int irq, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+ struct ariadne_private *priv;
+ int csr0, boguscnt;
+ int handled = 0;
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+
+ if (!(lance->RDP & INTR)) /* Check if any interrupt has been */
+ return IRQ_NONE; /* generated by the board */
+
+ priv = netdev_priv(dev);
+
+ boguscnt = 10;
+ while ((csr0 = lance->RDP) & (ERR | RINT | TINT) && --boguscnt >= 0) {
+ /* Acknowledge all of the current interrupt sources ASAP */
+ lance->RDP = csr0 & ~(INEA | TDMD | STOP | STRT | INIT);
+
+#ifdef DEBUG
+ if (ariadne_debug > 5) {
+ netdev_dbg(dev, "interrupt csr0=%#02x new csr=%#02x [",
+ csr0, lance->RDP);
+ if (csr0 & INTR)
+ pr_cont(" INTR");
+ if (csr0 & INEA)
+ pr_cont(" INEA");
+ if (csr0 & RXON)
+ pr_cont(" RXON");
+ if (csr0 & TXON)
+ pr_cont(" TXON");
+ if (csr0 & TDMD)
+ pr_cont(" TDMD");
+ if (csr0 & STOP)
+ pr_cont(" STOP");
+ if (csr0 & STRT)
+ pr_cont(" STRT");
+ if (csr0 & INIT)
+ pr_cont(" INIT");
+ if (csr0 & ERR)
+ pr_cont(" ERR");
+ if (csr0 & BABL)
+ pr_cont(" BABL");
+ if (csr0 & CERR)
+ pr_cont(" CERR");
+ if (csr0 & MISS)
+ pr_cont(" MISS");
+ if (csr0 & MERR)
+ pr_cont(" MERR");
+ if (csr0 & RINT)
+ pr_cont(" RINT");
+ if (csr0 & TINT)
+ pr_cont(" TINT");
+ if (csr0 & IDON)
+ pr_cont(" IDON");
+ pr_cont(" ]\n");
+ }
+#endif
+
+ if (csr0 & RINT) { /* Rx interrupt */
+ handled = 1;
+ ariadne_rx(dev);
+ }
+
+ if (csr0 & TINT) { /* Tx-done interrupt */
+ int dirty_tx = priv->dirty_tx;
+
+ handled = 1;
+ while (dirty_tx < priv->cur_tx) {
+ int entry = dirty_tx % TX_RING_SIZE;
+ int status = lowb(priv->tx_ring[entry]->TMD1);
+
+ if (status & TF_OWN)
+ break; /* It still hasn't been Txed */
+
+ priv->tx_ring[entry]->TMD1 &= 0xff00;
+
+ if (status & TF_ERR) {
+				/* There was a major error, log it */
+ int err_status = priv->tx_ring[entry]->TMD3;
+ dev->stats.tx_errors++;
+ if (err_status & EF_RTRY)
+ dev->stats.tx_aborted_errors++;
+ if (err_status & EF_LCAR)
+ dev->stats.tx_carrier_errors++;
+ if (err_status & EF_LCOL)
+ dev->stats.tx_window_errors++;
+ if (err_status & EF_UFLO) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ dev->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ netdev_err(dev, "Tx FIFO error! Status %04x\n",
+ csr0);
+ /* Restart the chip */
+ lance->RDP = STRT;
+ }
+ } else {
+ if (status & (TF_MORE | TF_ONE))
+ dev->stats.collisions++;
+ dev->stats.tx_packets++;
+ }
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ netdev_err(dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
+ dirty_tx, priv->cur_tx,
+ priv->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (priv->tx_full && netif_queue_stopped(dev) &&
+ dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) {
+ /* The ring is no longer full */
+ priv->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+
+ priv->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors */
+ if (csr0 & BABL) {
+ handled = 1;
+ dev->stats.tx_errors++; /* Tx babble */
+ }
+ if (csr0 & MISS) {
+ handled = 1;
+ dev->stats.rx_errors++; /* Missed a Rx frame */
+ }
+ if (csr0 & MERR) {
+ handled = 1;
+ netdev_err(dev, "Bus master arbitration failure, status %04x\n",
+ csr0);
+ /* Restart the chip */
+ lance->RDP = STRT;
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA | BABL | CERR | MISS | MERR | IDON;
+
+ if (ariadne_debug > 4)
+ netdev_dbg(dev, "exiting interrupt, csr%d=%#04x\n",
+ lance->RAP, lance->RDP);
+
+ return IRQ_RETVAL(handled);
+}
+
+static int ariadne_open(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+ u_short in;
+ u_long version;
+ int i;
+
+ /* Reset the LANCE */
+ in = lance->Reset;
+
+ /* Stop the LANCE */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = STOP;
+
+ /* Check the LANCE version */
+ lance->RAP = CSR88; /* Chip ID */
+ version = swapw(lance->RDP);
+ lance->RAP = CSR89; /* Chip ID */
+ version |= swapw(lance->RDP) << 16;
+ if ((version & 0x00000fff) != 0x00000003) {
+ pr_warn("Couldn't find AMD Ethernet Chip\n");
+ return -EAGAIN;
+ }
+ if ((version & 0x0ffff000) != 0x00003000) {
+ pr_warn("Couldn't find Am79C960 (Wrong part number = %ld)\n",
+ (version & 0x0ffff000) >> 12);
+ return -EAGAIN;
+ }
+
+ netdev_dbg(dev, "Am79C960 (PCnet-ISA) Revision %ld\n",
+ (version & 0xf0000000) >> 28);
+
+ ariadne_init_ring(dev);
+
+ /* Miscellaneous Stuff */
+ lance->RAP = CSR3; /* Interrupt Masks and Deferral Control */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR4; /* Test and Features Control */
+ lance->RDP = DPOLL | APAD_XMT | MFCOM | RCVCCOM | TXSTRTM | JABM;
+
+ /* Set the Multicast Table */
+ lance->RAP = CSR8; /* Logical Address Filter, LADRF[15:0] */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR9; /* Logical Address Filter, LADRF[31:16] */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR10; /* Logical Address Filter, LADRF[47:32] */
+ lance->RDP = 0x0000;
+ lance->RAP = CSR11; /* Logical Address Filter, LADRF[63:48] */
+ lance->RDP = 0x0000;
+
+ /* Set the Ethernet Hardware Address */
+ lance->RAP = CSR12; /* Physical Address Register, PADR[15:0] */
+ lance->RDP = ((u_short *)&dev->dev_addr[0])[0];
+ lance->RAP = CSR13; /* Physical Address Register, PADR[31:16] */
+ lance->RDP = ((u_short *)&dev->dev_addr[0])[1];
+ lance->RAP = CSR14; /* Physical Address Register, PADR[47:32] */
+ lance->RDP = ((u_short *)&dev->dev_addr[0])[2];
+
+ /* Set the Init Block Mode */
+ lance->RAP = CSR15; /* Mode Register */
+ lance->RDP = 0x0000;
+
+ /* Set the Transmit Descriptor Ring Pointer */
+ lance->RAP = CSR30; /* Base Address of Transmit Ring */
+ lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, tx_ring));
+ lance->RAP = CSR31; /* Base Address of Transmit Ring */
+ lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, tx_ring));
+
+ /* Set the Receive Descriptor Ring Pointer */
+ lance->RAP = CSR24; /* Base Address of Receive Ring */
+ lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, rx_ring));
+ lance->RAP = CSR25; /* Base Address of Receive Ring */
+ lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, rx_ring));
+
+ /* Set the Number of RX and TX Ring Entries */
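+ /* CSR76/CSR78 take the two's complement of the entry count, so e.g. a
+ * 16-entry ring is written as 0xfff0, byte-swapped like every other
+ * value on this board */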
+ lance->RAP = CSR76; /* Receive Ring Length */
+ lance->RDP = swapw(((u_short)-RX_RING_SIZE));
+ lance->RAP = CSR78; /* Transmit Ring Length */
+ lance->RDP = swapw(((u_short)-TX_RING_SIZE));
+
+ /* Enable Media Interface Port Auto Select (10BASE-2/10BASE-T) */
+ lance->RAP = ISACSR2; /* Miscellaneous Configuration */
+ lance->IDP = ASEL;
+
+ /* LED Control */
+ lance->RAP = ISACSR5; /* LED1 Status */
+ lance->IDP = PSE|XMTE;
+ lance->RAP = ISACSR6; /* LED2 Status */
+ lance->IDP = PSE|COLE;
+ lance->RAP = ISACSR7; /* LED3 Status */
+ lance->IDP = PSE|RCVE;
+
+ netif_start_queue(dev);
+
+ i = request_irq(IRQ_AMIGA_PORTS, ariadne_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (i)
+ return i;
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA | STRT;
+
+ return 0;
+}
+
+static int ariadne_close(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+
+ netif_stop_queue(dev);
+
+ lance->RAP = CSR112; /* Missed Frame Count */
+ dev->stats.rx_missed_errors = swapw(lance->RDP);
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+
+ if (ariadne_debug > 1) {
+ netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
+ lance->RDP);
+ netdev_dbg(dev, "%lu packets missed\n",
+ dev->stats.rx_missed_errors);
+ }
+
+ /* We stop the LANCE here -- it occasionally polls memory if we don't */
+ lance->RDP = STOP;
+
+ free_irq(IRQ_AMIGA_PORTS, dev);
+
+ return 0;
+}
+
+static inline void ariadne_reset(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = STOP;
+ ariadne_init_ring(dev);
+ lance->RDP = INEA | STRT;
+ netif_start_queue(dev);
+}
+
+static void ariadne_tx_timeout(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+
+ netdev_err(dev, "transmit timed out, status %04x, resetting\n",
+ lance->RDP);
+ ariadne_reset(dev);
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ariadne_private *priv = netdev_priv(dev);
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+ int entry;
+ unsigned long flags;
+ int len = skb->len;
+
+#if 0
+ if (ariadne_debug > 3) {
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ netdev_dbg(dev, "%s: csr0 %04x\n", __func__, lance->RDP);
+ lance->RDP = 0x0000;
+ }
+#endif
+
+ /* FIXME: is the 79C960 new enough to do its own padding right ? */
+ if (skb->len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ len = ETH_ZLEN;
+ }
+
+ /* Fill in a Tx ring entry */
+
+ netdev_dbg(dev, "TX pkt type 0x%04x from %pM to %pM data %p len %u\n",
+ ((u_short *)skb->data)[6],
+ skb->data + 6, skb->data,
+ skb->data, skb->len);
+
+ local_irq_save(flags);
+
+ entry = priv->cur_tx % TX_RING_SIZE;
+
+ /* Caution: the write order is important here, set the base address with
+ the "ownership" bits last */
+
+ priv->tx_ring[entry]->TMD2 = swapw((u_short)-skb->len);
+ priv->tx_ring[entry]->TMD3 = 0x0000;
+ memcpyw(priv->tx_buff[entry], (u_short *)skb->data, len);
+
+#ifdef DEBUG
+ print_hex_dump(KERN_DEBUG, "tx_buff: ", DUMP_PREFIX_OFFSET, 16, 1,
+ (void *)priv->tx_buff[entry],
+ skb->len > 64 ? 64 : skb->len, true);
+#endif
+
+ priv->tx_ring[entry]->TMD1 = (priv->tx_ring[entry]->TMD1 & 0xff00)
+ | TF_OWN | TF_STP | TF_ENP;
+
+ dev_kfree_skb(skb);
+
+ priv->cur_tx++;
+ if ((priv->cur_tx >= TX_RING_SIZE) &&
+ (priv->dirty_tx >= TX_RING_SIZE)) {
+
+ netdev_dbg(dev, "*** Subtracting TX_RING_SIZE from cur_tx (%d) and dirty_tx (%d)\n",
+ priv->cur_tx, priv->dirty_tx);
+
+ priv->cur_tx -= TX_RING_SIZE;
+ priv->dirty_tx -= TX_RING_SIZE;
+ }
+ dev->stats.tx_bytes += len;
+
+ /* Trigger an immediate send poll */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA | TDMD;
+
+ if (lowb(priv->tx_ring[(entry + 1) % TX_RING_SIZE]->TMD1) != 0) {
+ netif_stop_queue(dev);
+ priv->tx_full = 1;
+ }
+ local_irq_restore(flags);
+
+ return NETDEV_TX_OK;
+}
+
+static struct net_device_stats *ariadne_get_stats(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+ short saved_addr;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ saved_addr = lance->RAP;
+ lance->RAP = CSR112; /* Missed Frame Count */
+ dev->stats.rx_missed_errors = swapw(lance->RDP);
+ lance->RAP = saved_addr;
+ local_irq_restore(flags);
+
+ return &dev->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ * num_addrs == -1 Promiscuous mode, receive all packets
+ * num_addrs == 0 Normal mode, clear multicast list
+ * num_addrs > 0 Multicast mode, receive normal and MC packets,
+ * and do best-effort filtering.
+ */
+static void set_multicast_list(struct net_device *dev)
+{
+ volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
+
+ if (!netif_running(dev))
+ return;
+
+ netif_stop_queue(dev);
+
+ /* We take the simple way out and always enable promiscuous mode */
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = STOP; /* Temporarily stop the lance */
+ ariadne_init_ring(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ lance->RAP = CSR15; /* Mode Register */
+ lance->RDP = PROM; /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int num_addrs = netdev_mc_count(dev);
+ int i;
+ /* We don't use the multicast table,
+ * but rely on upper-layer filtering
+ */
+ memset(multicast_table, (num_addrs == 0) ? 0 : -1,
+ sizeof(multicast_table));
+ for (i = 0; i < 4; i++) {
+ lance->RAP = CSR8 + (i << 8);
+ /* Logical Address Filter */
+ lance->RDP = swapw(multicast_table[i]);
+ }
+ lance->RAP = CSR15; /* Mode Register */
+ lance->RDP = 0x0000; /* Unset promiscuous mode */
+ }
+
+ lance->RAP = CSR0; /* PCnet-ISA Controller Status */
+ lance->RDP = INEA | STRT | IDON;/* Resume normal operation */
+
+ netif_wake_queue(dev);
+}
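+
+/* For reference: drivers that do implement the logical address filter
+ * (e.g. lance_load_multicast() in 7990.c) hash each multicast address
+ * with ether_crc_le() and use the top 6 CRC bits to pick one of the 64
+ * LADRF bits -- a sketch, assuming the usual LANCE hash:
+ *
+ * netdev_for_each_mc_addr(ha, dev) {
+ * crc = ether_crc_le(6, ha->addr) >> 26;
+ * mcast_table[crc >> 4] |= 1 << (crc & 0xf);
+ * }
+ */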
+
+
+static void ariadne_remove_one(struct zorro_dev *z)
+{
+ struct net_device *dev = zorro_get_drvdata(z);
+
+ unregister_netdev(dev);
+ release_mem_region(ZTWO_PADDR(dev->base_addr), sizeof(struct Am79C960));
+ release_mem_region(ZTWO_PADDR(dev->mem_start), ARIADNE_RAM_SIZE);
+ free_netdev(dev);
+}
+
+static struct zorro_device_id ariadne_zorro_tbl[] = {
+ { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE },
+ { 0 }
+};
+MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl);
+
+static const struct net_device_ops ariadne_netdev_ops = {
+ .ndo_open = ariadne_open,
+ .ndo_stop = ariadne_close,
+ .ndo_start_xmit = ariadne_start_xmit,
+ .ndo_tx_timeout = ariadne_tx_timeout,
+ .ndo_get_stats = ariadne_get_stats,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int ariadne_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
+{
+ unsigned long board = z->resource.start;
+ unsigned long base_addr = board + ARIADNE_LANCE;
+ unsigned long mem_start = board + ARIADNE_RAM;
+ struct resource *r1, *r2;
+ struct net_device *dev;
+ u32 serial;
+ int err;
+
+ r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960");
+ if (!r1)
+ return -EBUSY;
+ r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM");
+ if (!r2) {
+ release_mem_region(base_addr, sizeof(struct Am79C960));
+ return -EBUSY;
+ }
+
+ dev = alloc_etherdev(sizeof(struct ariadne_private));
+ if (dev == NULL) {
+ release_mem_region(base_addr, sizeof(struct Am79C960));
+ release_mem_region(mem_start, ARIADNE_RAM_SIZE);
+ return -ENOMEM;
+ }
+
+ r1->name = dev->name;
+ r2->name = dev->name;
+
+ serial = be32_to_cpu(z->rom.er_SerialNumber);
+ dev->dev_addr[0] = 0x00;
+ dev->dev_addr[1] = 0x60;
+ dev->dev_addr[2] = 0x30;
+ dev->dev_addr[3] = (serial >> 16) & 0xff;
+ dev->dev_addr[4] = (serial >> 8) & 0xff;
+ dev->dev_addr[5] = serial & 0xff;
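+ /* the 00:60:30 prefix is Village Tronic's OUI; the low 24 bits of the
+ * Zorro serial number individualize the address */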
+ dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
+ dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
+ dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE;
+
+ dev->netdev_ops = &ariadne_netdev_ops;
+ dev->watchdog_timeo = 5 * HZ;
+
+ err = register_netdev(dev);
+ if (err) {
+ release_mem_region(base_addr, sizeof(struct Am79C960));
+ release_mem_region(mem_start, ARIADNE_RAM_SIZE);
+ free_netdev(dev);
+ return err;
+ }
+ zorro_set_drvdata(z, dev);
+
+ netdev_info(dev, "Ariadne at 0x%08lx, Ethernet Address %pM\n",
+ board, dev->dev_addr);
+
+ return 0;
+}
+
+static struct zorro_driver ariadne_driver = {
+ .name = "ariadne",
+ .id_table = ariadne_zorro_tbl,
+ .probe = ariadne_init_one,
+ .remove = ariadne_remove_one,
+};
+
+static int __init ariadne_init_module(void)
+{
+ return zorro_register_driver(&ariadne_driver);
+}
+
+static void __exit ariadne_cleanup_module(void)
+{
+ zorro_unregister_driver(&ariadne_driver);
+}
+
+module_init(ariadne_init_module);
+module_exit(ariadne_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/ariadne.h b/drivers/net/ethernet/amd/ariadne.h
new file mode 100644
index 000000000..727be5cdd
--- /dev/null
+++ b/drivers/net/ethernet/amd/ariadne.h
@@ -0,0 +1,415 @@
+/*
+ * Amiga Linux/m68k Ariadne Ethernet Driver
+ *
+ * © Copyright 1995 by Geert Uytterhoeven (geert@linux-m68k.org)
+ * Peter De Schrijver
+ * (Peter.DeSchrijver@linux.cc.kuleuven.ac.be)
+ *
+ * ----------------------------------------------------------------------------------
+ *
+ * This program is based on
+ *
+ * lance.c: An AMD LANCE ethernet driver for linux.
+ * Written 1993-94 by Donald Becker.
+ *
+ * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
+ * Advanced Micro Devices
+ * Publication #16907, Rev. B, Amendment/0, May 1994
+ *
+ * MC68230: Parallel Interface/Timer (PI/T)
+ * Motorola Semiconductors, December, 1983
+ *
+ * ----------------------------------------------------------------------------------
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ *
+ * ----------------------------------------------------------------------------------
+ *
+ * The Ariadne is a Zorro-II board made by Village Tronic. It contains:
+ *
+ * - an Am79C960 PCnet-ISA Single-Chip Ethernet Controller with both
+ * 10BASE-2 (thin coax) and 10BASE-T (UTP) connectors
+ *
+ * - an MC68230 Parallel Interface/Timer configured as 2 parallel ports
+ */
+
+
+ /*
+ * Am79C960 PCnet-ISA
+ */
+
+struct Am79C960 {
+ volatile u_short AddressPROM[8];
+ /* IEEE Address PROM (Unused in the Ariadne) */
+ volatile u_short RDP; /* Register Data Port */
+ volatile u_short RAP; /* Register Address Port */
+ volatile u_short Reset; /* Reset Chip on Read Access */
+ volatile u_short IDP; /* ISACSR Data Port */
+};
+
+
+ /*
+ * Am79C960 Control and Status Registers
+ *
+ * These values are already swap()ed!!
+ *
+ * Only registers marked with a `-' are intended for network software
+ * access
+ */
+
+#define CSR0 0x0000 /* - PCnet-ISA Controller Status */
+#define CSR1 0x0100 /* - IADR[15:0] */
+#define CSR2 0x0200 /* - IADR[23:16] */
+#define CSR3 0x0300 /* - Interrupt Masks and Deferral Control */
+#define CSR4 0x0400 /* - Test and Features Control */
+#define CSR6 0x0600 /* RCV/XMT Descriptor Table Length */
+#define CSR8 0x0800 /* - Logical Address Filter, LADRF[15:0] */
+#define CSR9 0x0900 /* - Logical Address Filter, LADRF[31:16] */
+#define CSR10 0x0a00 /* - Logical Address Filter, LADRF[47:32] */
+#define CSR11 0x0b00 /* - Logical Address Filter, LADRF[63:48] */
+#define CSR12 0x0c00 /* - Physical Address Register, PADR[15:0] */
+#define CSR13 0x0d00 /* - Physical Address Register, PADR[31:16] */
+#define CSR14 0x0e00 /* - Physical Address Register, PADR[47:32] */
+#define CSR15 0x0f00 /* - Mode Register */
+#define CSR16 0x1000 /* Initialization Block Address Lower */
+#define CSR17 0x1100 /* Initialization Block Address Upper */
+#define CSR18 0x1200 /* Current Receive Buffer Address */
+#define CSR19 0x1300 /* Current Receive Buffer Address */
+#define CSR20 0x1400 /* Current Transmit Buffer Address */
+#define CSR21 0x1500 /* Current Transmit Buffer Address */
+#define CSR22 0x1600 /* Next Receive Buffer Address */
+#define CSR23 0x1700 /* Next Receive Buffer Address */
+#define CSR24 0x1800 /* - Base Address of Receive Ring */
+#define CSR25 0x1900 /* - Base Address of Receive Ring */
+#define CSR26 0x1a00 /* Next Receive Descriptor Address */
+#define CSR27 0x1b00 /* Next Receive Descriptor Address */
+#define CSR28 0x1c00 /* Current Receive Descriptor Address */
+#define CSR29 0x1d00 /* Current Receive Descriptor Address */
+#define CSR30 0x1e00 /* - Base Address of Transmit Ring */
+#define CSR31 0x1f00 /* - Base Address of Transmit Ring */
+#define CSR32 0x2000 /* Next Transmit Descriptor Address */
+#define CSR33 0x2100 /* Next Transmit Descriptor Address */
+#define CSR34 0x2200 /* Current Transmit Descriptor Address */
+#define CSR35 0x2300 /* Current Transmit Descriptor Address */
+#define CSR36 0x2400 /* Next Next Receive Descriptor Address */
+#define CSR37 0x2500 /* Next Next Receive Descriptor Address */
+#define CSR38 0x2600 /* Next Next Transmit Descriptor Address */
+#define CSR39 0x2700 /* Next Next Transmit Descriptor Address */
+#define CSR40 0x2800 /* Current Receive Status and Byte Count */
+#define CSR41 0x2900 /* Current Receive Status and Byte Count */
+#define CSR42 0x2a00 /* Current Transmit Status and Byte Count */
+#define CSR43 0x2b00 /* Current Transmit Status and Byte Count */
+#define CSR44 0x2c00 /* Next Receive Status and Byte Count */
+#define CSR45 0x2d00 /* Next Receive Status and Byte Count */
+#define CSR46 0x2e00 /* Poll Time Counter */
+#define CSR47 0x2f00 /* Polling Interval */
+#define CSR48 0x3000 /* Temporary Storage */
+#define CSR49 0x3100 /* Temporary Storage */
+#define CSR50 0x3200 /* Temporary Storage */
+#define CSR51 0x3300 /* Temporary Storage */
+#define CSR52 0x3400 /* Temporary Storage */
+#define CSR53 0x3500 /* Temporary Storage */
+#define CSR54 0x3600 /* Temporary Storage */
+#define CSR55 0x3700 /* Temporary Storage */
+#define CSR56 0x3800 /* Temporary Storage */
+#define CSR57 0x3900 /* Temporary Storage */
+#define CSR58 0x3a00 /* Temporary Storage */
+#define CSR59 0x3b00 /* Temporary Storage */
+#define CSR60 0x3c00 /* Previous Transmit Descriptor Address */
+#define CSR61 0x3d00 /* Previous Transmit Descriptor Address */
+#define CSR62 0x3e00 /* Previous Transmit Status and Byte Count */
+#define CSR63 0x3f00 /* Previous Transmit Status and Byte Count */
+#define CSR64 0x4000 /* Next Transmit Buffer Address */
+#define CSR65 0x4100 /* Next Transmit Buffer Address */
+#define CSR66 0x4200 /* Next Transmit Status and Byte Count */
+#define CSR67 0x4300 /* Next Transmit Status and Byte Count */
+#define CSR68 0x4400 /* Transmit Status Temporary Storage */
+#define CSR69 0x4500 /* Transmit Status Temporary Storage */
+#define CSR70 0x4600 /* Temporary Storage */
+#define CSR71 0x4700 /* Temporary Storage */
+#define CSR72 0x4800 /* Receive Ring Counter */
+#define CSR74 0x4a00 /* Transmit Ring Counter */
+#define CSR76 0x4c00 /* - Receive Ring Length */
+#define CSR78 0x4e00 /* - Transmit Ring Length */
+#define CSR80 0x5000 /* - Burst and FIFO Threshold Control */
+#define CSR82 0x5200 /* - Bus Activity Timer */
+#define CSR84 0x5400 /* DMA Address */
+#define CSR85 0x5500 /* DMA Address */
+#define CSR86 0x5600 /* Buffer Byte Counter */
+#define CSR88 0x5800 /* - Chip ID */
+#define CSR89 0x5900 /* - Chip ID */
+#define CSR92 0x5c00 /* Ring Length Conversion */
+#define CSR94 0x5e00 /* Transmit Time Domain Reflectometry Count */
+#define CSR96 0x6000 /* Bus Interface Scratch Register 0 */
+#define CSR97 0x6100 /* Bus Interface Scratch Register 0 */
+#define CSR98 0x6200 /* Bus Interface Scratch Register 1 */
+#define CSR99 0x6300 /* Bus Interface Scratch Register 1 */
+#define CSR104 0x6800 /* SWAP */
+#define CSR105 0x6900 /* SWAP */
+#define CSR108 0x6c00 /* Buffer Management Scratch */
+#define CSR109 0x6d00 /* Buffer Management Scratch */
+#define CSR112 0x7000 /* - Missed Frame Count */
+#define CSR114 0x7200 /* - Receive Collision Count */
+#define CSR124 0x7c00 /* - Buffer Management Unit Test */
+
+
+ /*
+ * Am79C960 ISA Control and Status Registers
+ *
+ * These values are already swap()ed!!
+ */
+
+#define ISACSR0 0x0000 /* Master Mode Read Active */
+#define ISACSR1 0x0100 /* Master Mode Write Active */
+#define ISACSR2 0x0200 /* Miscellaneous Configuration */
+#define ISACSR4 0x0400 /* LED0 Status (Link Integrity) */
+#define ISACSR5 0x0500 /* LED1 Status */
+#define ISACSR6 0x0600 /* LED2 Status */
+#define ISACSR7 0x0700 /* LED3 Status */
+
+
+ /*
+ * Bit definitions for CSR0 (PCnet-ISA Controller Status)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define ERR 0x0080 /* Error */
+#define BABL 0x0040 /* Babble: Transmitted too many bits */
+#define CERR 0x0020 /* No Heartbeat (10BASE-T) */
+#define MISS 0x0010 /* Missed Frame */
+#define MERR 0x0008 /* Memory Error */
+#define RINT 0x0004 /* Receive Interrupt */
+#define TINT 0x0002 /* Transmit Interrupt */
+#define IDON 0x0001 /* Initialization Done */
+#define INTR 0x8000 /* Interrupt Flag */
+#define INEA 0x4000 /* Interrupt Enable */
+#define RXON 0x2000 /* Receive On */
+#define TXON 0x1000 /* Transmit On */
+#define TDMD 0x0800 /* Transmit Demand */
+#define STOP 0x0400 /* Stop */
+#define STRT 0x0200 /* Start */
+#define INIT 0x0100 /* Initialize */
+
+
+ /*
+ * Bit definitions for CSR3 (Interrupt Masks and Deferral Control)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define BABLM 0x0040 /* Babble Mask */
+#define MISSM 0x0010 /* Missed Frame Mask */
+#define MERRM 0x0008 /* Memory Error Mask */
+#define RINTM 0x0004 /* Receive Interrupt Mask */
+#define TINTM 0x0002 /* Transmit Interrupt Mask */
+#define IDONM 0x0001 /* Initialization Done Mask */
+#define DXMT2PD 0x1000 /* Disable Transmit Two Part Deferral */
+#define EMBA 0x0800 /* Enable Modified Back-off Algorithm */
+
+
+ /*
+ * Bit definitions for CSR4 (Test and Features Control)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define ENTST 0x0080 /* Enable Test Mode */
+#define DMAPLUS 0x0040 /* Disable Burst Transaction Counter */
+#define TIMER 0x0020 /* Timer Enable Register */
+#define DPOLL 0x0010 /* Disable Transmit Polling */
+#define APAD_XMT 0x0008 /* Auto Pad Transmit */
+#define ASTRP_RCV 0x0004 /* Auto Pad Stripping */
+#define MFCO 0x0002 /* Missed Frame Counter Overflow Interrupt */
+#define MFCOM 0x0001 /* Missed Frame Counter Overflow Mask */
+#define RCVCCO 0x2000 /* Receive Collision Counter Overflow Interrupt */
+#define RCVCCOM 0x1000 /* Receive Collision Counter Overflow Mask */
+#define TXSTRT 0x0800 /* Transmit Start Status */
+#define TXSTRTM 0x0400 /* Transmit Start Mask */
+#define JAB 0x0200 /* Jabber Error */
+#define JABM 0x0100 /* Jabber Error Mask */
+
+
+ /*
+ * Bit definitions for CSR15 (Mode Register)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define PROM 0x0080 /* Promiscuous Mode */
+#define DRCVBC 0x0040 /* Disable Receive Broadcast */
+#define DRCVPA 0x0020 /* Disable Receive Physical Address */
+#define DLNKTST 0x0010 /* Disable Link Status */
+#define DAPC 0x0008 /* Disable Automatic Polarity Correction */
+#define MENDECL 0x0004 /* MENDEC Loopback Mode */
+#define LRTTSEL 0x0002 /* Low Receive Threshold/Transmit Mode Select */
+#define PORTSEL1 0x0001 /* Port Select Bits */
+#define PORTSEL2 0x8000 /* Port Select Bits */
+#define INTL 0x4000 /* Internal Loopback */
+#define DRTY 0x2000 /* Disable Retry */
+#define FCOLL 0x1000 /* Force Collision */
+#define DXMTFCS 0x0800 /* Disable Transmit CRC */
+#define LOOP 0x0400 /* Loopback Enable */
+#define DTX 0x0200 /* Disable Transmitter */
+#define DRX 0x0100 /* Disable Receiver */
+
+
+ /*
+ * Bit definitions for ISACSR2 (Miscellaneous Configuration)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define ASEL 0x0200 /* Media Interface Port Auto Select */
+
+
+ /*
+ * Bit definitions for ISACSR5-7 (LED1-3 Status)
+ *
+ * These values are already swap()ed!!
+ */
+
+#define LEDOUT 0x0080 /* Current LED Status */
+#define PSE 0x8000 /* Pulse Stretcher Enable */
+#define XMTE 0x1000 /* Enable Transmit Status Signal */
+#define RVPOLE 0x0800 /* Enable Receive Polarity Signal */
+#define RCVE 0x0400 /* Enable Receive Status Signal */
+#define JABE 0x0200 /* Enable Jabber Signal */
+#define COLE 0x0100 /* Enable Collision Signal */
+
+
+ /*
+ * Receive Descriptor Ring Entry
+ */
+
+struct RDRE {
+ volatile u_short RMD0; /* LADR[15:0] */
+ volatile u_short RMD1; /* HADR[23:16] | Receive Flags */
+ volatile u_short RMD2; /* Buffer Byte Count (two's complement) */
+ volatile u_short RMD3; /* Message Byte Count */
+};
+
+
+ /*
+ * Transmit Descriptor Ring Entry
+ */
+
+struct TDRE {
+ volatile u_short TMD0; /* LADR[15:0] */
+ volatile u_short TMD1; /* HADR[23:16] | Transmit Flags */
+ volatile u_short TMD2; /* Buffer Byte Count (two's complement) */
+ volatile u_short TMD3; /* Error Flags */
+};
+
+
+ /*
+ * Receive Flags
+ */
+
+#define RF_OWN 0x0080 /* PCnet-ISA controller owns the descriptor */
+#define RF_ERR 0x0040 /* Error */
+#define RF_FRAM 0x0020 /* Framing Error */
+#define RF_OFLO 0x0010 /* Overflow Error */
+#define RF_CRC 0x0008 /* CRC Error */
+#define RF_BUFF 0x0004 /* Buffer Error */
+#define RF_STP 0x0002 /* Start of Packet */
+#define RF_ENP 0x0001 /* End of Packet */
+
+
+ /*
+ * Transmit Flags
+ */
+
+#define TF_OWN 0x0080 /* PCnet-ISA controller owns the descriptor */
+#define TF_ERR 0x0040 /* Error */
+#define TF_ADD_FCS 0x0020 /* Controls FCS Generation */
+#define TF_MORE 0x0010 /* More than one retry needed */
+#define TF_ONE 0x0008 /* One retry needed */
+#define TF_DEF 0x0004 /* Deferred */
+#define TF_STP 0x0002 /* Start of Packet */
+#define TF_ENP 0x0001 /* End of Packet */
+
+
+ /*
+ * Error Flags
+ */
+
+#define EF_BUFF 0x0080 /* Buffer Error */
+#define EF_UFLO 0x0040 /* Underflow Error */
+#define EF_LCOL 0x0010 /* Late Collision */
+#define EF_LCAR 0x0008 /* Loss of Carrier */
+#define EF_RTRY 0x0004 /* Retry Error */
+#define EF_TDR 0xff03 /* Time Domain Reflectometry */
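+
+/* These are byte-swapped like the CSR values above: the TDR counter
+ * really occupies TMD3 bits 9..0 (0x03ff), which reads as 0xff03 after
+ * the swap.
+ */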
+
+
+
+ /*
+ * MC68230 Parallel Interface/Timer
+ */
+
+struct MC68230 {
+ volatile u_char PGCR; /* Port General Control Register */
+ u_char Pad1[1];
+ volatile u_char PSRR; /* Port Service Request Register */
+ u_char Pad2[1];
+ volatile u_char PADDR; /* Port A Data Direction Register */
+ u_char Pad3[1];
+ volatile u_char PBDDR; /* Port B Data Direction Register */
+ u_char Pad4[1];
+ volatile u_char PCDDR; /* Port C Data Direction Register */
+ u_char Pad5[1];
+ volatile u_char PIVR; /* Port Interrupt Vector Register */
+ u_char Pad6[1];
+ volatile u_char PACR; /* Port A Control Register */
+ u_char Pad7[1];
+ volatile u_char PBCR; /* Port B Control Register */
+ u_char Pad8[1];
+ volatile u_char PADR; /* Port A Data Register */
+ u_char Pad9[1];
+ volatile u_char PBDR; /* Port B Data Register */
+ u_char Pad10[1];
+ volatile u_char PAAR; /* Port A Alternate Register */
+ u_char Pad11[1];
+ volatile u_char PBAR; /* Port B Alternate Register */
+ u_char Pad12[1];
+ volatile u_char PCDR; /* Port C Data Register */
+ u_char Pad13[1];
+ volatile u_char PSR; /* Port Status Register */
+ u_char Pad14[5];
+ volatile u_char TCR; /* Timer Control Register */
+ u_char Pad15[1];
+ volatile u_char TIVR; /* Timer Interrupt Vector Register */
+ u_char Pad16[3];
+ volatile u_char CPRH; /* Counter Preload Register (High) */
+ u_char Pad17[1];
+ volatile u_char CPRM; /* Counter Preload Register (Mid) */
+ u_char Pad18[1];
+ volatile u_char CPRL; /* Counter Preload Register (Low) */
+ u_char Pad19[3];
+ volatile u_char CNTRH; /* Count Register (High) */
+ u_char Pad20[1];
+ volatile u_char CNTRM; /* Count Register (Mid) */
+ u_char Pad21[1];
+ volatile u_char CNTRL; /* Count Register (Low) */
+ u_char Pad22[1];
+ volatile u_char TSR; /* Timer Status Register */
+ u_char Pad23[11];
+};
+
+
+ /*
+ * Ariadne Expansion Board Structure
+ */
+
+#define ARIADNE_LANCE 0x360
+
+#define ARIADNE_PIT 0x1000
+
+#define ARIADNE_BOOTPROM 0x4000 /* I guess it's here :-) */
+#define ARIADNE_BOOTPROM_SIZE 0x4000
+
+#define ARIADNE_RAM 0x8000 /* Always access WORDs!! */
+#define ARIADNE_RAM_SIZE 0x8000
+
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
new file mode 100644
index 000000000..b10964e8c
--- /dev/null
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -0,0 +1,1169 @@
+/* atarilance.c: Ethernet driver for VME Lance cards on the Atari */
+/*
+ Written 1995/96 by Roman Hodek (Roman.Hodek@informatik.uni-erlangen.de)
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ This driver was written with the following sources of reference:
+ - The driver for the Riebl Lance card by the TU Vienna.
+ - The modified TUW driver for PAM's VME cards
+ - The PC-Linux driver for Lance cards (but this is for bus master
+ cards, not the shared memory ones)
+ - The Amiga Ariadne driver
+
+ v1.0: (in 1.2.13pl4/0.9.13)
+ Initial version
+ v1.1: (in 1.2.13pl5)
+ more comments
+ deleted some debugging stuff
+ optimized register access (keep AREG pointing to CSR0)
+ following AMD, CSR0_STRT should be set only after IDON is detected
+ use memcpy() for data transfers, that also employs long word moves
+ better probe procedure for 24-bit systems
+ non-VME-RieblCards need extra delays in memcpy
+ must also do write test, since 0xfxe00000 may hit ROM
+ use 8/32 tx/rx buffers, which should give better NFS performance;
+ this is made possible by shifting the last packet buffer after the
+ RieblCard reserved area
+ v1.2: (in 1.2.13pl8)
+ again fixed probing for the Falcon; 0xfe01000 hits phys. 0x00010000
+ and thus RAM, in case of no Lance found all memory contents have to
+ be restored!
+ Now possible to compile as module.
+ v1.3: 03/30/96 Jes Sorensen, Roman (in 1.3)
+ Several little 1.3 adaptions
+ When the lance is stopped it jumps back into little-endian
+ mode. It is therefore necessary to put it back where it
+ belongs, in big endian mode, in order to make things work.
+ This might be the reason why multicast-mode didn't work
+ before, but I'm not able to test it as I only got an Amiga
+ (we had similar problems with the A2065 driver).
+
+*/
+
+static char version[] = "atarilance.c: v1.3 04/04/96 "
+ "Roman.Hodek@informatik.uni-erlangen.de\n";
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+#include <asm/io.h>
+
+/* Debug level:
+ * 0 = silent, print only serious errors
+ * 1 = normal, print error messages
+ * 2 = debug, print debug infos
+ * 3 = debug, print even more debug infos (packet data)
+ */
+
+#define LANCE_DEBUG 1
+
+#ifdef LANCE_DEBUG
+static int lance_debug = LANCE_DEBUG;
+#else
+static int lance_debug = 1;
+#endif
+module_param(lance_debug, int, 0);
+MODULE_PARM_DESC(lance_debug, "atarilance debug level (0-3)");
+MODULE_LICENSE("GPL");
+
+/* Print debug messages on probing? */
+#undef LANCE_DEBUG_PROBE
+
+#define DPRINTK(n,a) \
+ do { \
+ if (lance_debug >= n) \
+ printk a; \
+ } while( 0 )
+
+#ifdef LANCE_DEBUG_PROBE
+# define PROBE_PRINT(a) printk a
+#else
+# define PROBE_PRINT(a)
+#endif
+
+/* These define the number of Rx and Tx buffers as log2. (Only powers
+ * of two are valid)
+ * Many more rx buffers (32) are reserved than tx buffers (8), since receiving
+ * is more time-critical than sending, and packets may have to remain in the
+ * board's memory when main memory is low.
+ */
+
+#define TX_LOG_RING_SIZE 3
+#define RX_LOG_RING_SIZE 5
+
+/* These are the derived values */
+
+#define TX_RING_SIZE (1 << TX_LOG_RING_SIZE)
+#define TX_RING_LEN_BITS (TX_LOG_RING_SIZE << 5)
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+
+#define RX_RING_SIZE (1 << RX_LOG_RING_SIZE)
+#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
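+
+/* With the log2 sizes above this yields 8 tx and 32 rx buffers; the
+ * *_RING_LEN_BITS values (3 << 5 = 0x60, 5 << 5 = 0xa0) put the log2
+ * ring size into bits 7..5 of the init block length byte, where the
+ * LANCE expects it.
+ */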
+
+#define TX_TIMEOUT (HZ/5)
+
+/* The LANCE Rx and Tx ring descriptors. */
+struct lance_rx_head {
+ unsigned short base; /* Low word of base addr */
+ volatile unsigned char flag;
+ unsigned char base_hi; /* High word of base addr (unused) */
+ short buf_length; /* This length is 2s complement! */
+ volatile short msg_length; /* This length is "normal". */
+};
+
+struct lance_tx_head {
+ unsigned short base; /* Low word of base addr */
+ volatile unsigned char flag;
+ unsigned char base_hi; /* High word of base addr (unused) */
+ short length; /* Length is 2s complement! */
+ volatile short misc;
+};
+
+struct ringdesc {
+ unsigned short adr_lo; /* Low 16 bits of address */
+ unsigned char len; /* Length bits */
+ unsigned char adr_hi; /* High 8 bits of address (unused) */
+};
+
+/* The LANCE initialization block, described in databook. */
+struct lance_init_block {
+ unsigned short mode; /* Pre-set mode */
+ unsigned char hwaddr[6]; /* Physical ethernet address */
+ unsigned filter[2]; /* Multicast filter (unused). */
+ /* Receive and transmit ring base, along with length bits. */
+ struct ringdesc rx_ring;
+ struct ringdesc tx_ring;
+};
+
+/* The whole layout of the Lance shared memory */
+struct lance_memory {
+ struct lance_init_block init;
+ struct lance_tx_head tx_head[TX_RING_SIZE];
+ struct lance_rx_head rx_head[RX_RING_SIZE];
+ char packet_area[0]; /* packet data follow after the
+ * init block and the ring
+ * descriptors and are located
+ * at runtime */
+};
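+
+/* With the 8-byte descriptors above and the 24-byte init block,
+ * packet_area starts at offset 24 + 8*8 + 32*8 = 344 (0x158), assuming
+ * the compiler inserts no padding between these 2-byte members (the
+ * layout depends on that anyway).
+ */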
+
+/* RieblCard specifics:
+ * The original TOS driver for these cards reserves the area from offset
+ * 0xee70 to 0xeebb for storing configuration data. Of interest to us is the
+ * Ethernet address there, and the magic for verifying the data's validity.
+ * The reserved area isn't touched by packet buffers. Furthermore, offset 0xfffe
+ * is reserved for the interrupt vector number.
+ */
+#define RIEBL_RSVD_START 0xee70
+#define RIEBL_RSVD_END 0xeec0
+#define RIEBL_MAGIC 0x09051990
+#define RIEBL_MAGIC_ADDR ((unsigned long *)(((char *)MEM) + 0xee8a))
+#define RIEBL_HWADDR_ADDR ((unsigned char *)(((char *)MEM) + 0xee8e))
+#define RIEBL_IVEC_ADDR ((unsigned short *)(((char *)MEM) + 0xfffe))
+
+/* This is a default address for the old RieblCards without a battery
+ * that have no ethernet address at boot time. 00:00:36:04 is the
+ * prefix for Riebl cards, the 00:00 at the end is arbitrary.
+ */
+
+static unsigned char OldRieblDefHwaddr[6] = {
+ 0x00, 0x00, 0x36, 0x04, 0x00, 0x00
+};
+
+
+/* I/O registers of the Lance chip */
+
+struct lance_ioreg {
+/* base+0x0 */ volatile unsigned short data;
+/* base+0x2 */ volatile unsigned short addr;
+ unsigned char _dummy1[3];
+/* base+0x7 */ volatile unsigned char ivec;
+ unsigned char _dummy2[5];
+/* base+0xd */ volatile unsigned char eeprom;
+ unsigned char _dummy3;
+/* base+0xf */ volatile unsigned char mem;
+};
+
+/* Types of boards this driver supports */
+
+enum lance_type {
+ OLD_RIEBL, /* old Riebl card without battery */
+ NEW_RIEBL, /* new Riebl card with battery */
+ PAM_CARD /* PAM card with EEPROM */
+};
+
+static char *lance_names[] = {
+ "Riebl-Card (without battery)",
+ "Riebl-Card (with battery)",
+ "PAM intern card"
+};
+
+/* The driver's private device structure */
+
+struct lance_private {
+ enum lance_type cardtype;
+ struct lance_ioreg *iobase;
+ struct lance_memory *mem;
+ int cur_rx, cur_tx; /* The next free ring entry */
+ int dirty_tx; /* Ring entries to be freed. */
+ /* copy function */
+ void *(*memcpy_f)( void *, const void *, size_t );
+/* This must be long for set_bit() */
+ long tx_full;
+ spinlock_t devlock;
+};
+
+/* I/O register access macros */
+
+#define MEM lp->mem
+#define DREG IO->data
+#define AREG IO->addr
+#define REGA(a) (*( AREG = (a), &DREG ))
+
+/* Definitions for packet buffer access: */
+#define PKT_BUF_SZ 1544
+/* Get the address of a packet buffer corresponding to a given buffer head */
+#define PKTBUF_ADDR(head) (((unsigned char *)(MEM)) + (head)->base)
+
+/* Possible memory/IO addresses for probing */
+
+static struct lance_addr {
+ unsigned long memaddr;
+ unsigned long ioaddr;
+ int slow_flag;
+} lance_addr_list[] = {
+ { 0xfe010000, 0xfe00fff0, 0 }, /* RieblCard VME in TT */
+ { 0xffc10000, 0xffc0fff0, 0 }, /* RieblCard VME in MegaSTE
+ (highest byte stripped) */
+ { 0xffe00000, 0xffff7000, 1 }, /* RieblCard in ST
+ (highest byte stripped) */
+ { 0xffd00000, 0xffff7000, 1 }, /* RieblCard in ST with hw modif. to
+ avoid conflict with ROM
+ (highest byte stripped) */
+ { 0xffcf0000, 0xffcffff0, 0 }, /* PAMCard VME in TT and MSTE
+ (highest byte stripped) */
+ { 0xfecf0000, 0xfecffff0, 0 }, /* Rhotron's PAMCard VME in TT and MSTE
+ (highest byte stripped) */
+};
+
+#define N_LANCE_ADDR ARRAY_SIZE(lance_addr_list)
+
+
+/* Definitions for the Lance */
+
+/* tx_head flags */
+#define TMD1_ENP 0x01 /* end of packet */
+#define TMD1_STP 0x02 /* start of packet */
+#define TMD1_DEF 0x04 /* deferred */
+#define TMD1_ONE 0x08 /* one retry needed */
+#define TMD1_MORE 0x10 /* more than one retry needed */
+#define TMD1_ERR 0x40 /* error summary */
+#define TMD1_OWN 0x80 /* ownership (set: chip owns) */
+
+#define TMD1_OWN_CHIP TMD1_OWN
+#define TMD1_OWN_HOST 0
+
+/* tx_head misc field */
+#define TMD3_TDR 0x03FF /* Time Domain Reflectometry counter */
+#define TMD3_RTRY 0x0400 /* failed after 16 retries */
+#define TMD3_LCAR 0x0800 /* carrier lost */
+#define TMD3_LCOL 0x1000 /* late collision */
+#define TMD3_UFLO 0x4000 /* underflow (late memory) */
+#define TMD3_BUFF 0x8000 /* buffering error (no ENP) */
+
+/* rx_head flags */
+#define RMD1_ENP 0x01 /* end of packet */
+#define RMD1_STP 0x02 /* start of packet */
+#define RMD1_BUFF 0x04 /* buffer error */
+#define RMD1_CRC 0x08 /* CRC error */
+#define RMD1_OFLO 0x10 /* overflow */
+#define RMD1_FRAM 0x20 /* framing error */
+#define RMD1_ERR 0x40 /* error summary */
+#define RMD1_OWN 0x80 /* ownership (set: chip owns) */
+
+#define RMD1_OWN_CHIP RMD1_OWN
+#define RMD1_OWN_HOST 0
+
+/* register names */
+#define CSR0 0 /* mode/status */
+#define CSR1 1 /* init block addr (low) */
+#define CSR2 2 /* init block addr (high) */
+#define CSR3 3 /* misc */
+#define CSR8 8 /* address filter */
+#define CSR15 15 /* promiscuous mode */
+
+/* CSR0 */
+/* (R=readable, W=writeable, S=set on write, C=clear on write) */
+#define CSR0_INIT 0x0001 /* initialize (RS) */
+#define CSR0_STRT 0x0002 /* start (RS) */
+#define CSR0_STOP 0x0004 /* stop (RS) */
+#define CSR0_TDMD 0x0008 /* transmit demand (RS) */
+#define CSR0_TXON 0x0010 /* transmitter on (R) */
+#define CSR0_RXON 0x0020 /* receiver on (R) */
+#define CSR0_INEA 0x0040 /* interrupt enable (RW) */
+#define CSR0_INTR 0x0080 /* interrupt active (R) */
+#define CSR0_IDON 0x0100 /* initialization done (RC) */
+#define CSR0_TINT 0x0200 /* transmitter interrupt (RC) */
+#define CSR0_RINT 0x0400 /* receiver interrupt (RC) */
+#define CSR0_MERR 0x0800 /* memory error (RC) */
+#define CSR0_MISS 0x1000 /* missed frame (RC) */
+#define CSR0_CERR 0x2000 /* carrier error (no heartbeat :-) (RC) */
+#define CSR0_BABL 0x4000 /* babble: tx-ed too many bits (RC) */
+#define CSR0_ERR 0x8000 /* error (RC) */
+
+/* CSR3 */
+#define CSR3_BCON 0x0001 /* byte control */
+#define CSR3_ACON 0x0002 /* ALE control */
+#define CSR3_BSWP 0x0004 /* byte swap (1=big endian) */
+
+
+
+/***************************** Prototypes *****************************/
+
+static unsigned long lance_probe1( struct net_device *dev, struct lance_addr
+ *init_rec );
+static int lance_open( struct net_device *dev );
+static void lance_init_ring( struct net_device *dev );
+static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
+static irqreturn_t lance_interrupt( int irq, void *dev_id );
+static int lance_rx( struct net_device *dev );
+static int lance_close( struct net_device *dev );
+static void set_multicast_list( struct net_device *dev );
+static int lance_set_mac_address( struct net_device *dev, void *addr );
+static void lance_tx_timeout (struct net_device *dev);
+
+/************************* End of Prototypes **************************/
+
+
+
+
+
+static void *slow_memcpy( void *dst, const void *src, size_t len )
+
+{ char *cto = dst;
+ const char *cfrom = src;
+
+ while( len-- ) {
+ *cto++ = *cfrom++;
+ MFPDELAY();
+ }
+ return dst;
+}
+
+
+struct net_device * __init atarilance_probe(int unit)
+{
+ int i;
+ static int found;
+ struct net_device *dev;
+ int err = -ENODEV;
+
+ if (!MACH_IS_ATARI || found)
+ /* Assume there's only one board possible... That seems true, since
+ * the Riebl/PAM board's address cannot be changed. */
+ return ERR_PTR(-ENODEV);
+
+ dev = alloc_etherdev(sizeof(struct lance_private));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ for( i = 0; i < N_LANCE_ADDR; ++i ) {
+ if (lance_probe1( dev, &lance_addr_list[i] )) {
+ found = 1;
+ err = register_netdev(dev);
+ if (!err)
+ return dev;
+ free_irq(dev->irq, dev);
+ break;
+ }
+ }
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+
+/* Derived from hwreg_present() in atari/config.c: */
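+/* It temporarily points the bus-error vector (vbr[2]) at the local
+ * Lberr label, so that touching a nonexistent address just falls out of
+ * the asm with ret still 0 instead of taking a kernel bus error; the
+ * optional write test puts the original byte/word back on success. */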
+
+static noinline int __init addr_accessible(volatile void *regp, int wordflag,
+ int writeflag)
+{
+ int ret;
+ unsigned long flags;
+ long *vbr, save_berr;
+
+ local_irq_save(flags);
+
+ __asm__ __volatile__ ( "movec %/vbr,%0" : "=r" (vbr) : );
+ save_berr = vbr[2];
+
+ __asm__ __volatile__
+ ( "movel %/sp,%/d1\n\t"
+ "movel #Lberr,%2@\n\t"
+ "moveq #0,%0\n\t"
+ "tstl %3\n\t"
+ "bne 1f\n\t"
+ "moveb %1@,%/d0\n\t"
+ "nop \n\t"
+ "bra 2f\n"
+"1: movew %1@,%/d0\n\t"
+ "nop \n"
+"2: tstl %4\n\t"
+ "beq 2f\n\t"
+ "tstl %3\n\t"
+ "bne 1f\n\t"
+ "clrb %1@\n\t"
+ "nop \n\t"
+ "moveb %/d0,%1@\n\t"
+ "nop \n\t"
+ "bra 2f\n"
+"1: clrw %1@\n\t"
+ "nop \n\t"
+ "movew %/d0,%1@\n\t"
+ "nop \n"
+"2: moveq #1,%0\n"
+"Lberr: movel %/d1,%/sp"
+ : "=&d" (ret)
+ : "a" (regp), "a" (&vbr[2]), "rm" (wordflag), "rm" (writeflag)
+ : "d0", "d1", "memory"
+ );
+
+ vbr[2] = save_berr;
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = lance_open,
+ .ndo_stop = lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_set_mac_address = lance_set_mac_address,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static unsigned long __init lance_probe1( struct net_device *dev,
+ struct lance_addr *init_rec )
+{
+ volatile unsigned short *memaddr =
+ (volatile unsigned short *)init_rec->memaddr;
+ volatile unsigned short *ioaddr =
+ (volatile unsigned short *)init_rec->ioaddr;
+ struct lance_private *lp;
+ struct lance_ioreg *IO;
+ int i;
+ static int did_version;
+ unsigned short save1, save2;
+
+ PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n",
+ (long)memaddr, (long)ioaddr ));
+
+ /* Test whether memory readable and writable */
+ PROBE_PRINT(( "lance_probe1: testing memory to be accessible\n" ));
+ if (!addr_accessible( memaddr, 1, 1 )) goto probe_fail;
+
+ /* Written values should come back... */
+ PROBE_PRINT(( "lance_probe1: testing memory to be writable (1)\n" ));
+ save1 = *memaddr;
+ *memaddr = 0x0001;
+ if (*memaddr != 0x0001) goto probe_fail;
+ PROBE_PRINT(( "lance_probe1: testing memory to be writable (2)\n" ));
+ *memaddr = 0x0000;
+ if (*memaddr != 0x0000) goto probe_fail;
+ *memaddr = save1;
+
+ /* First port should be readable and writable */
+ PROBE_PRINT(( "lance_probe1: testing ioport to be accessible\n" ));
+ if (!addr_accessible( ioaddr, 1, 1 )) goto probe_fail;
+
+ /* and written values should be readable */
+ PROBE_PRINT(( "lance_probe1: testing ioport to be writeable\n" ));
+ save2 = ioaddr[1];
+ ioaddr[1] = 0x0001;
+ if (ioaddr[1] != 0x0001) goto probe_fail;
+
+ /* The CSR0_INIT bit should not be readable */
+ PROBE_PRINT(( "lance_probe1: testing CSR0 register function (1)\n" ));
+ save1 = ioaddr[0];
+ ioaddr[1] = CSR0;
+ ioaddr[0] = CSR0_INIT | CSR0_STOP;
+ if (ioaddr[0] != CSR0_STOP) {
+ ioaddr[0] = save1;
+ ioaddr[1] = save2;
+ goto probe_fail;
+ }
+ PROBE_PRINT(( "lance_probe1: testing CSR0 register function (2)\n" ));
+ ioaddr[0] = CSR0_STOP;
+ if (ioaddr[0] != CSR0_STOP) {
+ ioaddr[0] = save1;
+ ioaddr[1] = save2;
+ goto probe_fail;
+ }
+
+ /* Now ok... */
+ PROBE_PRINT(( "lance_probe1: Lance card detected\n" ));
+ goto probe_ok;
+
+ probe_fail:
+ return 0;
+
+ probe_ok:
+ lp = netdev_priv(dev);
+ MEM = (struct lance_memory *)memaddr;
+ IO = lp->iobase = (struct lance_ioreg *)ioaddr;
+ dev->base_addr = (unsigned long)ioaddr; /* informational only */
+ lp->memcpy_f = init_rec->slow_flag ? slow_memcpy : memcpy;
+
+ REGA( CSR0 ) = CSR0_STOP;
+
+ /* Now test for type: If the eeprom I/O port is readable, it is a
+ * PAM card */
+ if (addr_accessible( &(IO->eeprom), 0, 0 )) {
+ /* Switch back to RAM */
+ i = IO->mem;
+ lp->cardtype = PAM_CARD;
+ }
+ else if (*RIEBL_MAGIC_ADDR == RIEBL_MAGIC) {
+ lp->cardtype = NEW_RIEBL;
+ }
+ else
+ lp->cardtype = OLD_RIEBL;
+
+ if (lp->cardtype == PAM_CARD ||
+ memaddr == (unsigned short *)0xffe00000) {
+ /* PAM's card and the Riebl on ST use the level 5 autovector */
+ if (request_irq(IRQ_AUTO_5, lance_interrupt, 0,
+ "PAM,Riebl-ST Ethernet", dev)) {
+ printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
+ return 0;
+ }
+ dev->irq = IRQ_AUTO_5;
+ }
+ else {
+ /* For VME-RieblCards, request a free VME int */
+ unsigned int irq = atari_register_vme_int();
+ if (!irq) {
+ printk( "Lance: request for VME interrupt failed\n" );
+ return 0;
+ }
+ if (request_irq(irq, lance_interrupt, 0, "Riebl-VME Ethernet",
+ dev)) {
+ printk( "Lance: request for irq %u failed\n", irq );
+ return 0;
+ }
+ dev->irq = irq;
+ }
+
+ printk("%s: %s at io %#lx, mem %#lx, irq %d%s, hwaddr ",
+ dev->name, lance_names[lp->cardtype],
+ (unsigned long)ioaddr,
+ (unsigned long)memaddr,
+ dev->irq,
+ init_rec->slow_flag ? " (slow memcpy)" : "" );
+
+ /* Get the ethernet address */
+ switch( lp->cardtype ) {
+ case OLD_RIEBL:
+ /* No ethernet address! (Set some default address) */
+ memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN);
+ break;
+ case NEW_RIEBL:
+ lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
+ break;
+ case PAM_CARD:
+ i = IO->eeprom;
+ for( i = 0; i < 6; ++i )
+ dev->dev_addr[i] =
+ ((((unsigned short *)MEM)[i*2] & 0x0f) << 4) |
+ ((((unsigned short *)MEM)[i*2+1] & 0x0f));
+ i = IO->mem;
+ break;
+ }
+ printk("%pM\n", dev->dev_addr);
+ if (lp->cardtype == OLD_RIEBL) {
+ printk( "%s: Warning: This is a default ethernet address!\n",
+ dev->name );
+ printk( " Use \"ifconfig hw ether ...\" to set the address.\n" );
+ }
+
+ spin_lock_init(&lp->devlock);
+
+ MEM->init.mode = 0x0000; /* Disable Rx and Tx. */
+ for( i = 0; i < 6; i++ )
+ MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
+ MEM->init.filter[0] = 0x00000000;
+ MEM->init.filter[1] = 0x00000000;
+ MEM->init.rx_ring.adr_lo = offsetof( struct lance_memory, rx_head );
+ MEM->init.rx_ring.adr_hi = 0;
+ MEM->init.rx_ring.len = RX_RING_LEN_BITS;
+ MEM->init.tx_ring.adr_lo = offsetof( struct lance_memory, tx_head );
+ MEM->init.tx_ring.adr_hi = 0;
+ MEM->init.tx_ring.len = TX_RING_LEN_BITS;
+
+ if (lp->cardtype == PAM_CARD)
+ IO->ivec = IRQ_SOURCE_TO_VECTOR(dev->irq);
+ else
+ *RIEBL_IVEC_ADDR = IRQ_SOURCE_TO_VECTOR(dev->irq);
+
+ if (did_version++ == 0)
+ DPRINTK( 1, ( version ));
+
+ dev->netdev_ops = &lance_netdev_ops;
+
+ /* XXX MSch */
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ return 1;
+}
+
+
+static int lance_open( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_ioreg *IO = lp->iobase;
+ int i;
+
+ DPRINTK( 2, ( "%s: lance_open()\n", dev->name ));
+
+ lance_init_ring(dev);
+ /* Re-initialize the LANCE, and start it when done. */
+
+ REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
+ REGA( CSR2 ) = 0;
+ REGA( CSR1 ) = 0;
+ REGA( CSR0 ) = CSR0_INIT;
+ /* From now on, AREG is kept to point to CSR0 */
+
+ i = 1000000;
+ while (--i > 0)
+ if (DREG & CSR0_IDON)
+ break;
+ if (i <= 0 || (DREG & CSR0_ERR)) {
+ DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
+ dev->name, i, DREG ));
+ DREG = CSR0_STOP;
+ return -EIO;
+ }
+ DREG = CSR0_IDON;
+ DREG = CSR0_STRT;
+ DREG = CSR0_INEA;
+
+ netif_start_queue (dev);
+
+ DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));
+
+ return 0;
+}
+
+
+/* Initialize the LANCE Rx and Tx rings. */
+
+static void lance_init_ring( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int i;
+ unsigned offset;
+
+ lp->tx_full = 0;
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_tx = 0;
+
+ offset = offsetof( struct lance_memory, packet_area );
+
+/* If the packet buffer at offset 'o' would conflict with the reserved area
+ * of RieblCards, advance it */
+#define CHECK_OFFSET(o) \
+ do { \
+ if (lp->cardtype == OLD_RIEBL || lp->cardtype == NEW_RIEBL) { \
+ if (((o) < RIEBL_RSVD_START) ? (o)+PKT_BUF_SZ > RIEBL_RSVD_START \
+ : (o) < RIEBL_RSVD_END) \
+ (o) = RIEBL_RSVD_END; \
+ } \
+ } while(0)
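+
+/* Given the layout above, only the very last rx buffer trips this
+ * check: it would start at offset 0xec90 and run into the reserved
+ * area at 0xee70, so it is moved up to 0xeec0 -- the "shifting the
+ * last packet buffer" mentioned in the v1.1 changelog.
+ */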
+
+ for( i = 0; i < TX_RING_SIZE; i++ ) {
+ CHECK_OFFSET(offset);
+ MEM->tx_head[i].base = offset;
+ MEM->tx_head[i].flag = TMD1_OWN_HOST;
+ MEM->tx_head[i].base_hi = 0;
+ MEM->tx_head[i].length = 0;
+ MEM->tx_head[i].misc = 0;
+ offset += PKT_BUF_SZ;
+ }
+
+ for( i = 0; i < RX_RING_SIZE; i++ ) {
+ CHECK_OFFSET(offset);
+ MEM->rx_head[i].base = offset;
+ MEM->rx_head[i].flag = TMD1_OWN_CHIP;
+ MEM->rx_head[i].base_hi = 0;
+ MEM->rx_head[i].buf_length = -PKT_BUF_SZ;
+ MEM->rx_head[i].msg_length = 0;
+ offset += PKT_BUF_SZ;
+ }
+}
+
+
+/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
+
+
+static void lance_tx_timeout (struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_ioreg *IO = lp->iobase;
+
+ AREG = CSR0;
+ DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
+ dev->name, DREG ));
+ DREG = CSR0_STOP;
+ /*
+ * Always set BSWP after a STOP as STOP puts it back into
+ * little endian mode.
+ */
+ REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
+ dev->stats.tx_errors++;
+#ifndef final_version
+ { int i;
+ DPRINTK( 2, ( "Ring data: dirty_tx %d cur_tx %d%s cur_rx %d\n",
+ lp->dirty_tx, lp->cur_tx,
+ lp->tx_full ? " (full)" : "",
+ lp->cur_rx ));
+ for( i = 0 ; i < RX_RING_SIZE; i++ )
+ DPRINTK( 2, ( "rx #%d: base=%04x blen=%04x mlen=%04x\n",
+ i, MEM->rx_head[i].base,
+ -MEM->rx_head[i].buf_length,
+ MEM->rx_head[i].msg_length ));
+ for( i = 0 ; i < TX_RING_SIZE; i++ )
+ DPRINTK( 2, ( "tx #%d: base=%04x len=%04x misc=%04x\n",
+ i, MEM->tx_head[i].base,
+ -MEM->tx_head[i].length,
+ MEM->tx_head[i].misc ));
+ }
+#endif
+ /* XXX MSch: maybe purge/reinit ring here */
+ /* lance_restart, essentially */
+ lance_init_ring(dev);
+ REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
+
+static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_ioreg *IO = lp->iobase;
+ int entry, len;
+ struct lance_tx_head *head;
+ unsigned long flags;
+
+ DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
+ dev->name, DREG ));
+
+
+ /* Old LANCE chips don't automatically pad buffers to the minimum size. */
+ len = skb->len;
+ if (len < ETH_ZLEN)
+ len = ETH_ZLEN;
+ /* The PAM card has a bug: it can only send packets with an even number of bytes! */
+ else if (lp->cardtype == PAM_CARD && (len & 1))
+ ++len;
+
+ if (len > skb->len) {
+ if (skb_padto(skb, len))
+ return NETDEV_TX_OK;
+ }
+
+ netif_stop_queue (dev);
+
+ /* Fill in a Tx ring entry */
+ if (lance_debug >= 3) {
+ printk( "%s: TX pkt type 0x%04x from %pM to %pM"
+ " data at 0x%08x len %d\n",
+ dev->name, ((u_short *)skb->data)[6],
+ &skb->data[6], skb->data,
+ (int)skb->data, (int)skb->len );
+ }
+
+ /* We're not prepared for the interrupt until the last flags are
+ * set/reset, and the interrupt may happen as soon as OWN_CHIP is set... */
+ spin_lock_irqsave (&lp->devlock, flags);
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & TX_RING_MOD_MASK;
+ head = &(MEM->tx_head[entry]);
+
+ /* Caution: the write order is important here, set the "ownership" bits
+ * last.
+ */
+
+
+ head->length = -len;
+ head->misc = 0;
+ lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
+ head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
+ dev->stats.tx_bytes += skb->len;
+ dev_kfree_skb( skb );
+ lp->cur_tx++;
+ while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
+ lp->cur_tx -= TX_RING_SIZE;
+ lp->dirty_tx -= TX_RING_SIZE;
+ }
+
+ /* Trigger an immediate send poll. */
+ DREG = CSR0_INEA | CSR0_TDMD;
+
+ if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
+ TMD1_OWN_HOST)
+ netif_start_queue (dev);
+ else
+ lp->tx_full = 1;
+ spin_unlock_irqrestore (&lp->devlock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* The LANCE interrupt handler. */
+
+static irqreturn_t lance_interrupt( int irq, void *dev_id )
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp;
+ struct lance_ioreg *IO;
+ int csr0, boguscnt = 10;
+ int handled = 0;
+
+ if (dev == NULL) {
+ DPRINTK( 1, ( "lance_interrupt(): interrupt for unknown device.\n" ));
+ return IRQ_NONE;
+ }
+
+ lp = netdev_priv(dev);
+ IO = lp->iobase;
+ spin_lock (&lp->devlock);
+
+ AREG = CSR0;
+
+ while( ((csr0 = DREG) & (CSR0_ERR | CSR0_TINT | CSR0_RINT)) &&
+ --boguscnt >= 0) {
+ handled = 1;
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ DREG = csr0 & ~(CSR0_INIT | CSR0_STRT | CSR0_STOP |
+ CSR0_TDMD | CSR0_INEA);
+
+ DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n",
+ dev->name, csr0, DREG ));
+
+ if (csr0 & CSR0_RINT) /* Rx interrupt */
+ lance_rx( dev );
+
+ if (csr0 & CSR0_TINT) { /* Tx-done interrupt */
+ int dirty_tx = lp->dirty_tx;
+
+ while( dirty_tx < lp->cur_tx) {
+ int entry = dirty_tx & TX_RING_MOD_MASK;
+ int status = MEM->tx_head[entry].flag;
+
+ if (status & TMD1_OWN_CHIP)
+ break; /* It still hasn't been Txed */
+
+ MEM->tx_head[entry].flag = 0;
+
+ if (status & TMD1_ERR) {
+ /* There was a major error, log it. */
+ int err_status = MEM->tx_head[entry].misc;
+ dev->stats.tx_errors++;
+ if (err_status & TMD3_RTRY) dev->stats.tx_aborted_errors++;
+ if (err_status & TMD3_LCAR) dev->stats.tx_carrier_errors++;
+ if (err_status & TMD3_LCOL) dev->stats.tx_window_errors++;
+ if (err_status & TMD3_UFLO) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ dev->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ DPRINTK( 1, ( "%s: Tx FIFO error! Status %04x\n",
+ dev->name, csr0 ));
+ /* Restart the chip. */
+ DREG = CSR0_STRT;
+ }
+ } else {
+ if (status & (TMD1_MORE | TMD1_ONE | TMD1_DEF))
+ dev->stats.collisions++;
+ dev->stats.tx_packets++;
+ }
+
+ /* XXX MSch: free skb?? */
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ DPRINTK( 0, ( "out-of-sync dirty pointer,"
+ " %d vs. %d, full=%ld.\n",
+ dirty_tx, lp->cur_tx, lp->tx_full ));
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ if (lp->tx_full && (netif_queue_stopped(dev)) &&
+ dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ netif_wake_queue (dev);
+ }
+
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors. */
+ if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & CSR0_MERR) {
+ DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), "
+ "status %04x.\n", dev->name, csr0 ));
+ /* Restart the chip. */
+ DREG = CSR0_STRT;
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable. */
+ DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR |
+ CSR0_IDON | CSR0_INEA;
+
+ DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n",
+ dev->name, DREG ));
+
+ spin_unlock (&lp->devlock);
+ return IRQ_RETVAL(handled);
+}
+
+
+static int lance_rx( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int entry = lp->cur_rx & RX_RING_MOD_MASK;
+ int i;
+
+ DPRINTK( 2, ( "%s: rx int, flag=%04x\n", dev->name,
+ MEM->rx_head[entry].flag ));
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) {
+ struct lance_rx_head *head = &(MEM->rx_head[entry]);
+ int status = head->flag;
+
+ if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with full-sized
+ buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & RMD1_ENP) /* Only count a general error at the */
+ dev->stats.rx_errors++; /* end of a packet.*/
+ if (status & RMD1_FRAM) dev->stats.rx_frame_errors++;
+ if (status & RMD1_OFLO) dev->stats.rx_over_errors++;
+ if (status & RMD1_CRC) dev->stats.rx_crc_errors++;
+ if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++;
+ head->flag &= (RMD1_ENP|RMD1_STP);
+ } else {
+ /* Malloc up new buffer, compatible with net-3. */
+ short pkt_len = head->msg_length & 0xfff;
+ struct sk_buff *skb;
+
+ if (pkt_len < 60) {
+ printk( "%s: Runt packet!\n", dev->name );
+ dev->stats.rx_errors++;
+ }
+ else {
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
+ if (skb == NULL) {
+ for( i = 0; i < RX_RING_SIZE; i++ )
+ if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
+ RMD1_OWN_CHIP)
+ break;
+
+ if (i > RX_RING_SIZE - 2) {
+ dev->stats.rx_dropped++;
+ head->flag |= RMD1_OWN_CHIP;
+ lp->cur_rx++;
+ }
+ break;
+ }
+
+ if (lance_debug >= 3) {
+ u_char *data = PKTBUF_ADDR(head);
+
+ printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %pM to %pM "
+ "data %02x %02x %02x %02x %02x %02x %02x %02x "
+ "len %d\n",
+ dev->name, ((u_short *)data)[6],
+ &data[6], data,
+ data[15], data[16], data[17], data[18],
+ data[19], data[20], data[21], data[22],
+ pkt_len);
+ }
+
+ skb_reserve( skb, 2 ); /* 16 byte align */
+ skb_put( skb, pkt_len ); /* Make room */
+ lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len );
+ skb->protocol = eth_type_trans( skb, dev );
+ netif_rx( skb );
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ }
+
+ head->flag |= RMD1_OWN_CHIP;
+ entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+ }
+ lp->cur_rx &= RX_RING_MOD_MASK;
+
+ /* From lance.c (Donald Becker): */
+ /* We should check that at least two ring entries are free. If not,
+ we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+
+static int lance_close( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_ioreg *IO = lp->iobase;
+
+ netif_stop_queue (dev);
+
+ AREG = CSR0;
+
+ DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, DREG ));
+
+ /* We stop the LANCE here -- it occasionally polls
+ memory if we don't. */
+ DREG = CSR0_STOP;
+
+ return 0;
+}
+
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
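+
+   (In this driver the mode is derived from dev->flags and
+   netdev_mc_count() rather than from an explicit num_addrs argument.)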
+ */
+
+static void set_multicast_list( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_ioreg *IO = lp->iobase;
+
+	if (!netif_running(dev))
+ /* Only possible if board is already started */
+ return;
+
+ /* We take the simple way out and always enable promiscuous mode. */
+ DREG = CSR0_STOP; /* Temporarily stop the lance. */
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Log any net taps. */
+ DPRINTK( 2, ( "%s: Promiscuous mode enabled.\n", dev->name ));
+ REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int num_addrs = netdev_mc_count(dev);
+ int i;
+ /* We don't use the multicast table, but rely on upper-layer
+ * filtering. */
+ memset( multicast_table, (num_addrs == 0) ? 0 : -1,
+ sizeof(multicast_table) );
+ for( i = 0; i < 4; i++ )
+ REGA( CSR8+i ) = multicast_table[i];
+ REGA( CSR15 ) = 0; /* Unset promiscuous mode */
+ }
+
+ /*
+ * Always set BSWP after a STOP as STOP puts it back into
+ * little endian mode.
+ */
+ REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
+
+ /* Resume normal operation and reset AREG to CSR0 */
+ REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT;
+}
+
+
+/* This is needed for old RieblCards and possibly for new RieblCards */
+
+static int lance_set_mac_address( struct net_device *dev, void *addr )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct sockaddr *saddr = addr;
+ int i;
+
+ if (lp->cardtype != OLD_RIEBL && lp->cardtype != NEW_RIEBL)
+ return -EOPNOTSUPP;
+
+ if (netif_running(dev)) {
+ /* Only possible while card isn't started */
+ DPRINTK( 1, ( "%s: hwaddr can be set only while card isn't open.\n",
+ dev->name ));
+ return -EIO;
+ }
+
+ memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len );
+ for( i = 0; i < 6; i++ )
+ MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
+ lp->memcpy_f( RIEBL_HWADDR_ADDR, dev->dev_addr, 6 );
+ /* set also the magic for future sessions */
+ *RIEBL_MAGIC_ADDR = RIEBL_MAGIC;
+
+ return 0;
+}
+
+
+#ifdef MODULE
+static struct net_device *atarilance_dev;
+
+static int __init atarilance_module_init(void)
+{
+ atarilance_dev = atarilance_probe(-1);
+ return PTR_ERR_OR_ZERO(atarilance_dev);
+}
+
+static void __exit atarilance_module_exit(void)
+{
+ unregister_netdev(atarilance_dev);
+ free_irq(atarilance_dev->irq, atarilance_dev);
+ free_netdev(atarilance_dev);
+}
+module_init(atarilance_module_init);
+module_exit(atarilance_module_exit);
+#endif /* MODULE */
+
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * tab-width: 4
+ * End:
+ */
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
new file mode 100644
index 000000000..cb367cc59
--- /dev/null
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -0,0 +1,1469 @@
+/*
+ *
+ * Alchemy Au1x00 ethernet driver
+ *
+ * Copyright 2001-2003, 2006 MontaVista Software Inc.
+ * Copyright 2002 TimeSys Corp.
+ * Added ethtool/mii-tool support,
+ * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
+ * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
+ * or riemer@riemer-nt.de: fixed the link beat detection with
+ * ioctls (SIOCGMIIPHY)
+ * Copyright 2006 Herbert Valerio Riedel <hvr@gnu.org>
+ * converted to use linux-2.6.x's PHY framework
+ *
+ * Author: MontaVista Software, Inc.
+ * ppopov@mvista.com or source@mvista.com
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * ########################################################################
+ *
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/capability.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/cpu.h>
+#include <linux/io.h>
+
+#include <asm/mipsregs.h>
+#include <asm/irq.h>
+#include <asm/processor.h>
+
+#include <au1000.h>
+#include <au1xxx_eth.h>
+#include <prom.h>
+
+#include "au1000_eth.h"
+
+#ifdef AU1000_ETH_DEBUG
+static int au1000_debug = 5;
+#else
+static int au1000_debug = 3;
+#endif
+
+#define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
+#define DRV_NAME "au1000_eth"
+#define DRV_VERSION "1.7"
+#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
+#define DRV_DESC "Au1xxx on-chip Ethernet driver"
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/* AU1000 MAC registers and bits */
+#define MAC_CONTROL 0x0
+# define MAC_RX_ENABLE (1 << 2)
+# define MAC_TX_ENABLE (1 << 3)
+# define MAC_DEF_CHECK (1 << 5)
+# define MAC_SET_BL(X) (((X) & 0x3) << 6)
+# define MAC_AUTO_PAD (1 << 8)
+# define MAC_DISABLE_RETRY (1 << 10)
+# define MAC_DISABLE_BCAST (1 << 11)
+# define MAC_LATE_COL (1 << 12)
+# define MAC_HASH_MODE (1 << 13)
+# define MAC_HASH_ONLY (1 << 15)
+# define MAC_PASS_ALL (1 << 16)
+# define MAC_INVERSE_FILTER (1 << 17)
+# define MAC_PROMISCUOUS (1 << 18)
+# define MAC_PASS_ALL_MULTI (1 << 19)
+# define MAC_FULL_DUPLEX (1 << 20)
+# define MAC_NORMAL_MODE 0
+# define MAC_INT_LOOPBACK (1 << 21)
+# define MAC_EXT_LOOPBACK (1 << 22)
+# define MAC_DISABLE_RX_OWN (1 << 23)
+# define MAC_BIG_ENDIAN (1 << 30)
+# define MAC_RX_ALL (1 << 31)
+#define MAC_ADDRESS_HIGH 0x4
+#define MAC_ADDRESS_LOW 0x8
+#define MAC_MCAST_HIGH 0xC
+#define MAC_MCAST_LOW 0x10
+#define MAC_MII_CNTRL 0x14
+# define MAC_MII_BUSY (1 << 0)
+# define MAC_MII_READ 0
+# define MAC_MII_WRITE (1 << 1)
+# define MAC_SET_MII_SELECT_REG(X) (((X) & 0x1f) << 6)
+# define MAC_SET_MII_SELECT_PHY(X) (((X) & 0x1f) << 11)
+#define MAC_MII_DATA 0x18
+#define MAC_FLOW_CNTRL 0x1C
+# define MAC_FLOW_CNTRL_BUSY (1 << 0)
+# define MAC_FLOW_CNTRL_ENABLE (1 << 1)
+# define MAC_PASS_CONTROL (1 << 2)
+# define MAC_SET_PAUSE(X) (((X) & 0xffff) << 16)
+#define MAC_VLAN1_TAG 0x20
+#define MAC_VLAN2_TAG 0x24
+
+/* Ethernet Controller Enable */
+# define MAC_EN_CLOCK_ENABLE (1 << 0)
+# define MAC_EN_RESET0 (1 << 1)
+# define MAC_EN_TOSS (0 << 2)
+# define MAC_EN_CACHEABLE (1 << 3)
+# define MAC_EN_RESET1 (1 << 4)
+# define MAC_EN_RESET2 (1 << 5)
+# define MAC_DMA_RESET (1 << 6)
+
+/* Ethernet Controller DMA Channels */
+/* offsets from MAC_TX_RING_ADDR address */
+#define MAC_TX_BUFF0_STATUS 0x0
+# define TX_FRAME_ABORTED (1 << 0)
+# define TX_JAB_TIMEOUT (1 << 1)
+# define TX_NO_CARRIER (1 << 2)
+# define TX_LOSS_CARRIER (1 << 3)
+# define TX_EXC_DEF (1 << 4)
+# define TX_LATE_COLL_ABORT (1 << 5)
+# define TX_EXC_COLL (1 << 6)
+# define TX_UNDERRUN (1 << 7)
+# define TX_DEFERRED (1 << 8)
+# define TX_LATE_COLL (1 << 9)
+# define TX_COLL_CNT_MASK (0xF << 10)
+# define TX_PKT_RETRY (1 << 31)
+#define MAC_TX_BUFF0_ADDR 0x4
+# define TX_DMA_ENABLE (1 << 0)
+# define TX_T_DONE (1 << 1)
+# define TX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3)
+#define MAC_TX_BUFF0_LEN 0x8
+#define MAC_TX_BUFF1_STATUS 0x10
+#define MAC_TX_BUFF1_ADDR 0x14
+#define MAC_TX_BUFF1_LEN 0x18
+#define MAC_TX_BUFF2_STATUS 0x20
+#define MAC_TX_BUFF2_ADDR 0x24
+#define MAC_TX_BUFF2_LEN 0x28
+#define MAC_TX_BUFF3_STATUS 0x30
+#define MAC_TX_BUFF3_ADDR 0x34
+#define MAC_TX_BUFF3_LEN 0x38
+
+/* offsets from MAC_RX_RING_ADDR */
+#define MAC_RX_BUFF0_STATUS 0x0
+# define RX_FRAME_LEN_MASK 0x3fff
+# define RX_WDOG_TIMER (1 << 14)
+# define RX_RUNT (1 << 15)
+# define RX_OVERLEN (1 << 16)
+# define RX_COLL (1 << 17)
+# define RX_ETHER (1 << 18)
+# define RX_MII_ERROR (1 << 19)
+# define RX_DRIBBLING (1 << 20)
+# define RX_CRC_ERROR (1 << 21)
+# define RX_VLAN1 (1 << 22)
+# define RX_VLAN2 (1 << 23)
+# define RX_LEN_ERROR (1 << 24)
+# define RX_CNTRL_FRAME (1 << 25)
+# define RX_U_CNTRL_FRAME (1 << 26)
+# define RX_MCAST_FRAME (1 << 27)
+# define RX_BCAST_FRAME (1 << 28)
+# define RX_FILTER_FAIL (1 << 29)
+# define RX_PACKET_FILTER (1 << 30)
+# define RX_MISSED_FRAME (1 << 31)
+
+# define RX_ERROR (RX_WDOG_TIMER | RX_RUNT | RX_OVERLEN | \
+ RX_COLL | RX_MII_ERROR | RX_CRC_ERROR | \
+ RX_LEN_ERROR | RX_U_CNTRL_FRAME | RX_MISSED_FRAME)
+#define MAC_RX_BUFF0_ADDR 0x4
+# define RX_DMA_ENABLE (1 << 0)
+# define RX_T_DONE (1 << 1)
+# define RX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3)
+# define RX_SET_BUFF_ADDR(X) ((X) & 0xffffffc0)
+#define MAC_RX_BUFF1_STATUS 0x10
+#define MAC_RX_BUFF1_ADDR 0x14
+#define MAC_RX_BUFF2_STATUS 0x20
+#define MAC_RX_BUFF2_ADDR 0x24
+#define MAC_RX_BUFF3_STATUS 0x30
+#define MAC_RX_BUFF3_ADDR 0x34
+
+/*
+ * Theory of operation
+ *
+ * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
+ * There are four receive and four transmit descriptors. These
+ * descriptors are not in memory; rather, they are just a set of
+ * hardware registers.
+ *
+ * Since the Au1000 has a coherent data cache, the receive and
+ * transmit buffers are allocated from the KSEG0 segment. The
+ * hardware registers, however, are still mapped at KSEG1 to
+ * make sure there's no out-of-order writes, and that all writes
+ * complete immediately.
+ */
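+
+/*
+ * An illustrative sketch of what this implies (not driver code,
+ * assuming the ring sizes stay powers of two): since the descriptors
+ * are hardware registers, a ring "position" is nothing more than an
+ * index that wraps with a mask,
+ *
+ *	head = (head + 1) & (NUM_TX_DMA - 1);
+ *
+ * which is exactly how rx_head, tx_head and tx_tail advance below.
+ */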
+
+/*
+ * board-specific configurations
+ *
+ * PHY detection algorithm
+ *
+ * If phy_static_config is undefined, the PHY setup is
+ * autodetected:
+ *
+ * mii_probe() first searches the current MAC's MII bus for a PHY,
+ * selecting the first (or last, if phy_search_highest_addr is
+ * defined) PHY address not already claimed by another netdev.
+ *
+ * If nothing was found that way when searching for the 2nd ethernet
+ * controller's PHY and phy1_search_mac0 is defined, then
+ * the first MII bus is searched as well for an unclaimed PHY; this is
+ * needed in case of a dual-PHY accessible only through the MAC0's MII
+ * bus.
+ *
+ * Finally, if no PHY is found, then the corresponding ethernet
+ * controller is not registered to the network subsystem.
+ */
+
+/* autodetection defaults: phy1_search_mac0 */
+
+/* static PHY setup
+ *
+ * most boards' PHY setup should be detected properly by the
+ * autodetection algorithm in mii_probe(), but in some cases (e.g. if
+ * you have a switch attached, or want to use the PHY's interrupt
+ * notification capabilities) you can provide a static PHY
+ * configuration here.
+ *
+ * IRQs may only be set if a PHY address was configured; if a PHY
+ * address is given, a bus id must be set as well.
+ *
+ * PS: make sure the IRQs used are configured properly in the
+ * board-specific irq map.
+ */
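+
+/*
+ * For illustration only, a hypothetical board file could pass a static
+ * setup via platform_data roughly like this (field names as consumed
+ * by au1000_probe(); the values are made up):
+ *
+ *	static struct au1000_eth_platform_data board_eth0_data = {
+ *		.phy_static_config	= 1,
+ *		.phy_addr		= 1,
+ *		.phy_busid		= 0,
+ *		.phy_irq		= PHY_POLL,
+ *	};
+ */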
+
+static void au1000_enable_mac(struct net_device *dev, int force_reset)
+{
+ unsigned long flags;
+ struct au1000_private *aup = netdev_priv(dev);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ if (force_reset || (!aup->mac_enabled)) {
+ writel(MAC_EN_CLOCK_ENABLE, aup->enable);
+ wmb(); /* drain writebuffer */
+ mdelay(2);
+ writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
+ | MAC_EN_CLOCK_ENABLE), aup->enable);
+ wmb(); /* drain writebuffer */
+ mdelay(2);
+
+ aup->mac_enabled = 1;
+ }
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+}
+
+/*
+ * MII operations
+ */
+static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ u32 *const mii_control_reg = &aup->mac->mii_control;
+ u32 *const mii_data_reg = &aup->mac->mii_data;
+ u32 timedout = 20;
+ u32 mii_control;
+
+ while (readl(mii_control_reg) & MAC_MII_BUSY) {
+ mdelay(1);
+ if (--timedout == 0) {
+ netdev_err(dev, "read_MII busy timeout!!\n");
+ return -1;
+ }
+ }
+
+ mii_control = MAC_SET_MII_SELECT_REG(reg) |
+ MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;
+
+ writel(mii_control, mii_control_reg);
+
+ timedout = 20;
+ while (readl(mii_control_reg) & MAC_MII_BUSY) {
+ mdelay(1);
+ if (--timedout == 0) {
+ netdev_err(dev, "mdio_read busy timeout!!\n");
+ return -1;
+ }
+ }
+ return readl(mii_data_reg);
+}
+
+static void au1000_mdio_write(struct net_device *dev, int phy_addr,
+ int reg, u16 value)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ u32 *const mii_control_reg = &aup->mac->mii_control;
+ u32 *const mii_data_reg = &aup->mac->mii_data;
+ u32 timedout = 20;
+ u32 mii_control;
+
+ while (readl(mii_control_reg) & MAC_MII_BUSY) {
+ mdelay(1);
+ if (--timedout == 0) {
+ netdev_err(dev, "mdio_write busy timeout!!\n");
+ return;
+ }
+ }
+
+ mii_control = MAC_SET_MII_SELECT_REG(reg) |
+ MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;
+
+ writel(value, mii_data_reg);
+ writel(mii_control, mii_control_reg);
+}
+
+static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ /* WARNING: bus->phy_map[phy_addr].attached_dev == dev does
+ * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus)
+ */
+ struct net_device *const dev = bus->priv;
+
+ /* make sure the MAC associated with this
+ * mii_bus is enabled
+ */
+ au1000_enable_mac(dev, 0);
+
+ return au1000_mdio_read(dev, phy_addr, regnum);
+}
+
+static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
+ u16 value)
+{
+ struct net_device *const dev = bus->priv;
+
+ /* make sure the MAC associated with this
+ * mii_bus is enabled
+ */
+ au1000_enable_mac(dev, 0);
+
+ au1000_mdio_write(dev, phy_addr, regnum, value);
+ return 0;
+}
+
+static int au1000_mdiobus_reset(struct mii_bus *bus)
+{
+ struct net_device *const dev = bus->priv;
+
+ /* make sure the MAC associated with this
+ * mii_bus is enabled
+ */
+ au1000_enable_mac(dev, 0);
+
+ return 0;
+}
+
+static void au1000_hard_stop(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ u32 reg;
+
+ netif_dbg(aup, drv, dev, "hard stop\n");
+
+ reg = readl(&aup->mac->control);
+ reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
+ writel(reg, &aup->mac->control);
+ wmb(); /* drain writebuffer */
+ mdelay(10);
+}
+
+static void au1000_enable_rx_tx(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ u32 reg;
+
+ netif_dbg(aup, hw, dev, "enable_rx_tx\n");
+
+ reg = readl(&aup->mac->control);
+ reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
+ writel(reg, &aup->mac->control);
+ wmb(); /* drain writebuffer */
+ mdelay(10);
+}
+
+static void
+au1000_adjust_link(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ struct phy_device *phydev = aup->phy_dev;
+ unsigned long flags;
+ u32 reg;
+
+ int status_change = 0;
+
+ BUG_ON(!aup->phy_dev);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ if (phydev->link && (aup->old_speed != phydev->speed)) {
+ /* speed changed */
+
+ switch (phydev->speed) {
+ case SPEED_10:
+ case SPEED_100:
+ break;
+ default:
+ netdev_warn(dev, "Speed (%d) is not 10/100 ???\n",
+ phydev->speed);
+ break;
+ }
+
+ aup->old_speed = phydev->speed;
+
+ status_change = 1;
+ }
+
+ if (phydev->link && (aup->old_duplex != phydev->duplex)) {
+ /* duplex mode changed */
+
+ /* switching duplex mode requires to disable rx and tx! */
+ au1000_hard_stop(dev);
+
+ reg = readl(&aup->mac->control);
+ if (DUPLEX_FULL == phydev->duplex) {
+ reg |= MAC_FULL_DUPLEX;
+ reg &= ~MAC_DISABLE_RX_OWN;
+ } else {
+ reg &= ~MAC_FULL_DUPLEX;
+ reg |= MAC_DISABLE_RX_OWN;
+ }
+ writel(reg, &aup->mac->control);
+ wmb(); /* drain writebuffer */
+ mdelay(1);
+
+ au1000_enable_rx_tx(dev);
+ aup->old_duplex = phydev->duplex;
+
+ status_change = 1;
+ }
+
+ if (phydev->link != aup->old_link) {
+ /* link state changed */
+
+ if (!phydev->link) {
+ /* link went down */
+ aup->old_speed = 0;
+ aup->old_duplex = -1;
+ }
+
+ aup->old_link = phydev->link;
+ status_change = 1;
+ }
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+
+ if (status_change) {
+ if (phydev->link)
+ netdev_info(dev, "link up (%d/%s)\n",
+ phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+ else
+ netdev_info(dev, "link down\n");
+ }
+}
+
+static int au1000_mii_probe(struct net_device *dev)
+{
+ struct au1000_private *const aup = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
+ int phy_addr;
+
+ if (aup->phy_static_config) {
+ BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
+
+ if (aup->phy_addr)
+ phydev = aup->mii_bus->phy_map[aup->phy_addr];
+ else
+ netdev_info(dev, "using PHY-less setup\n");
+ return 0;
+ }
+
+ /* find the first (lowest address) PHY
+ * on the current MAC's MII bus
+ */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
+ if (aup->mii_bus->phy_map[phy_addr]) {
+ phydev = aup->mii_bus->phy_map[phy_addr];
+ if (!aup->phy_search_highest_addr)
+ /* break out with first one found */
+ break;
+ }
+
+ if (aup->phy1_search_mac0) {
+ /* try harder to find a PHY */
+ if (!phydev && (aup->mac_id == 1)) {
+ /* no PHY found, maybe we have a dual PHY? */
+ dev_info(&dev->dev, ": no PHY found on MAC1, "
+ "let's see if it's attached to MAC0...\n");
+
+ /* find the first (lowest address) non-attached
+ * PHY on the MAC0 MII bus
+ */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ struct phy_device *const tmp_phydev =
+ aup->mii_bus->phy_map[phy_addr];
+
+ if (aup->mac_id == 1)
+ break;
+
+ /* no PHY here... */
+ if (!tmp_phydev)
+ continue;
+
+ /* already claimed by MAC0 */
+ if (tmp_phydev->attached_dev)
+ continue;
+
+ phydev = tmp_phydev;
+ break; /* found it */
+ }
+ }
+ }
+
+ if (!phydev) {
+ netdev_err(dev, "no PHY found\n");
+ return -1;
+ }
+
+	/* now we are supposed to have a proper phydev to attach to... */
+ BUG_ON(phydev->attached_dev);
+
+ phydev = phy_connect(dev, dev_name(&phydev->dev),
+ &au1000_adjust_link, PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(phydev)) {
+ netdev_err(dev, "Could not attach to PHY\n");
+ return PTR_ERR(phydev);
+ }
+
+ /* mask with MAC supported features */
+ phydev->supported &= (SUPPORTED_10baseT_Half
+ | SUPPORTED_10baseT_Full
+ | SUPPORTED_100baseT_Half
+ | SUPPORTED_100baseT_Full
+ | SUPPORTED_Autoneg
+ /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */
+ | SUPPORTED_MII
+ | SUPPORTED_TP);
+
+ phydev->advertising = phydev->supported;
+
+ aup->old_link = 0;
+ aup->old_speed = 0;
+ aup->old_duplex = -1;
+ aup->phy_dev = phydev;
+
+ netdev_info(dev, "attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, irq=%d)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+
+ return 0;
+}
+
+
+/*
+ * Buffer allocation/deallocation routines. The buffer descriptor returned
+ * has the virtual and dma address of a buffer suitable for
+ * both receive and transmit operations.
+ */
+static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
+{
+ struct db_dest *pDB;
+ pDB = aup->pDBfree;
+
+ if (pDB)
+ aup->pDBfree = pDB->pnext;
+
+ return pDB;
+}
+
+void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
+{
+ struct db_dest *pDBfree = aup->pDBfree;
+ if (pDBfree)
+ pDBfree->pnext = pDB;
+ aup->pDBfree = pDB;
+}
+
+static void au1000_reset_mac_unlocked(struct net_device *dev)
+{
+ struct au1000_private *const aup = netdev_priv(dev);
+ int i;
+
+ au1000_hard_stop(dev);
+
+ writel(MAC_EN_CLOCK_ENABLE, aup->enable);
+ wmb(); /* drain writebuffer */
+ mdelay(2);
+ writel(0, aup->enable);
+ wmb(); /* drain writebuffer */
+ mdelay(2);
+
+ aup->tx_full = 0;
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ /* reset control bits */
+ aup->rx_dma_ring[i]->buff_stat &= ~0xf;
+ }
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ /* reset control bits */
+ aup->tx_dma_ring[i]->buff_stat &= ~0xf;
+ }
+
+ aup->mac_enabled = 0;
+
+}
+
+static void au1000_reset_mac(struct net_device *dev)
+{
+ struct au1000_private *const aup = netdev_priv(dev);
+ unsigned long flags;
+
+ netif_dbg(aup, hw, dev, "reset mac, aup %x\n",
+ (unsigned)aup);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ au1000_reset_mac_unlocked(dev);
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+}
+
+/*
+ * Setup the receive and transmit "rings". These pointers are the addresses
+ * of the rx and tx MAC DMA registers so they are fixed by the hardware --
+ * these are not descriptors sitting in memory.
+ */
+static void
+au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base)
+{
+ int i;
+
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ aup->rx_dma_ring[i] = (struct rx_dma *)
+ (tx_base + 0x100 + sizeof(struct rx_dma) * i);
+ }
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ aup->tx_dma_ring[i] = (struct tx_dma *)
+ (tx_base + sizeof(struct tx_dma) * i);
+ }
+}
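+
+/*
+ * A sanity note on the arithmetic above: struct tx_dma and struct
+ * rx_dma (see au1000_eth.h) are each four u32 words, i.e. 16 bytes, so
+ *
+ *	tx_base + sizeof(struct tx_dma) * 1  ==  tx_base + 0x10
+ *
+ * matching the 0x10 stride of the MAC_TX_BUFFn_* register offsets
+ * listed above; the rx descriptor registers start 0x100 into the same
+ * MACDMA region.
+ */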
+
+/*
+ * ethtool operations
+ */
+
+static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+
+ if (aup->phy_dev)
+ return phy_ethtool_gset(aup->phy_dev, cmd);
+
+ return -EINVAL;
+}
+
+static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (aup->phy_dev)
+ return phy_ethtool_sset(aup->phy_dev, cmd);
+
+ return -EINVAL;
+}
+
+static void
+au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
+ aup->mac_id);
+ info->regdump_len = 0;
+}
+
+static void au1000_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ aup->msg_enable = value;
+}
+
+static u32 au1000_get_msglevel(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ return aup->msg_enable;
+}
+
+static const struct ethtool_ops au1000_ethtool_ops = {
+ .get_settings = au1000_get_settings,
+ .set_settings = au1000_set_settings,
+ .get_drvinfo = au1000_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = au1000_get_msglevel,
+ .set_msglevel = au1000_set_msglevel,
+};
+
+
+/*
+ * Initialize the interface.
+ *
+ * When the device powers up, the clocks are disabled and the
+ * mac is in reset state. When the interface is closed, we
+ * do the same -- reset the device and disable the clocks to
+ * conserve power. Thus, whenever au1000_init() is called,
+ * the device should already be in reset state.
+ */
+static int au1000_init(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ unsigned long flags;
+ int i;
+ u32 control;
+
+ netif_dbg(aup, hw, dev, "au1000_init\n");
+
+ /* bring the device out of reset */
+ au1000_enable_mac(dev, 1);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ writel(0, &aup->mac->control);
+ aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
+ aup->tx_tail = aup->tx_head;
+ aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
+
+ writel(dev->dev_addr[5]<<8 | dev->dev_addr[4],
+ &aup->mac->mac_addr_high);
+ writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
+ dev->dev_addr[1]<<8 | dev->dev_addr[0],
+ &aup->mac->mac_addr_low);
+
+
+ for (i = 0; i < NUM_RX_DMA; i++)
+ aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
+
+ wmb(); /* drain writebuffer */
+
+ control = MAC_RX_ENABLE | MAC_TX_ENABLE;
+#ifndef CONFIG_CPU_LITTLE_ENDIAN
+ control |= MAC_BIG_ENDIAN;
+#endif
+ if (aup->phy_dev) {
+ if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex))
+ control |= MAC_FULL_DUPLEX;
+ else
+ control |= MAC_DISABLE_RX_OWN;
+ } else { /* PHY-less op, assume full-duplex */
+ control |= MAC_FULL_DUPLEX;
+ }
+
+ writel(control, &aup->mac->control);
+ writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */
+ wmb(); /* drain writebuffer */
+
+ spin_unlock_irqrestore(&aup->lock, flags);
+ return 0;
+}
+
+static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
+{
+ struct net_device_stats *ps = &dev->stats;
+
+ ps->rx_packets++;
+ if (status & RX_MCAST_FRAME)
+ ps->multicast++;
+
+ if (status & RX_ERROR) {
+ ps->rx_errors++;
+ if (status & RX_MISSED_FRAME)
+ ps->rx_missed_errors++;
+ if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
+ ps->rx_length_errors++;
+ if (status & RX_CRC_ERROR)
+ ps->rx_crc_errors++;
+ if (status & RX_COLL)
+ ps->collisions++;
+ } else
+ ps->rx_bytes += status & RX_FRAME_LEN_MASK;
+
+}
+
+/*
+ * Au1000 receive routine.
+ */
+static int au1000_rx(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct rx_dma *prxd;
+ u32 buff_stat, status;
+ struct db_dest *pDB;
+ u32 frmlen;
+
+ netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
+
+ prxd = aup->rx_dma_ring[aup->rx_head];
+ buff_stat = prxd->buff_stat;
+ while (buff_stat & RX_T_DONE) {
+ status = prxd->status;
+ pDB = aup->rx_db_inuse[aup->rx_head];
+ au1000_update_rx_stats(dev, status);
+ if (!(status & RX_ERROR)) {
+
+ /* good frame */
+ frmlen = (status & RX_FRAME_LEN_MASK);
+ frmlen -= 4; /* Remove FCS */
+ skb = netdev_alloc_skb(dev, frmlen + 2);
+			if (skb == NULL) {
+				dev->stats.rx_dropped++;
+				/* Don't spin on this same (still done)
+				 * descriptor; give up and retry it on
+				 * the next receive interrupt.
+				 */
+				break;
+			}
+ skb_reserve(skb, 2); /* 16 byte IP header align */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)pDB->vaddr, frmlen);
+ skb_put(skb, frmlen);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb); /* pass the packet to upper layers */
+ } else {
+ if (au1000_debug > 4) {
+ pr_err("rx_error(s):");
+ if (status & RX_MISSED_FRAME)
+ pr_cont(" miss");
+ if (status & RX_WDOG_TIMER)
+ pr_cont(" wdog");
+ if (status & RX_RUNT)
+ pr_cont(" runt");
+ if (status & RX_OVERLEN)
+ pr_cont(" overlen");
+ if (status & RX_COLL)
+ pr_cont(" coll");
+ if (status & RX_MII_ERROR)
+ pr_cont(" mii error");
+ if (status & RX_CRC_ERROR)
+ pr_cont(" crc error");
+ if (status & RX_LEN_ERROR)
+ pr_cont(" len error");
+ if (status & RX_U_CNTRL_FRAME)
+ pr_cont(" u control frame");
+ pr_cont("\n");
+ }
+ }
+ prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
+ aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
+ wmb(); /* drain writebuffer */
+
+ /* next descriptor */
+ prxd = aup->rx_dma_ring[aup->rx_head];
+ buff_stat = prxd->buff_stat;
+ }
+ return 0;
+}
+
+static void au1000_update_tx_stats(struct net_device *dev, u32 status)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ struct net_device_stats *ps = &dev->stats;
+
+ if (status & TX_FRAME_ABORTED) {
+ if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
+ if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
+ /* any other tx errors are only valid
+ * in half duplex mode
+ */
+ ps->tx_errors++;
+ ps->tx_aborted_errors++;
+ }
+ } else {
+ ps->tx_errors++;
+ ps->tx_aborted_errors++;
+ if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
+ ps->tx_carrier_errors++;
+ }
+ }
+}
+
+/*
+ * Called from the interrupt service routine to acknowledge
+ * the TX DONE bits. This is a must if the irq is setup as
+ * edge triggered.
+ */
+static void au1000_tx_ack(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ struct tx_dma *ptxd;
+
+ ptxd = aup->tx_dma_ring[aup->tx_tail];
+
+ while (ptxd->buff_stat & TX_T_DONE) {
+ au1000_update_tx_stats(dev, ptxd->status);
+ ptxd->buff_stat &= ~TX_T_DONE;
+ ptxd->len = 0;
+ wmb(); /* drain writebuffer */
+
+ aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
+ ptxd = aup->tx_dma_ring[aup->tx_tail];
+
+ if (aup->tx_full) {
+ aup->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ }
+}
+
+/*
+ * Au1000 interrupt service routine.
+ */
+static irqreturn_t au1000_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+
+ /* Handle RX interrupts first to minimize chance of overrun */
+
+ au1000_rx(dev);
+ au1000_tx_ack(dev);
+ return IRQ_RETVAL(1);
+}
+
+static int au1000_open(struct net_device *dev)
+{
+ int retval;
+ struct au1000_private *aup = netdev_priv(dev);
+
+ netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);
+
+ retval = request_irq(dev->irq, au1000_interrupt, 0,
+ dev->name, dev);
+ if (retval) {
+ netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
+ return retval;
+ }
+
+ retval = au1000_init(dev);
+ if (retval) {
+ netdev_err(dev, "error in au1000_init\n");
+ free_irq(dev->irq, dev);
+ return retval;
+ }
+
+ if (aup->phy_dev) {
+ /* cause the PHY state machine to schedule a link state check */
+ aup->phy_dev->state = PHY_CHANGELINK;
+ phy_start(aup->phy_dev);
+ }
+
+ netif_start_queue(dev);
+
+ netif_dbg(aup, drv, dev, "open: Initialization done.\n");
+
+ return 0;
+}
+
+static int au1000_close(struct net_device *dev)
+{
+ unsigned long flags;
+ struct au1000_private *const aup = netdev_priv(dev);
+
+ netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);
+
+ if (aup->phy_dev)
+ phy_stop(aup->phy_dev);
+
+ spin_lock_irqsave(&aup->lock, flags);
+
+ au1000_reset_mac_unlocked(dev);
+
+ /* stop the device */
+ netif_stop_queue(dev);
+
+ /* disable the interrupt */
+ free_irq(dev->irq, dev);
+ spin_unlock_irqrestore(&aup->lock, flags);
+
+ return 0;
+}
+
+/*
+ * Au1000 transmit routine.
+ */
+static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ struct net_device_stats *ps = &dev->stats;
+ struct tx_dma *ptxd;
+ u32 buff_stat;
+ struct db_dest *pDB;
+ int i;
+
+ netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
+ (unsigned)aup, skb->len,
+ skb->data, aup->tx_head);
+
+ ptxd = aup->tx_dma_ring[aup->tx_head];
+ buff_stat = ptxd->buff_stat;
+ if (buff_stat & TX_DMA_ENABLE) {
+ /* We've wrapped around and the transmitter is still busy */
+ netif_stop_queue(dev);
+ aup->tx_full = 1;
+ return NETDEV_TX_BUSY;
+ } else if (buff_stat & TX_T_DONE) {
+ au1000_update_tx_stats(dev, ptxd->status);
+ ptxd->len = 0;
+ }
+
+ if (aup->tx_full) {
+ aup->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+
+ pDB = aup->tx_db_inuse[aup->tx_head];
+ skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
+ if (skb->len < ETH_ZLEN) {
+ for (i = skb->len; i < ETH_ZLEN; i++)
+ ((char *)pDB->vaddr)[i] = 0;
+
+ ptxd->len = ETH_ZLEN;
+ } else
+ ptxd->len = skb->len;
+
+ ps->tx_packets++;
+ ps->tx_bytes += ptxd->len;
+
+ ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
+ wmb(); /* drain writebuffer */
+ dev_kfree_skb(skb);
+ aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
+ return NETDEV_TX_OK;
+}
+
+/*
+ * The Tx ring has been full longer than the watchdog timeout
+ * value. The transmitter is probably hung; reset and restart it.
+ */
+static void au1000_tx_timeout(struct net_device *dev)
+{
+ netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
+ au1000_reset_mac(dev);
+ au1000_init(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+static void au1000_multicast_list(struct net_device *dev)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+ u32 reg;
+
+ netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags);
+ reg = readl(&aup->mac->control);
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ reg |= MAC_PROMISCUOUS;
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
+ reg |= MAC_PASS_ALL_MULTI;
+ reg &= ~MAC_PROMISCUOUS;
+ netdev_info(dev, "Pass all multicast\n");
+ } else {
+ struct netdev_hw_addr *ha;
+ u32 mc_filter[2]; /* Multicast hash filter */
+
+ mc_filter[1] = mc_filter[0] = 0;
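+		/* Hash each address with ether_crc(); the six most
+		 * significant bits of the CRC select one of the 64
+		 * filter bits spread across multi_hash_high/low.
+		 */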
+ netdev_for_each_mc_addr(ha, dev)
+ set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
+ (long *)mc_filter);
+ writel(mc_filter[1], &aup->mac->multi_hash_high);
+ writel(mc_filter[0], &aup->mac->multi_hash_low);
+ reg &= ~MAC_PROMISCUOUS;
+ reg |= MAC_HASH_MODE;
+ }
+ writel(reg, &aup->mac->control);
+}
+
+static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct au1000_private *aup = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!aup->phy_dev)
+ return -EINVAL; /* PHY not controllable */
+
+ return phy_mii_ioctl(aup->phy_dev, rq, cmd);
+}
+
+static const struct net_device_ops au1000_netdev_ops = {
+ .ndo_open = au1000_open,
+ .ndo_stop = au1000_close,
+ .ndo_start_xmit = au1000_tx,
+ .ndo_set_rx_mode = au1000_multicast_list,
+ .ndo_do_ioctl = au1000_ioctl,
+ .ndo_tx_timeout = au1000_tx_timeout,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static int au1000_probe(struct platform_device *pdev)
+{
+ struct au1000_private *aup = NULL;
+ struct au1000_eth_platform_data *pd;
+ struct net_device *dev = NULL;
+ struct db_dest *pDB, *pDBfree;
+ int irq, i, err = 0;
+ struct resource *base, *macen, *macdma;
+
+ base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!base) {
+ dev_err(&pdev->dev, "failed to retrieve base register\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!macen) {
+ dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to retrieve IRQ\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!macdma) {
+ dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (!request_mem_region(base->start, resource_size(base),
+ pdev->name)) {
+ dev_err(&pdev->dev, "failed to request memory region for base registers\n");
+ err = -ENXIO;
+ goto out;
+ }
+
+ if (!request_mem_region(macen->start, resource_size(macen),
+ pdev->name)) {
+ dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
+ err = -ENXIO;
+ goto err_request;
+ }
+
+ if (!request_mem_region(macdma->start, resource_size(macdma),
+ pdev->name)) {
+ dev_err(&pdev->dev, "failed to request MACDMA memory region\n");
+ err = -ENXIO;
+ goto err_macdma;
+ }
+
+ dev = alloc_etherdev(sizeof(struct au1000_private));
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ platform_set_drvdata(pdev, dev);
+ aup = netdev_priv(dev);
+
+ spin_lock_init(&aup->lock);
+ aup->msg_enable = (au1000_debug < 4 ?
+ AU1000_DEF_MSG_ENABLE : au1000_debug);
+
+ /* Allocate the data buffers
+ * Snooping works fine with eth on all au1xxx
+ */
+ aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
+ (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ &aup->dma_addr, 0);
+ if (!aup->vaddr) {
+ dev_err(&pdev->dev, "failed to allocate data buffers\n");
+ err = -ENOMEM;
+ goto err_vaddr;
+ }
+
+ /* aup->mac is the base address of the MAC's registers */
+ aup->mac = (struct mac_reg *)
+ ioremap_nocache(base->start, resource_size(base));
+ if (!aup->mac) {
+ dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
+ err = -ENXIO;
+ goto err_remap1;
+ }
+
+ /* Setup some variables for quick register address access */
+ aup->enable = (u32 *)ioremap_nocache(macen->start,
+ resource_size(macen));
+ if (!aup->enable) {
+ dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
+ err = -ENXIO;
+ goto err_remap2;
+ }
+ aup->mac_id = pdev->id;
+
+ aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma));
+ if (!aup->macdma) {
+ dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n");
+ err = -ENXIO;
+ goto err_remap3;
+ }
+
+ au1000_setup_hw_rings(aup, aup->macdma);
+
+ writel(0, aup->enable);
+ aup->mac_enabled = 0;
+
+ pd = dev_get_platdata(&pdev->dev);
+ if (!pd) {
+ dev_info(&pdev->dev, "no platform_data passed,"
+ " PHY search on MAC0\n");
+ aup->phy1_search_mac0 = 1;
+ } else {
+ if (is_valid_ether_addr(pd->mac)) {
+ memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
+ } else {
+			/* No valid MAC provided by platform_data;
+			 * fall back to a random one.
+			 */
+ eth_hw_addr_random(dev);
+ }
+
+ aup->phy_static_config = pd->phy_static_config;
+ aup->phy_search_highest_addr = pd->phy_search_highest_addr;
+ aup->phy1_search_mac0 = pd->phy1_search_mac0;
+ aup->phy_addr = pd->phy_addr;
+ aup->phy_busid = pd->phy_busid;
+ aup->phy_irq = pd->phy_irq;
+ }
+
+	if (aup->phy_busid > 0) {
+		dev_err(&pdev->dev, "MAC0-associated PHY attached to the 2nd MAC's MII bus is not supported yet\n");
+ err = -ENODEV;
+ goto err_mdiobus_alloc;
+ }
+
+ aup->mii_bus = mdiobus_alloc();
+ if (aup->mii_bus == NULL) {
+ dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
+ err = -ENOMEM;
+ goto err_mdiobus_alloc;
+ }
+
+ aup->mii_bus->priv = dev;
+ aup->mii_bus->read = au1000_mdiobus_read;
+ aup->mii_bus->write = au1000_mdiobus_write;
+ aup->mii_bus->reset = au1000_mdiobus_reset;
+ aup->mii_bus->name = "au1000_eth_mii";
+ snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, aup->mac_id);
+ aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (aup->mii_bus->irq == NULL) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
+ aup->mii_bus->irq[i] = PHY_POLL;
+ /* if known, set corresponding PHY IRQs */
+ if (aup->phy_static_config)
+ if (aup->phy_irq && aup->phy_busid == aup->mac_id)
+ aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq;
+
+ err = mdiobus_register(aup->mii_bus);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register MDIO bus\n");
+ goto err_mdiobus_reg;
+ }
+
+ err = au1000_mii_probe(dev);
+ if (err != 0)
+ goto err_out;
+
+ pDBfree = NULL;
+ /* setup the data buffer descriptors and attach a buffer to each one */
+ pDB = aup->db;
+ for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
+ pDB->pnext = pDBfree;
+ pDBfree = pDB;
+ pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
+ pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
+ pDB++;
+ }
+ aup->pDBfree = pDBfree;
+
+ err = -ENODEV;
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ pDB = au1000_GetFreeDB(aup);
+ if (!pDB)
+ goto err_out;
+
+ aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->rx_db_inuse[i] = pDB;
+ }
+
+ err = -ENODEV;
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ pDB = au1000_GetFreeDB(aup);
+ if (!pDB)
+ goto err_out;
+
+ aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
+ aup->tx_dma_ring[i]->len = 0;
+ aup->tx_db_inuse[i] = pDB;
+ }
+
+ dev->base_addr = base->start;
+ dev->irq = irq;
+ dev->netdev_ops = &au1000_netdev_ops;
+ dev->ethtool_ops = &au1000_ethtool_ops;
+ dev->watchdog_timeo = ETH_TX_TIMEOUT;
+
+ /*
+ * The boot code uses the ethernet controller, so reset it to start
+ * fresh. au1000_init() expects that the device is in reset state.
+ */
+ au1000_reset_mac(dev);
+
+ err = register_netdev(dev);
+ if (err) {
+ netdev_err(dev, "Cannot register net device, aborting.\n");
+ goto err_out;
+ }
+
+ netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
+ (unsigned long)base->start, irq);
+
+ pr_info_once("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
+
+ return 0;
+
+err_out:
+ if (aup->mii_bus != NULL)
+ mdiobus_unregister(aup->mii_bus);
+
+ /* here we should have a valid dev plus aup-> register addresses
+ * so we can reset the mac properly.
+ */
+ au1000_reset_mac(dev);
+
+ for (i = 0; i < NUM_RX_DMA; i++) {
+ if (aup->rx_db_inuse[i])
+ au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
+ }
+ for (i = 0; i < NUM_TX_DMA; i++) {
+ if (aup->tx_db_inuse[i])
+ au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
+ }
+err_mdiobus_reg:
+ mdiobus_free(aup->mii_bus);
+err_mdiobus_alloc:
+ iounmap(aup->macdma);
+err_remap3:
+ iounmap(aup->enable);
+err_remap2:
+ iounmap(aup->mac);
+err_remap1:
+ dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
+err_vaddr:
+ free_netdev(dev);
+err_alloc:
+ release_mem_region(macdma->start, resource_size(macdma));
+err_macdma:
+ release_mem_region(macen->start, resource_size(macen));
+err_request:
+ release_mem_region(base->start, resource_size(base));
+out:
+ return err;
+}
+
+static int au1000_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct au1000_private *aup = netdev_priv(dev);
+ int i;
+ struct resource *base, *macen;
+
+ unregister_netdev(dev);
+ mdiobus_unregister(aup->mii_bus);
+ mdiobus_free(aup->mii_bus);
+
+ for (i = 0; i < NUM_RX_DMA; i++)
+ if (aup->rx_db_inuse[i])
+ au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
+
+ for (i = 0; i < NUM_TX_DMA; i++)
+ if (aup->tx_db_inuse[i])
+ au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
+
+ dma_free_noncoherent(NULL, MAX_BUF_SIZE *
+ (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
+
+ iounmap(aup->macdma);
+ iounmap(aup->mac);
+ iounmap(aup->enable);
+
+ base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ release_mem_region(base->start, resource_size(base));
+
+ base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(base->start, resource_size(base));
+
+ macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ release_mem_region(macen->start, resource_size(macen));
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct platform_driver au1000_eth_driver = {
+ .probe = au1000_probe,
+ .remove = au1000_remove,
+ .driver = {
+ .name = "au1000-eth",
+ },
+};
+
+module_platform_driver(au1000_eth_driver);
+
+MODULE_ALIAS("platform:au1000-eth");
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
new file mode 100644
index 000000000..ca53024f0
--- /dev/null
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -0,0 +1,133 @@
+/*
+ *
+ * Alchemy Au1x00 ethernet driver include file
+ *
+ * Author: Pete Popov <ppopov@mvista.com>
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * ########################################################################
+ *
+ *
+ */
+
+
+#define MAC_IOSIZE 0x10000
+#define NUM_RX_DMA 4 /* Au1x00 has 4 rx hardware descriptors */
+#define NUM_TX_DMA 4 /* Au1x00 has 4 tx hardware descriptors */
+
+#define NUM_RX_BUFFS 4
+#define NUM_TX_BUFFS 4
+#define MAX_BUF_SIZE 2048
+
+#define ETH_TX_TIMEOUT (HZ/4)
+#define MAC_MIN_PKT_SIZE 64
+
+#define MULTICAST_FILTER_LIMIT 64
+
+/*
+ * Data Buffer Descriptor. Data buffers must be aligned on a 32 byte
+ * boundary for both receive and transmit.
+ */
+struct db_dest {
+ struct db_dest *pnext;
+ u32 *vaddr;
+ dma_addr_t dma_addr;
+};
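+
+/* Note that no explicit padding is needed to keep that alignment: the
+ * buffer area is carved into MAX_BUF_SIZE (2048 byte) slices, and 2048
+ * is a multiple of 32, so each slice inherits the alignment of the DMA
+ * allocation itself (page-aligned in practice).
+ */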
+
+/*
+ * The transmit and receive descriptors are memory
+ * mapped registers.
+ */
+struct tx_dma {
+ u32 status;
+ u32 buff_stat;
+ u32 len;
+ u32 pad;
+};
+
+struct rx_dma {
+ u32 status;
+ u32 buff_stat;
+ u32 pad[2];
+};
+
+
+/*
+ * MAC control registers, memory mapped.
+ */
+struct mac_reg {
+ u32 control;
+ u32 mac_addr_high;
+ u32 mac_addr_low;
+ u32 multi_hash_high;
+ u32 multi_hash_low;
+ u32 mii_control;
+ u32 mii_data;
+ u32 flow_control;
+ u32 vlan1_tag;
+ u32 vlan2_tag;
+};
+
+
+struct au1000_private {
+ struct db_dest *pDBfree;
+ struct db_dest db[NUM_RX_BUFFS+NUM_TX_BUFFS];
+ struct rx_dma *rx_dma_ring[NUM_RX_DMA];
+ struct tx_dma *tx_dma_ring[NUM_TX_DMA];
+ struct db_dest *rx_db_inuse[NUM_RX_DMA];
+ struct db_dest *tx_db_inuse[NUM_TX_DMA];
+ u32 rx_head;
+ u32 tx_head;
+ u32 tx_tail;
+ u32 tx_full;
+
+ int mac_id;
+
+ int mac_enabled; /* whether MAC is currently enabled and running
+ * (req. for mdio)
+ */
+
+ int old_link; /* used by au1000_adjust_link */
+ int old_speed;
+ int old_duplex;
+
+ struct phy_device *phy_dev;
+ struct mii_bus *mii_bus;
+
+ /* PHY configuration */
+ int phy_static_config;
+ int phy_search_highest_addr;
+ int phy1_search_mac0;
+
+ int phy_addr;
+ int phy_busid;
+ int phy_irq;
+
+ /* These variables are just for quick access
+	 * to certain register addresses.
+ */
+ struct mac_reg *mac; /* mac registers */
+ u32 *enable; /* address of MAC Enable Register */
+ void __iomem *macdma; /* base of MAC DMA port */
+ u32 vaddr; /* virtual address of rx/tx buffers */
+ dma_addr_t dma_addr; /* dma address of rx/tx buffers */
+
+ spinlock_t lock; /* Serialise access to device */
+
+ u32 msg_enable;
+};
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
new file mode 100644
index 000000000..b584b7823
--- /dev/null
+++ b/drivers/net/ethernet/amd/declance.c
@@ -0,0 +1,1376 @@
+/*
+ * Lance ethernet driver for the MIPS processor based
+ * DECstation family
+ *
+ *
+ * adopted from sunlance.c by Richard van den Berg
+ *
+ * Copyright (C) 2002, 2003, 2005, 2006 Maciej W. Rozycki
+ *
+ * additional sources:
+ * - PMAD-AA TURBOchannel Ethernet Module Functional Specification,
+ * Revision 1.2
+ *
+ * History:
+ *
+ * v0.001: The kernel accepts the code and it shows the hardware address.
+ *
+ * v0.002: Removed most sparc stuff, left only some module and dma stuff.
+ *
+ * v0.003: Enhanced base address calculation from proposals by
+ * Harald Koerfgen and Thomas Riemer.
+ *
+ * v0.004: lance-regs is pointing at the right addresses, added prom
+ * check. First start of address mapping and DMA.
+ *
+ * v0.005: started to play around with LANCE-DMA. This driver will not
+ * work for non IOASIC lances. HK
+ *
+ * v0.006: added pointer arrays to lance_private and setup routine for
+ * them in dec_lance_init. HK
+ *
+ * v0.007: Big shit. The LANCE seems to use a different DMA mechanism to
+ * access the init block. This looks like one (short) word at a
+ * time, but the smallest amount the IOASIC can transfer is a
+ * (long) word. So we have a 2-2 padding here. Changed
+ * lance_init_block accordingly. The 16-16 padding for the buffers
+ * seems to be correct. HK
+ *
+ * v0.008: mods to make PMAX_LANCE work. 01/09/1999 triemer
+ *
+ * v0.009: Module support fixes, multiple interfaces support, various
+ * bits. macro
+ *
+ * v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the
+ * PMAX requirement to only use halfword accesses to the
+ * buffer. macro
+ *
+ * v0.011: Converted the PMAD to the driver model. macro
+ */
+
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/spinlock.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/tc.h>
+#include <linux/types.h>
+
+#include <asm/addrspace.h>
+
+#include <asm/dec/interrupts.h>
+#include <asm/dec/ioasic.h>
+#include <asm/dec/ioasic_addrs.h>
+#include <asm/dec/kn01.h>
+#include <asm/dec/machtype.h>
+#include <asm/dec/system.h>
+
+static char version[] =
+"declance.c: v0.011 by Linux MIPS DECstation task force\n";
+
+MODULE_AUTHOR("Linux MIPS DECstation task force");
+MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver");
+MODULE_LICENSE("GPL");
+
+#define __unused __attribute__ ((unused))
+
+/*
+ * card types
+ */
+#define ASIC_LANCE 1
+#define PMAD_LANCE 2
+#define PMAX_LANCE 3
+
+
+#define LE_CSR0 0
+#define LE_CSR1 1
+#define LE_CSR2 2
+#define LE_CSR3 3
+
+#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */
+
+#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */
+#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */
+#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */
+#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */
+#define LE_C0_MERR 0x0800 /* ME: Memory error */
+#define LE_C0_RINT 0x0400 /* Received interrupt */
+#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */
+#define LE_C0_IDON 0x0100 /* IFIN: Init finished. */
+#define LE_C0_INTR 0x0080 /* Interrupt or error */
+#define LE_C0_INEA 0x0040 /* Interrupt enable */
+#define LE_C0_RXON 0x0020 /* Receiver on */
+#define LE_C0_TXON 0x0010 /* Transmitter on */
+#define LE_C0_TDMD 0x0008 /* Transmitter demand */
+#define LE_C0_STOP 0x0004 /* Stop the card */
+#define LE_C0_STRT 0x0002 /* Start the card */
+#define LE_C0_INIT 0x0001 /* Init the card */
+
+#define LE_C3_BSWP 0x4 /* SWAP */
+#define LE_C3_ACON 0x2 /* ALE Control */
+#define LE_C3_BCON 0x1 /* Byte control */
+
+/* Receive message descriptor 1 */
+#define LE_R1_OWN 0x8000 /* Who owns the entry */
+#define LE_R1_ERR 0x4000 /* Error: if FRA, OFL, CRC or BUF is set */
+#define LE_R1_FRA 0x2000 /* FRA: Frame error */
+#define LE_R1_OFL 0x1000 /* OFL: Frame overflow */
+#define LE_R1_CRC 0x0800 /* CRC error */
+#define LE_R1_BUF 0x0400 /* BUF: Buffer error */
+#define LE_R1_SOP 0x0200 /* Start of packet */
+#define LE_R1_EOP 0x0100 /* End of packet */
+#define LE_R1_POK 0x0300 /* Packet is complete: SOP + EOP */
+
+/* Transmit message descriptor 1 */
+#define LE_T1_OWN 0x8000 /* Lance owns the packet */
+#define LE_T1_ERR 0x4000 /* Error summary */
+#define LE_T1_EMORE 0x1000 /* Error: more than one retry needed */
+#define LE_T1_EONE 0x0800 /* Error: one retry needed */
+#define LE_T1_EDEF 0x0400 /* Error: deferred */
+#define LE_T1_SOP 0x0200 /* Start of packet */
+#define LE_T1_EOP 0x0100 /* End of packet */
+#define LE_T1_POK 0x0300 /* Packet is complete: SOP + EOP */
+
+#define LE_T3_BUF 0x8000 /* Buffer error */
+#define LE_T3_UFL 0x4000 /* Error underflow */
+#define LE_T3_LCOL 0x1000 /* Error late collision */
+#define LE_T3_CLOS 0x0800 /* Error carrier loss */
+#define LE_T3_RTY 0x0400 /* Error retry */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */
+
+/* Define: 2^4 Tx buffers and 2^4 Rx buffers */
+
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 4
+#define LANCE_LOG_RX_BUFFERS 4
+#endif
+
+#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+
+#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+
+#define PKT_BUF_SZ 1536
+#define RX_BUFF_SIZE PKT_BUF_SZ
+#define TX_BUFF_SIZE PKT_BUF_SZ
+
+#undef TEST_HITS
+#define ZERO 0
+
+/*
+ * The DS2100/3100 have a linear 64 kB buffer which supports halfword
+ * accesses only. Each halfword of the buffer is word-aligned in the
+ * CPU address space.
+ *
+ * The PMAD-AA has a 128 kB buffer on-board.
+ *
+ * The IOASIC LANCE devices use a shared memory region. This region
+ * as seen from the CPU is (max) 128 kB long and has to be on a 128 kB
+ * boundary. The LANCE sees this as a 64 kB long contiguous memory
+ * region.
+ *
+ * The LANCE's DMA address is used as an index in this buffer and DMA
+ * takes place in bursts of eight 16-bit words which are packed into
+ * four 32-bit words by the IOASIC. This leads to a strange padding:
+ * 16 bytes of valid data followed by a 16 byte gap :-(.
+ */
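+
+/*
+ * A worked summary of the resulting layouts (see the copy routines and
+ * macros below): the init block is fetched by the LANCE one halfword
+ * at a time, so on both the IOASIC and PMAX variants each of its
+ * 16-bit fields occupies a full 32-bit word as seen from the CPU;
+ * shift_off() below encodes this by doubling structure offsets.  The
+ * packet buffers differ: the PMAX interleaves 2 valid bytes with a
+ * 2 byte gap, while the IOASIC interleaves 16 with 16, which is why
+ * cp_to_buf() copies eight halfwords and then skips eight (tp += 8)
+ * on the IOASIC path.
+ */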
+
+struct lance_rx_desc {
+ unsigned short rmd0; /* low address of packet */
+ unsigned short rmd1; /* high address of packet
+ and descriptor bits */
+ short length; /* 2s complement (negative!)
+ of buffer length */
+ unsigned short mblength; /* actual number of bytes received */
+};
+
+struct lance_tx_desc {
+ unsigned short tmd0; /* low address of packet */
+ unsigned short tmd1; /* high address of packet
+ and descriptor bits */
+ short length; /* 2s complement (negative!)
+ of buffer length */
+ unsigned short misc;
+};
+
+
+/* First part of the LANCE initialization block, described in databook. */
+struct lance_init_block {
+ unsigned short mode; /* pre-set mode (reg. 15) */
+
+ unsigned short phys_addr[3]; /* physical ethernet address */
+ unsigned short filter[4]; /* multicast filter */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ unsigned short rx_ptr; /* receive descriptor addr */
+ unsigned short rx_len; /* receive len and high addr */
+ unsigned short tx_ptr; /* transmit descriptor addr */
+ unsigned short tx_len; /* transmit len and high addr */
+
+ short gap[4];
+
+ /* The buffer descriptors */
+ struct lance_rx_desc brx_ring[RX_RING_SIZE];
+ struct lance_tx_desc btx_ring[TX_RING_SIZE];
+};
+
+#define BUF_OFFSET_CPU sizeof(struct lance_init_block)
+#define BUF_OFFSET_LNC sizeof(struct lance_init_block)
+
+#define shift_off(off, type) \
+ (type == ASIC_LANCE || type == PMAX_LANCE ? off << 1 : off)
+
+#define lib_off(rt, type) \
+ shift_off(offsetof(struct lance_init_block, rt), type)
+
+#define lib_ptr(ib, rt, type) \
+ ((volatile u16 *)((u8 *)(ib) + lib_off(rt, type)))
+
+#define rds_off(rt, type) \
+ shift_off(offsetof(struct lance_rx_desc, rt), type)
+
+#define rds_ptr(rd, rt, type) \
+ ((volatile u16 *)((u8 *)(rd) + rds_off(rt, type)))
+
+#define tds_off(rt, type) \
+ shift_off(offsetof(struct lance_tx_desc, rt), type)
+
+#define tds_ptr(td, rt, type) \
+ ((volatile u16 *)((u8 *)(td) + tds_off(rt, type)))
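+
+/* A worked case (not used anywhere): offsetof(struct lance_init_block,
+ * phys_addr[1]) is 4, so lib_ptr(ib, phys_addr[1], PMAX_LANCE)
+ * resolves to (volatile u16 *)((u8 *)(ib) + 8).
+ */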
+
+struct lance_private {
+ struct net_device *next;
+ int type;
+ int dma_irq;
+ volatile struct lance_regs *ll;
+
+ spinlock_t lock;
+
+ int rx_new, tx_new;
+ int rx_old, tx_old;
+
+ unsigned short busmaster_regval;
+
+ struct timer_list multicast_timer;
+
+ /* Pointers to the ring buffers as seen from the CPU */
+ char *rx_buf_ptr_cpu[RX_RING_SIZE];
+ char *tx_buf_ptr_cpu[TX_RING_SIZE];
+
+ /* Pointers to the ring buffers as seen from the LANCE */
+ uint rx_buf_ptr_lnc[RX_RING_SIZE];
+ uint tx_buf_ptr_lnc[TX_RING_SIZE];
+};
+
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
+ lp->tx_old - lp->tx_new-1)
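+
+/* One descriptor is deliberately kept unused: an empty ring
+ * (tx_old == tx_new) reports TX_RING_MOD_MASK free slots, so a full
+ * ring can be told apart from an empty one.
+ */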
+
+/* The lance control ports are at an absolute address, machine and tc-slot
+ * dependent.
+ * DECstations only do 32-bit accesses and the LANCE uses 16-bit
+ * addresses, so we have to give the structure an extra padding member
+ * to make rap point at the right address.
+ */
+struct lance_regs {
+ volatile unsigned short rdp; /* register data port */
+ unsigned short pad;
+ volatile unsigned short rap; /* register address port */
+};
+
+int dec_lance_debug = 2;
+
+static struct tc_driver dec_lance_tc_driver;
+static struct net_device *root_lance_dev;
+
+static inline void writereg(volatile unsigned short *regptr, short value)
+{
+ *regptr = value;
+ iob();
+}
+
+/* Load the CSR registers */
+static void load_csrs(struct lance_private *lp)
+{
+ volatile struct lance_regs *ll = lp->ll;
+ uint leptr;
+
+ /* The address space as seen from the LANCE
+ * begins at address 0. HK
+ */
+ leptr = 0;
+
+ writereg(&ll->rap, LE_CSR1);
+ writereg(&ll->rdp, (leptr & 0xFFFF));
+ writereg(&ll->rap, LE_CSR2);
+ writereg(&ll->rdp, leptr >> 16);
+ writereg(&ll->rap, LE_CSR3);
+ writereg(&ll->rdp, lp->busmaster_regval);
+
+ /* Point back to csr0 */
+ writereg(&ll->rap, LE_CSR0);
+}
+
+/*
+ * Our specialized copy routines
+ *
+ */
+static void cp_to_buf(const int type, void *to, const void *from, int len)
+{
+ unsigned short *tp;
+ const unsigned short *fp;
+ unsigned short clen;
+ unsigned char *rtp;
+ const unsigned char *rfp;
+
+ if (type == PMAD_LANCE) {
+ memcpy(to, from, len);
+ } else if (type == PMAX_LANCE) {
+ clen = len >> 1;
+ tp = to;
+ fp = from;
+
+ while (clen--) {
+ *tp++ = *fp++;
+ tp++;
+ }
+
+ clen = len & 1;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
+ while (clen--) {
+ *rtp++ = *rfp++;
+ }
+ } else {
+ /*
+ * copy 16 Byte chunks
+ */
+ clen = len >> 4;
+ tp = to;
+ fp = from;
+ while (clen--) {
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ tp += 8;
+ }
+
+ /*
+ * do the rest, if any.
+ */
+ clen = len & 15;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
+ while (clen--) {
+ *rtp++ = *rfp++;
+ }
+ }
+
+ iob();
+}
+
+static void cp_from_buf(const int type, void *to, const void *from, int len)
+{
+ unsigned short *tp;
+ const unsigned short *fp;
+ unsigned short clen;
+ unsigned char *rtp;
+ const unsigned char *rfp;
+
+ if (type == PMAD_LANCE) {
+ memcpy(to, from, len);
+ } else if (type == PMAX_LANCE) {
+ clen = len >> 1;
+ tp = to;
+ fp = from;
+ while (clen--) {
+ *tp++ = *fp++;
+ fp++;
+ }
+
+ clen = len & 1;
+
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
+
+ while (clen--) {
+ *rtp++ = *rfp++;
+ }
+ } else {
+		/*
+		 * copy 16-byte chunks
+		 */
+ clen = len >> 4;
+ tp = to;
+ fp = from;
+ while (clen--) {
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ *tp++ = *fp++;
+ fp += 8;
+ }
+
+ /*
+ * do the rest, if any.
+ */
+ clen = len & 15;
+ rtp = (unsigned char *)tp;
+ rfp = (const unsigned char *)fp;
+		while (clen--) {
+			*rtp++ = *rfp++;
+		}
+	}
+}
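+
+/* The copy variants above mirror how the buffer RAM is mapped: on
+ * PMAD_LANCE the memory is contiguous, so a plain memcpy() does; on
+ * PMAX_LANCE only every other 16-bit word is backed by buffer RAM,
+ * hence the "copy one short, skip one" loops; the remaining (IOASIC)
+ * case moves 16 useful bytes and then skips the 16-byte hole that
+ * follows each chunk.  E.g. a 4-byte PMAX copy ends up in CPU words
+ * 0 and 2, with words 1 and 3 untouched.
+ */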
+
+/* Setup the Lance Rx and Tx rings */
+static void lance_init_ring(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ uint leptr;
+ int i;
+
+ /* Lock out other processes while setting up hardware */
+ netif_stop_queue(dev);
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ /* Copy the ethernet address to the lance init block.
+ * XXX bit 0 of the physical address registers has to be zero
+ */
+ *lib_ptr(ib, phys_addr[0], lp->type) = (dev->dev_addr[1] << 8) |
+ dev->dev_addr[0];
+ *lib_ptr(ib, phys_addr[1], lp->type) = (dev->dev_addr[3] << 8) |
+ dev->dev_addr[2];
+ *lib_ptr(ib, phys_addr[2], lp->type) = (dev->dev_addr[5] << 8) |
+ dev->dev_addr[4];
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = offsetof(struct lance_init_block, brx_ring);
+ *lib_ptr(ib, rx_len, lp->type) = (LANCE_LOG_RX_BUFFERS << 13) |
+ (leptr >> 16);
+ *lib_ptr(ib, rx_ptr, lp->type) = leptr;
+ if (ZERO)
+ printk("RX ptr: %8.8x(%8.8x)\n",
+ leptr, (uint)lib_off(brx_ring, lp->type));
+
+ /* Setup tx descriptor pointer */
+ leptr = offsetof(struct lance_init_block, btx_ring);
+ *lib_ptr(ib, tx_len, lp->type) = (LANCE_LOG_TX_BUFFERS << 13) |
+ (leptr >> 16);
+ *lib_ptr(ib, tx_ptr, lp->type) = leptr;
+ if (ZERO)
+ printk("TX ptr: %8.8x(%8.8x)\n",
+ leptr, (uint)lib_off(btx_ring, lp->type));
+
+ if (ZERO)
+ printk("TX rings:\n");
+
+ /* Setup the Tx ring entries */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ leptr = lp->tx_buf_ptr_lnc[i];
+ *lib_ptr(ib, btx_ring[i].tmd0, lp->type) = leptr;
+ *lib_ptr(ib, btx_ring[i].tmd1, lp->type) = (leptr >> 16) &
+ 0xff;
+ *lib_ptr(ib, btx_ring[i].length, lp->type) = 0xf000;
+ /* The ones required by tmd2 */
+ *lib_ptr(ib, btx_ring[i].misc, lp->type) = 0;
+ if (i < 3 && ZERO)
+ printk("%d: %8.8x(%p)\n",
+ i, leptr, lp->tx_buf_ptr_cpu[i]);
+ }
+
+ /* Setup the Rx ring entries */
+ if (ZERO)
+ printk("RX rings:\n");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ leptr = lp->rx_buf_ptr_lnc[i];
+ *lib_ptr(ib, brx_ring[i].rmd0, lp->type) = leptr;
+ *lib_ptr(ib, brx_ring[i].rmd1, lp->type) = ((leptr >> 16) &
+ 0xff) |
+ LE_R1_OWN;
+ *lib_ptr(ib, brx_ring[i].length, lp->type) = -RX_BUFF_SIZE |
+ 0xf000;
+ *lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0;
+ if (i < 3 && ZERO)
+ printk("%d: %8.8x(%p)\n",
+ i, leptr, lp->rx_buf_ptr_cpu[i]);
+ }
+ iob();
+}
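+
+/* Note on the encoding used above: the LANCE init block keeps log2 of
+ * the ring size in bits 15:13 of the ring length word, so a value like
+ * (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16) packs, e.g., a 16-entry
+ * ring (log2 == 4) together with the top byte of the 24-bit descriptor
+ * ring address into a single 16-bit word.
+ */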
+
+static int init_restart_lance(struct lance_private *lp)
+{
+ volatile struct lance_regs *ll = lp->ll;
+ int i;
+
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_INIT);
+
+ /* Wait for the lance to complete initialization */
+	for (i = 0; (i < 100) && !(ll->rdp & LE_C0_IDON); i++)
+		udelay(10);
+	if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
+		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
+		       i, ll->rdp);
+		return -1;
+	}
+ writereg(&ll->rdp, LE_C0_IDON);
+ writereg(&ll->rdp, LE_C0_STRT);
+ writereg(&ll->rdp, LE_C0_INEA);
+
+ return 0;
+}
+
+static int lance_rx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ volatile u16 *rd;
+ unsigned short bits;
+ int entry, len;
+ struct sk_buff *skb;
+
+#ifdef TEST_HITS
+ {
+ int i;
+
+ printk("[");
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ if (i == lp->rx_new)
+ printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
+ lp->type) &
+ LE_R1_OWN ? "_" : "X");
+ else
+ printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
+ lp->type) &
+ LE_R1_OWN ? "." : "1");
+ }
+ printk("]");
+ }
+#endif
+
+ for (rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type);
+ !((bits = *rds_ptr(rd, rmd1, lp->type)) & LE_R1_OWN);
+ rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type)) {
+ entry = lp->rx_new;
+
+ /* We got an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF)
+ dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC)
+ dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL)
+ dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA)
+ dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP)
+ dev->stats.rx_errors++;
+ } else {
+ len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
+ skb = netdev_alloc_skb(dev, len + 2);
+
+			if (!skb) {
+ dev->stats.rx_dropped++;
+ *rds_ptr(rd, mblength, lp->type) = 0;
+ *rds_ptr(rd, rmd1, lp->type) =
+ ((lp->rx_buf_ptr_lnc[entry] >> 16) &
+ 0xff) | LE_R1_OWN;
+ lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
+ return 0;
+ }
+ dev->stats.rx_bytes += len;
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+
+ cp_from_buf(lp->type, skb->data,
+ lp->rx_buf_ptr_cpu[entry], len);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ }
+
+ /* Return the packet to the pool */
+ *rds_ptr(rd, mblength, lp->type) = 0;
+ *rds_ptr(rd, length, lp->type) = -RX_BUFF_SIZE | 0xf000;
+ *rds_ptr(rd, rmd1, lp->type) =
+ ((lp->rx_buf_ptr_lnc[entry] >> 16) & 0xff) | LE_R1_OWN;
+ lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
+ }
+ return 0;
+}
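+
+/* Ownership recap for the loop above: it stops as soon as rmd1 has
+ * LE_R1_OWN set, i.e. the descriptor still belongs to the LANCE; once
+ * a frame (or error) has been handled, mblength is cleared and rmd1 is
+ * rewritten with the high address bits plus LE_R1_OWN, handing the
+ * slot back to the chip.
+ */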
+
+static void lance_tx(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ volatile struct lance_regs *ll = lp->ll;
+ volatile u16 *td;
+ int i, j;
+ int status;
+
+ j = lp->tx_old;
+
+ spin_lock(&lp->lock);
+
+ for (i = j; i != lp->tx_new; i = j) {
+ td = lib_ptr(ib, btx_ring[i], lp->type);
+ /* If we hit a packet not owned by us, stop */
+ if (*tds_ptr(td, tmd1, lp->type) & LE_T1_OWN)
+ break;
+
+ if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) {
+ status = *tds_ptr(td, misc, lp->type);
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY)
+ dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL)
+ dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ printk("%s: Carrier Lost\n", dev->name);
+ /* Stop the lance */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ /* Buffer errors and underflows turn off the
+ * transmitter, restart the adapter.
+ */
+ if (status & (LE_T3_BUF | LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ /* Stop the lance */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ } else if ((*tds_ptr(td, tmd1, lp->type) & LE_T1_POK) ==
+ LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ *tds_ptr(td, tmd1, lp->type) &= ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+ j = (j + 1) & TX_RING_MOD_MASK;
+ }
+ lp->tx_old = j;
+out:
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL > 0)
+ netif_wake_queue(dev);
+
+ spin_unlock(&lp->lock);
+}
+
+static irqreturn_t lance_dma_merr_int(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+
+ printk(KERN_ERR "%s: DMA error\n", dev->name);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lance_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int csr0;
+
+ writereg(&ll->rap, LE_CSR0);
+ csr0 = ll->rdp;
+
+ /* Acknowledge all the interrupt sources ASAP */
+ writereg(&ll->rdp, csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT));
+
+ if ((csr0 & LE_C0_ERR)) {
+ /* Clear the error condition */
+ writereg(&ll->rdp, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
+ LE_C0_CERR | LE_C0_MERR);
+ }
+ if (csr0 & LE_C0_RINT)
+ lance_rx(dev);
+
+ if (csr0 & LE_C0_TINT)
+ lance_tx(dev);
+
+ if (csr0 & LE_C0_BABL)
+ dev->stats.tx_errors++;
+
+ if (csr0 & LE_C0_MISS)
+ dev->stats.rx_errors++;
+
+ if (csr0 & LE_C0_MERR) {
+ printk("%s: Memory error, status %04x\n", dev->name, csr0);
+
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ lance_init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+ }
+
+ writereg(&ll->rdp, LE_C0_INEA);
+ writereg(&ll->rdp, LE_C0_INEA);
+ return IRQ_HANDLED;
+}
+
+static int lance_open(struct net_device *dev)
+{
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int status = 0;
+
+ /* Stop the Lance */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+
+	/* Set mode and clear multicast filter only at device open,
+	 * so that lance_init_ring(), when called on an error path,
+	 * will not clobber the multicast filters.
+	 *
+	 * BTW it is a common bug in all lance drivers! --ANK
+	 */
+ *lib_ptr(ib, mode, lp->type) = 0;
+ *lib_ptr(ib, filter[0], lp->type) = 0;
+ *lib_ptr(ib, filter[1], lp->type) = 0;
+ *lib_ptr(ib, filter[2], lp->type) = 0;
+ *lib_ptr(ib, filter[3], lp->type) = 0;
+
+ lance_init_ring(dev);
+ load_csrs(lp);
+
+ netif_start_queue(dev);
+
+ /* Associate IRQ with lance_interrupt */
+ if (request_irq(dev->irq, lance_interrupt, 0, "lance", dev)) {
+ printk("%s: Can't get IRQ %d\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+ if (lp->dma_irq >= 0) {
+ unsigned long flags;
+
+ if (request_irq(lp->dma_irq, lance_dma_merr_int, IRQF_ONESHOT,
+ "lance error", dev)) {
+ free_irq(dev->irq, dev);
+ printk("%s: Can't get DMA IRQ %d\n", dev->name,
+ lp->dma_irq);
+ return -EAGAIN;
+ }
+
+ spin_lock_irqsave(&ioasic_ssr_lock, flags);
+
+ fast_mb();
+ /* Enable I/O ASIC LANCE DMA. */
+ ioasic_write(IO_REG_SSR,
+ ioasic_read(IO_REG_SSR) | IO_SSR_LANCE_DMA_EN);
+
+ fast_mb();
+ spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
+ }
+
+ status = init_restart_lance(lp);
+ return status;
+}
+
+static int lance_close(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->multicast_timer);
+
+ /* Stop the card */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ if (lp->dma_irq >= 0) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioasic_ssr_lock, flags);
+
+ fast_mb();
+ /* Disable I/O ASIC LANCE DMA. */
+ ioasic_write(IO_REG_SSR,
+ ioasic_read(IO_REG_SSR) & ~IO_SSR_LANCE_DMA_EN);
+
+ fast_iob();
+ spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
+
+ free_irq(lp->dma_irq, dev);
+ }
+ free_irq(dev->irq, dev);
+ return 0;
+}
+
+static inline int lance_reset(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ int status;
+
+ /* Stop the lance */
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ lance_init_ring(dev);
+ load_csrs(lp);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ status = init_restart_lance(lp);
+ return status;
+}
+
+static void lance_tx_timeout(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+
+ printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
+ dev->name, ll->rdp);
+ lance_reset(dev);
+ netif_wake_queue(dev);
+}
+
+static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile struct lance_regs *ll = lp->ll;
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ unsigned long flags;
+ int entry, len;
+
+ len = skb->len;
+
+ if (len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+ len = ETH_ZLEN;
+ }
+
+ dev->stats.tx_bytes += len;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ entry = lp->tx_new;
+ *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
+ *lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
+
+ cp_to_buf(lp->type, lp->tx_buf_ptr_cpu[entry], skb->data, len);
+
+ /* Now, give the packet to the lance */
+ *lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
+ ((lp->tx_buf_ptr_lnc[entry] >> 16) & 0xff) |
+ (LE_T1_POK | LE_T1_OWN);
+ lp->tx_new = (entry + 1) & TX_RING_MOD_MASK;
+
+ if (TX_BUFFS_AVAIL <= 0)
+ netif_stop_queue(dev);
+
+ /* Kick the lance: transmit now */
+ writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static void lance_load_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ struct netdev_hw_addr *ha;
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ *lib_ptr(ib, filter[0], lp->type) = 0xffff;
+ *lib_ptr(ib, filter[1], lp->type) = 0xffff;
+ *lib_ptr(ib, filter[2], lp->type) = 0xffff;
+ *lib_ptr(ib, filter[3], lp->type) = 0xffff;
+ return;
+ }
+ /* clear the multicast filter */
+ *lib_ptr(ib, filter[0], lp->type) = 0;
+ *lib_ptr(ib, filter[1], lp->type) = 0;
+ *lib_ptr(ib, filter[2], lp->type) = 0;
+ *lib_ptr(ib, filter[3], lp->type) = 0;
+
+ /* Add addresses */
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ crc = crc >> 26;
+ *lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf);
+ }
+}
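+
+/* Example of the hash above: ether_crc_le() yields a 32-bit CRC whose
+ * top six bits (crc >> 26) select one of 64 filter bits; bits 5:4 of
+ * that value pick one of the four 16-bit filter words and bits 3:0 the
+ * bit within it, so e.g. a shifted value of 0x2a sets bit 10 of
+ * filter[2].
+ */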
+
+static void lance_set_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ volatile u16 *ib = (volatile u16 *)dev->mem_start;
+ volatile struct lance_regs *ll = lp->ll;
+
+ if (!netif_running(dev))
+ return;
+
+ if (lp->tx_old != lp->tx_new) {
+ mod_timer(&lp->multicast_timer, jiffies + 4 * HZ/100);
+ netif_wake_queue(dev);
+ return;
+ }
+
+ netif_stop_queue(dev);
+
+ writereg(&ll->rap, LE_CSR0);
+ writereg(&ll->rdp, LE_C0_STOP);
+
+ lance_init_ring(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ *lib_ptr(ib, mode, lp->type) |= LE_MO_PROM;
+ } else {
+ *lib_ptr(ib, mode, lp->type) &= ~LE_MO_PROM;
+ lance_load_multicast(dev);
+ }
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+}
+
+static void lance_set_multicast_retry(unsigned long _opaque)
+{
+ struct net_device *dev = (struct net_device *) _opaque;
+
+ lance_set_multicast(dev);
+}
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = lance_open,
+ .ndo_stop = lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_set_rx_mode = lance_set_multicast,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int dec_lance_probe(struct device *bdev, const int type)
+{
+ static unsigned version_printed;
+ static const char fmt[] = "declance%d";
+ char name[10];
+ struct net_device *dev;
+ struct lance_private *lp;
+ volatile struct lance_regs *ll;
+ resource_size_t start = 0, len = 0;
+ int i, ret;
+ unsigned long esar_base;
+ unsigned char *esar;
+
+ if (dec_lance_debug && version_printed++ == 0)
+ printk(version);
+
+ if (bdev)
+ snprintf(name, sizeof(name), "%s", dev_name(bdev));
+ else {
+ i = 0;
+ dev = root_lance_dev;
+ while (dev) {
+ i++;
+ lp = netdev_priv(dev);
+ dev = lp->next;
+ }
+ snprintf(name, sizeof(name), fmt, i);
+ }
+
+ dev = alloc_etherdev(sizeof(struct lance_private));
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ /*
+ * alloc_etherdev ensures the data structures used by the LANCE
+ * are aligned.
+ */
+ lp = netdev_priv(dev);
+ spin_lock_init(&lp->lock);
+
+ lp->type = type;
+ switch (type) {
+ case ASIC_LANCE:
+ dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);
+
+ /* buffer space for the on-board LANCE shared memory */
+ /*
+ * FIXME: ugly hack!
+ */
+ dev->mem_start = CKSEG1ADDR(0x00020000);
+ dev->mem_end = dev->mem_start + 0x00020000;
+ dev->irq = dec_interrupt[DEC_IRQ_LANCE];
+ esar_base = CKSEG1ADDR(dec_kn_slot_base + IOASIC_ESAR);
+
+ /* Workaround crash with booting KN04 2.1k from Disk */
+ memset((void *)dev->mem_start, 0,
+ dev->mem_end - dev->mem_start);
+
+ /*
+ * setup the pointer arrays, this sucks [tm] :-(
+ */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
+ 2 * i * RX_BUFF_SIZE);
+ lp->rx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
+ 2 * RX_RING_SIZE * RX_BUFF_SIZE +
+ 2 * i * TX_BUFF_SIZE);
+ lp->tx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC +
+ RX_RING_SIZE * RX_BUFF_SIZE +
+ i * TX_BUFF_SIZE);
+ }
+
+ /* Setup I/O ASIC LANCE DMA. */
+ lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR];
+ ioasic_write(IO_REG_LANCE_DMA_P,
+ CPHYSADDR(dev->mem_start) << 3);
+
+ break;
+#ifdef CONFIG_TC
+ case PMAD_LANCE:
+ dev_set_drvdata(bdev, dev);
+
+ start = to_tc_dev(bdev)->resource.start;
+ len = to_tc_dev(bdev)->resource.end - start + 1;
+ if (!request_mem_region(start, len, dev_name(bdev))) {
+ printk(KERN_ERR
+ "%s: Unable to reserve MMIO resource\n",
+ dev_name(bdev));
+ ret = -EBUSY;
+ goto err_out_dev;
+ }
+
+ dev->mem_start = CKSEG1ADDR(start);
+ dev->mem_end = dev->mem_start + 0x100000;
+ dev->base_addr = dev->mem_start + 0x100000;
+ dev->irq = to_tc_dev(bdev)->interrupt;
+ esar_base = dev->mem_start + 0x1c0002;
+ lp->dma_irq = -1;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + BUF_OFFSET_CPU +
+ i * RX_BUFF_SIZE);
+ lp->rx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + BUF_OFFSET_CPU +
+ RX_RING_SIZE * RX_BUFF_SIZE +
+ i * TX_BUFF_SIZE);
+ lp->tx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC +
+ RX_RING_SIZE * RX_BUFF_SIZE +
+ i * TX_BUFF_SIZE);
+ }
+
+ break;
+#endif
+ case PMAX_LANCE:
+ dev->irq = dec_interrupt[DEC_IRQ_LANCE];
+ dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE);
+ dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM);
+ dev->mem_end = dev->mem_start + KN01_SLOT_SIZE;
+ esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1);
+ lp->dma_irq = -1;
+
+ /*
+ * setup the pointer arrays, this sucks [tm] :-(
+ */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ lp->rx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
+ 2 * i * RX_BUFF_SIZE);
+ lp->rx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_buf_ptr_cpu[i] =
+ (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
+ 2 * RX_RING_SIZE * RX_BUFF_SIZE +
+ 2 * i * TX_BUFF_SIZE);
+ lp->tx_buf_ptr_lnc[i] =
+ (BUF_OFFSET_LNC +
+ RX_RING_SIZE * RX_BUFF_SIZE +
+ i * TX_BUFF_SIZE);
+ }
+
+ break;
+
+ default:
+ printk(KERN_ERR "%s: declance_init called with unknown type\n",
+ name);
+ ret = -ENODEV;
+ goto err_out_dev;
+ }
+
+ ll = (struct lance_regs *) dev->base_addr;
+ esar = (unsigned char *) esar_base;
+
+ /* prom checks */
+ /* First, check for test pattern */
+ if (esar[0x60] != 0xff && esar[0x64] != 0x00 &&
+ esar[0x68] != 0x55 && esar[0x6c] != 0xaa) {
+ printk(KERN_ERR
+ "%s: Ethernet station address prom not found!\n",
+ name);
+ ret = -ENODEV;
+ goto err_out_resource;
+ }
+ /* Check the prom contents */
+ for (i = 0; i < 8; i++) {
+ if (esar[i * 4] != esar[0x3c - i * 4] &&
+ esar[i * 4] != esar[0x40 + i * 4] &&
+ esar[0x3c - i * 4] != esar[0x40 + i * 4]) {
+ printk(KERN_ERR "%s: Something is wrong with the "
+ "ethernet station address prom!\n", name);
+ ret = -ENODEV;
+ goto err_out_resource;
+ }
+ }
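+
+	/* The ESAR ROM keeps one useful byte per 32-bit word: the station
+	 * address lives at byte offsets 0, 4, 8, ..., with a byte-reversed
+	 * copy ending at 0x3c and a third copy from 0x40 upwards, which is
+	 * what the loop above cross-checks.
+	 */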
+
+ /* Copy the ethernet address to the device structure, later to the
+ * lance initialization block so the lance gets it every time it's
+ * (re)initialized.
+ */
+ switch (type) {
+ case ASIC_LANCE:
+ printk("%s: IOASIC onboard LANCE", name);
+ break;
+ case PMAD_LANCE:
+ printk("%s: PMAD-AA", name);
+ break;
+ case PMAX_LANCE:
+ printk("%s: PMAX onboard LANCE", name);
+ break;
+ }
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = esar[i * 4];
+
+ printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
+
+ dev->netdev_ops = &lance_netdev_ops;
+ dev->watchdog_timeo = 5*HZ;
+
+ /* lp->ll is the location of the registers for lance card */
+ lp->ll = ll;
+
+ /* busmaster_regval (CSR3) should be zero according to the PMAD-AA
+ * specification.
+ */
+ lp->busmaster_regval = 0;
+
+ dev->dma = 0;
+
+ /* We cannot sleep if the chip is busy during a
+ * multicast list update event, because such events
+ * can occur from interrupts (ex. IPv6). So we
+ * use a timer to try again later when necessary. -DaveM
+ */
+ init_timer(&lp->multicast_timer);
+ lp->multicast_timer.data = (unsigned long) dev;
+ lp->multicast_timer.function = lance_set_multicast_retry;
+
+ ret = register_netdev(dev);
+ if (ret) {
+ printk(KERN_ERR
+ "%s: Unable to register netdev, aborting.\n", name);
+ goto err_out_resource;
+ }
+
+ if (!bdev) {
+ lp->next = root_lance_dev;
+ root_lance_dev = dev;
+ }
+
+ printk("%s: registered as %s.\n", name, dev->name);
+ return 0;
+
+err_out_resource:
+ if (bdev)
+ release_mem_region(start, len);
+
+err_out_dev:
+ free_netdev(dev);
+
+err_out:
+ return ret;
+}
+
+static void __exit dec_lance_remove(struct device *bdev)
+{
+ struct net_device *dev = dev_get_drvdata(bdev);
+ resource_size_t start, len;
+
+ unregister_netdev(dev);
+ start = to_tc_dev(bdev)->resource.start;
+ len = to_tc_dev(bdev)->resource.end - start + 1;
+ release_mem_region(start, len);
+ free_netdev(dev);
+}
+
+/* Find all the lance cards on the system and initialize them */
+static int __init dec_lance_platform_probe(void)
+{
+ int count = 0;
+
+ if (dec_interrupt[DEC_IRQ_LANCE] >= 0) {
+ if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) {
+ if (dec_lance_probe(NULL, ASIC_LANCE) >= 0)
+ count++;
+ } else if (!TURBOCHANNEL) {
+ if (dec_lance_probe(NULL, PMAX_LANCE) >= 0)
+ count++;
+ }
+ }
+
+ return (count > 0) ? 0 : -ENODEV;
+}
+
+static void __exit dec_lance_platform_remove(void)
+{
+ while (root_lance_dev) {
+ struct net_device *dev = root_lance_dev;
+ struct lance_private *lp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ root_lance_dev = lp->next;
+ free_netdev(dev);
+ }
+}
+
+#ifdef CONFIG_TC
+static int dec_lance_tc_probe(struct device *dev);
+static int __exit dec_lance_tc_remove(struct device *dev);
+
+static const struct tc_device_id dec_lance_tc_table[] = {
+ { "DEC ", "PMAD-AA " },
+ { }
+};
+MODULE_DEVICE_TABLE(tc, dec_lance_tc_table);
+
+static struct tc_driver dec_lance_tc_driver = {
+ .id_table = dec_lance_tc_table,
+ .driver = {
+ .name = "declance",
+ .bus = &tc_bus_type,
+ .probe = dec_lance_tc_probe,
+ .remove = __exit_p(dec_lance_tc_remove),
+ },
+};
+
+static int dec_lance_tc_probe(struct device *dev)
+{
+ int status = dec_lance_probe(dev, PMAD_LANCE);
+ if (!status)
+ get_device(dev);
+ return status;
+}
+
+static int __exit dec_lance_tc_remove(struct device *dev)
+{
+ put_device(dev);
+ dec_lance_remove(dev);
+ return 0;
+}
+#endif
+
+static int __init dec_lance_init(void)
+{
+ int status;
+
+ status = tc_register_driver(&dec_lance_tc_driver);
+ if (!status)
+ dec_lance_platform_probe();
+ return status;
+}
+
+static void __exit dec_lance_exit(void)
+{
+ dec_lance_platform_remove();
+ tc_unregister_driver(&dec_lance_tc_driver);
+}
+
+
+module_init(dec_lance_init);
+module_exit(dec_lance_exit);
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
new file mode 100644
index 000000000..6c9de117f
--- /dev/null
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -0,0 +1,232 @@
+/* hplance.c : the Linux/hp300/lance ethernet driver
+ *
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ * Based on the Sun Lance driver and the NetBSD HP Lance driver
+ * Uses the generic 7990.c LANCE code.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+/* Used for the temporal inet entries and routing */
+#include <linux/socket.h>
+#include <linux/route.h>
+#include <linux/dio.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+#include "hplance.h"
+
+/* We have 16392 bytes of RAM for the init block and buffers. This places
+ * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
+ * buffers and 2 Tx buffers, it takes (8 + 2) * 1544 bytes.
+ */
+#define LANCE_LOG_TX_BUFFERS 1
+#define LANCE_LOG_RX_BUFFERS 3
+
+#include "7990.h" /* use generic LANCE code */
+
+/* Our private data structure */
+struct hplance_private {
+ struct lance_private lance;
+};
+
+/* function prototypes... This is easy because all the grot is in the
+ * generic LANCE support. All we have to support is probing for boards,
+ * plus board-specific init, open and close actions.
+ * Oh, and we need to tell the generic code how to read and write LANCE registers...
+ */
+static int hplance_init_one(struct dio_dev *d, const struct dio_device_id *ent);
+static void hplance_init(struct net_device *dev, struct dio_dev *d);
+static void hplance_remove_one(struct dio_dev *d);
+static void hplance_writerap(void *priv, unsigned short value);
+static void hplance_writerdp(void *priv, unsigned short value);
+static unsigned short hplance_readrdp(void *priv);
+static int hplance_open(struct net_device *dev);
+static int hplance_close(struct net_device *dev);
+
+static struct dio_device_id hplance_dio_tbl[] = {
+ { DIO_ID_LAN },
+ { 0 }
+};
+
+static struct dio_driver hplance_driver = {
+ .name = "hplance",
+ .id_table = hplance_dio_tbl,
+ .probe = hplance_init_one,
+ .remove = hplance_remove_one,
+};
+
+static const struct net_device_ops hplance_netdev_ops = {
+ .ndo_open = hplance_open,
+ .ndo_stop = hplance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_rx_mode = lance_set_multicast,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = lance_poll,
+#endif
+};
+
+/* Find all the HP Lance boards and initialise them... */
+static int hplance_init_one(struct dio_dev *d, const struct dio_device_id *ent)
+{
+ struct net_device *dev;
+ int err = -ENOMEM;
+
+ dev = alloc_etherdev(sizeof(struct hplance_private));
+ if (!dev)
+ goto out;
+
+ err = -EBUSY;
+ if (!request_mem_region(dio_resource_start(d),
+ dio_resource_len(d), d->name))
+ goto out_free_netdev;
+
+ hplance_init(dev, d);
+ err = register_netdev(dev);
+ if (err)
+ goto out_release_mem_region;
+
+ dio_set_drvdata(d, dev);
+
+ printk(KERN_INFO "%s: %s; select code %d, addr %pM, irq %d\n",
+ dev->name, d->name, d->scode, dev->dev_addr, d->ipl);
+
+ return 0;
+
+ out_release_mem_region:
+ release_mem_region(dio_resource_start(d), dio_resource_len(d));
+ out_free_netdev:
+ free_netdev(dev);
+ out:
+ return err;
+}
+
+static void hplance_remove_one(struct dio_dev *d)
+{
+ struct net_device *dev = dio_get_drvdata(d);
+
+ unregister_netdev(dev);
+ release_mem_region(dio_resource_start(d), dio_resource_len(d));
+ free_netdev(dev);
+}
+
+/* Initialise a single lance board at the given DIO device */
+static void hplance_init(struct net_device *dev, struct dio_dev *d)
+{
+ unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
+ struct hplance_private *lp;
+ int i;
+
+ /* reset the board */
+ out_8(va + DIO_IDOFF, 0xff);
+ udelay(100); /* ariba! ariba! udelay! udelay! */
+
+ /* Fill the dev fields */
+ dev->base_addr = va;
+ dev->netdev_ops = &hplance_netdev_ops;
+ dev->dma = 0;
+
+ for (i = 0; i < 6; i++) {
+ /* The NVRAM holds our ethernet address, one nibble per byte,
+ * at bytes NVRAMOFF+1,3,5,7,9...
+ */
+ dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
+ | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
+ }
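+	/* e.g. for address byte 0 the high nibble is the low four bits of
+	 * the byte at NVRAMOFF + 1 and the low nibble those of the byte at
+	 * NVRAMOFF + 3; the loop above stitches the nibbles back together.
+	 */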
+
+ lp = netdev_priv(dev);
+ lp->lance.name = d->name;
+ lp->lance.base = va;
+ lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
+ lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */
+ lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
+ lp->lance.irq = d->ipl;
+ lp->lance.writerap = hplance_writerap;
+ lp->lance.writerdp = hplance_writerdp;
+ lp->lance.readrdp = hplance_readrdp;
+ lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+ lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+ lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
+ lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
+}
+
+/* This is disgusting. We have to check the DIO status register for ack every
+ * time we read or write the LANCE registers.
+ */
+static void hplance_writerap(void *priv, unsigned short value)
+{
+ struct lance_private *lp = (struct lance_private *)priv;
+ do {
+ out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+}
+
+static void hplance_writerdp(void *priv, unsigned short value)
+{
+ struct lance_private *lp = (struct lance_private *)priv;
+ do {
+ out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+}
+
+static unsigned short hplance_readrdp(void *priv)
+{
+ struct lance_private *lp = (struct lance_private *)priv;
+ __u16 value;
+ do {
+ value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
+ } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
+ return value;
+}
+
+static int hplance_open(struct net_device *dev)
+{
+ int status;
+ struct lance_private *lp = netdev_priv(dev);
+
+ status = lance_open(dev); /* call generic lance open code */
+ if (status)
+ return status;
+ /* enable interrupts at board level. */
+ out_8(lp->base + HPLANCE_STATUS, LE_IE);
+
+ return 0;
+}
+
+static int hplance_close(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+	out_8(lp->base + HPLANCE_STATUS, 0);	/* disable interrupts at board level */
+ lance_close(dev);
+ return 0;
+}
+
+static int __init hplance_init_module(void)
+{
+ return dio_register_driver(&hplance_driver);
+}
+
+static void __exit hplance_cleanup_module(void)
+{
+ dio_unregister_driver(&hplance_driver);
+}
+
+module_init(hplance_init_module);
+module_exit(hplance_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/hplance.h b/drivers/net/ethernet/amd/hplance.h
new file mode 100644
index 000000000..04aee9e03
--- /dev/null
+++ b/drivers/net/ethernet/amd/hplance.h
@@ -0,0 +1,26 @@
+/* Random defines and structures for the HP Lance driver.
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ * Based on the Sun Lance driver and the NetBSD HP Lance driver
+ */
+
+/* Registers */
+#define HPLANCE_ID 0x01 /* DIO register: ID byte */
+#define HPLANCE_STATUS 0x03 /* DIO register: interrupt enable/status */
+
+/* Control and status bits for the status register */
+#define LE_IE 0x80 /* interrupt enable */
+#define LE_IR 0x40 /* interrupt requested */
+#define LE_LOCK 0x08 /* lock status register */
+#define LE_ACK 0x04 /* ack of lock */
+#define LE_JAB 0x02 /* loss of tx clock (???) */
+/* We can also extract the IPL from the status register with the standard
+ * DIO_IPL(hplance) macro, or using dio_scodetoipl()
+ */
+
+/* These are the offsets for the DIO regs (hplance_reg), lance_ioreg,
+ * memory and NVRAM:
+ */
+#define HPLANCE_IDOFF 0 /* board baseaddr */
+#define HPLANCE_REGOFF 0x4000 /* lance registers */
+#define HPLANCE_MEMOFF 0x8000 /* struct lance_init_block */
+#define HPLANCE_NVRAMOFF 0xC008 /* ethernet address as one *nibble* per byte */
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
new file mode 100644
index 000000000..256f590f6
--- /dev/null
+++ b/drivers/net/ethernet/amd/lance.c
@@ -0,0 +1,1312 @@
+/* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
+/*
+ Written/copyright 1993-1998 by Donald Becker.
+
+ Copyright 1993 United States Government as represented by the
+ Director, National Security Agency.
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+
+ This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
+ with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
+
+ The author may be reached as becker@scyld.com, or C/O
+ Scyld Computing Corporation
+ 410 Severn Ave., Suite 210
+ Annapolis MD 21403
+
+ Andrey V. Savochkin:
+ - alignment problem with 1.3.* kernel and some minor changes.
+ Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
+ - added support for Linux/Alpha, but removed most of it, because
+ it worked only for the PCI chip.
+ - added hook for the 32bit lance driver
+ - added PCnetPCI II (79C970A) to chip table
+ Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
+ - hopefully fix above so Linux/Alpha can use ISA cards too.
+ 8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
+ v1.12 10/27/97 Module support -djb
+ v1.14 2/3/98 Module support modified, made PCI support optional -djb
+ v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
+ before unregister_netdev() which caused NULL pointer
+ reference later in the chain (in rtnetlink_fill_ifinfo())
+ -- Mika Kuoppala <miku@iki.fi>
+
+ Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
+ the 2.1 version of the old driver - Alan Cox
+
+ Get rid of check_region, check kmalloc return in lance_probe1
+ Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
+
+ Reworked detection, added support for Racal InterLan EtherBlaster cards
+ Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004
+*/
+
+static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mm.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+
+static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
+static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
+static int __init do_lance_probe(struct net_device *dev);
+
+
+static struct card {
+ char id_offset14;
+ char id_offset15;
+} cards[] = {
+ { //"normal"
+ .id_offset14 = 0x57,
+ .id_offset15 = 0x57,
+ },
+ { //NI6510EB
+ .id_offset14 = 0x52,
+ .id_offset15 = 0x44,
+ },
+ { //Racal InterLan EtherBlaster
+ .id_offset14 = 0x52,
+ .id_offset15 = 0x49,
+ },
+};
+#define NUM_CARDS ARRAY_SIZE(cards)
+
+#ifdef LANCE_DEBUG
+static int lance_debug = LANCE_DEBUG;
+#else
+static int lance_debug = 1;
+#endif
+
+/*
+ Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the AMD 79C960, the "PCnet-ISA
+single-chip ethernet controller for ISA". This chip is used in a wide
+variety of boards from vendors such as Allied Telesis, HP, Kingston,
+and Boca. This driver is also intended to work with older AMD 7990
+designs, such as the NE1500 and NE2100, and newer 79C961. For convenience,
+I use the name LANCE to refer to all of the AMD chips, even though it properly
+refers only to the original 7990.
+
+II. Board-specific settings
+
+The driver is designed to work with the boards that use the faster
+bus-master mode, rather than in shared memory mode. (Only older designs
+have the on-board buffer memory needed to support the slower shared memory mode.)
+
+Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
+channel. This driver probes the likely base addresses:
+{0x300, 0x320, 0x340, 0x360}.
+After the board is found it generates a DMA-timeout interrupt and uses
+autoIRQ to find the IRQ line. The DMA channel can be set with the low bits
+of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
+probed for by enabling each free DMA channel in turn and checking if
+initialization succeeds.
+
+The HP-J2405A board is an exception: with this board it is easy to read the
+EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
+_know_ the base address -- that field is for writing the EEPROM.)
+
+III. Driver operation
+
+IIIa. Ring buffers
+The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes
+the base and length of the data buffer, along with status bits. The length
+of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
+the buffer length (rather than being directly the buffer length) for
+implementation ease. The current values are 4 (Tx) and 4 (Rx), which leads to
+ring sizes of 16 (Tx) and 16 (Rx). Increasing the number of ring entries
+needlessly uses extra space and reduces the chance that an upper layer will
+be able to reorder queued Tx packets based on priority. Decreasing the number
+of entries makes it more difficult to achieve back-to-back packet transmission
+and increases the chance that Rx ring will overflow. (Consider the worst case
+of receiving back-to-back minimum-sized packets.)
+
+The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
+statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
+avoid the administrative overhead. For the Rx side this avoids dynamically
+allocating full-sized buffers "just in case", at the expense of a
+memory-to-memory data copy for each packet received. For most systems this
+is a good tradeoff: the Rx buffer will always be in low memory, the copy
+is inexpensive, and it primes the cache for later packet processing. For Tx
+the buffers are only used when needed as low-memory bounce buffers.
+
+IIIb. 16M memory limitations.
+For the ISA bus master mode all structures used directly by the LANCE,
+the initialization block, Rx and Tx rings, and data buffers, must be
+accessible from the ISA bus, i.e. in the lower 16M of real memory.
+This is a problem for current Linux kernels on >16M machines. The network
+devices are initialized after memory initialization, and the kernel doles out
+memory from the top of memory downward. The current solution is to have a
+special network initialization routine that's called before memory
+initialization; this will eventually be generalized for all network devices.
+As mentioned before, low-memory "bounce-buffers" are used when needed.
+
+IIIc. Synchronization
+The driver runs as two independent, single-threaded flows of control. One
+is the send-packet routine, which enforces single-threaded use by the
+dev->tbusy flag. The other thread is the interrupt handler, which is single
+threaded by the hardware and other software.
+
+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
+queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+the 'lp->tx_full' flag.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
+we can't avoid the interrupt overhead by having the Tx routine reap the Tx
+stats.) After reaping the stats, it marks the queue entry as empty by setting
+the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
+tx_full and tbusy flags.
+
+*/
+
+/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
+   Reasonable default values are 16 Tx buffers and 16 Rx buffers.
+   That translates to 4 and 4 (16 == 2^4).
+   This is a compile-time option for efficiency.
+   */
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 4
+#define LANCE_LOG_RX_BUFFERS 4
+#endif
+
+#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+
+#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
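+
+/* The *_RING_LEN_BITS values park log2 of the ring size in bits 31:29,
+ * which is where the LANCE expects the ring length code in the init
+ * block ring pointers (the 24-bit ring address goes in the low bits);
+ * e.g. LANCE_LOG_TX_BUFFERS == 4 makes TX_RING_LEN_BITS 0x80000000.
+ */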
+
+#define PKT_BUF_SZ 1544
+
+/* Offsets from base I/O address. */
+#define LANCE_DATA 0x10
+#define LANCE_ADDR 0x12
+#define LANCE_RESET 0x14
+#define LANCE_BUS_IF 0x16
+#define LANCE_TOTAL_SIZE 0x18
+
+#define TX_TIMEOUT (HZ/5)
+
+/* The LANCE Rx and Tx ring descriptors. */
+struct lance_rx_head {
+ s32 base;
+ s16 buf_length; /* This length is 2s complement (negative)! */
+ s16 msg_length; /* This length is "normal". */
+};
+
+struct lance_tx_head {
+ s32 base;
+ s16 length; /* Length is 2s complement (negative)! */
+ s16 misc;
+};
+
+/* The LANCE initialization block, described in databook. */
+struct lance_init_block {
+ u16 mode; /* Pre-set mode (reg. 15) */
+ u8 phys_addr[6]; /* Physical ethernet address */
+ u32 filter[2]; /* Multicast filter (unused). */
+ /* Receive and transmit ring base, along with extra bits. */
+ u32 rx_ring; /* Tx and Rx ring base pointers */
+ u32 tx_ring;
+};
+
+struct lance_private {
+ /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
+ struct lance_rx_head rx_ring[RX_RING_SIZE];
+ struct lance_tx_head tx_ring[TX_RING_SIZE];
+ struct lance_init_block init_block;
+ const char *name;
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ unsigned long rx_buffs; /* Address of Rx and Tx buffers. */
+ /* Tx low-memory "bounce buffer" address. */
+ char (*tx_bounce_buffs)[PKT_BUF_SZ];
+ int cur_rx, cur_tx; /* The next free ring entry */
+ int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+ int dma;
+ unsigned char chip_version; /* See lance_chip_type. */
+ spinlock_t devlock;
+};
+
+#define LANCE_MUST_PAD 0x00000001
+#define LANCE_ENABLE_AUTOSELECT 0x00000002
+#define LANCE_MUST_REINIT_RING 0x00000004
+#define LANCE_MUST_UNRESET 0x00000008
+#define LANCE_HAS_MISSED_FRAME 0x00000010
+
+/* A mapping from the chip ID number to the part number and features.
+ These are from the datasheets -- in real life the '970 version
+ reportedly has the same ID as the '965. */
+static struct lance_chip_type {
+ int id_number;
+ const char *name;
+ int flags;
+} chip_table[] = {
+ {0x0000, "LANCE 7990", /* Ancient lance chip. */
+ LANCE_MUST_PAD + LANCE_MUST_UNRESET},
+ {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
+ it the PCnet32. */
+ {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+	{0x2621, "PCnet/PCI-II 79C970A",	/* 79C970A PCnet/PCI II. */
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+ {0x0, "PCnet (unknown)",
+ LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
+ LANCE_HAS_MISSED_FRAME},
+};
+
+enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
+
+
+/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
+ Assume yes until we know the memory size. */
+static unsigned char lance_need_isa_bounce_buffers = 1;
+
+static int lance_open(struct net_device *dev);
+static void lance_init_ring(struct net_device *dev, gfp_t mode);
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static int lance_rx(struct net_device *dev);
+static irqreturn_t lance_interrupt(int irq, void *dev_id);
+static int lance_close(struct net_device *dev);
+static struct net_device_stats *lance_get_stats(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static void lance_tx_timeout (struct net_device *dev);
+
+
+
+#ifdef MODULE
+#define MAX_CARDS 8 /* Max number of interfaces (cards) per module */
+
+static struct net_device *dev_lance[MAX_CARDS];
+static int io[MAX_CARDS];
+static int dma[MAX_CARDS];
+static int irq[MAX_CARDS];
+
+module_param_array(io, int, NULL, 0);
+module_param_array(dma, int, NULL, 0);
+module_param_array(irq, int, NULL, 0);
+module_param(lance_debug, int, 0);
+MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es), required");
+MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
+MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
+MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
+
+int __init init_module(void)
+{
+ struct net_device *dev;
+ int this_dev, found = 0;
+
+ for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
+ if (io[this_dev] == 0) {
+ if (this_dev != 0) /* only complain once */
+ break;
+ printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
+ return -EPERM;
+ }
+ dev = alloc_etherdev(0);
+ if (!dev)
+ break;
+ dev->irq = irq[this_dev];
+ dev->base_addr = io[this_dev];
+ dev->dma = dma[this_dev];
+ if (do_lance_probe(dev) == 0) {
+ dev_lance[found++] = dev;
+ continue;
+ }
+ free_netdev(dev);
+ break;
+ }
+ if (found != 0)
+ return 0;
+ return -ENXIO;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+ if (dev->dma != 4)
+ free_dma(dev->dma);
+ release_region(dev->base_addr, LANCE_TOTAL_SIZE);
+ kfree(lp->tx_bounce_buffs);
+ kfree((void*)lp->rx_buffs);
+ kfree(lp);
+}
+
+void __exit cleanup_module(void)
+{
+ int this_dev;
+
+ for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
+ struct net_device *dev = dev_lance[this_dev];
+ if (dev) {
+ unregister_netdev(dev);
+ cleanup_card(dev);
+ free_netdev(dev);
+ }
+ }
+}
+#endif /* MODULE */
+MODULE_LICENSE("GPL");
+
+
+/* Starting in v2.1.*, the LANCE/PCnet probe is similar to the other
+   board probes, now that kmalloc() can allocate ISA DMA-able regions.
+   This also allows the LANCE driver to be used as a module.
+   */
+static int __init do_lance_probe(struct net_device *dev)
+{
+ unsigned int *port;
+ int result;
+
+ if (high_memory <= phys_to_virt(16*1024*1024))
+ lance_need_isa_bounce_buffers = 0;
+
+ for (port = lance_portlist; *port; port++) {
+ int ioaddr = *port;
+ struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
+ "lance-probe");
+
+ if (r) {
+ /* Detect the card with minimal I/O reads */
+ char offset14 = inb(ioaddr + 14);
+ int card;
+ for (card = 0; card < NUM_CARDS; ++card)
+ if (cards[card].id_offset14 == offset14)
+ break;
+ if (card < NUM_CARDS) {/*yes, the first byte matches*/
+ char offset15 = inb(ioaddr + 15);
+ for (card = 0; card < NUM_CARDS; ++card)
+ if ((cards[card].id_offset14 == offset14) &&
+ (cards[card].id_offset15 == offset15))
+ break;
+ }
+ if (card < NUM_CARDS) { /*Signature OK*/
+ result = lance_probe1(dev, ioaddr, 0, 0);
+ if (!result) {
+ struct lance_private *lp = dev->ml_priv;
+ int ver = lp->chip_version;
+
+ r->name = chip_table[ver].name;
+ return 0;
+ }
+ }
+ release_region(ioaddr, LANCE_TOTAL_SIZE);
+ }
+ }
+ return -ENODEV;
+}
+
+#ifndef MODULE
+struct net_device * __init lance_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(0);
+ int err;
+
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+
+ err = do_lance_probe(dev);
+ if (err)
+ goto out;
+ return dev;
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+#endif
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = lance_open,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_stop = lance_close,
+ .ndo_get_stats = lance_get_stats,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
+{
+ struct lance_private *lp;
+ unsigned long dma_channels; /* Mark spuriously-busy DMA channels */
+ int i, reset_val, lance_version;
+ const char *chipname;
+ /* Flags for specific chips or boards. */
+ unsigned char hpJ2405A = 0; /* HP ISA adaptor */
+ int hp_builtin = 0; /* HP on-board ethernet. */
+ static int did_version; /* Already printed version info. */
+ unsigned long flags;
+ int err = -ENOMEM;
+ void __iomem *bios;
+
+ /* First we look for special cases.
+ Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
+ There are two HP versions, check the BIOS for the configuration port.
+ This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
+ */
+ bios = ioremap(0xf00f0, 0x14);
+ if (!bios)
+ return -ENOMEM;
+ if (readw(bios + 0x12) == 0x5048) {
+ static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
+ int hp_port = (readl(bios + 1) & 1) ? 0x499 : 0x99;
+ /* We can have boards other than the built-in! Verify this is on-board. */
+ if ((inb(hp_port) & 0xc0) == 0x80 &&
+ ioaddr_table[inb(hp_port) & 3] == ioaddr)
+ hp_builtin = hp_port;
+ }
+ iounmap(bios);
+ /* We also recognize the HP Vectra on-board here, but check below. */
+ hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 &&
+ inb(ioaddr+2) == 0x09);
+
+ /* Reset the LANCE. */
+ reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
+
+	/* The Un-Reset is only needed for the real NE2100, and will
+	   confuse the HP board. */
+ if (!hpJ2405A)
+ outw(reset_val, ioaddr+LANCE_RESET);
+
+ outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
+ if (inw(ioaddr+LANCE_DATA) != 0x0004)
+ return -ENODEV;
+
+ /* Get the version of the chip. */
+ outw(88, ioaddr+LANCE_ADDR);
+ if (inw(ioaddr+LANCE_ADDR) != 88) {
+ lance_version = 0;
+ } else { /* Good, it's a newer chip. */
+ int chip_version = inw(ioaddr+LANCE_DATA);
+ outw(89, ioaddr+LANCE_ADDR);
+ chip_version |= inw(ioaddr+LANCE_DATA) << 16;
+ if (lance_debug > 2)
+ printk(" LANCE chip version is %#x.\n", chip_version);
+ if ((chip_version & 0xfff) != 0x003)
+ return -ENODEV;
+ chip_version = (chip_version >> 12) & 0xffff;
+ for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
+ if (chip_table[lance_version].id_number == chip_version)
+ break;
+ }
+ }
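+
+	/* Example decode: CSR88/89 read back e.g. 0x2621003 on a 79C970A;
+	 * the low 12 bits must be 0x003, and the part number (here 0x2621)
+	 * is what gets matched against chip_table[] above.
+	 */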
+
+	/* We can't allocate private data from alloc_etherdev() because it must
+	   be in an ISA DMA-able region. */
+ chipname = chip_table[lance_version].name;
+ printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);
+
+ /* There is a 16 byte station address PROM at the base address.
+ The first six bytes are the station address. */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = inb(ioaddr + i);
+ printk("%pM", dev->dev_addr);
+
+ dev->base_addr = ioaddr;
+ /* Make certain the data structures used by the LANCE are aligned and DMAble. */
+
+	lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
+	if (!lp)
+		return -ENOMEM;
+	if (lance_debug > 6)
+		printk(" (#0x%05lx)", (unsigned long)lp);
+ dev->ml_priv = lp;
+ lp->name = chipname;
+ lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ if (!lp->rx_buffs)
+ goto out_lp;
+ if (lance_need_isa_bounce_buffers) {
+ lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE,
+ GFP_DMA | GFP_KERNEL);
+ if (!lp->tx_bounce_buffs)
+ goto out_rx;
+ } else
+ lp->tx_bounce_buffs = NULL;
+
+ lp->chip_version = lance_version;
+ spin_lock_init(&lp->devlock);
+
+ lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
+ lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
+
+ outw(0x0001, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+ outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+ outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ inw(ioaddr+LANCE_ADDR);
+
+ if (irq) { /* Set iff PCI card. */
+ dev->dma = 4; /* Native bus-master, no DMA channel needed. */
+ dev->irq = irq;
+ } else if (hp_builtin) {
+ static const char dma_tbl[4] = {3, 5, 6, 0};
+ static const char irq_tbl[4] = {3, 4, 5, 9};
+ unsigned char port_val = inb(hp_builtin);
+ dev->dma = dma_tbl[(port_val >> 4) & 3];
+ dev->irq = irq_tbl[(port_val >> 2) & 3];
+ printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
+ } else if (hpJ2405A) {
+ static const char dma_tbl[4] = {3, 5, 6, 7};
+ static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
+ short reset_val = inw(ioaddr+LANCE_RESET);
+ dev->dma = dma_tbl[(reset_val >> 2) & 3];
+ dev->irq = irq_tbl[(reset_val >> 4) & 7];
+ printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
+ } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */
+ short bus_info;
+ outw(8, ioaddr+LANCE_ADDR);
+ bus_info = inw(ioaddr+LANCE_BUS_IF);
+ dev->dma = bus_info & 0x07;
+ dev->irq = (bus_info >> 4) & 0x0F;
+ } else {
+ /* The DMA channel may be passed in PARAM1. */
+ if (dev->mem_start & 0x07)
+ dev->dma = dev->mem_start & 0x07;
+ }
+
+ if (dev->dma == 0) {
+ /* Read the DMA channel status register, so that we can avoid
+ stuck DMA channels in the DMA detection below. */
+ dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
+ (inb(DMA2_STAT_REG) & 0xf0);
+ }
+ err = -ENODEV;
+ if (dev->irq >= 2)
+ printk(" assigned IRQ %d", dev->irq);
+ else if (lance_version != 0) { /* 7990 boards need DMA detection first. */
+ unsigned long irq_mask;
+
+ /* To auto-IRQ we enable the initialization-done and DMA error
+ interrupts. For ISA boards we get a DMA error, but VLB and PCI
+ boards will work. */
+ irq_mask = probe_irq_on();
+
+ /* Trigger an initialization just for the interrupt. */
+ outw(0x0041, ioaddr+LANCE_DATA);
+
+ mdelay(20);
+ dev->irq = probe_irq_off(irq_mask);
+ if (dev->irq)
+ printk(", probed IRQ %d", dev->irq);
+ else {
+ printk(", failed to detect IRQ line.\n");
+ goto out_tx;
+ }
+
+ /* Check for the initialization done bit, 0x0100, which means
+ that we don't need a DMA channel. */
+ if (inw(ioaddr+LANCE_DATA) & 0x0100)
+ dev->dma = 4;
+ }
+
+ if (dev->dma == 4) {
+ printk(", no DMA needed.\n");
+ } else if (dev->dma) {
+ if (request_dma(dev->dma, chipname)) {
+ printk("DMA %d allocation failed.\n", dev->dma);
+ goto out_tx;
+ } else
+ printk(", assigned DMA %d.\n", dev->dma);
+ } else { /* OK, we have to auto-DMA. */
+ for (i = 0; i < 4; i++) {
+ static const char dmas[] = { 5, 6, 7, 3 };
+ int dma = dmas[i];
+ int boguscnt;
+
+ /* Don't enable a permanently busy DMA channel, or the machine
+ will hang. */
+ if (test_bit(dma, &dma_channels))
+ continue;
+ outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
+ if (request_dma(dma, chipname))
+ continue;
+
+ flags=claim_dma_lock();
+ set_dma_mode(dma, DMA_MODE_CASCADE);
+ enable_dma(dma);
+ release_dma_lock(flags);
+
+ /* Trigger an initialization. */
+ outw(0x0001, ioaddr+LANCE_DATA);
+ for (boguscnt = 100; boguscnt > 0; --boguscnt)
+ if (inw(ioaddr+LANCE_DATA) & 0x0900)
+ break;
+ if (inw(ioaddr+LANCE_DATA) & 0x0100) {
+ dev->dma = dma;
+ printk(", DMA %d.\n", dev->dma);
+ break;
+ } else {
+ flags=claim_dma_lock();
+ disable_dma(dma);
+ release_dma_lock(flags);
+ free_dma(dma);
+ }
+ }
+ if (i == 4) { /* Failure: bail. */
+ printk("DMA detection failed.\n");
+ goto out_tx;
+ }
+ }
+
+ if (lance_version == 0 && dev->irq == 0) {
+ /* We may auto-IRQ now that we have a DMA channel. */
+ /* Trigger an initialization just for the interrupt. */
+ unsigned long irq_mask;
+
+ irq_mask = probe_irq_on();
+ outw(0x0041, ioaddr+LANCE_DATA);
+
+ mdelay(40);
+ dev->irq = probe_irq_off(irq_mask);
+ if (dev->irq == 0) {
+ printk(" Failed to detect the 7990 IRQ line.\n");
+ goto out_dma;
+ }
+ printk(" Auto-IRQ detected IRQ%d.\n", dev->irq);
+ }
+
+ if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
+ /* Turn on auto-select of media (10baseT or BNC) so that the user
+ can watch the LEDs even if the board isn't opened. */
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ /* Don't touch 10base2 power bit. */
+ outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
+ }
+
+ if (lance_debug > 0 && did_version++ == 0)
+ printk(version);
+
+ /* The LANCE-specific entries in the device structure. */
+ dev->netdev_ops = &lance_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out_dma;
+ return 0;
+out_dma:
+ if (dev->dma != 4)
+ free_dma(dev->dma);
+out_tx:
+ kfree(lp->tx_bounce_buffs);
+out_rx:
+ kfree((void*)lp->rx_buffs);
+out_lp:
+ kfree(lp);
+ return err;
+}
+
+
+static int
+lance_open(struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+ int ioaddr = dev->base_addr;
+ int i;
+
+ if (dev->irq == 0 ||
+ request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
+ return -EAGAIN;
+ }
+
+ /* We used to allocate DMA here, but that was silly.
+ DMA lines can't be shared! We now permanently allocate them. */
+
+ /* Reset the LANCE */
+ inw(ioaddr+LANCE_RESET);
+
+ /* The DMA controller is used as a no-operation slave, "cascade mode". */
+ if (dev->dma != 4) {
+ unsigned long flags = claim_dma_lock();
+ enable_dma(dev->dma);
+ set_dma_mode(dev->dma, DMA_MODE_CASCADE);
+ release_dma_lock(flags);
+ }
+
+ /* Un-Reset the LANCE, needed only for the NE2100. */
+ if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
+ outw(0, ioaddr+LANCE_RESET);
+
+ if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
+ /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ /* Only touch autoselect bit. */
+ outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
+ }
+
+ if (lance_debug > 1)
+ printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
+ dev->name, dev->irq, dev->dma,
+ (u32) isa_virt_to_bus(lp->tx_ring),
+ (u32) isa_virt_to_bus(lp->rx_ring),
+ (u32) isa_virt_to_bus(&lp->init_block));
+
+ lance_init_ring(dev, GFP_KERNEL);
+ /* Re-initialize the LANCE, and start it when done. */
+ outw(0x0001, ioaddr+LANCE_ADDR);
+ outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
+ outw(0x0002, ioaddr+LANCE_ADDR);
+ outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
+
+ outw(0x0004, ioaddr+LANCE_ADDR);
+ outw(0x0915, ioaddr+LANCE_DATA);
+
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ outw(0x0001, ioaddr+LANCE_DATA);
+
+ netif_start_queue (dev);
+
+ i = 0;
+ while (i++ < 100)
+ if (inw(ioaddr+LANCE_DATA) & 0x0100)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ outw(0x0042, ioaddr+LANCE_DATA);
+
+ if (lance_debug > 2)
+ printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
+ dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));
+
+ return 0; /* Always succeed */
+}
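+
+/* The open sequence above, spelled out in CSR terms (register roles
+ per the usual LANCE programming model; CSR4 is PCnet-specific):
+
+ CSR1 = init block bus address, bits 15:0
+ CSR2 = init block bus address, bits 23:16
+ CSR4 = 0x0915 (PCnet feature bits, e.g. auto Tx pad / Rx strip)
+ CSR0 = INIT (0x0001), then poll for IDON (0x0100)
+ CSR0 = STRT|INEA (0x0042) to start the chip with interrupts enabled */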
+
+/* The LANCE has been halted for one reason or another (busmaster memory
+ arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ etc.). Modern LANCE variants always reload their ring-buffer
+ configuration when restarted, so we must reinitialize our ring
+ context before restarting. As part of this reinitialization,
+ find all packets still on the Tx ring and pretend that they had been
+ sent (in effect, drop the packets on the floor) - the higher-level
+ protocols will time out and retransmit. It'd be better to shuffle
+ these skbs to a temp list and then actually re-Tx them after
+ restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
+*/
+
+static void
+lance_purge_ring(struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+ int i;
+
+ /* Free all the skbuffs in the Rx and Tx queues. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = lp->rx_skbuff[i];
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */
+ if (skb)
+ dev_kfree_skb_any(skb);
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (lp->tx_skbuff[i]) {
+ dev_kfree_skb_any(lp->tx_skbuff[i]);
+ lp->tx_skbuff[i] = NULL;
+ }
+ }
+}
+
+
+/* Initialize the LANCE Rx and Tx rings. */
+static void
+lance_init_ring(struct net_device *dev, gfp_t gfp)
+{
+ struct lance_private *lp = dev->ml_priv;
+ int i;
+
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_rx = lp->dirty_tx = 0;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+ void *rx_buff;
+
+ skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
+ lp->rx_skbuff[i] = skb;
+ if (skb)
+ rx_buff = skb->data;
+ else
+ rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
+ if (rx_buff == NULL)
+ lp->rx_ring[i].base = 0;
+ else
+ lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
+ lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
+ }
+ /* The Tx buffer address is filled in as needed, but we do need to clear
+ the upper ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_ring[i].base = 0;
+ }
+
+ lp->init_block.mode = 0x0000;
+ for (i = 0; i < 6; i++)
+ lp->init_block.phys_addr[i] = dev->dev_addr[i];
+ lp->init_block.filter[0] = 0x00000000;
+ lp->init_block.filter[1] = 0x00000000;
+ lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
+ lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
+}
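+
+/* Encoding used above, per the LANCE programming model: rx_ring and
+ tx_ring pack a 24-bit bus address with the ring length in the top
+ three bits, i.e. RX_RING_LEN_BITS is log2(ring size) << 29 -- for a
+ 16-entry ring, 4 << 29. Buffer lengths are written as negative
+ two's-complement byte counts, hence buf_length = -PKT_BUF_SZ. */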
+
+static void
+lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
+{
+ struct lance_private *lp = dev->ml_priv;
+
+ if (must_reinit ||
+ (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
+ lance_purge_ring(dev);
+ lance_init_ring(dev, GFP_ATOMIC);
+ }
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(csr0_bits, dev->base_addr + LANCE_DATA);
+}
+
+
+static void lance_tx_timeout (struct net_device *dev)
+{
+ struct lance_private *lp = (struct lance_private *) dev->ml_priv;
+ int ioaddr = dev->base_addr;
+
+ outw (0, ioaddr + LANCE_ADDR);
+ printk ("%s: transmit timed out, status %4.4x, resetting.\n",
+ dev->name, inw (ioaddr + LANCE_DATA));
+ outw (0x0004, ioaddr + LANCE_DATA);
+ dev->stats.tx_errors++;
+#ifndef final_version
+ if (lance_debug > 3) {
+ int i;
+ printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
+ lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
+ lp->cur_rx);
+ for (i = 0; i < RX_RING_SIZE; i++)
+ printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
+ lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
+ lp->rx_ring[i].msg_length);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
+ lp->tx_ring[i].base, -lp->tx_ring[i].length,
+ lp->tx_ring[i].misc);
+ printk ("\n");
+ }
+#endif
+ lance_restart (dev, 0x0043, 1);
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue (dev);
+}
+
+
+static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+ int ioaddr = dev->base_addr;
+ int entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->devlock, flags);
+
+ if (lance_debug > 3) {
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
+ inw(ioaddr+LANCE_DATA));
+ outw(0x0000, ioaddr+LANCE_DATA);
+ }
+
+ /* Fill in a Tx ring entry */
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & TX_RING_MOD_MASK;
+
+ /* Caution: the write order is important here, set the base address
+ with the "ownership" bits last. */
+
+ /* The old LANCE chips don't automatically pad buffers to the minimum size. */
+ if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
+ if (skb->len < ETH_ZLEN) {
+ if (skb_padto(skb, ETH_ZLEN))
+ goto out;
+ lp->tx_ring[entry].length = -ETH_ZLEN;
+ }
+ else
+ lp->tx_ring[entry].length = -skb->len;
+ } else
+ lp->tx_ring[entry].length = -skb->len;
+
+ lp->tx_ring[entry].misc = 0x0000;
+
+ dev->stats.tx_bytes += skb->len;
+
+ /* If any part of this buffer is >16M we must copy it to a low-memory
+ buffer. */
+ if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
+ if (lance_debug > 5)
+ printk("%s: bouncing a high-memory packet (%#x).\n",
+ dev->name, (u32)isa_virt_to_bus(skb->data));
+ skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
+ lp->tx_ring[entry].base =
+ ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
+ dev_kfree_skb(skb);
+ } else {
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
+ }
+ lp->cur_tx++;
+
+ /* Trigger an immediate send poll. */
+ outw(0x0000, ioaddr+LANCE_ADDR);
+ outw(0x0048, ioaddr+LANCE_DATA);
+
+ if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
+ netif_stop_queue(dev);
+
+out:
+ spin_unlock_irqrestore(&lp->devlock, flags);
+ return NETDEV_TX_OK;
+}
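+
+/* Tx descriptor encoding used above: 'base' packs a 24-bit buffer
+ address with status bits in the top byte, so 0x83000000 is
+ OWN (0x80, hand to the chip) | STP (0x02) | ENP (0x01), i.e. a
+ complete single-buffer frame now owned by the LANCE. */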
+
+/* The LANCE interrupt handler. */
+static irqreturn_t lance_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp;
+ int csr0, ioaddr, boguscnt=10;
+ int must_restart;
+
+ ioaddr = dev->base_addr;
+ lp = dev->ml_priv;
+
+ spin_lock (&lp->devlock);
+
+ outw(0x00, dev->base_addr + LANCE_ADDR);
+ while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 &&
+ --boguscnt >= 0) {
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
+
+ must_restart = 0;
+
+ if (lance_debug > 5)
+ printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
+ dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
+
+ if (csr0 & 0x0400) /* Rx interrupt */
+ lance_rx(dev);
+
+ if (csr0 & 0x0200) { /* Tx-done interrupt */
+ int dirty_tx = lp->dirty_tx;
+
+ while (dirty_tx < lp->cur_tx) {
+ int entry = dirty_tx & TX_RING_MOD_MASK;
+ int status = lp->tx_ring[entry].base;
+
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[entry].base = 0;
+
+ if (status & 0x40000000) {
+ /* There was a major error; log it. */
+ int err_status = lp->tx_ring[entry].misc;
+ dev->stats.tx_errors++;
+ if (err_status & 0x0400)
+ dev->stats.tx_aborted_errors++;
+ if (err_status & 0x0800)
+ dev->stats.tx_carrier_errors++;
+ if (err_status & 0x1000)
+ dev->stats.tx_window_errors++;
+ if (err_status & 0x4000) {
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ dev->stats.tx_fifo_errors++;
+ /* Remove this verbosity later! */
+ printk("%s: Tx FIFO error! Status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+ } else {
+ if (status & 0x18000000)
+ dev->stats.collisions++;
+ dev->stats.tx_packets++;
+ }
+
+ /* We must free the original skb if it's not a data-only copy
+ in the bounce buffer. */
+ if (lp->tx_skbuff[entry]) {
+ dev_kfree_skb_irq(lp->tx_skbuff[entry]);
+ lp->tx_skbuff[entry] = NULL;
+ }
+ dirty_tx++;
+ }
+
+#ifndef final_version
+ if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+ printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
+ dirty_tx, lp->cur_tx,
+ netif_queue_stopped(dev) ? "yes" : "no");
+ dirty_tx += TX_RING_SIZE;
+ }
+#endif
+
+ /* if the ring is no longer full, accept more packets */
+ if (netif_queue_stopped(dev) &&
+ dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
+ netif_wake_queue (dev);
+
+ lp->dirty_tx = dirty_tx;
+ }
+
+ /* Log misc errors. */
+ if (csr0 & 0x4000)
+ dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & 0x1000)
+ dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & 0x0800) {
+ printk("%s: Bus master arbitration failure, status %4.4x.\n",
+ dev->name, csr0);
+ /* Restart the chip. */
+ must_restart = 1;
+ }
+
+ if (must_restart) {
+ /* stop the chip to clear the error condition, then restart */
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(0x0004, dev->base_addr + LANCE_DATA);
+ lance_restart(dev, 0x0002, 0);
+ }
+ }
+
+ /* Clear any other interrupt, and set interrupt enable. */
+ outw(0x0000, dev->base_addr + LANCE_ADDR);
+ outw(0x7940, dev->base_addr + LANCE_DATA);
+
+ if (lance_debug > 4)
+ printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
+ dev->name, inw(ioaddr + LANCE_ADDR),
+ inw(dev->base_addr + LANCE_DATA));
+
+ spin_unlock (&lp->devlock);
+ return IRQ_HANDLED;
+}
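+
+/* CSR0 bits used above (standard LANCE layout): the 0x8600 loop mask
+ is ERR (0x8000) | RINT (0x0400) | TINT (0x0200). Writing csr0 back
+ acknowledges the write-one-to-clear bits; masking with ~0x004f avoids
+ setting INEA/TDMD/STOP/STRT/INIT as a side effect. The final 0x7940
+ write clears BABL|CERR|MISS|MERR|IDON and sets INEA to re-enable
+ interrupts. */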
+
+static int
+lance_rx(struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+ int entry = lp->cur_rx & RX_RING_MOD_MASK;
+ int i;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while (lp->rx_ring[entry].base >= 0) {
+ int status = lp->rx_ring[entry].base >> 24;
+
+ if (status != 0x03) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with full-sized
+ buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & 0x01) /* Only count a general error at the */
+ dev->stats.rx_errors++; /* end of a packet.*/
+ if (status & 0x20)
+ dev->stats.rx_frame_errors++;
+ if (status & 0x10)
+ dev->stats.rx_over_errors++;
+ if (status & 0x08)
+ dev->stats.rx_crc_errors++;
+ if (status & 0x04)
+ dev->stats.rx_fifo_errors++;
+ lp->rx_ring[entry].base &= 0x03ffffff;
+ }
+ else
+ {
+ /* Allocate a new buffer for the packet. */
+ short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
+ struct sk_buff *skb;
+
+ if(pkt_len<60)
+ {
+ printk("%s: Runt packet!\n",dev->name);
+ dev->stats.rx_errors++;
+ }
+ else
+ {
+ skb = dev_alloc_skb(pkt_len+2);
+ if (skb == NULL)
+ {
+ printk("%s: Memory squeeze, deferring packet.\n", dev->name);
+ for (i=0; i < RX_RING_SIZE; i++)
+ if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
+ break;
+
+ if (i > RX_RING_SIZE - 2)
+ {
+ dev->stats.rx_dropped++;
+ lp->rx_ring[entry].base |= 0x80000000;
+ lp->cur_rx++;
+ }
+ break;
+ }
+ skb_reserve(skb,2); /* 16 byte align */
+ skb_put(skb,pkt_len); /* Make room */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
+ pkt_len);
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ }
+ /* The docs say that the buffer length isn't touched, but Andrew Boyd
+ of QNX reports that some revs of the 79C965 clear it. */
+ lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
+ lp->rx_ring[entry].base |= 0x80000000;
+ entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+ }
+
+ /* We should check that at least two ring entries are free. If not,
+ we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
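+
+/* Rx status byte decoded above (base >> 24, standard RMD1 layout):
+ 0x03 is STP|ENP, a complete error-free frame in one buffer. The
+ error bits are FRAM 0x20, OFLO 0x10, CRC 0x08 and BUFF 0x04; the
+ general error count is only bumped when ENP (0x01) is set, so a
+ jabber frame spanning two buffers is counted once. */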
+
+static int
+lance_close(struct net_device *dev)
+{
+ int ioaddr = dev->base_addr;
+ struct lance_private *lp = dev->ml_priv;
+
+ netif_stop_queue (dev);
+
+ if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
+ outw(112, ioaddr+LANCE_ADDR);
+ dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
+ }
+ outw(0, ioaddr+LANCE_ADDR);
+
+ if (lance_debug > 1)
+ printk("%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, inw(ioaddr+LANCE_DATA));
+
+ /* We stop the LANCE here -- it occasionally polls
+ memory if we don't. */
+ outw(0x0004, ioaddr+LANCE_DATA);
+
+ if (dev->dma != 4)
+ {
+ unsigned long flags = claim_dma_lock();
+ disable_dma(dev->dma);
+ release_dma_lock(flags);
+ }
+ free_irq(dev->irq, dev);
+
+ lance_purge_ring(dev);
+
+ return 0;
+}
+
+static struct net_device_stats *lance_get_stats(struct net_device *dev)
+{
+ struct lance_private *lp = dev->ml_priv;
+
+ if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
+ short ioaddr = dev->base_addr;
+ short saved_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->devlock, flags);
+ saved_addr = inw(ioaddr+LANCE_ADDR);
+ outw(112, ioaddr+LANCE_ADDR);
+ dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
+ outw(saved_addr, ioaddr+LANCE_ADDR);
+ spin_unlock_irqrestore(&lp->devlock, flags);
+ }
+
+ return &dev->stats;
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ short ioaddr = dev->base_addr;
+
+ outw(0, ioaddr+LANCE_ADDR);
+ outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */
+
+ if (dev->flags&IFF_PROMISC) {
+ outw(15, ioaddr+LANCE_ADDR);
+ outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int i;
+ int num_addrs=netdev_mc_count(dev);
+ if(dev->flags&IFF_ALLMULTI)
+ num_addrs=1;
+ /* FIXME: we don't build the hash filter from the list; just accept all multicast and rely on upper-layer filtering. */
+ memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
+ for (i = 0; i < 4; i++) {
+ outw(8 + i, ioaddr+LANCE_ADDR);
+ outw(multicast_table[i], ioaddr+LANCE_DATA);
+ }
+ outw(15, ioaddr+LANCE_ADDR);
+ outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
+ }
+
+ lance_restart(dev, 0x0142, 0); /* Resume normal operation */
+
+}
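+
+/* Filter programming above (standard LANCE CSR assignments): CSR8-11
+ hold the 64-bit logical-address (multicast hash) filter, written one
+ 16-bit word at a time, and CSR15 bit 0x8000 is PROM, the promiscuous
+ mode enable. Writing an all-ones filter accepts every multicast
+ frame, which is why no per-address hash bits are computed here. */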
+
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
new file mode 100644
index 000000000..0660ac584
--- /dev/null
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -0,0 +1,200 @@
+/* mvme147.c : the Linux/mvme147/lance ethernet driver
+ *
+ * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
+ * Based on the Sun Lance driver and the NetBSD HP Lance driver
+ * Uses the generic 7990.c LANCE code.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+/* Used for the temporal inet entries and routing */
+#include <linux/socket.h>
+#include <linux/route.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/mvme147hw.h>
+
+/* We have 32K of RAM for the init block and buffers. This places
+ * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
+ * buffers and 2 Tx buffers, which takes (8 + 2) * 1544 bytes.
+ */
+#define LANCE_LOG_TX_BUFFERS 1
+#define LANCE_LOG_RX_BUFFERS 3
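+
+/* With these settings we get 2^1 = 2 Tx and 2^3 = 8 Rx buffers, so the
+ * data buffers take (2 + 8) * 1544 = 15440 bytes, leaving room in the
+ * 32K for the init block and the two descriptor rings.
+ */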
+
+#include "7990.h" /* use generic LANCE code */
+
+/* Our private data structure */
+struct m147lance_private {
+ struct lance_private lance;
+ unsigned long ram;
+};
+
+/* function prototypes... This is easy because all the grot is in the
+ * generic LANCE support. All we have to support is probing for boards,
+ * plus board-specific init, open and close actions.
+ * Oh, and we need to tell the generic code how to read and write LANCE registers...
+ */
+static int m147lance_open(struct net_device *dev);
+static int m147lance_close(struct net_device *dev);
+static void m147lance_writerap(struct lance_private *lp, unsigned short value);
+static void m147lance_writerdp(struct lance_private *lp, unsigned short value);
+static unsigned short m147lance_readrdp(struct lance_private *lp);
+
+typedef void (*writerap_t)(void *, unsigned short);
+typedef void (*writerdp_t)(void *, unsigned short);
+typedef unsigned short (*readrdp_t)(void *);
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = m147lance_open,
+ .ndo_stop = m147lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_rx_mode = lance_set_multicast,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+/* Initialise the one and only on-board 7990 */
+struct net_device * __init mvme147lance_probe(int unit)
+{
+ struct net_device *dev;
+ static int called;
+ static const char name[] = "MVME147 LANCE";
+ struct m147lance_private *lp;
+ u_long *addr;
+ u_long address;
+ int err;
+
+ if (!MACH_IS_MVME147 || called)
+ return ERR_PTR(-ENODEV);
+ called++;
+
+ dev = alloc_etherdev(sizeof(struct m147lance_private));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0)
+ sprintf(dev->name, "eth%d", unit);
+
+ /* Fill the dev fields */
+ dev->base_addr = (unsigned long)MVME147_LANCE_BASE;
+ dev->netdev_ops = &lance_netdev_ops;
+ dev->dma = 0;
+
+ addr = (u_long *)ETHERNET_ADDRESS;
+ address = *addr;
+ dev->dev_addr[0] = 0x08;
+ dev->dev_addr[1] = 0x00;
+ dev->dev_addr[2] = 0x3e;
+ address = address >> 8;
+ dev->dev_addr[5] = address&0xff;
+ address = address >> 8;
+ dev->dev_addr[4] = address&0xff;
+ address = address >> 8;
+ dev->dev_addr[3] = address&0xff;
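+ /* The board stores only a serial number: the low three octets of
+ * the MAC address come from that longword, behind the fixed
+ * 08:00:3e prefix (Motorola's OUI). */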
+
+ printk("%s: MVME147 at 0x%08lx, irq %d, Hardware Address %pM\n",
+ dev->name, dev->base_addr, MVME147_LANCE_IRQ,
+ dev->dev_addr);
+
+ lp = netdev_priv(dev);
+ lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 32K */
+ if (!lp->ram) {
+ printk("%s: No memory for LANCE buffers\n", dev->name);
+ free_netdev(dev);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ lp->lance.name = name;
+ lp->lance.base = dev->base_addr;
+ lp->lance.init_block = (struct lance_init_block *)(lp->ram); /* CPU addr */
+ lp->lance.lance_init_block = (struct lance_init_block *)(lp->ram); /* LANCE addr of same RAM */
+ lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
+ lp->lance.irq = MVME147_LANCE_IRQ;
+ lp->lance.writerap = (writerap_t)m147lance_writerap;
+ lp->lance.writerdp = (writerdp_t)m147lance_writerdp;
+ lp->lance.readrdp = (readrdp_t)m147lance_readrdp;
+ lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
+ lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
+ lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
+ lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
+
+ err = register_netdev(dev);
+ if (err) {
+ free_pages(lp->ram, 3);
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+
+ return dev;
+}
+
+static void m147lance_writerap(struct lance_private *lp, unsigned short value)
+{
+ out_be16(lp->base + LANCE_RAP, value);
+}
+
+static void m147lance_writerdp(struct lance_private *lp, unsigned short value)
+{
+ out_be16(lp->base + LANCE_RDP, value);
+}
+
+static unsigned short m147lance_readrdp(struct lance_private *lp)
+{
+ return in_be16(lp->base + LANCE_RDP);
+}
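+
+/* These three hooks implement the usual LANCE two-register access
+ * pattern: RAP selects a CSR and RDP is the data window onto it. As a
+ * sketch, the generic 7990 code reads CSR0 roughly as
+ *
+ *   lp->writerap(lp, LE_CSR0);
+ *   csr0 = lp->readrdp(lp);
+ *
+ * with LE_CSR0 coming from 7990.h.
+ */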
+
+static int m147lance_open(struct net_device *dev)
+{
+ int status;
+
+ status = lance_open(dev); /* call generic lance open code */
+ if (status)
+ return status;
+ /* enable interrupts at board level. */
+ m147_pcc->lan_cntrl = 0; /* clear the interrupts (if any) */
+ m147_pcc->lan_cntrl = 0x08 | 0x04; /* Enable irq 4 */
+
+ return 0;
+}
+
+static int m147lance_close(struct net_device *dev)
+{
+ /* disable interrupts at board level */
+ m147_pcc->lan_cntrl = 0x0; /* disable interrupts */
+ lance_close(dev);
+ return 0;
+}
+
+#ifdef MODULE
+MODULE_LICENSE("GPL");
+
+static struct net_device *dev_mvme147_lance;
+int __init init_module(void)
+{
+ dev_mvme147_lance = mvme147lance_probe(-1);
+ return PTR_ERR_OR_ZERO(dev_mvme147_lance);
+}
+
+void __exit cleanup_module(void)
+{
+ struct m147lance_private *lp = netdev_priv(dev_mvme147_lance);
+ unregister_netdev(dev_mvme147_lance);
+ free_pages(lp->ram, 3);
+ free_netdev(dev_mvme147_lance);
+}
+
+#endif /* MODULE */
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
new file mode 100644
index 000000000..1cf33addd
--- /dev/null
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -0,0 +1,1252 @@
+/*
+ * ni6510 (am7990 'lance' chip) driver for Linux-net-3
+ * BETAcode v0.71 (96/09/29) for 2.0.0 (or later)
+ * copyrights (c) 1994,1995,1996 by M.Hipp
+ *
+ * This driver can handle the old ni6510 board and the newer ni6510
+ * EtherBlaster. (It probably also works with any fully
+ * NE2100-compatible card.)
+ *
+ * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
+ *
+ * This is an extension to the Linux operating system, and is covered by the
+ * same GNU General Public License that covers the Linux-kernel.
+ *
+ * comments/bugs/suggestions can be sent to:
+ * Michael Hipp
+ * email: hippm@informatik.uni-tuebingen.de
+ *
+ * sources:
+ * some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
+ * and from the original drivers by D.Becker
+ *
+ * known problems:
+ * - on some PCI boards (including my own) the card/board/ISA-bridge has
+ *   problems with bus master DMA. This results in lots of overruns.
+ *   It may help to '#define RCV_PARANOIA_CHECK' or to #undef the
+ *   XMT_VIA_SKB and RCV_VIA_SKB options .. this reduces driver performance.
+ *   Or just play with your BIOS options to optimize ISA-DMA access.
+ *   You may also want to play with the LOW_PERFORMANCE and MID_PERFORMANCE
+ *   defines -> please report your experience if you do
+ * - Harald reported that for ASUS SP3G mainboards you should use
+ *   the 'optimal settings' from the user's manual on page 3-12!
+ *
+ * credits:
+ * thanks to Jason Sullivan for sending me a ni6510 card!
+ * lots of debug runs with ASUS SP3G boards (Intel Saturn) by Harald Koenig
+ *
+ * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB)
+ * average: FTP -> 8384421 bytes received in 8.5 seconds
+ * (no RCV_VIA_SKB,no XMT_VIA_SKB,PARANOIA_CHECK,4 XMIT BUFS, 8 RCV_BUFFS)
+ * peak: FTP -> 8384421 bytes received in 7.5 seconds
+ * (RCV_VIA_SKB,XMT_VIA_SKB,no PARANOIA_CHECK,1(!) XMIT BUF, 16 RCV BUFFS)
+ */
+
+/*
+ * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK)
+ * 96.Sept.29: virt_to_bus stuff added for new memory model
+ * 96.April.29: Added Harald Koenig's Patches (MH)
+ * 96.April.13: enhanced error handling .. more tests (MH)
+ * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH)
+ * 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH)
+ * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH)
+ * hopefully no more 16MB limit
+ *
+ * 95.Nov.18: multicast tweaked (AC).
+ *
+ * 94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH)
+ *
+ * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <linux/bitops.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include "ni65.h"
+
+/*
+ * the current settings give acceptable performance.
+ * for 'RCV_PARANOIA_CHECK', read the 'known problems' part in
+ * the header of this file.
+ * 'invert' the defines for max. performance; this may cause DMA problems
+ * on some boards (e.g. on my ASUS SP3G)
+ */
+#undef XMT_VIA_SKB
+#undef RCV_VIA_SKB
+#define RCV_PARANOIA_CHECK
+
+#define MID_PERFORMANCE
+
+#if defined( LOW_PERFORMANCE )
+ static int isa0=7,isa1=7,csr80=0x0c10;
+#elif defined( MID_PERFORMANCE )
+ static int isa0=5,isa1=5,csr80=0x2810;
+#else /* high performance */
+ static int isa0=4,isa1=4,csr80=0x0017;
+#endif
+
+/*
+ * a few card/vendor specific defines
+ */
+#define NI65_ID0 0x00
+#define NI65_ID1 0x55
+#define NI65_EB_ID0 0x52
+#define NI65_EB_ID1 0x44
+#define NE2100_ID0 0x57
+#define NE2100_ID1 0x57
+
+#define PORT p->cmdr_addr
+
+/*
+ * buffer configuration
+ */
+#if 1
+#define RMDNUM 16
+#define RMDNUMMASK 0x80000000
+#else
+#define RMDNUM 8
+#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */
+#endif
+
+#if 0
+#define TMDNUM 1
+#define TMDNUMMASK 0x00000000
+#else
+#define TMDNUM 4
+#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */
+#endif
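+
+/*
+ * the masks above are the init-block ring-length fields:
+ * log2(count) << 29, i.e. log2(16) = 4 -> 0x80000000 for RMDNUM
+ * and log2(4) = 2 -> 0x40000000 for TMDNUM
+ */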
+
+/* slightly oversized */
+#define R_BUF_SIZE 1544
+#define T_BUF_SIZE 1544
+
+/*
+ * lance register defines
+ */
+#define L_DATAREG 0x00
+#define L_ADDRREG 0x02
+#define L_RESET 0x04
+#define L_CONFIG 0x05
+#define L_BUSIF 0x06
+
+/*
+ * to access the lance/am7990-regs, you have to write
+ * reg-number into L_ADDRREG, then you can access it using L_DATAREG
+ */
+#define CSR0 0x00
+#define CSR1 0x01
+#define CSR2 0x02
+#define CSR3 0x03
+
+#define INIT_RING_BEFORE_START 0x1
+#define FULL_RESET_ON_ERROR 0x2
+
+#if 0
+#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
+ outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
+#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
+ inw(PORT+L_DATAREG))
+#if 0
+#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
+#else
+#define writedatareg(val) { writereg(val,CSR0); }
+#endif
+#else
+#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
+#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
+#define writedatareg(val) { writereg(val,CSR0); }
+#endif
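+
+/*
+ * usage sketch: readreg(CSR0) returns the current CSR0 contents,
+ * writereg(CSR0_STOP,CSR0) halts the chip, and writedatareg(val) is
+ * shorthand for writereg(val,CSR0), since CSR0 stays selected during
+ * normal operation
+ */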
+
+static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };
+
+static struct card {
+ unsigned char id0,id1;
+ short id_offset;
+ short total_size;
+ short cmd_offset;
+ short addr_offset;
+ unsigned char *vendor_id;
+ char *cardname;
+ unsigned long config;
+} cards[] = {
+ {
+ .id0 = NI65_ID0,
+ .id1 = NI65_ID1,
+ .id_offset = 0x0e,
+ .total_size = 0x10,
+ .cmd_offset = 0x0,
+ .addr_offset = 0x8,
+ .vendor_id = ni_vendor,
+ .cardname = "ni6510",
+ .config = 0x1,
+ },
+ {
+ .id0 = NI65_EB_ID0,
+ .id1 = NI65_EB_ID1,
+ .id_offset = 0x0e,
+ .total_size = 0x18,
+ .cmd_offset = 0x10,
+ .addr_offset = 0x0,
+ .vendor_id = ni_vendor,
+ .cardname = "ni6510 EtherBlaster",
+ .config = 0x2,
+ },
+ {
+ .id0 = NE2100_ID0,
+ .id1 = NE2100_ID1,
+ .id_offset = 0x0e,
+ .total_size = 0x18,
+ .cmd_offset = 0x10,
+ .addr_offset = 0x0,
+ .vendor_id = NULL,
+ .cardname = "generic NE2100",
+ .config = 0x0,
+ },
+};
+#define NUM_CARDS 3
+
+struct priv
+{
+ struct rmd rmdhead[RMDNUM];
+ struct tmd tmdhead[TMDNUM];
+ struct init_block ib;
+ int rmdnum;
+ int tmdnum,tmdlast;
+#ifdef RCV_VIA_SKB
+ struct sk_buff *recv_skb[RMDNUM];
+#else
+ void *recvbounce[RMDNUM];
+#endif
+#ifdef XMT_VIA_SKB
+ struct sk_buff *tmd_skb[TMDNUM];
+#endif
+ void *tmdbounce[TMDNUM];
+ int tmdbouncenum;
+ int lock,xmit_queued;
+
+ void *self;
+ int cmdr_addr;
+ int cardno;
+ int features;
+ spinlock_t ring_lock;
+};
+
+static int ni65_probe1(struct net_device *dev,int);
+static irqreturn_t ni65_interrupt(int irq, void * dev_id);
+static void ni65_recv_intr(struct net_device *dev,int);
+static void ni65_xmit_intr(struct net_device *dev,int);
+static int ni65_open(struct net_device *dev);
+static int ni65_lance_reinit(struct net_device *dev);
+static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
+static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
+ struct net_device *dev);
+static void ni65_timeout(struct net_device *dev);
+static int ni65_close(struct net_device *dev);
+static int ni65_alloc_buffer(struct net_device *dev);
+static void ni65_free_buffer(struct priv *p);
+static void set_multicast_list(struct net_device *dev);
+
+static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
+static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */
+
+static int debuglevel = 1;
+
+/*
+ * set the 'performance' registers .. we must STOP the lance for that
+ */
+static void ni65_set_performance(struct priv *p)
+{
+ writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */
+
+ if( !(cards[p->cardno].config & 0x02) )
+ return;
+
+ outw(80,PORT+L_ADDRREG);
+ if(inw(PORT+L_ADDRREG) != 80)
+ return;
+
+ writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */
+ outw(0,PORT+L_ADDRREG);
+ outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */
+ outw(1,PORT+L_ADDRREG);
+ outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */
+
+ outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */
+}
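+
+/*
+ * register numbers above (PCnet-ISA specific, assumed from the
+ * datasheet): CSR80 holds the FIFO watermarks, and the two L_BUSIF
+ * writes with address 0/1 selected set the ISA DMA read/write bus
+ * timings in 50ns units. The read-back test of '80' doubles as a
+ * check that the chip is new enough to have CSR80 at all.
+ */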
+
+/*
+ * open interface (up)
+ */
+static int ni65_open(struct net_device *dev)
+{
+ struct priv *p = dev->ml_priv;
+ int irqval = request_irq(dev->irq, ni65_interrupt,0,
+ cards[p->cardno].cardname,dev);
+ if (irqval) {
+ printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
+ dev->name,dev->irq, irqval);
+ return -EAGAIN;
+ }
+
+ if(ni65_lance_reinit(dev))
+ {
+ netif_start_queue(dev);
+ return 0;
+ }
+ else
+ {
+ free_irq(dev->irq,dev);
+ return -EAGAIN;
+ }
+}
+
+/*
+ * close interface (down)
+ */
+static int ni65_close(struct net_device *dev)
+{
+ struct priv *p = dev->ml_priv;
+
+ netif_stop_queue(dev);
+
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */
+
+#ifdef XMT_VIA_SKB
+ {
+ int i;
+ for(i=0;i<TMDNUM;i++)
+ {
+ if(p->tmd_skb[i]) {
+ dev_kfree_skb(p->tmd_skb[i]);
+ p->tmd_skb[i] = NULL;
+ }
+ }
+ }
+#endif
+ free_irq(dev->irq,dev);
+ return 0;
+}
+
+static void cleanup_card(struct net_device *dev)
+{
+ struct priv *p = dev->ml_priv;
+ disable_dma(dev->dma);
+ free_dma(dev->dma);
+ release_region(dev->base_addr, cards[p->cardno].total_size);
+ ni65_free_buffer(p);
+}
+
+/* set: io,irq,dma or set it when calling insmod */
+static int irq;
+static int io;
+static int dma;
+
+/*
+ * Probe The Card (not the lance-chip)
+ */
+struct net_device * __init ni65_probe(int unit)
+{
+ struct net_device *dev = alloc_etherdev(0);
+ static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 };
+ const int *port;
+ int err = 0;
+
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ irq = dev->irq;
+ dma = dev->dma;
+ } else {
+ dev->base_addr = io;
+ }
+
+ if (dev->base_addr > 0x1ff) { /* Check a single specified location. */
+ err = ni65_probe1(dev, dev->base_addr);
+ } else if (dev->base_addr > 0) { /* Don't probe at all. */
+ err = -ENXIO;
+ } else {
+ for (port = ports; *port && ni65_probe1(dev, *port); port++)
+ ;
+ if (!*port)
+ err = -ENODEV;
+ }
+ if (err)
+ goto out;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ return dev;
+out1:
+ cleanup_card(dev);
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static const struct net_device_ops ni65_netdev_ops = {
+ .ndo_open = ni65_open,
+ .ndo_stop = ni65_close,
+ .ndo_start_xmit = ni65_send_packet,
+ .ndo_tx_timeout = ni65_timeout,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/*
+ * this is the real card probe ..
+ */
+static int __init ni65_probe1(struct net_device *dev,int ioaddr)
+{
+ int i,j;
+ struct priv *p;
+ unsigned long flags;
+
+ dev->irq = irq;
+ dev->dma = dma;
+
+ for(i=0;i<NUM_CARDS;i++) {
+ if(!request_region(ioaddr, cards[i].total_size, cards[i].cardname))
+ continue;
+ if(cards[i].id_offset >= 0) {
+ if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
+ inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {
+ release_region(ioaddr, cards[i].total_size);
+ continue;
+ }
+ }
+ if(cards[i].vendor_id) {
+ for(j=0;j<3;j++)
+ if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j]) {
+ release_region(ioaddr, cards[i].total_size);
+ continue;
+ }
+ }
+ break;
+ }
+ if(i == NUM_CARDS)
+ return -ENODEV;
+
+ for(j=0;j<6;j++)
+ dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);
+
+ if( (j=ni65_alloc_buffer(dev)) < 0) {
+ release_region(ioaddr, cards[i].total_size);
+ return j;
+ }
+ p = dev->ml_priv;
+ p->cmdr_addr = ioaddr + cards[i].cmd_offset;
+ p->cardno = i;
+ spin_lock_init(&p->ring_lock);
+
+ printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);
+
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
+ if( (j=readreg(CSR0)) != 0x4) {
+ printk("failed.\n");
+ printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j);
+ ni65_free_buffer(p);
+ release_region(ioaddr, cards[p->cardno].total_size);
+ return -EAGAIN;
+ }
+
+ outw(88,PORT+L_ADDRREG);
+ if(inw(PORT+L_ADDRREG) == 88) {
+ unsigned long v;
+ v = inw(PORT+L_DATAREG);
+ v <<= 16;
+ outw(89,PORT+L_ADDRREG);
+ v |= inw(PORT+L_DATAREG);
+ printk("Version %#08lx, ",v);
+ p->features = INIT_RING_BEFORE_START;
+ }
+ else {
+ printk("ancient LANCE, ");
+ p->features = 0x0;
+ }
+
+ if(test_bit(0,&cards[i].config)) {
+ dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
+ dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
+ printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
+ }
+ else {
+ if(dev->dma == 0) {
+ /* 'stuck test' from lance.c */
+ unsigned long dma_channels =
+ ((inb(DMA1_STAT_REG) >> 4) & 0x0f)
+ | (inb(DMA2_STAT_REG) & 0xf0);
+ for(i=1;i<5;i++) {
+ int dma = dmatab[i];
+ if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
+ continue;
+
+ flags=claim_dma_lock();
+ disable_dma(dma);
+ set_dma_mode(dma,DMA_MODE_CASCADE);
+ enable_dma(dma);
+ release_dma_lock(flags);
+
+ ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */
+
+ flags=claim_dma_lock();
+ disable_dma(dma);
+ free_dma(dma);
+ release_dma_lock(flags);
+
+ if(readreg(CSR0) & CSR0_IDON)
+ break;
+ }
+ if(i == 5) {
+ printk("failed.\n");
+ printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name);
+ ni65_free_buffer(p);
+ release_region(ioaddr, cards[p->cardno].total_size);
+ return -EAGAIN;
+ }
+ dev->dma = dmatab[i];
+ printk("DMA %d (autodetected), ",dev->dma);
+ }
+ else
+ printk("DMA %d (assigned), ",dev->dma);
+
+ if(dev->irq < 2)
+ {
+ unsigned long irq_mask;
+
+ ni65_init_lance(p,dev->dev_addr,0,0);
+ irq_mask = probe_irq_on();
+ writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */
+ msleep(20);
+ dev->irq = probe_irq_off(irq_mask);
+ if(!dev->irq)
+ {
+ printk("Failed to detect IRQ line!\n");
+ ni65_free_buffer(p);
+ release_region(ioaddr, cards[p->cardno].total_size);
+ return -EAGAIN;
+ }
+ printk("IRQ %d (autodetected).\n",dev->irq);
+ }
+ else
+ printk("IRQ %d (assigned).\n",dev->irq);
+ }
+
+ if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0)
+ {
+ printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
+ ni65_free_buffer(p);
+ release_region(ioaddr, cards[p->cardno].total_size);
+ return -EAGAIN;
+ }
+
+ dev->base_addr = ioaddr;
+ dev->netdev_ops = &ni65_netdev_ops;
+ dev->watchdog_timeo = HZ/2;
+
+ return 0; /* everything is OK */
+}
+
+/*
+ * set lance register and trigger init
+ */
+static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
+{
+ int i;
+ u32 pib;
+
+ writereg(CSR0_CLRALL|CSR0_STOP,CSR0);
+
+ for(i=0;i<6;i++)
+ p->ib.eaddr[i] = daddr[i];
+
+ for(i=0;i<8;i++)
+ p->ib.filter[i] = filter;
+ p->ib.mode = mode;
+
+ p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK;
+ p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK;
+ writereg(0,CSR3); /* busmaster/no word-swap */
+ pib = (u32) isa_virt_to_bus(&p->ib);
+ writereg(pib & 0xffff,CSR1);
+ writereg(pib >> 16,CSR2);
+
+ writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */
+
+ for(i=0;i<32;i++)
+ {
+ mdelay(4);
+ if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) )
+ break; /* init ok ? */
+ }
+}
+
+/*
+ * allocate memory area and check the 16MB border
+ */
+static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
+{
+ struct sk_buff *skb=NULL;
+ unsigned char *ptr;
+ void *ret;
+
+ if(type) {
+ ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA);
+ if(!skb) {
+ printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
+ return NULL;
+ }
+ skb_reserve(skb,2+16);
+ skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */
+ ptr = skb->data;
+ }
+ else {
+ ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
+ if(!ret)
+ return NULL;
+ }
+ if( (u32) virt_to_phys(ptr+size) > 0x1000000) {
+ printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
+ if(type)
+ kfree_skb(skb);
+ else
+ kfree(ptr);
+ return NULL;
+ }
+ return ret;
+}
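+
+/*
+ * the 0x1000000 test above guards the ISA 24-bit address limit: the
+ * lance bus-masters with only 24 address lines, so a buffer ending
+ * above 16MB would be unreachable and has to be rejected here instead
+ * of being silently corrupted later
+ */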
+
+/*
+ * allocate all memory structures .. send/recv buffers etc ...
+ */
+static int ni65_alloc_buffer(struct net_device *dev)
+{
+ unsigned char *ptr;
+ struct priv *p;
+ int i;
+
+ /*
+ * we need 8-byte-aligned memory ..
+ */
+ ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
+ if(!ptr)
+ return -ENOMEM;
+
+ p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
+ memset((char *)p, 0, sizeof(struct priv));
+ p->self = ptr;
+
+ for(i=0;i<TMDNUM;i++)
+ {
+#ifdef XMT_VIA_SKB
+ p->tmd_skb[i] = NULL;
+#endif
+ p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
+ if(!p->tmdbounce[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+#ifdef RCV_VIA_SKB
+ p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
+ if(!p->recv_skb[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+#else
+ p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
+ if(!p->recvbounce[i]) {
+ ni65_free_buffer(p);
+ return -ENOMEM;
+ }
+#endif
+ }
+
+ return 0; /* everything is OK */
+}
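+
+/*
+ * alignment note: the init block and descriptor rings must start on an
+ * 8-byte boundary, hence the over-allocation by 8 and the rounding
+ * (((unsigned long) ptr + 7) & ~0x7) above, e.g. 0x1003 -> 0x1008
+ */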
+
+/*
+ * free buffers and private struct
+ */
+static void ni65_free_buffer(struct priv *p)
+{
+ int i;
+
+ if(!p)
+ return;
+
+ for(i=0;i<TMDNUM;i++) {
+ kfree(p->tmdbounce[i]);
+#ifdef XMT_VIA_SKB
+ if(p->tmd_skb[i])
+ dev_kfree_skb(p->tmd_skb[i]);
+#endif
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+#ifdef RCV_VIA_SKB
+ if(p->recv_skb[i])
+ dev_kfree_skb(p->recv_skb[i]);
+#else
+ kfree(p->recvbounce[i]);
+#endif
+ }
+ kfree(p->self);
+}
+
+
+/*
+ * stop and (re)start lance .. e.g after an error
+ */
+static void ni65_stop_start(struct net_device *dev,struct priv *p)
+{
+ int csr0 = CSR0_INEA;
+
+ writedatareg(CSR0_STOP);
+
+ if(debuglevel > 1)
+ printk(KERN_DEBUG "ni65_stop_start\n");
+
+ if(p->features & INIT_RING_BEFORE_START) {
+ int i;
+#ifdef XMT_VIA_SKB
+ struct sk_buff *skb_save[TMDNUM];
+#endif
+ unsigned long buffer[TMDNUM];
+ short blen[TMDNUM];
+
+ if(p->xmit_queued) {
+ while(1) {
+ if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
+ break;
+ p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
+ if(p->tmdlast == p->tmdnum)
+ break;
+ }
+ }
+
+ for(i=0;i<TMDNUM;i++) {
+ struct tmd *tmdp = p->tmdhead + i;
+#ifdef XMT_VIA_SKB
+ skb_save[i] = p->tmd_skb[i];
+#endif
+ buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
+ blen[i] = tmdp->blen;
+ tmdp->u.s.status = 0x0;
+ }
+
+ for(i=0;i<RMDNUM;i++) {
+ struct rmd *rmdp = p->rmdhead + i;
+ rmdp->u.s.status = RCV_OWN;
+ }
+ p->tmdnum = p->xmit_queued = 0;
+ writedatareg(CSR0_STRT | csr0);
+
+ for(i=0;i<TMDNUM;i++) {
+ int num = (i + p->tmdlast) & (TMDNUM-1);
+ p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
+ p->tmdhead[i].blen = blen[num];
+ if(p->tmdhead[i].u.s.status & XMIT_OWN) {
+ p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
+ p->xmit_queued = 1;
+ writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
+ }
+#ifdef XMT_VIA_SKB
+ p->tmd_skb[i] = skb_save[num];
+#endif
+ }
+ p->rmdnum = p->tmdlast = 0;
+ if(!p->lock)
+ if (p->tmdnum || !p->xmit_queued)
+ netif_wake_queue(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ }
+ else
+ writedatareg(CSR0_STRT | csr0);
+}
+
+/*
+ * init lance (write init-values .. init-buffers) (open-helper)
+ */
+static int ni65_lance_reinit(struct net_device *dev)
+{
+ int i;
+ struct priv *p = dev->ml_priv;
+ unsigned long flags;
+
+ p->lock = 0;
+ p->xmit_queued = 0;
+
+ flags=claim_dma_lock();
+ disable_dma(dev->dma); /* I've never worked with DMA, but we do it like the packet driver does */
+ set_dma_mode(dev->dma,DMA_MODE_CASCADE);
+ enable_dma(dev->dma);
+ release_dma_lock(flags);
+
+ outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
+ if( (i=readreg(CSR0) ) != 0x4)
+ {
+ printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
+ cards[p->cardno].cardname,(int) i);
+ flags=claim_dma_lock();
+ disable_dma(dev->dma);
+ release_dma_lock(flags);
+ return 0;
+ }
+
+ p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
+ for(i=0;i<TMDNUM;i++)
+ {
+ struct tmd *tmdp = p->tmdhead + i;
+#ifdef XMT_VIA_SKB
+ if(p->tmd_skb[i]) {
+ dev_kfree_skb(p->tmd_skb[i]);
+ p->tmd_skb[i] = NULL;
+ }
+#endif
+ tmdp->u.buffer = 0x0;
+ tmdp->u.s.status = XMIT_START | XMIT_END;
+ tmdp->blen = tmdp->status2 = 0;
+ }
+
+ for(i=0;i<RMDNUM;i++)
+ {
+ struct rmd *rmdp = p->rmdhead + i;
+#ifdef RCV_VIA_SKB
+ rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
+#else
+ rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]);
+#endif
+ rmdp->blen = -(R_BUF_SIZE-8);
+ rmdp->mlen = 0;
+ rmdp->u.s.status = RCV_OWN;
+ }
+
+ if(dev->flags & IFF_PROMISC)
+ ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
+ else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI)
+ ni65_init_lance(p,dev->dev_addr,0xff,0x0);
+ else
+ ni65_init_lance(p,dev->dev_addr,0x00,0x00);
+
+ /*
+ * ni65_init_lance() leaves L_ADDRREG set to CSR0
+ * FROM NOW ON WE NEVER CHANGE L_ADDRREG, SO CSR0 IS ALWAYS SELECTED
+ */
+
+ if(inw(PORT+L_DATAREG) & CSR0_IDON) {
+ ni65_set_performance(p);
+ /* init OK: start lance , enable interrupts */
+ writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
+ return 1; /* ->OK */
+ }
+ printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
+ flags=claim_dma_lock();
+ disable_dma(dev->dma);
+ release_dma_lock(flags);
+ return 0; /* ->Error */
+}
+
+/*
+ * interrupt handler
+ */
+static irqreturn_t ni65_interrupt(int irq, void * dev_id)
+{
+ int csr0 = 0;
+ struct net_device *dev = dev_id;
+ struct priv *p;
+ int bcnt = 32;
+
+ p = dev->ml_priv;
+
+ spin_lock(&p->ring_lock);
+
+ while(--bcnt) {
+ csr0 = inw(PORT+L_DATAREG);
+
+#if 0
+ writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
+#else
+ writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
+#endif
+
+ if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
+ break;
+
+ if(csr0 & CSR0_RINT) /* RECV-int? */
+ ni65_recv_intr(dev,csr0);
+ if(csr0 & CSR0_TINT) /* XMIT-int? */
+ ni65_xmit_intr(dev,csr0);
+
+ if(csr0 & CSR0_ERR)
+ {
+ if(debuglevel > 1)
+ printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
+ if(csr0 & CSR0_BABL)
+ dev->stats.tx_errors++;
+ if(csr0 & CSR0_MISS) {
+ int i;
+ for(i=0;i<RMDNUM;i++)
+ printk("%02x ",p->rmdhead[i].u.s.status);
+ printk("\n");
+ dev->stats.rx_errors++;
+ }
+ if(csr0 & CSR0_MERR) {
+ if(debuglevel > 1)
+ printk(KERN_ERR "%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
+ ni65_stop_start(dev,p);
+ }
+ }
+ }
+
+#ifdef RCV_PARANOIA_CHECK
+{
+ int j;
+ for(j=0;j<RMDNUM;j++)
+ {
+ int i, num2;
+ for(i=RMDNUM-1;i>0;i--) {
+ num2 = (p->rmdnum + i) & (RMDNUM-1);
+ if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
+ break;
+ }
+
+ if(i) {
+ int k, num1;
+ for(k=0;k<RMDNUM;k++) {
+ num1 = (p->rmdnum + k) & (RMDNUM-1);
+ if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
+ break;
+ }
+ if(!k)
+ break;
+
+ if(debuglevel > 0)
+ {
+ char buf[256],*buf1;
+ buf1 = buf;
+ for(k=0;k<RMDNUM;k++) {
+ sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
+ buf1 += 3;
+ }
+ *buf1 = 0;
+ printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
+ }
+
+ p->rmdnum = num1;
+ ni65_recv_intr(dev,csr0);
+ if((p->rmdhead[num2].u.s.status & RCV_OWN))
+ break; /* ok, we are 'in sync' again */
+ }
+ else
+ break;
+ }
+}
+#endif
+
+ if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
+ printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name);
+ ni65_stop_start(dev,p);
+ }
+ else
+ writedatareg(CSR0_INEA);
+
+ spin_unlock(&p->ring_lock);
+ return IRQ_HANDLED;
+}
+
+/*
+ * We have received an Xmit-Interrupt ..
+ * send a new packet if necessary
+ */
+static void ni65_xmit_intr(struct net_device *dev,int csr0)
+{
+ struct priv *p = dev->ml_priv;
+
+ while(p->xmit_queued)
+ {
+ struct tmd *tmdp = p->tmdhead + p->tmdlast;
+ int tmdstat = tmdp->u.s.status;
+
+ if(tmdstat & XMIT_OWN)
+ break;
+
+ if(tmdstat & XMIT_ERR)
+ {
+#if 0
+ if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
+ printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
+#endif
+ /* checking some errors */
+ if(tmdp->status2 & XMIT_RTRY)
+ dev->stats.tx_aborted_errors++;
+ if(tmdp->status2 & XMIT_LCAR)
+ dev->stats.tx_carrier_errors++;
+ if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
+ /* this stops the xmitter */
+ dev->stats.tx_fifo_errors++;
+ if(debuglevel > 0)
+ printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
+ if(p->features & INIT_RING_BEFORE_START) {
+ tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */
+ ni65_stop_start(dev,p);
+ break; /* no more Xmit processing .. */
+ }
+ else
+ ni65_stop_start(dev,p);
+ }
+ if(debuglevel > 2)
+ printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
+ if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
+ dev->stats.tx_errors++;
+ tmdp->status2 = 0;
+ }
+ else {
+ dev->stats.tx_bytes -= (short)(tmdp->blen);
+ dev->stats.tx_packets++;
+ }
+
+#ifdef XMT_VIA_SKB
+ if(p->tmd_skb[p->tmdlast]) {
+ dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]);
+ p->tmd_skb[p->tmdlast] = NULL;
+ }
+#endif
+
+ p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
+ if(p->tmdlast == p->tmdnum)
+ p->xmit_queued = 0;
+ }
+ netif_wake_queue(dev);
+}
+
+/*
+ * We have received a packet
+ */
+static void ni65_recv_intr(struct net_device *dev,int csr0)
+{
+ struct rmd *rmdp;
+ int rmdstat,len;
+ int cnt=0;
+ struct priv *p = dev->ml_priv;
+
+ rmdp = p->rmdhead + p->rmdnum;
+ while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
+ {
+ cnt++;
+ if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
+ {
+ if(!(rmdstat & RCV_ERR)) {
+ if(rmdstat & RCV_START)
+ {
+ dev->stats.rx_length_errors++;
+ printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
+ }
+ }
+ else {
+ if(debuglevel > 2)
+ printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
+ dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
+ if(rmdstat & RCV_FRAM)
+ dev->stats.rx_frame_errors++;
+ if(rmdstat & RCV_OFLO)
+ dev->stats.rx_over_errors++;
+ if(rmdstat & RCV_CRC)
+ dev->stats.rx_crc_errors++;
+ if(rmdstat & RCV_BUF_ERR)
+ dev->stats.rx_fifo_errors++;
+ }
+ if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
+ dev->stats.rx_errors++;
+ }
+ else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
+ {
+#ifdef RCV_VIA_SKB
+ struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
+ if (skb)
+ skb_reserve(skb,16);
+#else
+ struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
+#endif
+ if(skb)
+ {
+ skb_reserve(skb,2);
+#ifdef RCV_VIA_SKB
+ if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
+ skb_put(skb,len);
+ skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
+ }
+ else {
+ struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
+ skb_put(skb,R_BUF_SIZE);
+ p->recv_skb[p->rmdnum] = skb;
+ rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
+ skb = skb1;
+ skb_trim(skb,len);
+ }
+#else
+ skb_put(skb,len);
+ skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
+#endif
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ skb->protocol=eth_type_trans(skb,dev);
+ netif_rx(skb);
+ }
+ else
+ {
+ printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
+ dev->stats.rx_dropped++;
+ }
+ }
+ else {
+ printk(KERN_INFO "%s: received runt packet\n",dev->name);
+ dev->stats.rx_errors++;
+ }
+ rmdp->blen = -(R_BUF_SIZE-8);
+ rmdp->mlen = 0;
+ rmdp->u.s.status = RCV_OWN; /* change owner */
+ p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
+ rmdp = p->rmdhead + p->rmdnum;
+ }
+}
+
+/*
+ * kick xmitter ..
+ */
+
+static void ni65_timeout(struct net_device *dev)
+{
+ int i;
+ struct priv *p = dev->ml_priv;
+
+ printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name);
+ for(i=0;i<TMDNUM;i++)
+ printk("%02x ",p->tmdhead[i].u.s.status);
+ printk("\n");
+ ni65_lance_reinit(dev);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+/*
+ * Send a packet
+ */
+
+static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct priv *p = dev->ml_priv;
+
+ netif_stop_queue(dev);
+
+ if (test_and_set_bit(0, (void*)&p->lock)) {
+ printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
+ return NETDEV_TX_BUSY;
+ }
+
+ {
+ short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
+ struct tmd *tmdp;
+ unsigned long flags;
+
+#ifdef XMT_VIA_SKB
+ if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
+#endif
+
+ skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
+ skb->len > T_BUF_SIZE ? T_BUF_SIZE :
+ skb->len);
+ if (len > skb->len)
+ memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
+ dev_kfree_skb (skb);
+
+ spin_lock_irqsave(&p->ring_lock, flags);
+ tmdp = p->tmdhead + p->tmdnum;
+ tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
+ p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);
+
+#ifdef XMT_VIA_SKB
+ }
+ else {
+ spin_lock_irqsave(&p->ring_lock, flags);
+
+ tmdp = p->tmdhead + p->tmdnum;
+ tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
+ p->tmd_skb[p->tmdnum] = skb;
+ }
+#endif
+ tmdp->blen = -len;
+
+ tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
+ writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */
+
+ p->xmit_queued = 1;
+ p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
+
+ if(p->tmdnum != p->tmdlast)
+ netif_wake_queue(dev);
+
+ p->lock = 0;
+
+ spin_unlock_irqrestore(&p->ring_lock, flags);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void set_multicast_list(struct net_device *dev)
+{
+ if(!ni65_lance_reinit(dev))
+ printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
+ netif_wake_queue(dev);
+}
+
+#ifdef MODULE
+static struct net_device *dev_ni65;
+
+module_param(irq, int, 0);
+module_param(io, int, 0);
+module_param(dma, int, 0);
+MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
+MODULE_PARM_DESC(io, "ni6510 I/O base address");
+MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
+
+int __init init_module(void)
+{
+ dev_ni65 = ni65_probe(-1);
+ return PTR_ERR_OR_ZERO(dev_ni65);
+}
+
+void __exit cleanup_module(void)
+{
+ unregister_netdev(dev_ni65);
+ cleanup_card(dev_ni65);
+ free_netdev(dev_ni65);
+}
+#endif /* MODULE */
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/ni65.h b/drivers/net/ethernet/amd/ni65.h
new file mode 100644
index 000000000..e6217e35e
--- /dev/null
+++ b/drivers/net/ethernet/amd/ni65.h
@@ -0,0 +1,121 @@
+/* am7990 (lance) definitions
+ *
+ * This is an extension to the Linux operating system, and is covered by
+ * the same GNU General Public License that covers that work.
+ *
+ * Michael Hipp
+ * email: mhipp@student.uni-tuebingen.de
+ *
+ * sources: (mail me or ask archie if you need them)
+ * crynwr-packet-driver
+ */
+
+/*
+ * Control and Status Register 0 (CSR0) bit definitions
+ * (R=Readable) (W=Writeable) (S=Set on write) (C=Clear on write)
+ *
+ */
+
+#define CSR0_ERR 0x8000 /* Error summary (R) */
+#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */
+#define CSR0_CERR 0x2000 /* Collision Error (RC) */
+#define CSR0_MISS 0x1000 /* Missed packet (RC) */
+#define CSR0_MERR 0x0800 /* Memory Error (RC) */
+#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */
+#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */
+#define CSR0_IDON 0x0100 /* Initialization Done (RC) */
+#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */
+#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */
+#define CSR0_RXON 0x0020 /* Receiver on (R) */
+#define CSR0_TXON 0x0010 /* Transmitter on (R) */
+#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */
+#define CSR0_STOP 0x0004 /* Stop (RS) */
+#define CSR0_STRT 0x0002 /* Start (RS) */
+#define CSR0_INIT 0x0001 /* Initialize (RS) */
+
+#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */
+/*
+ * Initialization Block Mode operation Bit Definitions.
+ */
+
+#define M_PROM 0x8000 /* Promiscuous Mode */
+#define M_INTL 0x0040 /* Internal Loopback */
+#define M_DRTY 0x0020 /* Disable Retry */
+#define M_COLL 0x0010 /* Force Collision */
+#define M_DTCR 0x0008 /* Disable Transmit CRC */
+#define M_LOOP 0x0004 /* Loopback */
+#define M_DTX 0x0002 /* Disable the Transmitter */
+#define M_DRX 0x0001 /* Disable the Receiver */
+
+
+/*
+ * Receive message descriptor bit definitions.
+ */
+
+#define RCV_OWN 0x80 /* owner bit 0 = host, 1 = lance */
+#define RCV_ERR 0x40 /* Error Summary */
+#define RCV_FRAM 0x20 /* Framing Error */
+#define RCV_OFLO 0x10 /* Overflow Error */
+#define RCV_CRC 0x08 /* CRC Error */
+#define RCV_BUF_ERR 0x04 /* Buffer Error */
+#define RCV_START 0x02 /* Start of Packet */
+#define RCV_END 0x01 /* End of Packet */
+
+
+/*
+ * Transmit message descriptor bit definitions.
+ */
+
+#define XMIT_OWN 0x80 /* owner bit 0 = host, 1 = lance */
+#define XMIT_ERR 0x40 /* Error Summary */
+#define XMIT_RETRY 0x10 /* more than 1 retry needed to Xmit */
+#define XMIT_1_RETRY 0x08 /* one retry needed to Xmit */
+#define XMIT_DEF 0x04 /* Deferred */
+#define XMIT_START 0x02 /* Start of Packet */
+#define XMIT_END 0x01 /* End of Packet */
+
+/*
+ * transmit status (2) (valid if XMIT_ERR == 1)
+ */
+
+#define XMIT_TDRMASK 0x03ff /* time-domain-reflectometer-value */
+#define XMIT_RTRY 0x0400 /* Failed after 16 retransmissions */
+#define XMIT_LCAR 0x0800 /* Loss of Carrier */
+#define XMIT_LCOL 0x1000 /* Late collision */
+#define XMIT_RESERV 0x2000 /* Reserved */
+#define XMIT_UFLO 0x4000 /* Underflow (late memory) */
+#define XMIT_BUFF 0x8000 /* Buffering error (no ENP) */
+
+struct init_block {
+ unsigned short mode;
+ unsigned char eaddr[6];
+ unsigned char filter[8];
+ /* bit 29-31: number of rmd's (power of 2) */
+ u32 rrp; /* receive ring pointer (align 8) */
+ /* bit 29-31: number of tmd's (power of 2) */
+ u32 trp; /* transmit ring pointer (align 8) */
+};
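+
+/* Sketch (values assumed, not taken from the driver): with 2^3 = 8
+ * receive descriptors at DMA address ring_dma, the count is packed
+ * into the top three bits of the ring pointer:
+ *
+ * ib->rrp = (u32)ring_dma | (3UL << 29);
+ */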
+
+struct rmd { /* Receive Message Descriptor */
+ union {
+ volatile u32 buffer;
+ struct {
+ volatile unsigned char dummy[3];
+ volatile unsigned char status;
+ } s;
+ } u;
+ volatile short blen;
+ volatile unsigned short mlen;
+};
+
+struct tmd {
+ union {
+ volatile u32 buffer;
+ struct {
+ volatile unsigned char dummy[3];
+ volatile unsigned char status;
+ } s;
+ } u;
+ volatile unsigned short blen;
+ volatile unsigned short status2;
+};
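+
+/* Ownership handoff sketch (illustrative): the host returns a receive
+ * descriptor to the LANCE by setting the OWN bit in the high status
+ * byte:
+ *
+ * rmd->u.s.status = RCV_OWN;
+ */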
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
new file mode 100644
index 000000000..27245efe9
--- /dev/null
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -0,0 +1,1512 @@
+/* ----------------------------------------------------------------------------
+Linux PCMCIA ethernet adapter driver for the New Media Ethernet LAN.
+ nmclan_cs.c,v 0.16 1995/07/01 06:42:17 rpao Exp rpao
+
+ The Ethernet LAN uses the Advanced Micro Devices (AMD) Am79C940 Media
+ Access Controller for Ethernet (MACE). It is essentially the Am2150
+ PCMCIA Ethernet card contained in the Am2150 Demo Kit.
+
+Written by Roger C. Pao <rpao@paonet.org>
+ Copyright 1995 Roger C. Pao
+ Linux 2.5 cleanups Copyright Red Hat 2003
+
+ This software may be used and distributed according to the terms of
+ the GNU General Public License.
+
+Ported to Linux 1.3.* network driver environment by
+ Matti Aarnio <mea@utu.fi>
+
+References
+
+ Am2150 Technical Reference Manual, Revision 1.0, August 17, 1993
+ Am79C940 (MACE) Data Sheet, 1994
+ Am79C90 (C-LANCE) Data Sheet, 1994
+ Linux PCMCIA Programmer's Guide v1.17
+ /usr/src/linux/net/inet/dev.c, Linux kernel 1.2.8
+
+ Eric Mears, New Media Corporation
+ Tom Pollard, New Media Corporation
+ Dean Siasoyco, New Media Corporation
+ Ken Lesniak, Silicon Graphics, Inc. <lesniak@boston.sgi.com>
+ Donald Becker <becker@scyld.com>
+ David Hinds <dahinds@users.sourceforge.net>
+
+ The Linux client driver is based on the 3c589_cs.c client driver by
+ David Hinds.
+
+ The Linux network driver outline is based on the 3c589_cs.c driver,
+ the 8390.c driver, and the example skeleton.c kernel code, which are
+ by Donald Becker.
+
+ The Am2150 network driver hardware interface code is based on the
+ OS/9000 driver for the New Media Ethernet LAN by Eric Mears.
+
+ Special thanks for testing and help in debugging this driver goes
+ to Ken Lesniak.
+
+-------------------------------------------------------------------------------
+Driver Notes and Issues
+-------------------------------------------------------------------------------
+
+1. Developed on a Dell 320SLi
+ PCMCIA Card Services 2.6.2
+ Linux dell 1.2.10 #1 Thu Jun 29 20:23:41 PDT 1995 i386
+
+2. rc.pcmcia may require loading pcmcia_core with io_speed=300:
+ 'insmod pcmcia_core.o io_speed=300'.
+ This avoids problems on fast systems, which otherwise cause
+ rx_framecnt to return random values.
+
+3. If hot extraction does not work for you, use 'ifconfig eth0 down'
+ before extraction.
+
+4. There is a bad slow-down problem in this driver.
+
+5. Future: Multicast processing. In the meantime, do _not_ compile your
+ kernel with multicast ip enabled.
+
+-------------------------------------------------------------------------------
+History
+-------------------------------------------------------------------------------
+Log: nmclan_cs.c,v
+ * 2.5.75-ac1 2003/07/11 Alan Cox <alan@lxorguk.ukuu.org.uk>
+ * Fixed hang on card eject as we probe it
+ * Cleaned up to use new style locking.
+ *
+ * Revision 0.16 1995/07/01 06:42:17 rpao
+ * Bug fix: nmclan_reset() called CardServices incorrectly.
+ *
+ * Revision 0.15 1995/05/24 08:09:47 rpao
+ * Re-implement MULTI_TX dev->tbusy handling.
+ *
+ * Revision 0.14 1995/05/23 03:19:30 rpao
+ * Added, in nmclan_config(), "tuple.Attributes = 0;".
+ * Modified MACE ID check to ignore chip revision level.
+ * Avoid tx_free_frames race condition between _start_xmit and _interrupt.
+ *
+ * Revision 0.13 1995/05/18 05:56:34 rpao
+ * Statistics changes.
+ * Bug fix: nmclan_reset did not enable TX and RX: call restore_multicast_list.
+ * Bug fix: mace_interrupt checks ~MACE_IMR_DEFAULT. Fixes driver lockup.
+ *
+ * Revision 0.12 1995/05/14 00:12:23 rpao
+ * Statistics overhaul.
+ *
+
+95/05/13 rpao V0.10a
+ Bug fix: MACE statistics counters used wrong I/O ports.
+ Bug fix: mace_interrupt() needed to allow statistics to be
+ processed without RX or TX interrupts pending.
+95/05/11 rpao V0.10
+ Multiple transmit request processing.
+ Modified statistics to use MACE counters where possible.
+95/05/10 rpao V0.09 Bug fix: Must use IO_DATA_PATH_WIDTH_AUTO.
+ *Released
+95/05/10 rpao V0.08
+ Bug fix: Make all non-exported functions private by using
+ static keyword.
+ Bug fix: Test IntrCnt _before_ reading MACE_IR.
+95/05/10 rpao V0.07 Statistics.
+95/05/09 rpao V0.06 Fix rx_framecnt problem by addition of PCIC wait states.
+
+---------------------------------------------------------------------------- */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "nmclan_cs"
+#define DRV_VERSION "0.16"
+
+
+/* ----------------------------------------------------------------------------
+Conditional Compilation Options
+---------------------------------------------------------------------------- */
+
+#define MULTI_TX 0
+#define RESET_ON_TIMEOUT 1
+#define TX_INTERRUPTABLE 1
+#define RESET_XILINX 0
+
+/* ----------------------------------------------------------------------------
+Include Files
+---------------------------------------------------------------------------- */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ioport.h>
+#include <linux/bitops.h>
+
+#include <pcmcia/cisreg.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/ds.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+/* ----------------------------------------------------------------------------
+Defines
+---------------------------------------------------------------------------- */
+
+#define MACE_LADRF_LEN 8
+ /* 8 bytes in Logical Address Filter */
+
+/* Loop Control Defines */
+#define MACE_MAX_IR_ITERATIONS 10
+#define MACE_MAX_RX_ITERATIONS 12
+ /*
+ TBD: Dean brought this up, and I assumed the hardware would
+ handle it:
+
+ If MACE_MAX_RX_ITERATIONS is > 1, rx_framecnt may still be
+ non-zero when the isr exits. We may not get another interrupt
+ to process the remaining packets for some time.
+ */
+
+/*
+The Am2150 has a Xilinx XC3042 field programmable gate array (FPGA)
+which manages the interface between the MACE and the PCMCIA bus. It
+also includes buffer management for the 32K x 8 SRAM to control up to
+four transmit and 12 receive frames at a time.
+*/
+#define AM2150_MAX_TX_FRAMES 4
+#define AM2150_MAX_RX_FRAMES 12
+
+/* Am2150 Ethernet Card I/O Mapping */
+#define AM2150_RCV 0x00
+#define AM2150_XMT 0x04
+#define AM2150_XMT_SKIP 0x09
+#define AM2150_RCV_NEXT 0x0A
+#define AM2150_RCV_FRAME_COUNT 0x0B
+#define AM2150_MACE_BANK 0x0C
+#define AM2150_MACE_BASE 0x10
+
+/* MACE Registers */
+#define MACE_RCVFIFO 0
+#define MACE_XMTFIFO 1
+#define MACE_XMTFC 2
+#define MACE_XMTFS 3
+#define MACE_XMTRC 4
+#define MACE_RCVFC 5
+#define MACE_RCVFS 6
+#define MACE_FIFOFC 7
+#define MACE_IR 8
+#define MACE_IMR 9
+#define MACE_PR 10
+#define MACE_BIUCC 11
+#define MACE_FIFOCC 12
+#define MACE_MACCC 13
+#define MACE_PLSCC 14
+#define MACE_PHYCC 15
+#define MACE_CHIPIDL 16
+#define MACE_CHIPIDH 17
+#define MACE_IAC 18
+/* Reserved */
+#define MACE_LADRF 20
+#define MACE_PADR 21
+/* Reserved */
+/* Reserved */
+#define MACE_MPC 24
+/* Reserved */
+#define MACE_RNTPC 26
+#define MACE_RCVCC 27
+/* Reserved */
+#define MACE_UTR 29
+#define MACE_RTR1 30
+#define MACE_RTR2 31
+
+/* MACE Bit Masks */
+#define MACE_XMTRC_EXDEF 0x80
+#define MACE_XMTRC_XMTRC 0x0F
+
+#define MACE_XMTFS_XMTSV 0x80
+#define MACE_XMTFS_UFLO 0x40
+#define MACE_XMTFS_LCOL 0x20
+#define MACE_XMTFS_MORE 0x10
+#define MACE_XMTFS_ONE 0x08
+#define MACE_XMTFS_DEFER 0x04
+#define MACE_XMTFS_LCAR 0x02
+#define MACE_XMTFS_RTRY 0x01
+
+#define MACE_RCVFS_RCVSTS 0xF000
+#define MACE_RCVFS_OFLO 0x8000
+#define MACE_RCVFS_CLSN 0x4000
+#define MACE_RCVFS_FRAM 0x2000
+#define MACE_RCVFS_FCS 0x1000
+
+#define MACE_FIFOFC_RCVFC 0xF0
+#define MACE_FIFOFC_XMTFC 0x0F
+
+#define MACE_IR_JAB 0x80
+#define MACE_IR_BABL 0x40
+#define MACE_IR_CERR 0x20
+#define MACE_IR_RCVCCO 0x10
+#define MACE_IR_RNTPCO 0x08
+#define MACE_IR_MPCO 0x04
+#define MACE_IR_RCVINT 0x02
+#define MACE_IR_XMTINT 0x01
+
+#define MACE_MACCC_PROM 0x80
+#define MACE_MACCC_DXMT2PD 0x40
+#define MACE_MACCC_EMBA 0x20
+#define MACE_MACCC_RESERVED 0x10
+#define MACE_MACCC_DRCVPA 0x08
+#define MACE_MACCC_DRCVBC 0x04
+#define MACE_MACCC_ENXMT 0x02
+#define MACE_MACCC_ENRCV 0x01
+
+#define MACE_PHYCC_LNKFL 0x80
+#define MACE_PHYCC_DLNKTST 0x40
+#define MACE_PHYCC_REVPOL 0x20
+#define MACE_PHYCC_DAPC 0x10
+#define MACE_PHYCC_LRT 0x08
+#define MACE_PHYCC_ASEL 0x04
+#define MACE_PHYCC_RWAKE 0x02
+#define MACE_PHYCC_AWAKE 0x01
+
+#define MACE_IAC_ADDRCHG 0x80
+#define MACE_IAC_PHYADDR 0x04
+#define MACE_IAC_LOGADDR 0x02
+
+#define MACE_UTR_RTRE 0x80
+#define MACE_UTR_RTRD 0x40
+#define MACE_UTR_RPA 0x20
+#define MACE_UTR_FCOLL 0x10
+#define MACE_UTR_RCVFCSE 0x08
+#define MACE_UTR_LOOP_INCL_MENDEC 0x06
+#define MACE_UTR_LOOP_NO_MENDEC 0x04
+#define MACE_UTR_LOOP_EXTERNAL 0x02
+#define MACE_UTR_LOOP_NONE 0x00
+#define MACE_UTR_RESERVED 0x01
+
+/* Switch MACE register bank (only 0 and 1 are valid) */
+#define MACEBANK(win_num) outb((win_num), ioaddr + AM2150_MACE_BANK)
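+/* MACEBANK() assumes a local 'ioaddr'; any excursion off bank 0 must
+ * hold lp->bank_lock, as mace_read()/mace_write() below do. */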
+
+#define MACE_IMR_DEFAULT \
+ (0xFF - \
+ ( \
+ MACE_IR_CERR | \
+ MACE_IR_RCVCCO | \
+ MACE_IR_RNTPCO | \
+ MACE_IR_MPCO | \
+ MACE_IR_RCVINT | \
+ MACE_IR_XMTINT \
+ ) \
+ )
+#undef MACE_IMR_DEFAULT
+#define MACE_IMR_DEFAULT 0x00 /* New statistics handling: grab everything */
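+/* A set IMR bit masks the corresponding interrupt source, so 0x00
+ * leaves every MACE interrupt enabled. */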
+
+#define TX_TIMEOUT ((400*HZ)/1000)
+
+/* ----------------------------------------------------------------------------
+Type Definitions
+---------------------------------------------------------------------------- */
+
+typedef struct _mace_statistics {
+ /* MACE_XMTFS */
+ int xmtsv;
+ int uflo;
+ int lcol;
+ int more;
+ int one;
+ int defer;
+ int lcar;
+ int rtry;
+
+ /* MACE_XMTRC */
+ int exdef;
+ int xmtrc;
+
+ /* RFS1--Receive Status (RCVSTS) */
+ int oflo;
+ int clsn;
+ int fram;
+ int fcs;
+
+ /* RFS2--Runt Packet Count (RNTPC) */
+ int rfs_rntpc;
+
+ /* RFS3--Receive Collision Count (RCVCC) */
+ int rfs_rcvcc;
+
+ /* MACE_IR */
+ int jab;
+ int babl;
+ int cerr;
+ int rcvcco;
+ int rntpco;
+ int mpco;
+
+ /* MACE_MPC */
+ int mpc;
+
+ /* MACE_RNTPC */
+ int rntpc;
+
+ /* MACE_RCVCC */
+ int rcvcc;
+} mace_statistics;
+
+typedef struct _mace_private {
+ struct pcmcia_device *p_dev;
+ struct net_device_stats linux_stats; /* Linux statistics counters */
+ mace_statistics mace_stats; /* MACE chip statistics counters */
+
+ /* restore_multicast_list() state variables */
+ int multicast_ladrf[MACE_LADRF_LEN]; /* Logical address filter */
+ int multicast_num_addrs;
+
+ char tx_free_frames; /* Number of free transmit frame buffers */
+ char tx_irq_disabled; /* MACE TX interrupt disabled */
+
+ spinlock_t bank_lock; /* Must be held if you step off bank 0 */
+} mace_private;
+
+/* ----------------------------------------------------------------------------
+Private Global Variables
+---------------------------------------------------------------------------- */
+
+static const char *if_names[]={
+ "Auto", "10baseT", "BNC",
+};
+
+/* ----------------------------------------------------------------------------
+Parameters
+ These are the parameters that can be set during loading with
+ 'insmod'.
+---------------------------------------------------------------------------- */
+
+MODULE_DESCRIPTION("New Media PCMCIA ethernet driver");
+MODULE_LICENSE("GPL");
+
+#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
+
+/* 0=auto, 1=10baseT, 2 = 10base2, default=auto */
+INT_MODULE_PARM(if_port, 0);
+
+
+/* ----------------------------------------------------------------------------
+Function Prototypes
+---------------------------------------------------------------------------- */
+
+static int nmclan_config(struct pcmcia_device *link);
+static void nmclan_release(struct pcmcia_device *link);
+
+static void nmclan_reset(struct net_device *dev);
+static int mace_config(struct net_device *dev, struct ifmap *map);
+static int mace_open(struct net_device *dev);
+static int mace_close(struct net_device *dev);
+static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static void mace_tx_timeout(struct net_device *dev);
+static irqreturn_t mace_interrupt(int irq, void *dev_id);
+static struct net_device_stats *mace_get_stats(struct net_device *dev);
+static int mace_rx(struct net_device *dev, unsigned char RxCnt);
+static void restore_multicast_list(struct net_device *dev);
+static void set_multicast_list(struct net_device *dev);
+static const struct ethtool_ops netdev_ethtool_ops;
+
+
+static void nmclan_detach(struct pcmcia_device *p_dev);
+
+static const struct net_device_ops mace_netdev_ops = {
+ .ndo_open = mace_open,
+ .ndo_stop = mace_close,
+ .ndo_start_xmit = mace_start_xmit,
+ .ndo_tx_timeout = mace_tx_timeout,
+ .ndo_set_config = mace_config,
+ .ndo_get_stats = mace_get_stats,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int nmclan_probe(struct pcmcia_device *link)
+{
+ mace_private *lp;
+ struct net_device *dev;
+
+ dev_dbg(&link->dev, "nmclan_attach()\n");
+
+ /* Create new ethernet device */
+ dev = alloc_etherdev(sizeof(mace_private));
+ if (!dev)
+ return -ENOMEM;
+ lp = netdev_priv(dev);
+ lp->p_dev = link;
+ link->priv = dev;
+
+ spin_lock_init(&lp->bank_lock);
+ link->resource[0]->end = 32;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
+ link->config_flags |= CONF_ENABLE_IRQ;
+ link->config_index = 1;
+ link->config_regs = PRESENT_OPTION;
+
+ lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
+
+ dev->netdev_ops = &mace_netdev_ops;
+ dev->ethtool_ops = &netdev_ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ return nmclan_config(link);
+} /* nmclan_attach */
+
+static void nmclan_detach(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ dev_dbg(&link->dev, "nmclan_detach\n");
+
+ unregister_netdev(dev);
+
+ nmclan_release(link);
+
+ free_netdev(dev);
+} /* nmclan_detach */
+
+/* ----------------------------------------------------------------------------
+mace_read
+ Reads a MACE register. This is bank independent; however, the
+ caller must ensure that this call is not interruptable. We are
+ assuming that during normal operation, the MACE is always in
+ bank 0.
+---------------------------------------------------------------------------- */
+static int mace_read(mace_private *lp, unsigned int ioaddr, int reg)
+{
+ int data = 0xFF;
+ unsigned long flags;
+
+ switch (reg >> 4) {
+ case 0: /* register 0-15 */
+ data = inb(ioaddr + AM2150_MACE_BASE + reg);
+ break;
+ case 1: /* register 16-31 */
+ spin_lock_irqsave(&lp->bank_lock, flags);
+ MACEBANK(1);
+ data = inb(ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
+ MACEBANK(0);
+ spin_unlock_irqrestore(&lp->bank_lock, flags);
+ break;
+ }
+ return data & 0xFF;
+} /* mace_read */
+
+/* ----------------------------------------------------------------------------
+mace_write
+ Writes to a MACE register. This is bank independent; however,
+ the caller must ensure that this call is not interruptable. We
+ are assuming that during normal operation, the MACE is always in
+ bank 0.
+---------------------------------------------------------------------------- */
+static void mace_write(mace_private *lp, unsigned int ioaddr, int reg,
+ int data)
+{
+ unsigned long flags;
+
+ switch (reg >> 4) {
+ case 0: /* register 0-15 */
+ outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + reg);
+ break;
+ case 1: /* register 16-31 */
+ spin_lock_irqsave(&lp->bank_lock, flags);
+ MACEBANK(1);
+ outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
+ MACEBANK(0);
+ spin_unlock_irqrestore(&lp->bank_lock, flags);
+ break;
+ }
+} /* mace_write */
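+
+/* Usage sketch (mirrors nmclan_config() below): the chip ID registers
+ * live in bank 1, and each accessor hides the bank switch and locking:
+ *
+ * id = mace_read(lp, ioaddr, MACE_CHIPIDL) |
+ * (mace_read(lp, ioaddr, MACE_CHIPIDH) << 8);
+ */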
+
+/* ----------------------------------------------------------------------------
+mace_init
+ Resets the MACE chip.
+---------------------------------------------------------------------------- */
+static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
+{
+ int i;
+ int ct = 0;
+
+ /* MACE Software reset */
+ mace_write(lp, ioaddr, MACE_BIUCC, 1);
+ while (mace_read(lp, ioaddr, MACE_BIUCC) & 0x01) {
+ /* Wait for the reset bit to clear automatically (<= 200ns) */
+ if (++ct > 500) {
+ pr_err("reset failed, card removed?\n");
+ return -1;
+ }
+ udelay(1);
+ }
+ mace_write(lp, ioaddr, MACE_BIUCC, 0);
+
+ /* The Am2150 requires that the MACE FIFOs operate in burst mode. */
+ mace_write(lp, ioaddr, MACE_FIFOCC, 0x0F);
+
+ mace_write(lp,ioaddr, MACE_RCVFC, 0); /* Disable Auto Strip Receive */
+ mace_write(lp, ioaddr, MACE_IMR, 0xFF); /* Disable all interrupts until _open */
+
+ /*
+ * Bit 2-1 PORTSEL[1-0] Port Select.
+ * 00 AUI/10Base-2
+ * 01 10Base-T
+ * 10 DAI Port (reserved in Am2150)
+ * 11 GPSI
+ * For this card, only the first two are valid.
+ * So, PLSCC should be set to
+ * 0x00 for 10Base-2
+ * 0x02 for 10Base-T
+ * Or just set ASEL in PHYCC below!
+ */
+ switch (if_port) {
+ case 1:
+ mace_write(lp, ioaddr, MACE_PLSCC, 0x02);
+ break;
+ case 2:
+ mace_write(lp, ioaddr, MACE_PLSCC, 0x00);
+ break;
+ default:
+ mace_write(lp, ioaddr, MACE_PHYCC, /* ASEL */ 4);
+ /* ASEL Auto Select. When set, the PORTSEL[1-0] bits are overridden,
+ and the MACE device will automatically select the operating media
+ interface port. */
+ break;
+ }
+
+ mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_PHYADDR);
+ /* Poll ADDRCHG bit */
+ ct = 0;
+ while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG) {
+ if (++ct > 500) {
+ pr_err("ADDRCHG timeout, card removed?\n");
+ return -1;
+ }
+ }
+ /* Set PADR register */
+ for (i = 0; i < ETH_ALEN; i++)
+ mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]);
+
+ /* MAC Configuration Control Register should be written last */
+ /* Let set_multicast_list set this. */
+ /* mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); */
+ mace_write(lp, ioaddr, MACE_MACCC, 0x00);
+ return 0;
+} /* mace_init */
+
+static int nmclan_config(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+ mace_private *lp = netdev_priv(dev);
+ u8 *buf;
+ size_t len;
+ int i, ret;
+ unsigned int ioaddr;
+
+ dev_dbg(&link->dev, "nmclan_config\n");
+
+ link->io_lines = 5;
+ ret = pcmcia_request_io(link);
+ if (ret)
+ goto failed;
+ ret = pcmcia_request_irq(link, mace_interrupt);
+ if (ret)
+ goto failed;
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
+
+ dev->irq = link->irq;
+ dev->base_addr = link->resource[0]->start;
+
+ ioaddr = dev->base_addr;
+
+ /* Read the ethernet address from the CIS. */
+ len = pcmcia_get_tuple(link, 0x80, &buf);
+ if (!buf || len < ETH_ALEN) {
+ kfree(buf);
+ goto failed;
+ }
+ memcpy(dev->dev_addr, buf, ETH_ALEN);
+ kfree(buf);
+
+ /* Verify configuration by reading the MACE ID. */
+ {
+ char sig[2];
+
+ sig[0] = mace_read(lp, ioaddr, MACE_CHIPIDL);
+ sig[1] = mace_read(lp, ioaddr, MACE_CHIPIDH);
+ if ((sig[0] == 0x40) && ((sig[1] & 0x0F) == 0x09)) {
+ dev_dbg(&link->dev, "nmclan_cs configured: mace id=%x %x\n",
+ sig[0], sig[1]);
+ } else {
+ pr_notice("mace id not found: %x %x should be 0x40 0x?9\n",
+ sig[0], sig[1]);
+ return -ENODEV;
+ }
+ }
+
+ if(mace_init(lp, ioaddr, dev->dev_addr) == -1)
+ goto failed;
+
+ /* The if_port symbol can be set when the module is loaded */
+ if (if_port <= 2)
+ dev->if_port = if_port;
+ else
+ pr_notice("invalid if_port requested\n");
+
+ SET_NETDEV_DEV(dev, &link->dev);
+
+ i = register_netdev(dev);
+ if (i != 0) {
+ pr_notice("register_netdev() failed\n");
+ goto failed;
+ }
+
+ netdev_info(dev, "nmclan: port %#3lx, irq %d, %s port, hw_addr %pM\n",
+ dev->base_addr, dev->irq, if_names[dev->if_port], dev->dev_addr);
+ return 0;
+
+failed:
+ nmclan_release(link);
+ return -ENODEV;
+} /* nmclan_config */
+
+static void nmclan_release(struct pcmcia_device *link)
+{
+ dev_dbg(&link->dev, "nmclan_release\n");
+ pcmcia_disable_device(link);
+}
+
+static int nmclan_suspend(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open)
+ netif_device_detach(dev);
+
+ return 0;
+}
+
+static int nmclan_resume(struct pcmcia_device *link)
+{
+ struct net_device *dev = link->priv;
+
+ if (link->open) {
+ nmclan_reset(dev);
+ netif_device_attach(dev);
+ }
+
+ return 0;
+}
+
+
+/* ----------------------------------------------------------------------------
+nmclan_reset
+ Reset and restore all of the Xilinx and MACE registers.
+---------------------------------------------------------------------------- */
+static void nmclan_reset(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+
+#if RESET_XILINX
+ struct pcmcia_device *link = &lp->link;
+ u8 OrigCorValue;
+
+ /* Save original COR value */
+ pcmcia_read_config_byte(link, CISREG_COR, &OrigCorValue);
+
+ /* Reset Xilinx */
+ dev_dbg(&link->dev, "nmclan_reset: OrigCorValue=0x%x, resetting...\n",
+ OrigCorValue);
+ pcmcia_write_config_byte(link, CISREG_COR, COR_SOFT_RESET);
+ /* Need to wait for 20 ms for PCMCIA to finish reset. */
+
+ /* Restore original COR configuration index */
+ pcmcia_write_config_byte(link, CISREG_COR,
+ (COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK)));
+#endif /* #if RESET_XILINX */
+
+ /* Xilinx is now completely reset along with the MACE chip. */
+ lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
+
+ /* Reinitialize the MACE chip for operation. */
+ mace_init(lp, dev->base_addr, dev->dev_addr);
+ mace_write(lp, dev->base_addr, MACE_IMR, MACE_IMR_DEFAULT);
+
+ /* Restore the multicast list and enable TX and RX. */
+ restore_multicast_list(dev);
+} /* nmclan_reset */
+
+/* ----------------------------------------------------------------------------
+mace_config
+ [Someone tell me what this is supposed to do? Is if_port a defined
+ standard? If so, there should be defines to indicate 1=10Base-T,
+ 2=10Base-2, etc. including limited automatic detection.]
+---------------------------------------------------------------------------- */
+static int mace_config(struct net_device *dev, struct ifmap *map)
+{
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (map->port <= 2) {
+ dev->if_port = map->port;
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
+ } else
+ return -EINVAL;
+ }
+ return 0;
+} /* mace_config */
+
+/* ----------------------------------------------------------------------------
+mace_open
+ Open device driver.
+---------------------------------------------------------------------------- */
+static int mace_open(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ mace_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+
+ if (!pcmcia_dev_present(link))
+ return -ENODEV;
+
+ link->open++;
+
+ MACEBANK(0);
+
+ netif_start_queue(dev);
+ nmclan_reset(dev);
+
+ return 0; /* Always succeed */
+} /* mace_open */
+
+/* ----------------------------------------------------------------------------
+mace_close
+ Closes device driver.
+---------------------------------------------------------------------------- */
+static int mace_close(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ mace_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+
+ dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
+
+ /* Mask off all interrupts from the MACE chip. */
+ outb(0xFF, ioaddr + AM2150_MACE_BASE + MACE_IMR);
+
+ link->open--;
+ netif_stop_queue(dev);
+
+ return 0;
+} /* mace_close */
+
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "PCMCIA 0x%lx", dev->base_addr);
+}
+
+static const struct ethtool_ops netdev_ethtool_ops = {
+ .get_drvinfo = netdev_get_drvinfo,
+};
+
+/* ----------------------------------------------------------------------------
+mace_start_xmit
+ This routine begins the packet transmit function. When completed,
+ it will generate a transmit interrupt.
+
+ According to /usr/src/linux/net/inet/dev.c, if _start_xmit
+ returns 0, the "packet is now solely the responsibility of the
+ driver." If _start_xmit returns non-zero, the "transmission
+ failed, put skb back into a list."
+---------------------------------------------------------------------------- */
+
+static void mace_tx_timeout(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+
+ netdev_notice(dev, "transmit timed out -- ");
+#if RESET_ON_TIMEOUT
+ pr_cont("resetting card\n");
+ pcmcia_reset_card(link->socket);
+#else /* #if RESET_ON_TIMEOUT */
+ pr_cont("NOT resetting card\n");
+#endif /* #if RESET_ON_TIMEOUT */
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+}
+
+static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+
+ netif_stop_queue(dev);
+
+ pr_debug("%s: mace_start_xmit(length = %ld) called.\n",
+ dev->name, (long)skb->len);
+
+#if (!TX_INTERRUPTABLE)
+ /* Disable MACE TX interrupts. */
+ outb(MACE_IMR_DEFAULT | MACE_IR_XMTINT,
+ ioaddr + AM2150_MACE_BASE + MACE_IMR);
+ lp->tx_irq_disabled=1;
+#endif /* #if (!TX_INTERRUPTABLE) */
+
+ {
+ /* This block must not be interrupted by another transmit request!
+ mace_tx_timeout will take care of timer-based retransmissions from
+ the upper layers. The interrupt handler is guaranteed never to
+ service a transmit interrupt while we are in here.
+ */
+
+ lp->linux_stats.tx_bytes += skb->len;
+ lp->tx_free_frames--;
+
+ /* WARNING: Write the _exact_ number of bytes written in the header! */
+ /* Put out the word header [must be an outw()] . . . */
+ outw(skb->len, ioaddr + AM2150_XMT);
+ /* . . . and the packet [may be any combination of outw() and outb()] */
+ outsw(ioaddr + AM2150_XMT, skb->data, skb->len >> 1);
+ if (skb->len & 1) {
+ /* Odd byte transfer */
+ outb(skb->data[skb->len-1], ioaddr + AM2150_XMT);
+ }
+
+#if MULTI_TX
+ if (lp->tx_free_frames > 0)
+ netif_start_queue(dev);
+#endif /* #if MULTI_TX */
+ }
+
+#if (!TX_INTERRUPTABLE)
+ /* Re-enable MACE TX interrupts. */
+ lp->tx_irq_disabled=0;
+ outb(MACE_IMR_DEFAULT, ioaddr + AM2150_MACE_BASE + MACE_IMR);
+#endif /* #if (!TX_INTERRUPTABLE) */
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+} /* mace_start_xmit */
+
+/* ----------------------------------------------------------------------------
+mace_interrupt
+ The interrupt handler.
+---------------------------------------------------------------------------- */
+static irqreturn_t mace_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *) dev_id;
+ mace_private *lp = netdev_priv(dev);
+ unsigned int ioaddr;
+ int status;
+ int IntrCnt = MACE_MAX_IR_ITERATIONS;
+
+ if (dev == NULL) {
+ pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n",
+ irq);
+ return IRQ_NONE;
+ }
+
+ ioaddr = dev->base_addr;
+
+ if (lp->tx_irq_disabled) {
+ /* The outer test already guarantees tx_irq_disabled here. */
+ const char *msg = "Interrupt with tx_irq_disabled";
+ netdev_notice(dev, "%s [isr=%02X, imr=%02X]\n",
+ msg,
+ inb(ioaddr + AM2150_MACE_BASE + MACE_IR),
+ inb(ioaddr + AM2150_MACE_BASE + MACE_IMR));
+ /* WARNING: MACE_IR has been read! */
+ return IRQ_NONE;
+ }
+
+ if (!netif_device_present(dev)) {
+ netdev_dbg(dev, "interrupt from dead card\n");
+ return IRQ_NONE;
+ }
+
+ do {
+ /* WARNING: MACE_IR is a READ/CLEAR port! */
+ status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);
+ if (!(status & ~MACE_IMR_DEFAULT) && IntrCnt == MACE_MAX_IR_ITERATIONS)
+ return IRQ_NONE;
+
+ pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);
+
+ if (status & MACE_IR_RCVINT) {
+ mace_rx(dev, MACE_MAX_RX_ITERATIONS);
+ }
+
+ if (status & MACE_IR_XMTINT) {
+ unsigned char fifofc;
+ unsigned char xmtrc;
+ unsigned char xmtfs;
+
+ fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC);
+ if ((fifofc & MACE_FIFOFC_XMTFC)==0) {
+ lp->linux_stats.tx_errors++;
+ outb(0xFF, ioaddr + AM2150_XMT_SKIP);
+ }
+
+ /* Transmit Retry Count (XMTRC, reg 4) */
+ xmtrc = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTRC);
+ if (xmtrc & MACE_XMTRC_EXDEF) lp->mace_stats.exdef++;
+ lp->mace_stats.xmtrc += (xmtrc & MACE_XMTRC_XMTRC);
+
+ if (
+ (xmtfs = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTFS)) &
+ MACE_XMTFS_XMTSV /* Transmit Status Valid */
+ ) {
+ lp->mace_stats.xmtsv++;
+
+ if (xmtfs & ~MACE_XMTFS_XMTSV) {
+ if (xmtfs & MACE_XMTFS_UFLO) {
+ /* Underflow. Indicates that the Transmit FIFO emptied before
+ the end of frame was reached. */
+ lp->mace_stats.uflo++;
+ }
+ if (xmtfs & MACE_XMTFS_LCOL) {
+ /* Late Collision */
+ lp->mace_stats.lcol++;
+ }
+ if (xmtfs & MACE_XMTFS_MORE) {
+ /* MORE than one retry was needed */
+ lp->mace_stats.more++;
+ }
+ if (xmtfs & MACE_XMTFS_ONE) {
+ /* Exactly ONE retry occurred */
+ lp->mace_stats.one++;
+ }
+ if (xmtfs & MACE_XMTFS_DEFER) {
+ /* Transmission was deferred */
+ lp->mace_stats.defer++;
+ }
+ if (xmtfs & MACE_XMTFS_LCAR) {
+ /* Loss of carrier */
+ lp->mace_stats.lcar++;
+ }
+ if (xmtfs & MACE_XMTFS_RTRY) {
+ /* Retry error: transmit aborted after 16 attempts */
+ lp->mace_stats.rtry++;
+ }
+ } /* if (xmtfs & ~MACE_XMTFS_XMTSV) */
+
+ } /* if (xmtfs & MACE_XMTFS_XMTSV) */
+
+ lp->linux_stats.tx_packets++;
+ lp->tx_free_frames++;
+ netif_wake_queue(dev);
+ } /* if (status & MACE_IR_XMTINT) */
+
+ if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) {
+ if (status & MACE_IR_JAB) {
+ /* Jabber Error. Excessive transmit duration (20-150ms). */
+ lp->mace_stats.jab++;
+ }
+ if (status & MACE_IR_BABL) {
+ /* Babble Error. >1518 bytes transmitted. */
+ lp->mace_stats.babl++;
+ }
+ if (status & MACE_IR_CERR) {
+ /* Collision Error. CERR indicates the absence of the
+ Signal Quality Error Test message after a packet
+ transmission. */
+ lp->mace_stats.cerr++;
+ }
+ if (status & MACE_IR_RCVCCO) {
+ /* Receive Collision Count Overflow; */
+ lp->mace_stats.rcvcco++;
+ }
+ if (status & MACE_IR_RNTPCO) {
+ /* Runt Packet Count Overflow */
+ lp->mace_stats.rntpco++;
+ }
+ if (status & MACE_IR_MPCO) {
+ /* Missed Packet Count Overflow */
+ lp->mace_stats.mpco++;
+ }
+ } /* if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) */
+
+ } while ((status & ~MACE_IMR_DEFAULT) && (--IntrCnt));
+
+ return IRQ_HANDLED;
+} /* mace_interrupt */
+
+/* ----------------------------------------------------------------------------
+mace_rx
+ Receives packets.
+---------------------------------------------------------------------------- */
+static int mace_rx(struct net_device *dev, unsigned char RxCnt)
+{
+ mace_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ unsigned char rx_framecnt;
+ unsigned short rx_status;
+
+ while (
+ ((rx_framecnt = inb(ioaddr + AM2150_RCV_FRAME_COUNT)) > 0) &&
+ (rx_framecnt <= 12) && /* rx_framecnt==0xFF if card is extracted. */
+ (RxCnt--)
+ ) {
+ rx_status = inw(ioaddr + AM2150_RCV);
+
+ pr_debug("%s: in mace_rx(), framecnt 0x%X, rx_status"
+ " 0x%X.\n", dev->name, rx_framecnt, rx_status);
+
+ if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */
+ lp->linux_stats.rx_errors++;
+ if (rx_status & MACE_RCVFS_OFLO) {
+ lp->mace_stats.oflo++;
+ }
+ if (rx_status & MACE_RCVFS_CLSN) {
+ lp->mace_stats.clsn++;
+ }
+ if (rx_status & MACE_RCVFS_FRAM) {
+ lp->mace_stats.fram++;
+ }
+ if (rx_status & MACE_RCVFS_FCS) {
+ lp->mace_stats.fcs++;
+ }
+ } else {
+ short pkt_len = (rx_status & ~MACE_RCVFS_RCVSTS) - 4;
+ /* Auto Strip is off, always subtract 4 */
+ struct sk_buff *skb;
+
+ lp->mace_stats.rfs_rntpc += inb(ioaddr + AM2150_RCV);
+ /* runt packet count */
+ lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV);
+ /* rcv collision count */
+
+ pr_debug(" receiving packet size 0x%X rx_status"
+ " 0x%X.\n", pkt_len, rx_status);
+
+ skb = netdev_alloc_skb(dev, pkt_len + 2);
+
+ if (skb != NULL) {
+ skb_reserve(skb, 2);
+ insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
+ if (pkt_len & 1)
+ *(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV);
+ skb->protocol = eth_type_trans(skb, dev);
+
+ netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
+
+ lp->linux_stats.rx_packets++;
+ lp->linux_stats.rx_bytes += pkt_len;
+ outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
+ continue;
+ } else {
+ pr_debug("%s: couldn't allocate a sk_buff of size"
+ " %d.\n", dev->name, pkt_len);
+ lp->linux_stats.rx_dropped++;
+ }
+ }
+ outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
+ } /* while */
+
+ return 0;
+} /* mace_rx */
+
+/* ----------------------------------------------------------------------------
+pr_linux_stats
+---------------------------------------------------------------------------- */
+static void pr_linux_stats(struct net_device_stats *pstats)
+{
+ pr_debug("pr_linux_stats\n");
+ pr_debug(" rx_packets=%-7ld tx_packets=%ld\n",
+ (long)pstats->rx_packets, (long)pstats->tx_packets);
+ pr_debug(" rx_errors=%-7ld tx_errors=%ld\n",
+ (long)pstats->rx_errors, (long)pstats->tx_errors);
+ pr_debug(" rx_dropped=%-7ld tx_dropped=%ld\n",
+ (long)pstats->rx_dropped, (long)pstats->tx_dropped);
+ pr_debug(" multicast=%-7ld collisions=%ld\n",
+ (long)pstats->multicast, (long)pstats->collisions);
+
+ pr_debug(" rx_length_errors=%-7ld rx_over_errors=%ld\n",
+ (long)pstats->rx_length_errors, (long)pstats->rx_over_errors);
+ pr_debug(" rx_crc_errors=%-7ld rx_frame_errors=%ld\n",
+ (long)pstats->rx_crc_errors, (long)pstats->rx_frame_errors);
+ pr_debug(" rx_fifo_errors=%-7ld rx_missed_errors=%ld\n",
+ (long)pstats->rx_fifo_errors, (long)pstats->rx_missed_errors);
+
+ pr_debug(" tx_aborted_errors=%-7ld tx_carrier_errors=%ld\n",
+ (long)pstats->tx_aborted_errors, (long)pstats->tx_carrier_errors);
+ pr_debug(" tx_fifo_errors=%-7ld tx_heartbeat_errors=%ld\n",
+ (long)pstats->tx_fifo_errors, (long)pstats->tx_heartbeat_errors);
+ pr_debug(" tx_window_errors=%ld\n",
+ (long)pstats->tx_window_errors);
+} /* pr_linux_stats */
+
+/* ----------------------------------------------------------------------------
+pr_mace_stats
+---------------------------------------------------------------------------- */
+static void pr_mace_stats(mace_statistics *pstats)
+{
+ pr_debug("pr_mace_stats\n");
+
+ pr_debug(" xmtsv=%-7d uflo=%d\n",
+ pstats->xmtsv, pstats->uflo);
+ pr_debug(" lcol=%-7d more=%d\n",
+ pstats->lcol, pstats->more);
+ pr_debug(" one=%-7d defer=%d\n",
+ pstats->one, pstats->defer);
+ pr_debug(" lcar=%-7d rtry=%d\n",
+ pstats->lcar, pstats->rtry);
+
+ /* MACE_XMTRC */
+ pr_debug(" exdef=%-7d xmtrc=%d\n",
+ pstats->exdef, pstats->xmtrc);
+
+ /* RFS1--Receive Status (RCVSTS) */
+ pr_debug(" oflo=%-7d clsn=%d\n",
+ pstats->oflo, pstats->clsn);
+ pr_debug(" fram=%-7d fcs=%d\n",
+ pstats->fram, pstats->fcs);
+
+ /* RFS2--Runt Packet Count (RNTPC) */
+ /* RFS3--Receive Collision Count (RCVCC) */
+ pr_debug(" rfs_rntpc=%-7d rfs_rcvcc=%d\n",
+ pstats->rfs_rntpc, pstats->rfs_rcvcc);
+
+ /* MACE_IR */
+ pr_debug(" jab=%-7d babl=%d\n",
+ pstats->jab, pstats->babl);
+ pr_debug(" cerr=%-7d rcvcco=%d\n",
+ pstats->cerr, pstats->rcvcco);
+ pr_debug(" rntpco=%-7d mpco=%d\n",
+ pstats->rntpco, pstats->mpco);
+
+ /* MACE_MPC */
+ pr_debug(" mpc=%d\n", pstats->mpc);
+
+ /* MACE_RNTPC */
+ pr_debug(" rntpc=%d\n", pstats->rntpc);
+
+ /* MACE_RCVCC */
+ pr_debug(" rcvcc=%d\n", pstats->rcvcc);
+
+} /* pr_mace_stats */
+
+/* ----------------------------------------------------------------------------
+update_stats
+ Update statistics. We change to register window 1, so this
+ should be run single-threaded if the device is active. This is
+ expected to be a rare operation, and it's simpler for the rest
+ of the driver to assume that window 0 is always valid rather
+ than use a special window-state variable.
+
+ oflo & uflo should _never_ occur since it would mean the Xilinx
+ was not able to transfer data between the MACE FIFO and the
+ card's SRAM fast enough. If this happens, something is
+ seriously wrong with the hardware.
+---------------------------------------------------------------------------- */
+static void update_stats(unsigned int ioaddr, struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+
+ lp->mace_stats.rcvcc += mace_read(lp, ioaddr, MACE_RCVCC);
+ lp->mace_stats.rntpc += mace_read(lp, ioaddr, MACE_RNTPC);
+ lp->mace_stats.mpc += mace_read(lp, ioaddr, MACE_MPC);
+ /* At this point, mace_stats is fully updated for this call.
+ We may now update the linux_stats. */
+
+ /* The MACE has no equivalent for linux_stats field which are commented
+ out. */
+
+ /* lp->linux_stats.multicast; */
+ lp->linux_stats.collisions =
+ lp->mace_stats.rcvcco * 256 + lp->mace_stats.rcvcc;
+ /* Collision: The MACE may retry sending a packet 15 times
+ before giving up. The retry count is in XMTRC.
+ Does each retry constitute a collision?
+ If so, why doesn't the RCVCC record these collisions? */
+
+ /* detailed rx_errors: */
+ lp->linux_stats.rx_length_errors =
+ lp->mace_stats.rntpco * 256 + lp->mace_stats.rntpc;
+ /* lp->linux_stats.rx_over_errors */
+ lp->linux_stats.rx_crc_errors = lp->mace_stats.fcs;
+ lp->linux_stats.rx_frame_errors = lp->mace_stats.fram;
+ lp->linux_stats.rx_fifo_errors = lp->mace_stats.oflo;
+ lp->linux_stats.rx_missed_errors =
+ lp->mace_stats.mpco * 256 + lp->mace_stats.mpc;
+
+ /* detailed tx_errors */
+ lp->linux_stats.tx_aborted_errors = lp->mace_stats.rtry;
+ lp->linux_stats.tx_carrier_errors = lp->mace_stats.lcar;
+ /* LCAR usually results from bad cabling. */
+ lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo;
+ lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr;
+ /* lp->linux_stats.tx_window_errors; */
+} /* update_stats */
+
+/* ----------------------------------------------------------------------------
+mace_get_stats
+ Gathers ethernet statistics from the MACE chip.
+---------------------------------------------------------------------------- */
+static struct net_device_stats *mace_get_stats(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+
+ update_stats(dev->base_addr, dev);
+
+ pr_debug("%s: updating the statistics.\n", dev->name);
+ pr_linux_stats(&lp->linux_stats);
+ pr_mace_stats(&lp->mace_stats);
+
+ return &lp->linux_stats;
+} /* mace_get_stats */
+
+/* ----------------------------------------------------------------------------
+updateCRC
+ Modified from Am79C90 data sheet.
+---------------------------------------------------------------------------- */
+
+#ifdef BROKEN_MULTICAST
+
+static void updateCRC(int *CRC, int bit)
+{
+ static const int poly[]={
+ 1,1,1,0, 1,1,0,1,
+ 1,0,1,1, 1,0,0,0,
+ 1,0,0,0, 0,0,1,1,
+ 0,0,1,0, 0,0,0,0
+ }; /* CRC polynomial. poly[n] = coefficient of the x**n term of the
+ CRC generator polynomial. */
+
+ int j;
+
+ /* shift CRC and control bit (CRC[32]) */
+ for (j = 32; j > 0; j--)
+ CRC[j] = CRC[j-1];
+ CRC[0] = 0;
+
+ /* If bit XOR(control bit) = 1, set CRC = CRC XOR polynomial. */
+ if (bit ^ CRC[32])
+ for (j = 0; j < 32; j++)
+ CRC[j] ^= poly[j];
+} /* updateCRC */
+
+/* ----------------------------------------------------------------------------
+BuildLAF
+ Build logical address filter.
+ Modified from Am79C90 data sheet.
+
+Input
+ ladrf: logical address filter (contents initialized to 0)
+ adr: ethernet address
+---------------------------------------------------------------------------- */
+static void BuildLAF(int *ladrf, int *adr)
+{
+ int CRC[33]={1}; /* CRC register, 1 word/bit + extra control bit */
+
+ int i, byte; /* temporary array indices */
+ int hashcode; /* the output object */
+
+ CRC[32]=0;
+
+ for (byte = 0; byte < 6; byte++)
+ for (i = 0; i < 8; i++)
+ updateCRC(CRC, (adr[byte] >> i) & 1);
+
+ hashcode = 0;
+ for (i = 0; i < 6; i++)
+ hashcode = (hashcode << 1) + CRC[i];
+
+ byte = hashcode >> 3;
+ ladrf[byte] |= (1 << (hashcode & 7));
+
+#ifdef PCMCIA_DEBUG
+ if (0)
+ printk(KERN_DEBUG " adr =%pM\n", adr);
+ printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode);
+ for (i = 0; i < 8; i++)
+ pr_cont(" %02X", ladrf[i]);
+ pr_cont("\n");
+#endif
+} /* BuildLAF */
+
+/* ----------------------------------------------------------------------------
+restore_multicast_list
+ Restores the multicast filter for MACE chip to the last
+ set_multicast_list() call.
+
+Input
+ multicast_num_addrs
+ multicast_ladrf[]
+---------------------------------------------------------------------------- */
+static void restore_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+ int num_addrs = lp->multicast_num_addrs;
+ int *ladrf = lp->multicast_ladrf;
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ pr_debug("%s: restoring Rx mode to %d addresses.\n",
+ dev->name, num_addrs);
+
+ if (num_addrs > 0) {
+
+ pr_debug("Attempt to restore multicast list detected.\n");
+
+ mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_LOGADDR);
+ /* Poll ADDRCHG bit */
+ while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG)
+ ;
+ /* Set LADRF register */
+ for (i = 0; i < MACE_LADRF_LEN; i++)
+ mace_write(lp, ioaddr, MACE_LADRF, ladrf[i]);
+
+ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_RCVFCSE | MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+
+ } else if (num_addrs < 0) {
+
+ /* Promiscuous mode: receive all packets */
+ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC,
+ MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV
+ );
+
+ } else {
+
+ /* Normal mode */
+ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+
+ }
+} /* restore_multicast_list */
+
+/* ----------------------------------------------------------------------------
+set_multicast_list
+ Set or clear the multicast filter for this adaptor.
+
+Input
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+Output
+ multicast_num_addrs
+ multicast_ladrf[]
+---------------------------------------------------------------------------- */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+ int adr[ETH_ALEN] = {0}; /* Ethernet address */
+ struct netdev_hw_addr *ha;
+
+#ifdef PCMCIA_DEBUG
+ {
+ static int old;
+ if (netdev_mc_count(dev) != old) {
+ old = netdev_mc_count(dev);
+ pr_debug("%s: setting Rx mode to %d addresses.\n",
+ dev->name, old);
+ }
+ }
+#endif
+
+ /* Set multicast_num_addrs. */
+ lp->multicast_num_addrs = netdev_mc_count(dev);
+
+ /* Set multicast_ladrf. */
+ if (lp->multicast_num_addrs > 0) {
+ /* Calculate multicast logical address filter */
+ memset(lp->multicast_ladrf, 0, sizeof(lp->multicast_ladrf));
+ netdev_for_each_mc_addr(ha, dev) {
+ int i;
+ /* BuildLAF() expects one address octet per int element. */
+ for (i = 0; i < ETH_ALEN; i++)
+ adr[i] = ha->addr[i];
+ BuildLAF(lp->multicast_ladrf, adr);
+ }
+ }
+
+ restore_multicast_list(dev);
+
+} /* set_multicast_list */
+
+#endif /* BROKEN_MULTICAST */
+
+static void restore_multicast_list(struct net_device *dev)
+{
+ unsigned int ioaddr = dev->base_addr;
+ mace_private *lp = netdev_priv(dev);
+
+ pr_debug("%s: restoring Rx mode to %d addresses.\n", dev->name,
+ lp->multicast_num_addrs);
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Promiscuous mode: receive all packets */
+ mace_write(lp,ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC,
+ MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV
+ );
+ } else {
+ /* Normal mode */
+ mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
+ mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
+ }
+} /* restore_multicast_list */
+
+static void set_multicast_list(struct net_device *dev)
+{
+ mace_private *lp = netdev_priv(dev);
+
+#ifdef PCMCIA_DEBUG
+ {
+ static int old;
+ if (netdev_mc_count(dev) != old) {
+ old = netdev_mc_count(dev);
+ pr_debug("%s: setting Rx mode to %d addresses.\n",
+ dev->name, old);
+ }
+ }
+#endif
+
+ lp->multicast_num_addrs = netdev_mc_count(dev);
+ restore_multicast_list(dev);
+
+} /* set_multicast_list */
+
+static const struct pcmcia_device_id nmclan_ids[] = {
+ PCMCIA_DEVICE_PROD_ID12("New Media Corporation", "Ethernet", 0x085a850b, 0x00b2e941),
+ PCMCIA_DEVICE_PROD_ID12("Portable Add-ons", "Ethernet+", 0xebf1d60, 0xad673aaf),
+ PCMCIA_DEVICE_NULL,
+};
+MODULE_DEVICE_TABLE(pcmcia, nmclan_ids);
+
+static struct pcmcia_driver nmclan_cs_driver = {
+ .owner = THIS_MODULE,
+ .name = "nmclan_cs",
+ .probe = nmclan_probe,
+ .remove = nmclan_detach,
+ .id_table = nmclan_ids,
+ .suspend = nmclan_suspend,
+ .resume = nmclan_resume,
+};
+module_pcmcia_driver(nmclan_cs_driver);
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
new file mode 100644
index 000000000..bc8b04f42
--- /dev/null
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -0,0 +1,2989 @@
+/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
+/*
+ * Copyright 1996-1999 Thomas Bogendoerfer
+ *
+ * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
+ *
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency.
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ * This driver is for PCnet32 and PCnetPCI based ethercards
+ */
+/**************************************************************************
+ * 23 Oct, 2000.
+ * Fixed a few bugs, related to running the controller in 32bit mode.
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ *
+ *************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define DRV_NAME "pcnet32"
+#define DRV_VERSION "1.35"
+#define DRV_RELDATE "21.Apr.2008"
+#define PFX DRV_NAME ": "
+
+static const char *const version =
+ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/moduleparam.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include <asm/dma.h>
+#include <asm/irq.h>
+
+/*
+ * PCI device identifiers for "new style" Linux PCI Device Drivers
+ */
+static const struct pci_device_id pcnet32_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
+
+ /*
+ * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
+ * the incorrect vendor id.
+ */
+ { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
+
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
+
+static int cards_found;
+
+/*
+ * VLB I/O addresses
+ */
+static unsigned int pcnet32_portlist[] =
+ { 0x300, 0x320, 0x340, 0x360, 0 };
+
+static int pcnet32_debug;
+static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
+static int pcnet32vlb; /* check for VLB cards ? */
+
+static struct net_device *pcnet32_dev;
+
+static int max_interrupt_work = 2;
+static int rx_copybreak = 200;
+
+#define PCNET32_PORT_AUI 0x00
+#define PCNET32_PORT_10BT 0x01
+#define PCNET32_PORT_GPSI 0x02
+#define PCNET32_PORT_MII 0x03
+
+#define PCNET32_PORT_PORTSEL 0x03
+#define PCNET32_PORT_ASEL 0x04
+#define PCNET32_PORT_100 0x40
+#define PCNET32_PORT_FD 0x80
+
+#define PCNET32_DMA_MASK 0xffffffff
+
+#define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ))
+#define PCNET32_BLINK_TIMEOUT (jiffies + (HZ/4))
+
+/*
+ * table to translate option values from tulip
+ * to internal options
+ */
+static const unsigned char options_mapping[] = {
+ PCNET32_PORT_ASEL, /* 0 Auto-select */
+ PCNET32_PORT_AUI, /* 1 BNC/AUI */
+ PCNET32_PORT_AUI, /* 2 AUI/BNC */
+ PCNET32_PORT_ASEL, /* 3 not supported */
+ PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */
+ PCNET32_PORT_ASEL, /* 5 not supported */
+ PCNET32_PORT_ASEL, /* 6 not supported */
+ PCNET32_PORT_ASEL, /* 7 not supported */
+ PCNET32_PORT_ASEL, /* 8 not supported */
+ PCNET32_PORT_MII, /* 9 MII 10baseT */
+ PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */
+ PCNET32_PORT_MII, /* 11 MII (autosel) */
+ PCNET32_PORT_10BT, /* 12 10BaseT */
+ PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */
+ /* 14 MII 100BaseTx-FD */
+ PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
+ PCNET32_PORT_ASEL /* 15 not supported */
+};
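+
+/* Usage sketch (an assumed simplification of the probe logic): the
+ * per-card 'options' module parameter indexes this table, e.g.:
+ *
+ * lp->options = options_mapping[options[cards_found] & 0x0f];
+ */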
+
+static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Loopback test (offline)"
+};
+
+#define PCNET32_TEST_LEN ARRAY_SIZE(pcnet32_gstrings_test)
+
+#define PCNET32_NUM_REGS 136
+
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+static int options[MAX_UNITS];
+static int full_duplex[MAX_UNITS];
+static int homepna[MAX_UNITS];
+
+/*
+ * Theory of Operation
+ *
+ * This driver uses the same software structure as the normal lance
+ * driver. So look for a verbose description in lance.c. The differences
+ * to the normal lance driver is the use of the 32bit mode of PCnet32
+ * and PCnetPCI chips. Because these chips are 32bit chips, there is no
+ * 16MB limitation and we don't need bounce buffers.
+ */
+
+/*
+ * Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ * The defaults below give 16 Tx buffers (log2 = 4) and 32 Rx buffers
+ * (log2 = 5); each ring may grow to at most 512 entries (log2 = 9).
+ */
+#ifndef PCNET32_LOG_TX_BUFFERS
+#define PCNET32_LOG_TX_BUFFERS 4
+#define PCNET32_LOG_RX_BUFFERS 5
+#define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */
+#define PCNET32_LOG_MAX_RX_BUFFERS 9
+#endif
+
+#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS))
+#define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS))
+
+#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
+#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
+
+#define PKT_BUF_SKB 1544
+/* actual buffer length after being aligned */
+#define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN)
+/* chip wants twos complement of the (aligned) buffer length */
+#define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB)
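+/* Worked example, assuming NET_IP_ALIGN == 2: PKT_BUF_SIZE is 1542
+ * (0x0606), so the descriptor's buf_length holds -1542, which is
+ * 0xF9FA as a 16-bit two's complement value. */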
+
+/* Offsets from base I/O address. */
+#define PCNET32_WIO_RDP 0x10
+#define PCNET32_WIO_RAP 0x12
+#define PCNET32_WIO_RESET 0x14
+#define PCNET32_WIO_BDP 0x16
+
+#define PCNET32_DWIO_RDP 0x10
+#define PCNET32_DWIO_RAP 0x14
+#define PCNET32_DWIO_RESET 0x18
+#define PCNET32_DWIO_BDP 0x1C
+
+#define PCNET32_TOTAL_SIZE 0x20
+
+#define CSR0 0
+#define CSR0_INIT 0x1
+#define CSR0_START 0x2
+#define CSR0_STOP 0x4
+#define CSR0_TXPOLL 0x8
+#define CSR0_INTEN 0x40
+#define CSR0_IDON 0x0100
+#define CSR0_NORMAL (CSR0_START | CSR0_INTEN)
+#define PCNET32_INIT_LOW 1
+#define PCNET32_INIT_HIGH 2
+#define CSR3 3
+#define CSR4 4
+#define CSR5 5
+#define CSR5_SUSPEND 0x0001
+#define CSR15 15
+#define PCNET32_MC_FILTER 8
+
+#define PCNET32_79C970A 0x2621
+
+/* The PCNET32 Rx and Tx ring descriptors. */
+struct pcnet32_rx_head {
+ __le32 base;
+ __le16 buf_length; /* two's complement of length */
+ __le16 status;
+ __le32 msg_length;
+ __le32 reserved;
+};
+
+struct pcnet32_tx_head {
+ __le32 base;
+ __le16 length; /* two's complement of length */
+ __le16 status;
+ __le32 misc;
+ __le32 reserved;
+};
+
+/* The PCNET32 32-Bit initialization block, described in databook. */
+struct pcnet32_init_block {
+ __le16 mode;
+ __le16 tlen_rlen;
+ u8 phys_addr[6];
+ __le16 reserved;
+ __le32 filter[2];
+ /* Receive and transmit ring base, along with extra bits. */
+ __le32 rx_ring;
+ __le32 tx_ring;
+};
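+
+/* tlen_rlen packs the ring sizes as log2 values, TLEN in bits 15-12
+ * and RLEN in bits 7-4; with the defaults above this is presumably
+ * cpu_to_le16((PCNET32_LOG_TX_BUFFERS << 12) |
+ * (PCNET32_LOG_RX_BUFFERS << 4)). */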
+
+/* PCnet32 access functions */
+struct pcnet32_access {
+ u16 (*read_csr) (unsigned long, int);
+ void (*write_csr) (unsigned long, int, u16);
+ u16 (*read_bcr) (unsigned long, int);
+ void (*write_bcr) (unsigned long, int, u16);
+ u16 (*read_rap) (unsigned long);
+ void (*write_rap) (unsigned long, u16);
+ void (*reset) (unsigned long);
+};
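+
+/* Illustrative use (lp->a points at pcnet32_wio or pcnet32_dwio,
+ * defined below): stopping the chip is the same call either way:
+ *
+ * lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
+ */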
+
+/*
+ * The first field of pcnet32_private is read by the ethernet device
+ * so the structure should be allocated using pci_alloc_consistent().
+ */
+struct pcnet32_private {
+ struct pcnet32_init_block *init_block;
+ /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
+ struct pcnet32_rx_head *rx_ring;
+ struct pcnet32_tx_head *tx_ring;
+ dma_addr_t init_dma_addr;/* DMA address of beginning of the init block,
+ returned by pci_alloc_consistent */
+ struct pci_dev *pci_dev;
+ const char *name;
+ /* The saved address of a sent-in-place packet/buffer, for dev_kfree_skb(). */
+ struct sk_buff **tx_skbuff;
+ struct sk_buff **rx_skbuff;
+ dma_addr_t *tx_dma_addr;
+ dma_addr_t *rx_dma_addr;
+ const struct pcnet32_access *a;
+ spinlock_t lock; /* Guard lock */
+ unsigned int cur_rx, cur_tx; /* The next free ring entry */
+ unsigned int rx_ring_size; /* current rx ring size */
+ unsigned int tx_ring_size; /* current tx ring size */
+ unsigned int rx_mod_mask; /* rx ring modular mask */
+ unsigned int tx_mod_mask; /* tx ring modular mask */
+ unsigned short rx_len_bits;
+ unsigned short tx_len_bits;
+ dma_addr_t rx_ring_dma_addr;
+ dma_addr_t tx_ring_dma_addr;
+ unsigned int dirty_rx, /* ring entries to be freed. */
+ dirty_tx;
+
+ struct net_device *dev;
+ struct napi_struct napi;
+ char tx_full;
+ char phycount; /* number of phys found */
+ int options;
+ unsigned int shared_irq:1, /* shared irq possible */
+ dxsuflo:1, /* disable transmit stop on uflo */
+ mii:1; /* mii port available */
+ struct net_device *next;
+ struct mii_if_info mii_if;
+ struct timer_list watchdog_timer;
+ u32 msg_enable; /* debug message level */
+
+ /* each bit indicates an available PHY */
+ u32 phymask;
+ unsigned short chip_version; /* which variant this is */
+
+ /* saved registers during ethtool blink */
+ u16 save_regs[4];
+};
+
+static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
+static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
+static int pcnet32_open(struct net_device *);
+static int pcnet32_init_ring(struct net_device *);
+static netdev_tx_t pcnet32_start_xmit(struct sk_buff *,
+ struct net_device *);
+static void pcnet32_tx_timeout(struct net_device *dev);
+static irqreturn_t pcnet32_interrupt(int, void *);
+static int pcnet32_close(struct net_device *);
+static struct net_device_stats *pcnet32_get_stats(struct net_device *);
+static void pcnet32_load_multicast(struct net_device *dev);
+static void pcnet32_set_multicast_list(struct net_device *);
+static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
+static void pcnet32_watchdog(struct net_device *);
+static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
+ int val);
+static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
+static void pcnet32_ethtool_test(struct net_device *dev,
+ struct ethtool_test *eth_test, u64 * data);
+static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
+static int pcnet32_get_regs_len(struct net_device *dev);
+static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *ptr);
+static void pcnet32_purge_tx_ring(struct net_device *dev);
+static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
+static void pcnet32_free_ring(struct net_device *dev);
+static void pcnet32_check_media(struct net_device *dev, int verbose);
+
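+/* 16-bit "word I/O" accessors: select a register by writing its index to
+ * the Register Address Port (RAP), then move data through the Register
+ * Data Port (RDP) or Bus configuration register Data Port (BDP).
+ */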
+static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
+{
+ outw(index, addr + PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_RDP);
+}
+
+static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
+{
+ outw(index, addr + PCNET32_WIO_RAP);
+ outw(val, addr + PCNET32_WIO_RDP);
+}
+
+static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
+{
+ outw(index, addr + PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_BDP);
+}
+
+static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
+{
+ outw(index, addr + PCNET32_WIO_RAP);
+ outw(val, addr + PCNET32_WIO_BDP);
+}
+
+static u16 pcnet32_wio_read_rap(unsigned long addr)
+{
+ return inw(addr + PCNET32_WIO_RAP);
+}
+
+static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
+{
+ outw(val, addr + PCNET32_WIO_RAP);
+}
+
+static void pcnet32_wio_reset(unsigned long addr)
+{
+ inw(addr + PCNET32_WIO_RESET);
+}
+
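+/* Probe helper: write a harmless register index (88) to RAP and read it
+ * back; the value only sticks if the chip is responding to 16-bit accesses.
+ */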
+static int pcnet32_wio_check(unsigned long addr)
+{
+ outw(88, addr + PCNET32_WIO_RAP);
+ return inw(addr + PCNET32_WIO_RAP) == 88;
+}
+
+static const struct pcnet32_access pcnet32_wio = {
+ .read_csr = pcnet32_wio_read_csr,
+ .write_csr = pcnet32_wio_write_csr,
+ .read_bcr = pcnet32_wio_read_bcr,
+ .write_bcr = pcnet32_wio_write_bcr,
+ .read_rap = pcnet32_wio_read_rap,
+ .write_rap = pcnet32_wio_write_rap,
+ .reset = pcnet32_wio_reset
+};
+
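+/* 32-bit "double-word I/O" accessors: the same RAP-then-data pattern as
+ * above, but through 32-bit ports; only the low 16 bits carry data.
+ */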
+static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
+{
+ outl(index, addr + PCNET32_DWIO_RAP);
+ return inl(addr + PCNET32_DWIO_RDP) & 0xffff;
+}
+
+static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
+{
+ outl(index, addr + PCNET32_DWIO_RAP);
+ outl(val, addr + PCNET32_DWIO_RDP);
+}
+
+static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
+{
+ outl(index, addr + PCNET32_DWIO_RAP);
+ return inl(addr + PCNET32_DWIO_BDP) & 0xffff;
+}
+
+static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
+{
+ outl(index, addr + PCNET32_DWIO_RAP);
+ outl(val, addr + PCNET32_DWIO_BDP);
+}
+
+static u16 pcnet32_dwio_read_rap(unsigned long addr)
+{
+ return inl(addr + PCNET32_DWIO_RAP) & 0xffff;
+}
+
+static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
+{
+ outl(val, addr + PCNET32_DWIO_RAP);
+}
+
+static void pcnet32_dwio_reset(unsigned long addr)
+{
+ inl(addr + PCNET32_DWIO_RESET);
+}
+
+static int pcnet32_dwio_check(unsigned long addr)
+{
+ outl(88, addr + PCNET32_DWIO_RAP);
+ return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88;
+}
+
+static const struct pcnet32_access pcnet32_dwio = {
+ .read_csr = pcnet32_dwio_read_csr,
+ .write_csr = pcnet32_dwio_write_csr,
+ .read_bcr = pcnet32_dwio_read_bcr,
+ .write_bcr = pcnet32_dwio_write_bcr,
+ .read_rap = pcnet32_dwio_read_rap,
+ .write_rap = pcnet32_dwio_write_rap,
+ .reset = pcnet32_dwio_reset
+};
+
+static void pcnet32_netif_stop(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ napi_disable(&lp->napi);
+ netif_tx_disable(dev);
+}
+
+static void pcnet32_netif_start(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ ulong ioaddr = dev->base_addr;
+ u16 val;
+
+ netif_wake_queue(dev);
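+ /* clear the interrupt masks in CSR3, as pcnet32_poll() does on completion */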
+ val = lp->a->read_csr(ioaddr, CSR3);
+ val &= 0x00ff;
+ lp->a->write_csr(ioaddr, CSR3, val);
+ napi_enable(&lp->napi);
+}
+
+/*
+ * Allocate space for the new sized tx ring.
+ * Free old resources
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
+ */
+static void pcnet32_realloc_tx_ring(struct net_device *dev,
+ struct pcnet32_private *lp,
+ unsigned int size)
+{
+ dma_addr_t new_ring_dma_addr;
+ dma_addr_t *new_dma_addr_list;
+ struct pcnet32_tx_head *new_tx_ring;
+ struct sk_buff **new_skb_list;
+ unsigned int entries = BIT(size);
+
+ pcnet32_purge_tx_ring(dev);
+
+ new_tx_ring =
+ pci_zalloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) * entries,
+ &new_ring_dma_addr);
+ if (new_tx_ring == NULL)
+ return;
+
+ new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
+ if (!new_dma_addr_list)
+ goto free_new_tx_ring;
+
+ new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
+ if (!new_skb_list)
+ goto free_new_lists;
+
+ kfree(lp->tx_skbuff);
+ kfree(lp->tx_dma_addr);
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+ lp->tx_ring, lp->tx_ring_dma_addr);
+
+ lp->tx_ring_size = entries;
+ lp->tx_mod_mask = lp->tx_ring_size - 1;
+ lp->tx_len_bits = (size << 12);
+ lp->tx_ring = new_tx_ring;
+ lp->tx_ring_dma_addr = new_ring_dma_addr;
+ lp->tx_dma_addr = new_dma_addr_list;
+ lp->tx_skbuff = new_skb_list;
+ return;
+
+free_new_lists:
+ kfree(new_dma_addr_list);
+free_new_tx_ring:
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) * entries,
+ new_tx_ring,
+ new_ring_dma_addr);
+}
+
+/*
+ * Allocate space for the new sized rx ring.
+ * Re-use old receive buffers.
+ * alloc extra buffers
+ * free unneeded buffers
+ * Save new resources.
+ * Any failure keeps old resources.
+ * Must be called with lp->lock held.
+ */
+static void pcnet32_realloc_rx_ring(struct net_device *dev,
+ struct pcnet32_private *lp,
+ unsigned int size)
+{
+ dma_addr_t new_ring_dma_addr;
+ dma_addr_t *new_dma_addr_list;
+ struct pcnet32_rx_head *new_rx_ring;
+ struct sk_buff **new_skb_list;
+ int new, overlap;
+ unsigned int entries = BIT(size);
+
+ new_rx_ring =
+ pci_zalloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) * entries,
+ &new_ring_dma_addr);
+ if (new_rx_ring == NULL)
+ return;
+
+ new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
+ if (!new_dma_addr_list)
+ goto free_new_rx_ring;
+
+ new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
+ if (!new_skb_list)
+ goto free_new_lists;
+
+ /* first copy the current receive buffers */
+ overlap = min(entries, lp->rx_ring_size);
+ for (new = 0; new < overlap; new++) {
+ new_rx_ring[new] = lp->rx_ring[new];
+ new_dma_addr_list[new] = lp->rx_dma_addr[new];
+ new_skb_list[new] = lp->rx_skbuff[new];
+ }
+ /* now allocate any new buffers needed */
+ for (; new < entries; new++) {
+ struct sk_buff *rx_skbuff;
+ new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB);
+ rx_skbuff = new_skb_list[new];
+ if (!rx_skbuff) {
+ /* keep the original lists and buffers */
+ netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n",
+ __func__);
+ goto free_all_new;
+ }
+ skb_reserve(rx_skbuff, NET_IP_ALIGN);
+
+ new_dma_addr_list[new] =
+ pci_map_single(lp->pci_dev, rx_skbuff->data,
+ PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev,
+ new_dma_addr_list[new])) {
+ netif_err(lp, drv, dev, "%s dma mapping failed\n",
+ __func__);
+ dev_kfree_skb(new_skb_list[new]);
+ goto free_all_new;
+ }
+ new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
+ new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
+ new_rx_ring[new].status = cpu_to_le16(0x8000);
+ }
+ /* and free any unneeded buffers */
+ for (; new < lp->rx_ring_size; new++) {
+ if (lp->rx_skbuff[new]) {
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ lp->rx_dma_addr[new]))
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_dma_addr[new],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(lp->rx_skbuff[new]);
+ }
+ }
+
+ kfree(lp->rx_skbuff);
+ kfree(lp->rx_dma_addr);
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ lp->rx_ring_size, lp->rx_ring,
+ lp->rx_ring_dma_addr);
+
+ lp->rx_ring_size = entries;
+ lp->rx_mod_mask = lp->rx_ring_size - 1;
+ lp->rx_len_bits = (size << 4);
+ lp->rx_ring = new_rx_ring;
+ lp->rx_ring_dma_addr = new_ring_dma_addr;
+ lp->rx_dma_addr = new_dma_addr_list;
+ lp->rx_skbuff = new_skb_list;
+ return;
+
+free_all_new:
+ while (--new >= lp->rx_ring_size) {
+ if (new_skb_list[new]) {
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ new_dma_addr_list[new]))
+ pci_unmap_single(lp->pci_dev,
+ new_dma_addr_list[new],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(new_skb_list[new]);
+ }
+ }
+ kfree(new_skb_list);
+free_new_lists:
+ kfree(new_dma_addr_list);
+free_new_rx_ring:
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) * entries,
+ new_rx_ring,
+ new_ring_dma_addr);
+}
+
+static void pcnet32_purge_rx_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int i;
+
+ /* free all allocated skbuffs */
+ for (i = 0; i < lp->rx_ring_size; i++) {
+ lp->rx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ if (lp->rx_skbuff[i]) {
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ lp->rx_dma_addr[i]))
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_dma_addr[i],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(lp->rx_skbuff[i]);
+ }
+ lp->rx_skbuff[i] = NULL;
+ lp->rx_dma_addr[i] = 0;
+ }
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void pcnet32_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ pcnet32_interrupt(0, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
+
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ mii_ethtool_gset(&lp->mii_if, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ r = 0;
+ }
+ return r;
+}
+
+static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
+
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ r = mii_ethtool_sset(&lp->mii_if, cmd);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return r;
+}
+
+static void pcnet32_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ if (lp->pci_dev)
+ strlcpy(info->bus_info, pci_name(lp->pci_dev),
+ sizeof(info->bus_info));
+ else
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "VLB 0x%lx", dev->base_addr);
+}
+
+static u32 pcnet32_get_link(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (lp->mii) {
+ r = mii_link_ok(&lp->mii_if);
+ } else if (lp->chip_version >= PCNET32_79C970A) {
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
+ } else { /* cannot detect link on really old chips */
+ r = 1;
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return r;
+}
+
+static u32 pcnet32_get_msglevel(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ return lp->msg_enable;
+}
+
+static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ lp->msg_enable = value;
+}
+
+static int pcnet32_nway_reset(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ int r = -EOPNOTSUPP;
+
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ r = mii_nway_restart(&lp->mii_if);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return r;
+}
+
+static void pcnet32_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ ering->tx_max_pending = TX_MAX_RING_SIZE;
+ ering->tx_pending = lp->tx_ring_size;
+ ering->rx_max_pending = RX_MAX_RING_SIZE;
+ ering->rx_pending = lp->rx_ring_size;
+}
+
+static int pcnet32_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ unsigned int size;
+ ulong ioaddr = dev->base_addr;
+ int i;
+
+ if (ering->rx_mini_pending || ering->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(dev))
+ pcnet32_netif_stop(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
+
+ size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
+
+ /* set the minimum ring size to 4, to allow the loopback test to work
+ * unchanged.
+ */
+ for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
+ if (size <= (1 << i))
+ break;
+ }
+ if ((1 << i) != lp->tx_ring_size)
+ pcnet32_realloc_tx_ring(dev, lp, i);
+
+ size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
+ for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
+ if (size <= (1 << i))
+ break;
+ }
+ if ((1 << i) != lp->rx_ring_size)
+ pcnet32_realloc_rx_ring(dev, lp, i);
+
+ lp->napi.weight = lp->rx_ring_size / 2;
+
+ if (netif_running(dev)) {
+ pcnet32_netif_start(dev);
+ pcnet32_restart(dev, CSR0_NORMAL);
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
+ lp->rx_ring_size, lp->tx_ring_size);
+
+ return 0;
+}
+
+static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
+}
+
+static int pcnet32_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return PCNET32_TEST_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void pcnet32_ethtool_test(struct net_device *dev,
+ struct ethtool_test *test, u64 * data)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int rc;
+
+ if (test->flags == ETH_TEST_FL_OFFLINE) {
+ rc = pcnet32_loopback_test(dev, data);
+ if (rc) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Loopback test failed\n");
+ test->flags |= ETH_TEST_FL_FAILED;
+ } else
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Loopback test passed\n");
+ } else
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "No tests to run (specify 'Offline' on ethtool)\n");
+} /* end pcnet32_ethtool_test */
+
+static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ const struct pcnet32_access *a = lp->a; /* access to registers */
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ struct sk_buff *skb; /* sk buff */
+ int x, i; /* counters */
+ int numbuffs = 4; /* number of TX/RX buffers and descs */
+ u16 status = 0x8300; /* TX ring status: OWN | STP | ENP */
+ __le16 teststatus; /* test of ring status */
+ int rc; /* return code */
+ int size; /* size of packets */
+ unsigned char *packet; /* source packet data */
+ static const int data_len = 60; /* length of source packets */
+ unsigned long flags;
+ unsigned long ticks;
+
+ rc = 1; /* default to fail */
+
+ if (netif_running(dev))
+ pcnet32_netif_stop(dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
+
+ numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
+
+ /* Reset the PCNET32 */
+ lp->a->reset(ioaddr);
+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
+
+ /* switch pcnet32 to 32bit mode */
+ lp->a->write_bcr(ioaddr, 20, 2);
+
+ /* purge & init rings but don't actually restart */
+ pcnet32_restart(dev, 0x0000);
+
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
+
+ /* Initialize Transmit buffers. */
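+ /* 15 header bytes: 6-byte DA + 6-byte SA + 2-byte type + 1-byte packet number */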
+ size = data_len + 15;
+ for (x = 0; x < numbuffs; x++) {
+ skb = netdev_alloc_skb(dev, size);
+ if (!skb) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Cannot allocate skb at line: %d!\n",
+ __LINE__);
+ goto clean_up;
+ }
+ packet = skb->data;
+ skb_put(skb, size); /* create space for data */
+ lp->tx_skbuff[x] = skb;
+ lp->tx_ring[x].length = cpu_to_le16(-skb->len);
+ lp->tx_ring[x].misc = 0;
+
+ /* put DA and SA into the skb */
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+ /* type */
+ *packet++ = 0x08;
+ *packet++ = 0x06;
+ /* packet number */
+ *packet++ = x;
+ /* fill packet with data */
+ for (i = 0; i < data_len; i++)
+ *packet++ = i;
+
+ lp->tx_dma_addr[x] =
+ pci_map_single(lp->pci_dev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "DMA mapping error at line: %d!\n",
+ __LINE__);
+ goto clean_up;
+ }
+ lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->tx_ring[x].status = cpu_to_le16(status);
+ }
+
+ x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */
+ a->write_bcr(ioaddr, 32, x | 0x0002);
+
+ /* set int loopback in CSR15 */
+ x = a->read_csr(ioaddr, CSR15) & 0xfffc;
+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
+
+ teststatus = cpu_to_le16(0x8000);
+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
+
+ /* Check status of descriptors */
+ for (x = 0; x < numbuffs; x++) {
+ ticks = 0;
+ rmb();
+ while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ msleep(1);
+ spin_lock_irqsave(&lp->lock, flags);
+ rmb();
+ ticks++;
+ }
+ if (ticks == 200) {
+ netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
+ break;
+ }
+ }
+
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
+ wmb();
+ if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
+ netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
+
+ for (x = 0; x < numbuffs; x++) {
+ netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x);
+ skb = lp->rx_skbuff[x];
+ for (i = 0; i < size; i++)
+ pr_cont(" %02x", *(skb->data + i));
+ pr_cont("\n");
+ }
+ }
+
+ x = 0;
+ rc = 0;
+ while (x < numbuffs && !rc) {
+ skb = lp->rx_skbuff[x];
+ packet = lp->tx_skbuff[x]->data;
+ for (i = 0; i < size; i++) {
+ if (*(skb->data + i) != packet[i]) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Error in compare! %2x - %02x %02x\n",
+ i, *(skb->data + i), packet[i]);
+ rc = 1;
+ break;
+ }
+ }
+ x++;
+ }
+
+clean_up:
+ *data1 = rc;
+ pcnet32_purge_tx_ring(dev);
+
+ x = a->read_csr(ioaddr, CSR15);
+ a->write_csr(ioaddr, CSR15, (x & ~0x0044)); /* reset bits 6 and 2 */
+
+ x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
+ a->write_bcr(ioaddr, 32, (x & ~0x0002));
+
+ if (netif_running(dev)) {
+ pcnet32_netif_start(dev);
+ pcnet32_restart(dev, CSR0_NORMAL);
+ } else {
+ pcnet32_purge_rx_ring(dev);
+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return rc;
+} /* end pcnet32_loopback_test */
+
+static int pcnet32_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ const struct pcnet32_access *a = lp->a;
+ ulong ioaddr = dev->base_addr;
+ unsigned long flags;
+ int i;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ /* Save the current value of the bcrs */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++)
+ lp->save_regs[i - 4] = a->read_bcr(ioaddr, i);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return 2; /* cycle on/off twice per second */
+
+ case ETHTOOL_ID_ON:
+ case ETHTOOL_ID_OFF:
+ /* Blink the led */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++)
+ a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ /* Restore the original value of the bcrs */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++)
+ a->write_bcr(ioaddr, i, lp->save_regs[i - 4]);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return 0;
+}
+
+/*
+ * lp->lock must be held.
+ */
+static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
+ int can_sleep)
+{
+ int csr5;
+ struct pcnet32_private *lp = netdev_priv(dev);
+ const struct pcnet32_access *a = lp->a;
+ ulong ioaddr = dev->base_addr;
+ int ticks;
+
+ /* really old chips have to be stopped. */
+ if (lp->chip_version < PCNET32_79C970A)
+ return 0;
+
+ /* set SUSPEND (SPND) - CSR5 bit 0 */
+ csr5 = a->read_csr(ioaddr, CSR5);
+ a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
+
+ /* poll waiting for bit to be set */
+ ticks = 0;
+ while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
+ spin_unlock_irqrestore(&lp->lock, *flags);
+ if (can_sleep)
+ msleep(1);
+ else
+ mdelay(1);
+ spin_lock_irqsave(&lp->lock, *flags);
+ ticks++;
+ if (ticks > 200) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Error getting into suspend!\n");
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/*
+ * process one receive descriptor entry
+ */
+
+static void pcnet32_rx_entry(struct net_device *dev,
+ struct pcnet32_private *lp,
+ struct pcnet32_rx_head *rxp,
+ int entry)
+{
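+ /* the high byte of the descriptor status holds OWN/ERR/FRAM/OFLO/CRC/BUFF/STP/ENP */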
+ int status = (short)le16_to_cpu(rxp->status) >> 8;
+ int rx_in_place = 0;
+ struct sk_buff *skb;
+ short pkt_len;
+
+ if (status != 0x03) { /* There was an error. */
+ /*
+ * There is a tricky error noted by John Murphy,
+ * <murf@perftech.com> to Russ Nelson: Even with full-sized
+ * buffers it's possible for a jabber packet to use two
+ * buffers, with only the last correctly noting the error.
+ */
+ if (status & 0x01) /* Only count a general error at the */
+ dev->stats.rx_errors++; /* end of a packet. */
+ if (status & 0x20)
+ dev->stats.rx_frame_errors++;
+ if (status & 0x10)
+ dev->stats.rx_over_errors++;
+ if (status & 0x08)
+ dev->stats.rx_crc_errors++;
+ if (status & 0x04)
+ dev->stats.rx_fifo_errors++;
+ return;
+ }
+
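+ /* the message length reported by the chip includes the 4-byte FCS */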
+ pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;
+
+ /* Discard oversize frames. */
+ if (unlikely(pkt_len > PKT_BUF_SIZE)) {
+ netif_err(lp, drv, dev, "Impossible packet size %d!\n",
+ pkt_len);
+ dev->stats.rx_errors++;
+ return;
+ }
+ if (pkt_len < 60) {
+ netif_err(lp, rx_err, dev, "Runt packet!\n");
+ dev->stats.rx_errors++;
+ return;
+ }
+
+ if (pkt_len > rx_copybreak) {
+ struct sk_buff *newskb;
+ dma_addr_t new_dma_addr;
+
+ newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
+ /*
+ * map the new buffer, if mapping fails, drop the packet and
+ * reuse the old buffer
+ */
+ if (newskb) {
+ skb_reserve(newskb, NET_IP_ALIGN);
+ new_dma_addr = pci_map_single(lp->pci_dev,
+ newskb->data,
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) {
+ netif_err(lp, rx_err, dev,
+ "DMA mapping error.\n");
+ dev_kfree_skb(newskb);
+ skb = NULL;
+ } else {
+ skb = lp->rx_skbuff[entry];
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_dma_addr[entry],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ skb_put(skb, pkt_len);
+ lp->rx_skbuff[entry] = newskb;
+ lp->rx_dma_addr[entry] = new_dma_addr;
+ rxp->base = cpu_to_le32(new_dma_addr);
+ rx_in_place = 1;
+ }
+ } else
+ skb = NULL;
+ } else
+ skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
+
+ if (skb == NULL) {
+ dev->stats.rx_dropped++;
+ return;
+ }
+ if (!rx_in_place) {
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb_put(skb, pkt_len); /* Make room */
+ pci_dma_sync_single_for_cpu(lp->pci_dev,
+ lp->rx_dma_addr[entry],
+ pkt_len,
+ PCI_DMA_FROMDEVICE);
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)(lp->rx_skbuff[entry]->data),
+ pkt_len);
+ pci_dma_sync_single_for_device(lp->pci_dev,
+ lp->rx_dma_addr[entry],
+ pkt_len,
+ PCI_DMA_FROMDEVICE);
+ }
+ dev->stats.rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ dev->stats.rx_packets++;
+}
+
+static int pcnet32_rx(struct net_device *dev, int budget)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int entry = lp->cur_rx & lp->rx_mod_mask;
+ struct pcnet32_rx_head *rxp = &lp->rx_ring[entry];
+ int npackets = 0;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) {
+ pcnet32_rx_entry(dev, lp, rxp, entry);
+ npackets += 1;
+ /*
+ * The docs say that the buffer length isn't touched, but Andrew
+ * Boyd of QNX reports that some revs of the 79C965 clear it.
+ */
+ rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE);
+ wmb(); /* Make sure owner changes after others are visible */
+ rxp->status = cpu_to_le16(0x8000);
+ entry = (++lp->cur_rx) & lp->rx_mod_mask;
+ rxp = &lp->rx_ring[entry];
+ }
+
+ return npackets;
+}
+
+static int pcnet32_tx(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned int dirty_tx = lp->dirty_tx;
+ int delta;
+ int must_restart = 0;
+
+ while (dirty_tx != lp->cur_tx) {
+ int entry = dirty_tx & lp->tx_mod_mask;
+ int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
+
+ if (status < 0)
+ break; /* It still hasn't been Txed */
+
+ lp->tx_ring[entry].base = 0;
+
+ if (status & 0x4000) {
+ /* There was a major error, log it. */
+ int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
+ dev->stats.tx_errors++;
+ netif_err(lp, tx_err, dev,
+ "Tx error status=%04x err_status=%08x\n",
+ status, err_status);
+ if (err_status & 0x04000000)
+ dev->stats.tx_aborted_errors++;
+ if (err_status & 0x08000000)
+ dev->stats.tx_carrier_errors++;
+ if (err_status & 0x10000000)
+ dev->stats.tx_window_errors++;
+#ifndef DO_DXSUFLO
+ if (err_status & 0x40000000) {
+ dev->stats.tx_fifo_errors++;
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ /* Remove this verbosity later! */
+ netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
+ must_restart = 1;
+ }
+#else
+ if (err_status & 0x40000000) {
+ dev->stats.tx_fifo_errors++;
+ if (!lp->dxsuflo) { /* If controller doesn't recover ... */
+ /* Ackk! On FIFO errors the Tx unit is turned off! */
+ /* Remove this verbosity later! */
+ netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
+ must_restart = 1;
+ }
+ }
+#endif
+ } else {
+ if (status & 0x1800)
+ dev->stats.collisions++;
+ dev->stats.tx_packets++;
+ }
+
+ /* We must free the original skb */
+ if (lp->tx_skbuff[entry]) {
+ pci_unmap_single(lp->pci_dev,
+ lp->tx_dma_addr[entry],
+ lp->tx_skbuff[entry]->
+ len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(lp->tx_skbuff[entry]);
+ lp->tx_skbuff[entry] = NULL;
+ lp->tx_dma_addr[entry] = 0;
+ }
+ dirty_tx++;
+ }
+
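+ /*
+ * delta counts outstanding entries modulo twice the ring size, so an
+ * out-of-sync dirty pointer shows up as delta > tx_ring_size below.
+ */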
+ delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
+ if (delta > lp->tx_ring_size) {
+ netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
+ dirty_tx, lp->cur_tx, lp->tx_full);
+ dirty_tx += lp->tx_ring_size;
+ delta -= lp->tx_ring_size;
+ }
+
+ if (lp->tx_full &&
+ netif_queue_stopped(dev) &&
+ delta < lp->tx_ring_size - 2) {
+ /* The ring is no longer full, clear tbusy. */
+ lp->tx_full = 0;
+ netif_wake_queue(dev);
+ }
+ lp->dirty_tx = dirty_tx;
+
+ return must_restart;
+}
+
+static int pcnet32_poll(struct napi_struct *napi, int budget)
+{
+ struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi);
+ struct net_device *dev = lp->dev;
+ unsigned long ioaddr = dev->base_addr;
+ unsigned long flags;
+ int work_done;
+ u16 val;
+
+ work_done = pcnet32_rx(dev, budget);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (pcnet32_tx(dev)) {
+ /* reset the chip to clear the error condition, then restart */
+ lp->a->reset(ioaddr);
+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
+ pcnet32_restart(dev, CSR0_START);
+ netif_wake_queue(dev);
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ if (work_done < budget) {
+ spin_lock_irqsave(&lp->lock, flags);
+
+ __napi_complete(napi);
+
+ /* clear interrupt masks */
+ val = lp->a->read_csr(ioaddr, CSR3);
+ val &= 0x00ff;
+ lp->a->write_csr(ioaddr, CSR3, val);
+
+ /* Set interrupt enable. */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return work_done;
+}
+
+#define PCNET32_REGS_PER_PHY 32
+#define PCNET32_MAX_PHYS 32
+static int pcnet32_get_regs_len(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int j = lp->phycount * PCNET32_REGS_PER_PHY;
+
+ return (PCNET32_NUM_REGS + j) * sizeof(u16);
+}
+
+static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *ptr)
+{
+ int i, csr0;
+ u16 *buff = ptr;
+ struct pcnet32_private *lp = netdev_priv(dev);
+ const struct pcnet32_access *a = lp->a;
+ ulong ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ csr0 = a->read_csr(ioaddr, CSR0);
+ if (!(csr0 & CSR0_STOP)) /* If not stopped */
+ pcnet32_suspend(dev, &flags, 1);
+
+ /* read address PROM */
+ for (i = 0; i < 16; i += 2)
+ *buff++ = inw(ioaddr + i);
+
+ /* read control and status registers */
+ for (i = 0; i < 90; i++)
+ *buff++ = a->read_csr(ioaddr, i);
+
+ *buff++ = a->read_csr(ioaddr, 112);
+ *buff++ = a->read_csr(ioaddr, 114);
+
+ /* read bus configuration registers */
+ for (i = 0; i < 30; i++)
+ *buff++ = a->read_bcr(ioaddr, i);
+
+ *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
+
+ for (i = 31; i < 36; i++)
+ *buff++ = a->read_bcr(ioaddr, i);
+
+ /* read mii phy registers */
+ if (lp->mii) {
+ int j;
+ for (j = 0; j < PCNET32_MAX_PHYS; j++) {
+ if (lp->phymask & (1 << j)) {
+ for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
+ lp->a->write_bcr(ioaddr, 33,
+ (j << 5) | i);
+ *buff++ = lp->a->read_bcr(ioaddr, 34);
+ }
+ }
+ }
+ }
+
+ if (!(csr0 & CSR0_STOP)) { /* If not stopped */
+ int csr5;
+
+ /* clear SUSPEND (SPND) - CSR5 bit 0 */
+ csr5 = a->read_csr(ioaddr, CSR5);
+ a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+static const struct ethtool_ops pcnet32_ethtool_ops = {
+ .get_settings = pcnet32_get_settings,
+ .set_settings = pcnet32_set_settings,
+ .get_drvinfo = pcnet32_get_drvinfo,
+ .get_msglevel = pcnet32_get_msglevel,
+ .set_msglevel = pcnet32_set_msglevel,
+ .nway_reset = pcnet32_nway_reset,
+ .get_link = pcnet32_get_link,
+ .get_ringparam = pcnet32_get_ringparam,
+ .set_ringparam = pcnet32_set_ringparam,
+ .get_strings = pcnet32_get_strings,
+ .self_test = pcnet32_ethtool_test,
+ .set_phys_id = pcnet32_set_phys_id,
+ .get_regs_len = pcnet32_get_regs_len,
+ .get_regs = pcnet32_get_regs,
+ .get_sset_count = pcnet32_get_sset_count,
+};
+
+/* only probes for non-PCI devices, the rest are handled by
+ * pci_register_driver via pcnet32_probe_pci */
+
+static void pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
+{
+ unsigned int *port, ioaddr;
+
+ /* search for PCnet32 VLB cards at known addresses */
+ for (port = pcnet32_portlist; (ioaddr = *port); port++) {
+ if (request_region
+ (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
+ /* check if there is really a pcnet chip on that ioaddr */
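+ /* bytes 14/15 of the address PROM read as 0x57 (ASCII 'W') on PCnet parts */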
+ if ((inb(ioaddr + 14) == 0x57) &&
+ (inb(ioaddr + 15) == 0x57)) {
+ pcnet32_probe1(ioaddr, 0, NULL);
+ } else {
+ release_region(ioaddr, PCNET32_TOTAL_SIZE);
+ }
+ }
+ }
+}
+
+static int
+pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ unsigned long ioaddr;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err < 0) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("failed to enable device -- err=%d\n", err);
+ return err;
+ }
+ pci_set_master(pdev);
+
+ ioaddr = pci_resource_start(pdev, 0);
+ if (!ioaddr) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("card has no PCI IO resources, aborting\n");
+ return -ENODEV;
+ }
+
+ if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("architecture does not support 32bit PCI busmaster DMA\n");
+ return -ENODEV;
+ }
+ if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("io address range already allocated\n");
+ return -EBUSY;
+ }
+
+ err = pcnet32_probe1(ioaddr, 1, pdev);
+ if (err < 0)
+ pci_disable_device(pdev);
+
+ return err;
+}
+
+static const struct net_device_ops pcnet32_netdev_ops = {
+ .ndo_open = pcnet32_open,
+ .ndo_stop = pcnet32_close,
+ .ndo_start_xmit = pcnet32_start_xmit,
+ .ndo_tx_timeout = pcnet32_tx_timeout,
+ .ndo_get_stats = pcnet32_get_stats,
+ .ndo_set_rx_mode = pcnet32_set_multicast_list,
+ .ndo_do_ioctl = pcnet32_ioctl,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = pcnet32_poll_controller,
+#endif
+};
+
+/* pcnet32_probe1
+ * Called from both pcnet32_probe_vlbus and pcnet_probe_pci.
+ * pdev will be NULL when called from pcnet32_probe_vlbus.
+ */
+static int
+pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
+{
+ struct pcnet32_private *lp;
+ int i, media;
+ int fdx, mii, fset, dxsuflo, sram;
+ int chip_version;
+ char *chipname;
+ struct net_device *dev;
+ const struct pcnet32_access *a = NULL;
+ u8 promaddr[ETH_ALEN];
+ int ret = -ENODEV;
+
+ /* reset the chip */
+ pcnet32_wio_reset(ioaddr);
+
+ /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
+ if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
+ a = &pcnet32_wio;
+ } else {
+ pcnet32_dwio_reset(ioaddr);
+ if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 &&
+ pcnet32_dwio_check(ioaddr)) {
+ a = &pcnet32_dwio;
+ } else {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("No access methods\n");
+ goto err_release_region;
+ }
+ }
+
+ chip_version =
+ a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
+ if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
+ pr_info(" PCnet chip version is %#x\n", chip_version);
+ if ((chip_version & 0xfff) != 0x003) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_info("Unsupported chip version\n");
+ goto err_release_region;
+ }
+
+ /* initialize variables */
+ fdx = mii = fset = dxsuflo = sram = 0;
+ chip_version = (chip_version >> 12) & 0xffff;
+
+ switch (chip_version) {
+ case 0x2420:
+ chipname = "PCnet/PCI 79C970"; /* PCI */
+ break;
+ case 0x2430:
+ if (shared)
+ chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
+ else
+ chipname = "PCnet/32 79C965"; /* 486/VL bus */
+ break;
+ case 0x2621:
+ chipname = "PCnet/PCI II 79C970A"; /* PCI */
+ fdx = 1;
+ break;
+ case 0x2623:
+ chipname = "PCnet/FAST 79C971"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ fset = 1;
+ break;
+ case 0x2624:
+ chipname = "PCnet/FAST+ 79C972"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ fset = 1;
+ break;
+ case 0x2625:
+ chipname = "PCnet/FAST III 79C973"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ sram = 1;
+ break;
+ case 0x2626:
+ chipname = "PCnet/Home 79C978"; /* PCI */
+ fdx = 1;
+ /*
+ * This is based on specs published at www.amd.com. This section
+ * assumes that a card with a 79C978 wants to go into standard
+ * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
+ * and the module option homepna=1 can select this instead.
+ */
+ media = a->read_bcr(ioaddr, 49);
+ media &= ~3; /* default to 10Mb ethernet */
+ if (cards_found < MAX_UNITS && homepna[cards_found])
+ media |= 1; /* switch to home wiring mode */
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ printk(KERN_DEBUG PFX "media set to %sMbit mode\n",
+ (media & 1) ? "1" : "10");
+ a->write_bcr(ioaddr, 49, media);
+ break;
+ case 0x2627:
+ chipname = "PCnet/FAST III 79C975"; /* PCI */
+ fdx = 1;
+ mii = 1;
+ sram = 1;
+ break;
+ case 0x2628:
+ chipname = "PCnet/PRO 79C976";
+ fdx = 1;
+ mii = 1;
+ break;
+ default:
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_info("PCnet version %#x, no PCnet32 chip\n",
+ chip_version);
+ goto err_release_region;
+ }
+
+ /*
+ * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
+ * starting until the packet is loaded. Strike one for reliability, lose
+ * one for latency - although on PCI this isn't a big loss. Older chips
+ * have FIFO's smaller than a packet, so you can't do this.
+ * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
+ */
+
+ if (fset) {
+ a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
+ a->write_csr(ioaddr, 80,
+ (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
+ dxsuflo = 1;
+ }
+
+ /*
+ * The Am79C973/Am79C975 controllers come with 12K of SRAM
+ * which we can use for the Tx/Rx buffers but most importantly,
+ * the use of SRAM allow us to use the BCR18:NOUFLO bit to avoid
+ * Tx fifo underflows.
+ */
+ if (sram) {
+ /*
+ * The SRAM is being configured in two steps. First we
+ * set the SRAM size in the BCR25:SRAM_SIZE bits. According
+ * to the datasheet, each bit corresponds to a 512-byte
+ * page so we can have at most 24 pages. The SRAM_SIZE
+ * holds the value of the upper 8 bits of the 16-bit SRAM size.
+ * The low 8-bits start at 0x00 and end at 0xff. So the
+ * address range is from 0x0000 up to 0x17ff. Therefore,
+ * the SRAM_SIZE is set to 0x17. The next step is to set
+ * the BCR26:SRAM_BND midway through so the Tx and Rx
+ * buffers can share the SRAM equally.
+ */
+ a->write_bcr(ioaddr, 25, 0x17);
+ a->write_bcr(ioaddr, 26, 0xc);
+ /* And finally enable the NOUFLO bit */
+ a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
+ }
+
+ dev = alloc_etherdev(sizeof(*lp));
+ if (!dev) {
+ ret = -ENOMEM;
+ goto err_release_region;
+ }
+
+ if (pdev)
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_info("%s at %#3lx,", chipname, ioaddr);
+
+ /* In most chips, after a chip reset, the ethernet address is read from the
+ * station address PROM at the base address and programmed into the
+ * "Physical Address Registers" CSR12-14.
+ * As a precautionary measure, we read the PROM values and complain if
+ * they disagree with the CSRs. If they miscompare, and the PROM addr
+ * is valid, then the PROM addr is used.
+ */
+ for (i = 0; i < 3; i++) {
+ unsigned int val;
+ val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
+ /* There may be endianness issues here. */
+ dev->dev_addr[2 * i] = val & 0x0ff;
+ dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
+ }
+
+ /* read PROM address and compare with CSR address */
+ for (i = 0; i < ETH_ALEN; i++)
+ promaddr[i] = inb(ioaddr + i);
+
+ if (!ether_addr_equal(promaddr, dev->dev_addr) ||
+ !is_valid_ether_addr(dev->dev_addr)) {
+ if (is_valid_ether_addr(promaddr)) {
+ if (pcnet32_debug & NETIF_MSG_PROBE) {
+ pr_cont(" warning: CSR address invalid,\n");
+ pr_info(" using instead PROM address of");
+ }
+ memcpy(dev->dev_addr, promaddr, ETH_ALEN);
+ }
+ }
+
+ /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
+ if (!is_valid_ether_addr(dev->dev_addr))
+ eth_zero_addr(dev->dev_addr);
+
+ if (pcnet32_debug & NETIF_MSG_PROBE) {
+ pr_cont(" %pM", dev->dev_addr);
+
+ /* Version 0x2623 and 0x2624 */
+ if (((chip_version + 1) & 0xfffe) == 0x2624) {
+ i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
+ pr_info(" tx_start_pt(0x%04x):", i);
+ switch (i >> 10) {
+ case 0:
+ pr_cont(" 20 bytes,");
+ break;
+ case 1:
+ pr_cont(" 64 bytes,");
+ break;
+ case 2:
+ pr_cont(" 128 bytes,");
+ break;
+ case 3:
+ pr_cont("~220 bytes,");
+ break;
+ }
+ i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
+ pr_cont(" BCR18(%x):", i & 0xffff);
+ if (i & (1 << 5))
+ pr_cont("BurstWrEn ");
+ if (i & (1 << 6))
+ pr_cont("BurstRdEn ");
+ if (i & (1 << 7))
+ pr_cont("DWordIO ");
+ if (i & (1 << 11))
+ pr_cont("NoUFlow ");
+ i = a->read_bcr(ioaddr, 25);
+ pr_info(" SRAMSIZE=0x%04x,", i << 8);
+ i = a->read_bcr(ioaddr, 26);
+ pr_cont(" SRAM_BND=0x%04x,", i << 8);
+ i = a->read_bcr(ioaddr, 27);
+ if (i & (1 << 14))
+ pr_cont("LowLatRx");
+ }
+ }
+
+ dev->base_addr = ioaddr;
+ lp = netdev_priv(dev);
+ /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
+ lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block),
+ &lp->init_dma_addr);
+ if (!lp->init_block) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_err("Consistent memory allocation failed\n");
+ ret = -ENOMEM;
+ goto err_free_netdev;
+ }
+ lp->pci_dev = pdev;
+
+ lp->dev = dev;
+
+ spin_lock_init(&lp->lock);
+
+ lp->name = chipname;
+ lp->shared_irq = shared;
+ lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
+ lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */
+ lp->tx_mod_mask = lp->tx_ring_size - 1;
+ lp->rx_mod_mask = lp->rx_ring_size - 1;
+ lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
+ lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
+ lp->mii_if.full_duplex = fdx;
+ lp->mii_if.phy_id_mask = 0x1f;
+ lp->mii_if.reg_num_mask = 0x1f;
+ lp->dxsuflo = dxsuflo;
+ lp->mii = mii;
+ lp->chip_version = chip_version;
+ lp->msg_enable = pcnet32_debug;
+ if ((cards_found >= MAX_UNITS) ||
+ (options[cards_found] >= ARRAY_SIZE(options_mapping)))
+ lp->options = PCNET32_PORT_ASEL;
+ else
+ lp->options = options_mapping[options[cards_found]];
+ lp->mii_if.dev = dev;
+ lp->mii_if.mdio_read = mdio_read;
+ lp->mii_if.mdio_write = mdio_write;
+
+ /* napi.weight is used in both the napi and non-napi cases */
+ lp->napi.weight = lp->rx_ring_size / 2;
+
+ netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);
+
+ if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
+ ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
+ lp->options |= PCNET32_PORT_FD;
+
+ lp->a = a;
+
+ /* prior to register_netdev, dev->name is not yet correct */
+ if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
+ ret = -ENOMEM;
+ goto err_free_ring;
+ }
+ /* detect special T1/E1 WAN card by checking for MAC address */
+ if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 &&
+ dev->dev_addr[2] == 0x75)
+ lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
+
+ lp->init_block->mode = cpu_to_le16(0x0003); /* Disable Rx and Tx. */
+ lp->init_block->tlen_rlen =
+ cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
+ for (i = 0; i < 6; i++)
+ lp->init_block->phys_addr[i] = dev->dev_addr[i];
+ lp->init_block->filter[0] = 0x00000000;
+ lp->init_block->filter[1] = 0x00000000;
+ lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
+ lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
+
+ /* switch pcnet32 to 32bit mode */
+ a->write_bcr(ioaddr, 20, 2);
+
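+ /* hand the chip the init block address: low word via CSR1, high word via CSR2 */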
+ a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
+ a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
+
+ if (pdev) { /* use the IRQ provided by PCI */
+ dev->irq = pdev->irq;
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_cont(" assigned IRQ %d\n", dev->irq);
+ } else {
+ unsigned long irq_mask = probe_irq_on();
+
+ /*
+ * To auto-IRQ we enable the initialization-done and DMA error
+ * interrupts. For ISA boards we get a DMA error, but VLB and PCI
+ * boards will work.
+ */
+ /* Trigger an initialization just for the interrupt. */
+ a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT);
+ mdelay(1);
+
+ dev->irq = probe_irq_off(irq_mask);
+ if (!dev->irq) {
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_cont(", failed to detect IRQ line\n");
+ ret = -ENODEV;
+ goto err_free_ring;
+ }
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_cont(", probed IRQ %d\n", dev->irq);
+ }
+
+ /* Set the mii phy_id so that we can query the link state */
+ if (lp->mii) {
+ /* lp->phycount and lp->phymask are zeroed when the private area is allocated */
+
+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
+ /* scan for PHYs */
+ for (i = 0; i < PCNET32_MAX_PHYS; i++) {
+ unsigned short id1, id2;
+
+ id1 = mdio_read(dev, i, MII_PHYSID1);
+ if (id1 == 0xffff)
+ continue;
+ id2 = mdio_read(dev, i, MII_PHYSID2);
+ if (id2 == 0xffff)
+ continue;
+ if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
+ continue; /* 79C971 & 79C972 have phantom phy at id 31 */
+ lp->phycount++;
+ lp->phymask |= (1 << i);
+ lp->mii_if.phy_id = i;
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_info("Found PHY %04x:%04x at address %d\n",
+ id1, id2, i);
+ }
+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
+ if (lp->phycount > 1)
+ lp->options |= PCNET32_PORT_MII;
+ }
+
+ init_timer(&lp->watchdog_timer);
+ lp->watchdog_timer.data = (unsigned long)dev;
+ lp->watchdog_timer.function = (void *)&pcnet32_watchdog;
+
+ /* The PCNET32-specific entries in the device structure. */
+ dev->netdev_ops = &pcnet32_netdev_ops;
+ dev->ethtool_ops = &pcnet32_ethtool_ops;
+ dev->watchdog_timeo = (5 * HZ);
+
+ /* Fill in the generic fields of the device structure. */
+ if (register_netdev(dev))
+ goto err_free_ring;
+
+ if (pdev) {
+ pci_set_drvdata(pdev, dev);
+ } else {
+ lp->next = pcnet32_dev;
+ pcnet32_dev = dev;
+ }
+
+ if (pcnet32_debug & NETIF_MSG_PROBE)
+ pr_info("%s: registered as %s\n", dev->name, lp->name);
+ cards_found++;
+
+ /* enable LED writes */
+ a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
+
+ return 0;
+
+err_free_ring:
+ pcnet32_free_ring(dev);
+ pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
+ lp->init_block, lp->init_dma_addr);
+err_free_netdev:
+ free_netdev(dev);
+err_release_region:
+ release_region(ioaddr, PCNET32_TOTAL_SIZE);
+ return ret;
+}
+
+/* if any allocation fails, caller must also call pcnet32_free_ring */
+static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) *
+ lp->tx_ring_size,
+ &lp->tx_ring_dma_addr);
+ if (lp->tx_ring == NULL) {
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ lp->rx_ring_size,
+ &lp->rx_ring_dma_addr);
+ if (lp->rx_ring == NULL) {
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
+ GFP_ATOMIC);
+ if (!lp->tx_dma_addr)
+ return -ENOMEM;
+
+ lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
+ GFP_ATOMIC);
+ if (!lp->rx_dma_addr)
+ return -ENOMEM;
+
+ lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!lp->tx_skbuff)
+ return -ENOMEM;
+
+ lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
+ GFP_ATOMIC);
+ if (!lp->rx_skbuff)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void pcnet32_free_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ kfree(lp->tx_skbuff);
+ lp->tx_skbuff = NULL;
+
+ kfree(lp->rx_skbuff);
+ lp->rx_skbuff = NULL;
+
+ kfree(lp->tx_dma_addr);
+ lp->tx_dma_addr = NULL;
+
+ kfree(lp->rx_dma_addr);
+ lp->rx_dma_addr = NULL;
+
+ if (lp->tx_ring) {
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_tx_head) *
+ lp->tx_ring_size, lp->tx_ring,
+ lp->tx_ring_dma_addr);
+ lp->tx_ring = NULL;
+ }
+
+ if (lp->rx_ring) {
+ pci_free_consistent(lp->pci_dev,
+ sizeof(struct pcnet32_rx_head) *
+ lp->rx_ring_size, lp->rx_ring,
+ lp->rx_ring_dma_addr);
+ lp->rx_ring = NULL;
+ }
+}
+
+static int pcnet32_open(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ struct pci_dev *pdev = lp->pci_dev;
+ unsigned long ioaddr = dev->base_addr;
+ u16 val;
+ int i;
+ int rc;
+ unsigned long flags;
+
+ if (request_irq(dev->irq, pcnet32_interrupt,
+ lp->shared_irq ? IRQF_SHARED : 0, dev->name,
+ (void *)dev)) {
+ return -EAGAIN;
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ /* Check for a valid station address */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ rc = -EINVAL;
+ goto err_free_irq;
+ }
+
+ /* Reset the PCNET32 */
+ lp->a->reset(ioaddr);
+
+ /* switch pcnet32 to 32bit mode */
+ lp->a->write_bcr(ioaddr, 20, 2);
+
+ netif_printk(lp, ifup, KERN_DEBUG, dev,
+ "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
+ __func__, dev->irq, (u32) (lp->tx_ring_dma_addr),
+ (u32) (lp->rx_ring_dma_addr),
+ (u32) (lp->init_dma_addr));
+
+ /* set/reset autoselect bit */
+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
+ if (lp->options & PCNET32_PORT_ASEL)
+ val |= 2;
+ lp->a->write_bcr(ioaddr, 2, val);
+
+ /* handle full duplex setting */
+ if (lp->mii_if.full_duplex) {
+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
+ if (lp->options & PCNET32_PORT_FD) {
+ val |= 1;
+ if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
+ val |= 2;
+ } else if (lp->options & PCNET32_PORT_ASEL) {
+ /* workaround of xSeries250, turn on for 79C975 only */
+ if (lp->chip_version == 0x2627)
+ val |= 3;
+ }
+ lp->a->write_bcr(ioaddr, 9, val);
+ }
+
+ /* set/reset GPSI bit in test register */
+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
+ if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
+ val |= 0x10;
+ lp->a->write_csr(ioaddr, 124, val);
+
+ /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
+ if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
+ (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
+ pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
+ if (lp->options & PCNET32_PORT_ASEL) {
+ lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
+ netif_printk(lp, link, KERN_DEBUG, dev,
+ "Setting 100Mb-Full Duplex\n");
+ }
+ }
+ if (lp->phycount < 2) {
+ /*
+ * 24 Jun 2004 according AMD, in order to change the PHY,
+ * DANAS (or DISPM for 79C976) must be set; then select the speed,
+ * duplex, and/or enable auto negotiation, and clear DANAS
+ */
+ if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
+ lp->a->write_bcr(ioaddr, 32,
+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
+ /* disable Auto Negotiation, set 10Mbps, HD */
+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
+ if (lp->options & PCNET32_PORT_FD)
+ val |= 0x10;
+ if (lp->options & PCNET32_PORT_100)
+ val |= 0x08;
+ lp->a->write_bcr(ioaddr, 32, val);
+ } else {
+ if (lp->options & PCNET32_PORT_ASEL) {
+ lp->a->write_bcr(ioaddr, 32,
+ lp->a->read_bcr(ioaddr,
+ 32) | 0x0080);
+ /* enable auto negotiate, setup, disable fd */
+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
+ val |= 0x20;
+ lp->a->write_bcr(ioaddr, 32, val);
+ }
+ }
+ } else {
+ int first_phy = -1;
+ u16 bmcr;
+ u32 bcr9;
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+
+ /*
+ * There is really no good other way to handle multiple PHYs
+ * other than turning off all automatics
+ */
+ val = lp->a->read_bcr(ioaddr, 2);
+ lp->a->write_bcr(ioaddr, 2, val & ~2);
+ val = lp->a->read_bcr(ioaddr, 32);
+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
+
+ if (!(lp->options & PCNET32_PORT_ASEL)) {
+ /* setup ecmd */
+ ecmd.port = PORT_MII;
+ ecmd.transceiver = XCVR_INTERNAL;
+ ecmd.autoneg = AUTONEG_DISABLE;
+ ethtool_cmd_speed_set(&ecmd,
+ (lp->options & PCNET32_PORT_100) ?
+ SPEED_100 : SPEED_10);
+ bcr9 = lp->a->read_bcr(ioaddr, 9);
+
+ if (lp->options & PCNET32_PORT_FD) {
+ ecmd.duplex = DUPLEX_FULL;
+ bcr9 |= (1 << 0);
+ } else {
+ ecmd.duplex = DUPLEX_HALF;
+ bcr9 &= ~(1 << 0); /* clear the full-duplex bit */
+ }
+ lp->a->write_bcr(ioaddr, 9, bcr9);
+ }
+
+ for (i = 0; i < PCNET32_MAX_PHYS; i++) {
+ if (lp->phymask & (1 << i)) {
+ /* isolate all but the first PHY */
+ bmcr = mdio_read(dev, i, MII_BMCR);
+ if (first_phy == -1) {
+ first_phy = i;
+ mdio_write(dev, i, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+ } else {
+ mdio_write(dev, i, MII_BMCR,
+ bmcr | BMCR_ISOLATE);
+ }
+ /* use mii_ethtool_sset to setup PHY */
+ lp->mii_if.phy_id = i;
+ ecmd.phy_address = i;
+ if (lp->options & PCNET32_PORT_ASEL) {
+ mii_ethtool_gset(&lp->mii_if, &ecmd);
+ ecmd.autoneg = AUTONEG_ENABLE;
+ }
+ mii_ethtool_sset(&lp->mii_if, &ecmd);
+ }
+ }
+ lp->mii_if.phy_id = first_phy;
+ netif_info(lp, link, dev, "Using PHY number %d\n", first_phy);
+ }
+
+#ifdef DO_DXSUFLO
+ if (lp->dxsuflo) { /* Disable transmit stop on underflow */
+ val = lp->a->read_csr(ioaddr, CSR3);
+ val |= 0x40;
+ lp->a->write_csr(ioaddr, CSR3, val);
+ }
+#endif
+
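+ /* the port selection bits land at bits 7-8 of the init block mode word */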
+ lp->init_block->mode =
+ cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ pcnet32_load_multicast(dev);
+
+ if (pcnet32_init_ring(dev)) {
+ rc = -ENOMEM;
+ goto err_free_ring;
+ }
+
+ napi_enable(&lp->napi);
+
+ /* Re-initialize the PCNET32, and start it when done. */
+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
+
+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
+
+ netif_start_queue(dev);
+
+ if (lp->chip_version >= PCNET32_79C970A) {
+ /* Print the link status and start the watchdog */
+ pcnet32_check_media(dev, 1);
+ mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT);
+ }
+
+ i = 0;
+ while (i++ < 100)
+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
+ break;
+ /*
+ * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+ * reports that doing so triggers a bug in the '974.
+ */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
+
+ netif_printk(lp, ifup, KERN_DEBUG, dev,
+ "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
+ i,
+ (u32) (lp->init_dma_addr),
+ lp->a->read_csr(ioaddr, CSR0));
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return 0; /* Always succeed */
+
+err_free_ring:
+ /* free any allocated skbuffs */
+ pcnet32_purge_rx_ring(dev);
+
+ /*
+ * Switch back to 16bit mode to avoid problems with dumb
+ * DOS packet driver after a warm reboot
+ */
+ lp->a->write_bcr(ioaddr, 20, 4);
+
+err_free_irq:
+ spin_unlock_irqrestore(&lp->lock, flags);
+ free_irq(dev->irq, dev);
+ return rc;
+}
+
+/*
+ * The LANCE has been halted for one reason or another (busmaster memory
+ * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ * etc.). Modern LANCE variants always reload their ring-buffer
+ * configuration when restarted, so we must reinitialize our ring
+ * context before restarting. As part of this reinitialization,
+ * find all packets still on the Tx ring and pretend that they had been
+ * sent (in effect, drop the packets on the floor) - the higher-level
+ * protocols will time out and retransmit. It'd be better to shuffle
+ * these skbs to a temp list and then actually re-Tx them after
+ * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
+ */
+
+static void pcnet32_purge_tx_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < lp->tx_ring_size; i++) {
+ lp->tx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ if (lp->tx_skbuff[i]) {
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ lp->tx_dma_addr[i]))
+ pci_unmap_single(lp->pci_dev,
+ lp->tx_dma_addr[i],
+ lp->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(lp->tx_skbuff[i]);
+ }
+ lp->tx_skbuff[i] = NULL;
+ lp->tx_dma_addr[i] = 0;
+ }
+}
+
+/* Initialize the PCNET32 Rx and Tx rings. */
+static int pcnet32_init_ring(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int i;
+
+ lp->tx_full = 0;
+ lp->cur_rx = lp->cur_tx = 0;
+ lp->dirty_rx = lp->dirty_tx = 0;
+
+ for (i = 0; i < lp->rx_ring_size; i++) {
+ struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
+ if (rx_skbuff == NULL) {
+ lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB);
+ rx_skbuff = lp->rx_skbuff[i];
+ if (!rx_skbuff) {
+ /* there is not much we can do at this point */
+ netif_err(lp, drv, dev, "%s netdev_alloc_skb failed\n",
+ __func__);
+ return -1;
+ }
+ skb_reserve(rx_skbuff, NET_IP_ALIGN);
+ }
+
+ rmb();
+ if (lp->rx_dma_addr[i] == 0) {
+ lp->rx_dma_addr[i] =
+ pci_map_single(lp->pci_dev, rx_skbuff->data,
+ PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev,
+ lp->rx_dma_addr[i])) {
+ /* there is not much we can do at this point */
+ netif_err(lp, drv, dev,
+ "%s pci dma mapping error\n",
+ __func__);
+ return -1;
+ }
+ }
+ lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
+ lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->rx_ring[i].status = cpu_to_le16(0x8000);
+ }
+ /* The Tx buffer address is filled in as needed, but we do need to clear
+ * the upper ownership bit. */
+ for (i = 0; i < lp->tx_ring_size; i++) {
+ lp->tx_ring[i].status = 0; /* CPU owns buffer */
+ wmb(); /* Make sure adapter sees owner change */
+ lp->tx_ring[i].base = 0;
+ lp->tx_dma_addr[i] = 0;
+ }
+
+ lp->init_block->tlen_rlen =
+ cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
+ for (i = 0; i < 6; i++)
+ lp->init_block->phys_addr[i] = dev->dev_addr[i];
+ lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
+ lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
+ wmb(); /* Make sure all changes are visible */
+ return 0;
+}
+
+/* the pcnet32 has been issued a stop or reset. Wait for the stop bit
+ * then flush the pending transmit operations, re-initialize the ring,
+ * and tell the chip to initialize.
+ */
+static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ int i;
+
+ /* wait for stop */
+ for (i = 0; i < 100; i++)
+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
+ break;
+
+ if (i >= 100)
+ netif_err(lp, drv, dev, "%s timed out waiting for stop\n",
+ __func__);
+
+ pcnet32_purge_tx_ring(dev);
+ if (pcnet32_init_ring(dev))
+ return;
+
+ /* ReInit Ring */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
+ i = 0;
+ while (i++ < 1000)
+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
+ break;
+
+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
+}
+
+static void pcnet32_tx_timeout(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr, flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ /* Transmitter timeout, serious problems. */
+ if (pcnet32_debug & NETIF_MSG_DRV)
+ pr_err("%s: transmit timed out, status %4.4x, resetting\n",
+ dev->name, lp->a->read_csr(ioaddr, CSR0));
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
+ dev->stats.tx_errors++;
+ if (netif_msg_tx_err(lp)) {
+ int i;
+ printk(KERN_DEBUG
+ " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
+ lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
+ lp->cur_rx);
+ for (i = 0; i < lp->rx_ring_size; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ le32_to_cpu(lp->rx_ring[i].base),
+ (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
+ 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
+ le16_to_cpu(lp->rx_ring[i].status));
+ for (i = 0; i < lp->tx_ring_size; i++)
+ printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
+ le32_to_cpu(lp->tx_ring[i].base),
+ (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
+ le32_to_cpu(lp->tx_ring[i].misc),
+ le16_to_cpu(lp->tx_ring[i].status));
+ printk("\n");
+ }
+ pcnet32_restart(dev, CSR0_NORMAL);
+
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_wake_queue(dev);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ u16 status;
+ int entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ netif_printk(lp, tx_queued, KERN_DEBUG, dev,
+ "%s() called, csr0 %4.4x\n",
+ __func__, lp->a->read_csr(ioaddr, CSR0));
+
+ /* Default status -- will not enable Successful-TxDone
+ * interrupt when that option is available to us.
+ */
+ status = 0x8300;
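+	/* (0x8300 = OWN | STP | ENP in TMD1: the chip owns a descriptor
+	 * that both starts and ends the frame) */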
+
+ /* Fill in a Tx ring entry */
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->cur_tx & lp->tx_mod_mask;
+
+ /* Caution: the write order is important here, set the status
+ * with the "ownership" bits last. */
+
+ lp->tx_ring[entry].length = cpu_to_le16(-skb->len);
+
+ lp->tx_ring[entry].misc = 0x00000000;
+
+ lp->tx_dma_addr[entry] =
+ pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ goto drop_packet;
+ }
+ lp->tx_skbuff[entry] = skb;
+ lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->tx_ring[entry].status = cpu_to_le16(status);
+
+ lp->cur_tx++;
+ dev->stats.tx_bytes += skb->len;
+
+ /* Trigger an immediate send poll. */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
+
+ if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
+ lp->tx_full = 1;
+ netif_stop_queue(dev);
+ }
+drop_packet:
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return NETDEV_TX_OK;
+}
+
+/* The PCNET32 interrupt handler. */
+static irqreturn_t
+pcnet32_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct pcnet32_private *lp;
+ unsigned long ioaddr;
+ u16 csr0;
+ int boguscnt = max_interrupt_work;
+
+ ioaddr = dev->base_addr;
+ lp = netdev_priv(dev);
+
+ spin_lock(&lp->lock);
+
+ csr0 = lp->a->read_csr(ioaddr, CSR0);
+ while ((csr0 & 0x8f00) && --boguscnt >= 0) {
+ if (csr0 == 0xffff)
+ break; /* PCMCIA remove happened */
+ /* Acknowledge all of the current interrupt sources ASAP. */
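+		/* Status bits are cleared by writing them back as ones;
+		 * masking off 0x004f keeps the ack from issuing a command
+		 * (INIT/STRT/STOP/TDMD) or re-enabling INEA while we poll. */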
+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
+
+ netif_printk(lp, intr, KERN_DEBUG, dev,
+ "interrupt csr0=%#2.2x new csr=%#2.2x\n",
+ csr0, lp->a->read_csr(ioaddr, CSR0));
+
+ /* Log misc errors. */
+ if (csr0 & 0x4000)
+ dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & 0x1000) {
+ /*
+ * This happens when our receive ring is full. This
+ * shouldn't be a problem as we will see normal rx
+ * interrupts for the frames in the receive ring. But
+			 * there are some PCI chipsets (I can reproduce this
+			 * on SP3G with the Intel Saturn chipset) which
+			 * sometimes have problems and will fill up the
+			 * receive ring with error descriptors. In this
+			 * situation we don't get a rx interrupt, but a
+			 * missed frame interrupt sooner or later.
+ */
+ dev->stats.rx_errors++; /* Missed a Rx frame. */
+ }
+ if (csr0 & 0x0800) {
+ netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n",
+ csr0);
+ /* unlike for the lance, there is no restart needed */
+ }
+ if (napi_schedule_prep(&lp->napi)) {
+ u16 val;
+ /* set interrupt masks */
+ val = lp->a->read_csr(ioaddr, CSR3);
+ val |= 0x5f00;
+ lp->a->write_csr(ioaddr, CSR3, val);
+
+ __napi_schedule(&lp->napi);
+ break;
+ }
+ csr0 = lp->a->read_csr(ioaddr, CSR0);
+ }
+
+ netif_printk(lp, intr, KERN_DEBUG, dev,
+ "exiting interrupt, csr0=%#4.4x\n",
+ lp->a->read_csr(ioaddr, CSR0));
+
+ spin_unlock(&lp->lock);
+
+ return IRQ_HANDLED;
+}
+
+static int pcnet32_close(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ del_timer_sync(&lp->watchdog_timer);
+
+ netif_stop_queue(dev);
+ napi_disable(&lp->napi);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
+
+ netif_printk(lp, ifdown, KERN_DEBUG, dev,
+ "Shutting down ethercard, status was %2.2x\n",
+ lp->a->read_csr(ioaddr, CSR0));
+
+ /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
+
+ /*
+ * Switch back to 16bit mode to avoid problems with dumb
+ * DOS packet driver after a warm reboot
+ */
+ lp->a->write_bcr(ioaddr, 20, 4);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ free_irq(dev->irq, dev);
+
+ spin_lock_irqsave(&lp->lock, flags);
+
+ pcnet32_purge_rx_ring(dev);
+ pcnet32_purge_tx_ring(dev);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return 0;
+}
+
+static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return &dev->stats;
+}
+
+/* taken from the sunlance driver, which it took from the depca driver */
+static void pcnet32_load_multicast(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ volatile struct pcnet32_init_block *ib = lp->init_block;
+ volatile __le16 *mcast_table = (__le16 *)ib->filter;
+ struct netdev_hw_addr *ha;
+ unsigned long ioaddr = dev->base_addr;
+ int i;
+ u32 crc;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI) {
+ ib->filter[0] = cpu_to_le32(~0U);
+ ib->filter[1] = cpu_to_le32(~0U);
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
+ return;
+ }
+ /* clear the multicast filter */
+ ib->filter[0] = 0;
+ ib->filter[1] = 0;
+
+ /* Add addresses */
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc = crc >> 26;
+ mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
+ }
+ for (i = 0; i < 4; i++)
+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
+ le16_to_cpu(mcast_table[i]));
+}
+
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void pcnet32_set_multicast_list(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr, flags;
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int csr15, suspended;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ suspended = pcnet32_suspend(dev, &flags, 0);
+ csr15 = lp->a->read_csr(ioaddr, CSR15);
+ if (dev->flags & IFF_PROMISC) {
+ /* Log any net taps. */
+ netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
+ lp->init_block->mode =
+ cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
+ 7);
+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
+ } else {
+ lp->init_block->mode =
+ cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
+ pcnet32_load_multicast(dev);
+ }
+
+ if (suspended) {
+ int csr5;
+ /* clear SUSPEND (SPND) - CSR5 bit 0 */
+ csr5 = lp->a->read_csr(ioaddr, CSR5);
+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
+ } else {
+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
+ pcnet32_restart(dev, CSR0_NORMAL);
+ netif_wake_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+/* This routine assumes that the lp->lock is held */
+static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+ u16 val_out;
+
+ if (!lp->mii)
+ return 0;
+
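+	/* BCR33 is the MII address port (PHY id in bits 9:5, register
+	 * number in bits 4:0); BCR34 is the MII data port. */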
+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
+ val_out = lp->a->read_bcr(ioaddr, 34);
+
+ return val_out;
+}
+
+/* This routine assumes that the lp->lock is held */
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long ioaddr = dev->base_addr;
+
+ if (!lp->mii)
+ return;
+
+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
+ lp->a->write_bcr(ioaddr, 34, val);
+}
+
+static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int rc;
+ unsigned long flags;
+
+ /* SIOC[GS]MIIxxx ioctls */
+ if (lp->mii) {
+ spin_lock_irqsave(&lp->lock, flags);
+ rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ } else {
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static int pcnet32_check_otherphy(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ struct mii_if_info mii = lp->mii_if;
+ u16 bmcr;
+ int i;
+
+ for (i = 0; i < PCNET32_MAX_PHYS; i++) {
+ if (i == lp->mii_if.phy_id)
+ continue; /* skip active phy */
+ if (lp->phymask & (1 << i)) {
+ mii.phy_id = i;
+ if (mii_link_ok(&mii)) {
+ /* found PHY with active link */
+ netif_info(lp, link, dev, "Using PHY number %d\n",
+ i);
+
+ /* isolate inactive phy */
+ bmcr =
+ mdio_read(dev, lp->mii_if.phy_id, MII_BMCR);
+ mdio_write(dev, lp->mii_if.phy_id, MII_BMCR,
+ bmcr | BMCR_ISOLATE);
+
+ /* de-isolate new phy */
+ bmcr = mdio_read(dev, i, MII_BMCR);
+ mdio_write(dev, i, MII_BMCR,
+ bmcr & ~BMCR_ISOLATE);
+
+ /* set new phy address */
+ lp->mii_if.phy_id = i;
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+ * Show the status of the media. Similar to mii_check_media however it
+ * correctly shows the link speed for all (tested) pcnet32 variants.
+ * Devices with no mii just report link state without speed.
+ *
+ * Caller is assumed to hold and release the lp->lock.
+ */
+
+static void pcnet32_check_media(struct net_device *dev, int verbose)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ int curr_link;
+ int prev_link = netif_carrier_ok(dev) ? 1 : 0;
+ u32 bcr9;
+
+ if (lp->mii) {
+ curr_link = mii_link_ok(&lp->mii_if);
+ } else {
+ ulong ioaddr = dev->base_addr; /* card base I/O address */
+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
+ }
+ if (!curr_link) {
+ if (prev_link || verbose) {
+ netif_carrier_off(dev);
+ netif_info(lp, link, dev, "link down\n");
+ }
+ if (lp->phycount > 1) {
+ curr_link = pcnet32_check_otherphy(dev);
+ prev_link = 0;
+ }
+ } else if (verbose || !prev_link) {
+ netif_carrier_on(dev);
+ if (lp->mii) {
+ if (netif_msg_link(lp)) {
+ struct ethtool_cmd ecmd = {
+ .cmd = ETHTOOL_GSET };
+ mii_ethtool_gset(&lp->mii_if, &ecmd);
+ netdev_info(dev, "link up, %uMbps, %s-duplex\n",
+ ethtool_cmd_speed(&ecmd),
+ (ecmd.duplex == DUPLEX_FULL)
+ ? "full" : "half");
+ }
+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
+ if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
+ if (lp->mii_if.full_duplex)
+ bcr9 |= (1 << 0);
+ else
+ bcr9 &= ~(1 << 0);
+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
+ }
+ } else {
+ netif_info(lp, link, dev, "link up\n");
+ }
+ }
+}
+
+/*
+ * Check for loss of link and link establishment.
+ * Could possibly be changed to use mii_check_media instead.
+ */
+
+static void pcnet32_watchdog(struct net_device *dev)
+{
+ struct pcnet32_private *lp = netdev_priv(dev);
+ unsigned long flags;
+
+ /* Print the link status if it has changed */
+ spin_lock_irqsave(&lp->lock, flags);
+ pcnet32_check_media(dev, 0);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT));
+}
+
+static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ pcnet32_close(dev);
+ }
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int pcnet32_pm_resume(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ if (netif_running(dev)) {
+ pcnet32_open(dev);
+ netif_device_attach(dev);
+ }
+ return 0;
+}
+
+static void pcnet32_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+
+ if (dev) {
+ struct pcnet32_private *lp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ pcnet32_free_ring(dev);
+ release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
+ pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
+ lp->init_block, lp->init_dma_addr);
+ free_netdev(dev);
+ pci_disable_device(pdev);
+ }
+}
+
+static struct pci_driver pcnet32_driver = {
+ .name = DRV_NAME,
+ .probe = pcnet32_probe_pci,
+ .remove = pcnet32_remove_one,
+ .id_table = pcnet32_pci_tbl,
+ .suspend = pcnet32_pm_suspend,
+ .resume = pcnet32_pm_resume,
+};
+
+/* An additional parameter that may be passed in... */
+static int debug = -1;
+static int tx_start_pt = -1;
+static int pcnet32_have_pci;
+
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, DRV_NAME " debug level");
+module_param(max_interrupt_work, int, 0);
+MODULE_PARM_DESC(max_interrupt_work,
+ DRV_NAME " maximum events handled per interrupt");
+module_param(rx_copybreak, int, 0);
+MODULE_PARM_DESC(rx_copybreak,
+ DRV_NAME " copy breakpoint for copy-only-tiny-frames");
+module_param(tx_start_pt, int, 0);
+MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
+module_param(pcnet32vlb, int, 0);
+MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)");
+module_param_array(options, int, NULL, 0);
+MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)");
+module_param_array(full_duplex, int, NULL, 0);
+MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
+/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
+module_param_array(homepna, int, NULL, 0);
+MODULE_PARM_DESC(homepna,
+ DRV_NAME
+ " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet");
+
+MODULE_AUTHOR("Thomas Bogendoerfer");
+MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
+MODULE_LICENSE("GPL");
+
+#define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
+static int __init pcnet32_init_module(void)
+{
+ pr_info("%s", version);
+
+ pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
+
+ if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
+ tx_start = tx_start_pt;
+
+ /* find the PCI devices */
+ if (!pci_register_driver(&pcnet32_driver))
+ pcnet32_have_pci = 1;
+
+ /* should we find any remaining VLbus devices ? */
+ if (pcnet32vlb)
+ pcnet32_probe_vlbus(pcnet32_portlist);
+
+ if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
+ pr_info("%d cards_found\n", cards_found);
+
+ return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
+}
+
+static void __exit pcnet32_cleanup_module(void)
+{
+ struct net_device *next_dev;
+
+ while (pcnet32_dev) {
+ struct pcnet32_private *lp = netdev_priv(pcnet32_dev);
+ next_dev = lp->next;
+ unregister_netdev(pcnet32_dev);
+ pcnet32_free_ring(pcnet32_dev);
+ release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
+ pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
+ lp->init_block, lp->init_dma_addr);
+ free_netdev(pcnet32_dev);
+ pcnet32_dev = next_dev;
+ }
+
+ if (pcnet32_have_pci)
+ pci_unregister_driver(&pcnet32_driver);
+}
+
+module_init(pcnet32_init_module);
+module_exit(pcnet32_cleanup_module);
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * tab-width: 8
+ * End:
+ */
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
new file mode 100644
index 000000000..3d8c6b2cd
--- /dev/null
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -0,0 +1,956 @@
+/* sun3lance.c: Ethernet driver for SUN3 Lance chip */
+/*
+
+ Sun3 Lance ethernet driver, by Sam Creasey (sammy@users.qual.net).
+ This driver is a part of the linux kernel, and is thus distributed
+ under the GNU General Public License.
+
+ The values used for LANCE_OBIO and LANCE_IRQ appear, empirically, to
+ be the correct address and IRQ of the lance registers. They
+ have not been widely tested, however. What we probably need is a
+ "proper" way to search for a device in the sun3's prom, but, alas,
+ linux has no such thing.
+
+ This driver is largely based on atarilance.c, by Roman Hodek. Other
+ sources of inspiration were the NetBSD sun3 am7990 driver, and the
+ linux sparc lance driver (sunlance.c).
+
+ There are more assumptions made throughout this driver; it almost
+ certainly still needs work, but it does work at least for RARP/BOOTP and
+ mounting the root NFS filesystem.
+
+*/
+
+static char *version = "sun3lance.c: v1.2 1/12/2001 Sam Creasey (sammy@sammy.net)\n";
+
+#include <linux/module.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/dvma.h>
+#include <asm/idprom.h>
+#include <asm/machines.h>
+
+#ifdef CONFIG_SUN3
+#include <asm/sun3mmu.h>
+#else
+#include <asm/sun3xprom.h>
+#endif
+
+/* sun3/60 addr/irq for the lance chip. If your sun is different,
+ change this. */
+#define LANCE_OBIO 0x120000
+#define LANCE_IRQ IRQ_AUTO_3
+
+/* Debug level:
+ * 0 = silent, print only serious errors
+ * 1 = normal, print error messages
+ * 2 = debug, print debug info
+ * 3 = debug, print even more debug info (packet data)
+ */
+
+#define LANCE_DEBUG 0
+
+#ifdef LANCE_DEBUG
+static int lance_debug = LANCE_DEBUG;
+#else
+static int lance_debug = 1;
+#endif
+module_param(lance_debug, int, 0);
+MODULE_PARM_DESC(lance_debug, "SUN3 Lance debug level (0-3)");
+MODULE_LICENSE("GPL");
+
+#define DPRINTK(n,a) \
+ do { \
+ if (lance_debug >= n) \
+ printk a; \
+ } while( 0 )
+
+
+/* we're only using 32k of memory, so we use 4 TX
+ buffers and 16 RX buffers. These values are expressed as log2. */
+
+#define TX_LOG_RING_SIZE 3
+#define RX_LOG_RING_SIZE 5
+
+/* These are the derived values */
+
+#define TX_RING_SIZE (1 << TX_LOG_RING_SIZE)
+#define TX_RING_LEN_BITS (TX_LOG_RING_SIZE << 5)
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+
+#define RX_RING_SIZE (1 << RX_LOG_RING_SIZE)
+#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5)
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+
+/* Definitions for packet buffer access: */
+#define PKT_BUF_SZ 1544
+
+/* Get the address of a packet buffer corresponding to a given buffer head */
+#define PKTBUF_ADDR(head) (void *)((unsigned long)(MEM) | (head)->base)
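+/* The descriptor stores only the low 16 bits of a buffer's DVMA address;
+ * since the shared memory block is 64K-aligned (see dvma_malloc_align()
+ * below), OR-ing that offset onto MEM recovers the CPU-visible buffer
+ * address. */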
+
+
+/* The LANCE Rx and Tx ring descriptors. */
+struct lance_rx_head {
+ unsigned short base; /* Low word of base addr */
+ volatile unsigned char flag;
+	unsigned char base_hi; /* High bits (16-23) of base addr */
+ short buf_length; /* This length is 2s complement! */
+ volatile short msg_length; /* This length is "normal". */
+};
+
+struct lance_tx_head {
+ unsigned short base; /* Low word of base addr */
+ volatile unsigned char flag;
+	unsigned char base_hi; /* High bits (16-23) of base addr */
+ short length; /* Length is 2s complement! */
+ volatile short misc;
+};
+
+/* The LANCE initialization block, described in databook. */
+struct lance_init_block {
+ unsigned short mode; /* Pre-set mode */
+ unsigned char hwaddr[6]; /* Physical ethernet address */
+ unsigned int filter[2]; /* Multicast filter (unused). */
+ /* Receive and transmit ring base, along with length bits. */
+ unsigned short rdra;
+ unsigned short rlen;
+ unsigned short tdra;
+ unsigned short tlen;
+	unsigned short pad[4]; /* is this needed? */
+};
+
+/* The whole layout of the Lance shared memory */
+struct lance_memory {
+ struct lance_init_block init;
+ struct lance_tx_head tx_head[TX_RING_SIZE];
+ struct lance_rx_head rx_head[RX_RING_SIZE];
+ char rx_data[RX_RING_SIZE][PKT_BUF_SZ];
+ char tx_data[TX_RING_SIZE][PKT_BUF_SZ];
+};
+
+/* The driver's private device structure */
+
+struct lance_private {
+ volatile unsigned short *iobase;
+ struct lance_memory *mem;
+ int new_rx, new_tx; /* The next free ring entry */
+ int old_tx, old_rx; /* ring entry to be processed */
+/* These two must be longs for set_bit() */
+ long tx_full;
+ long lock;
+};
+
+/* I/O register access macros */
+
+#define MEM lp->mem
+#define DREG lp->iobase[0]
+#define AREG lp->iobase[1]
+#define REGA(a) (*( AREG = (a), &DREG ))
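+
+/* The LANCE is programmed through two 16-bit ports: writing a CSR number
+ * to AREG (the address port) selects a register, and DREG (the data port)
+ * then accesses it.  REGA(a) folds the select-then-access into a single
+ * expression. */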
+
+/* Definitions for the Lance */
+
+/* tx_head flags */
+#define TMD1_ENP 0x01 /* end of packet */
+#define TMD1_STP 0x02 /* start of packet */
+#define TMD1_DEF 0x04 /* deferred */
+#define TMD1_ONE 0x08 /* one retry needed */
+#define TMD1_MORE 0x10 /* more than one retry needed */
+#define TMD1_ERR 0x40 /* error summary */
+#define TMD1_OWN 0x80 /* ownership (set: chip owns) */
+
+#define TMD1_OWN_CHIP TMD1_OWN
+#define TMD1_OWN_HOST 0
+
+/* tx_head misc field */
+#define TMD3_TDR 0x03FF /* Time Domain Reflectometry counter */
+#define TMD3_RTRY 0x0400 /* failed after 16 retries */
+#define TMD3_LCAR 0x0800 /* carrier lost */
+#define TMD3_LCOL 0x1000 /* late collision */
+#define TMD3_UFLO 0x4000 /* underflow (late memory) */
+#define TMD3_BUFF 0x8000 /* buffering error (no ENP) */
+
+/* rx_head flags */
+#define RMD1_ENP 0x01 /* end of packet */
+#define RMD1_STP 0x02 /* start of packet */
+#define RMD1_BUFF 0x04 /* buffer error */
+#define RMD1_CRC 0x08 /* CRC error */
+#define RMD1_OFLO 0x10 /* overflow */
+#define RMD1_FRAM 0x20 /* framing error */
+#define RMD1_ERR 0x40 /* error summary */
+#define RMD1_OWN	0x80	/* ownership (set: chip owns) */
+
+#define RMD1_OWN_CHIP RMD1_OWN
+#define RMD1_OWN_HOST 0
+
+/* register names */
+#define CSR0 0 /* mode/status */
+#define CSR1 1 /* init block addr (low) */
+#define CSR2 2 /* init block addr (high) */
+#define CSR3 3 /* misc */
+#define CSR8 8 /* address filter */
+#define CSR15 15 /* promiscuous mode */
+
+/* CSR0 */
+/* (R=readable, W=writeable, S=set on write, C=clear on write) */
+#define CSR0_INIT 0x0001 /* initialize (RS) */
+#define CSR0_STRT 0x0002 /* start (RS) */
+#define CSR0_STOP 0x0004 /* stop (RS) */
+#define CSR0_TDMD 0x0008 /* transmit demand (RS) */
+#define CSR0_TXON 0x0010 /* transmitter on (R) */
+#define CSR0_RXON 0x0020 /* receiver on (R) */
+#define CSR0_INEA 0x0040 /* interrupt enable (RW) */
+#define CSR0_INTR 0x0080 /* interrupt active (R) */
+#define CSR0_IDON 0x0100 /* initialization done (RC) */
+#define CSR0_TINT 0x0200 /* transmitter interrupt (RC) */
+#define CSR0_RINT 0x0400 /* receiver interrupt (RC) */
+#define CSR0_MERR 0x0800 /* memory error (RC) */
+#define CSR0_MISS 0x1000 /* missed frame (RC) */
+#define CSR0_CERR 0x2000 /* carrier error (no heartbeat :-) (RC) */
+#define CSR0_BABL 0x4000 /* babble: tx-ed too many bits (RC) */
+#define CSR0_ERR 0x8000 /* error (RC) */
+
+/* CSR3 */
+#define CSR3_BCON 0x0001 /* byte control */
+#define CSR3_ACON 0x0002 /* ALE control */
+#define CSR3_BSWP 0x0004 /* byte swap (1=big endian) */
+
+/***************************** Prototypes *****************************/
+
+static int lance_probe( struct net_device *dev);
+static int lance_open( struct net_device *dev );
+static void lance_init_ring( struct net_device *dev );
+static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
+static irqreturn_t lance_interrupt( int irq, void *dev_id);
+static int lance_rx( struct net_device *dev );
+static int lance_close( struct net_device *dev );
+static void set_multicast_list( struct net_device *dev );
+
+/************************* End of Prototypes **************************/
+
+struct net_device * __init sun3lance_probe(int unit)
+{
+ struct net_device *dev;
+ static int found;
+ int err = -ENODEV;
+
+ if (!MACH_IS_SUN3 && !MACH_IS_SUN3X)
+ return ERR_PTR(-ENODEV);
+
+ /* check that this machine has an onboard lance */
+ switch(idprom->id_machtype) {
+ case SM_SUN3|SM_3_50:
+ case SM_SUN3|SM_3_60:
+ case SM_SUN3X|SM_3_80:
+ /* these machines have lance */
+ break;
+
+ default:
+ return ERR_PTR(-ENODEV);
+ }
+
+ if (found)
+ return ERR_PTR(-ENODEV);
+
+ dev = alloc_etherdev(sizeof(struct lance_private));
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+ if (unit >= 0) {
+ sprintf(dev->name, "eth%d", unit);
+ netdev_boot_setup_check(dev);
+ }
+
+ if (!lance_probe(dev))
+ goto out;
+
+ err = register_netdev(dev);
+ if (err)
+ goto out1;
+ found = 1;
+ return dev;
+
+out1:
+#ifdef CONFIG_SUN3
+ iounmap((void __iomem *)dev->base_addr);
+#endif
+out:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+static const struct net_device_ops lance_netdev_ops = {
+ .ndo_open = lance_open,
+ .ndo_stop = lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_rx_mode = set_multicast_list,
+ .ndo_set_mac_address = NULL,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int __init lance_probe( struct net_device *dev)
+{
+ unsigned long ioaddr;
+
+ struct lance_private *lp;
+ int i;
+ static int did_version;
+ volatile unsigned short *ioaddr_probe;
+ unsigned short tmp1, tmp2;
+
+#ifdef CONFIG_SUN3
+ ioaddr = (unsigned long)ioremap(LANCE_OBIO, PAGE_SIZE);
+ if (!ioaddr)
+ return 0;
+#else
+ ioaddr = SUN3X_LANCE;
+#endif
+
+ /* test to see if there's really a lance here */
+	/* (CSR0_INIT shouldn't be readable) */
+
+ ioaddr_probe = (volatile unsigned short *)ioaddr;
+ tmp1 = ioaddr_probe[0];
+ tmp2 = ioaddr_probe[1];
+
+ ioaddr_probe[1] = CSR0;
+ ioaddr_probe[0] = CSR0_INIT | CSR0_STOP;
+
+ if(ioaddr_probe[0] != CSR0_STOP) {
+ ioaddr_probe[0] = tmp1;
+ ioaddr_probe[1] = tmp2;
+
+#ifdef CONFIG_SUN3
+ iounmap((void __iomem *)ioaddr);
+#endif
+ return 0;
+ }
+
+ lp = netdev_priv(dev);
+
+ /* XXX - leak? */
+ MEM = dvma_malloc_align(sizeof(struct lance_memory), 0x10000);
+ if (MEM == NULL) {
+#ifdef CONFIG_SUN3
+ iounmap((void __iomem *)ioaddr);
+#endif
+ printk(KERN_WARNING "SUN3 Lance couldn't allocate DVMA memory\n");
+ return 0;
+ }
+
+ lp->iobase = (volatile unsigned short *)ioaddr;
+ dev->base_addr = (unsigned long)ioaddr; /* informational only */
+
+ REGA(CSR0) = CSR0_STOP;
+
+ if (request_irq(LANCE_IRQ, lance_interrupt, 0, "SUN3 Lance", dev) < 0) {
+#ifdef CONFIG_SUN3
+ iounmap((void __iomem *)ioaddr);
+#endif
+ dvma_free((void *)MEM);
+ printk(KERN_WARNING "SUN3 Lance unable to allocate IRQ\n");
+ return 0;
+ }
+ dev->irq = (unsigned short)LANCE_IRQ;
+
+
+ printk("%s: SUN3 Lance at io %#lx, mem %#lx, irq %d, hwaddr ",
+ dev->name,
+ (unsigned long)ioaddr,
+ (unsigned long)MEM,
+ dev->irq);
+
+ /* copy in the ethernet address from the prom */
+ for(i = 0; i < 6 ; i++)
+ dev->dev_addr[i] = idprom->id_ethaddr[i];
+
+	/* tell the card its ether address, bytes swapped */
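+	/* (the chip fetches the init block as 16-bit words with the
+	 * opposite byte order from this big-endian machine, hence the
+	 * pairwise swap) */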
+ MEM->init.hwaddr[0] = dev->dev_addr[1];
+ MEM->init.hwaddr[1] = dev->dev_addr[0];
+ MEM->init.hwaddr[2] = dev->dev_addr[3];
+ MEM->init.hwaddr[3] = dev->dev_addr[2];
+ MEM->init.hwaddr[4] = dev->dev_addr[5];
+ MEM->init.hwaddr[5] = dev->dev_addr[4];
+
+ printk("%pM\n", dev->dev_addr);
+
+ MEM->init.mode = 0x0000;
+ MEM->init.filter[0] = 0x00000000;
+ MEM->init.filter[1] = 0x00000000;
+ MEM->init.rdra = dvma_vtob(MEM->rx_head);
+ MEM->init.rlen = (RX_LOG_RING_SIZE << 13) |
+ (dvma_vtob(MEM->rx_head) >> 16);
+ MEM->init.tdra = dvma_vtob(MEM->tx_head);
+ MEM->init.tlen = (TX_LOG_RING_SIZE << 13) |
+ (dvma_vtob(MEM->tx_head) >> 16);
+
+ DPRINTK(2, ("initaddr: %08lx rx_ring: %08lx tx_ring: %08lx\n",
+ dvma_vtob(&(MEM->init)), dvma_vtob(MEM->rx_head),
+ (dvma_vtob(MEM->tx_head))));
+
+ if (did_version++ == 0)
+ printk( version );
+
+ dev->netdev_ops = &lance_netdev_ops;
+// KLUDGE -- REMOVE ME
+ set_bit(__LINK_STATE_PRESENT, &dev->state);
+
+
+ return 1;
+}
+
+static int lance_open( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int i;
+
+ DPRINTK( 2, ( "%s: lance_open()\n", dev->name ));
+
+ REGA(CSR0) = CSR0_STOP;
+
+ lance_init_ring(dev);
+
+ /* From now on, AREG is kept to point to CSR0 */
+ REGA(CSR0) = CSR0_INIT;
+
+ i = 1000000;
+ while (--i > 0)
+ if (DREG & CSR0_IDON)
+ break;
+ if (i <= 0 || (DREG & CSR0_ERR)) {
+ DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
+ dev->name, i, DREG ));
+ DREG = CSR0_STOP;
+ return -EIO;
+ }
+
+ DREG = CSR0_IDON | CSR0_STRT | CSR0_INEA;
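+	/* IDON is write-one-to-clear (RC above), so this single write acks
+	 * init-done, starts the chip and enables interrupts. */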
+
+ netif_start_queue(dev);
+
+ DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));
+
+ return 0;
+}
+
+
+/* Initialize the LANCE Rx and Tx rings. */
+
+static void lance_init_ring( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int i;
+
+ lp->lock = 0;
+ lp->tx_full = 0;
+ lp->new_rx = lp->new_tx = 0;
+ lp->old_rx = lp->old_tx = 0;
+
+ for( i = 0; i < TX_RING_SIZE; i++ ) {
+ MEM->tx_head[i].base = dvma_vtob(MEM->tx_data[i]);
+ MEM->tx_head[i].flag = 0;
+ MEM->tx_head[i].base_hi =
+ (dvma_vtob(MEM->tx_data[i])) >>16;
+ MEM->tx_head[i].length = 0;
+ MEM->tx_head[i].misc = 0;
+ }
+
+ for( i = 0; i < RX_RING_SIZE; i++ ) {
+ MEM->rx_head[i].base = dvma_vtob(MEM->rx_data[i]);
+ MEM->rx_head[i].flag = RMD1_OWN_CHIP;
+ MEM->rx_head[i].base_hi =
+ (dvma_vtob(MEM->rx_data[i])) >> 16;
+ MEM->rx_head[i].buf_length = -PKT_BUF_SZ | 0xf000;
+ MEM->rx_head[i].msg_length = 0;
+ }
+
+	/* tell the card its ether address, bytes swapped */
+ MEM->init.hwaddr[0] = dev->dev_addr[1];
+ MEM->init.hwaddr[1] = dev->dev_addr[0];
+ MEM->init.hwaddr[2] = dev->dev_addr[3];
+ MEM->init.hwaddr[3] = dev->dev_addr[2];
+ MEM->init.hwaddr[4] = dev->dev_addr[5];
+ MEM->init.hwaddr[5] = dev->dev_addr[4];
+
+ MEM->init.mode = 0x0000;
+ MEM->init.filter[0] = 0x00000000;
+ MEM->init.filter[1] = 0x00000000;
+ MEM->init.rdra = dvma_vtob(MEM->rx_head);
+ MEM->init.rlen = (RX_LOG_RING_SIZE << 13) |
+ (dvma_vtob(MEM->rx_head) >> 16);
+ MEM->init.tdra = dvma_vtob(MEM->tx_head);
+ MEM->init.tlen = (TX_LOG_RING_SIZE << 13) |
+ (dvma_vtob(MEM->tx_head) >> 16);
+
+
+ /* tell the lance the address of its init block */
+ REGA(CSR1) = dvma_vtob(&(MEM->init));
+ REGA(CSR2) = dvma_vtob(&(MEM->init)) >> 16;
+
+#ifdef CONFIG_SUN3X
+ REGA(CSR3) = CSR3_BSWP | CSR3_ACON | CSR3_BCON;
+#else
+ REGA(CSR3) = CSR3_BSWP;
+#endif
+
+}
+
+
+static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int entry, len;
+ struct lance_tx_head *head;
+ unsigned long flags;
+
+ DPRINTK( 1, ( "%s: transmit start.\n",
+ dev->name));
+
+ /* Transmitter timeout, serious problems. */
+ if (netif_queue_stopped(dev)) {
+ int tickssofar = jiffies - dev_trans_start(dev);
+ if (tickssofar < HZ/5)
+ return NETDEV_TX_BUSY;
+
+ DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
+ dev->name, DREG ));
+ DREG = CSR0_STOP;
+ /*
+ * Always set BSWP after a STOP as STOP puts it back into
+ * little endian mode.
+ */
+ REGA(CSR3) = CSR3_BSWP;
+ dev->stats.tx_errors++;
+
+ if(lance_debug >= 2) {
+ int i;
+ printk("Ring data: old_tx %d new_tx %d%s new_rx %d\n",
+ lp->old_tx, lp->new_tx,
+ lp->tx_full ? " (full)" : "",
+ lp->new_rx );
+ for( i = 0 ; i < RX_RING_SIZE; i++ )
+ printk( "rx #%d: base=%04x blen=%04x mlen=%04x\n",
+ i, MEM->rx_head[i].base,
+ -MEM->rx_head[i].buf_length,
+ MEM->rx_head[i].msg_length);
+ for( i = 0 ; i < TX_RING_SIZE; i++ )
+ printk("tx #%d: base=%04x len=%04x misc=%04x\n",
+ i, MEM->tx_head[i].base,
+ -MEM->tx_head[i].length,
+ MEM->tx_head[i].misc );
+ }
+
+ lance_init_ring(dev);
+ REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
+
+ netif_start_queue(dev);
+
+ return NETDEV_TX_OK;
+ }
+
+
+	/* Block a timer-based transmit from overlapping with us by
+	   stopping the queue for a bit... */
+
+ netif_stop_queue(dev);
+
+ if (test_and_set_bit( 0, (void*)&lp->lock ) != 0) {
+ printk( "%s: tx queue lock!.\n", dev->name);
+ /* don't clear dev->tbusy flag. */
+ return NETDEV_TX_BUSY;
+ }
+
+ AREG = CSR0;
+ DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
+ dev->name, DREG ));
+
+#ifdef CONFIG_SUN3X
+ /* this weirdness doesn't appear on sun3... */
+ if(!(DREG & CSR0_INIT)) {
+ DPRINTK( 1, ("INIT not set, reinitializing...\n"));
+ REGA( CSR0 ) = CSR0_STOP;
+ lance_init_ring(dev);
+ REGA( CSR0 ) = CSR0_INIT | CSR0_STRT;
+ }
+#endif
+
+ /* Fill in a Tx ring entry */
+#if 0
+ if (lance_debug >= 2) {
+ printk( "%s: TX pkt %d type 0x%04x"
+ " from %s to %s"
+ " data at 0x%08x len %d\n",
+ dev->name, lp->new_tx, ((u_short *)skb->data)[6],
+ DEV_ADDR(&skb->data[6]), DEV_ADDR(skb->data),
+ (int)skb->data, (int)skb->len );
+ }
+#endif
+ /* We're not prepared for the int until the last flags are set/reset.
+ * And the int may happen already after setting the OWN_CHIP... */
+ local_irq_save(flags);
+
+ /* Mask to ring buffer boundary. */
+ entry = lp->new_tx;
+ head = &(MEM->tx_head[entry]);
+
+ /* Caution: the write order is important here, set the "ownership" bits
+ * last.
+ */
+
+	/* the sun3's lance needs its buffer padded to the minimum
+	   size */
+ len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
+
+// head->length = -len;
+ head->length = (-len) | 0xf000;
+ head->misc = 0;
+
+ skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len);
+ if (len != skb->len)
+ memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len);
+
+ head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
+ lp->new_tx = (lp->new_tx + 1) & TX_RING_MOD_MASK;
+ dev->stats.tx_bytes += skb->len;
+
+ /* Trigger an immediate send poll. */
+ REGA(CSR0) = CSR0_INEA | CSR0_TDMD | CSR0_STRT;
+ AREG = CSR0;
+ DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n",
+ dev->name, DREG ));
+ dev_kfree_skb(skb);
+
+ lp->lock = 0;
+ if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
+ TMD1_OWN_HOST)
+ netif_start_queue(dev);
+
+ local_irq_restore(flags);
+
+ return NETDEV_TX_OK;
+}
+
+/* The LANCE interrupt handler. */
+
+static irqreturn_t lance_interrupt( int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ int csr0;
+ static int in_interrupt;
+
+ if (dev == NULL) {
+ DPRINTK( 1, ( "lance_interrupt(): invalid dev_id\n" ));
+ return IRQ_NONE;
+ }
+
+ if (in_interrupt)
+ DPRINTK( 2, ( "%s: Re-entering the interrupt handler.\n", dev->name ));
+ in_interrupt = 1;
+
+ still_more:
+ flush_cache_all();
+
+ AREG = CSR0;
+ csr0 = DREG;
+
+ /* ack interrupts */
+ DREG = csr0 & (CSR0_TINT | CSR0_RINT | CSR0_IDON);
+
+ /* clear errors */
+ if(csr0 & CSR0_ERR)
+ DREG = CSR0_BABL | CSR0_MERR | CSR0_CERR | CSR0_MISS;
+
+
+ DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n",
+ dev->name, csr0, DREG ));
+
+ if (csr0 & CSR0_TINT) { /* Tx-done interrupt */
+ int old_tx = lp->old_tx;
+
+// if(lance_debug >= 3) {
+// int i;
+//
+// printk("%s: tx int\n", dev->name);
+//
+// for(i = 0; i < TX_RING_SIZE; i++)
+// printk("ring %d flag=%04x\n", i,
+// MEM->tx_head[i].flag);
+// }
+
+ while( old_tx != lp->new_tx) {
+ struct lance_tx_head *head = &(MEM->tx_head[old_tx]);
+
+ DPRINTK(3, ("on tx_ring %d\n", old_tx));
+
+ if (head->flag & TMD1_OWN_CHIP)
+ break; /* It still hasn't been Txed */
+
+ if (head->flag & TMD1_ERR) {
+ int status = head->misc;
+ dev->stats.tx_errors++;
+ if (status & TMD3_RTRY) dev->stats.tx_aborted_errors++;
+ if (status & TMD3_LCAR) dev->stats.tx_carrier_errors++;
+ if (status & TMD3_LCOL) dev->stats.tx_window_errors++;
+ if (status & (TMD3_UFLO | TMD3_BUFF)) {
+ dev->stats.tx_fifo_errors++;
+ printk("%s: Tx FIFO error\n",
+ dev->name);
+ REGA(CSR0) = CSR0_STOP;
+ REGA(CSR3) = CSR3_BSWP;
+ lance_init_ring(dev);
+ REGA(CSR0) = CSR0_STRT | CSR0_INEA;
+ return IRQ_HANDLED;
+ }
+ } else if(head->flag & (TMD1_ENP | TMD1_STP)) {
+
+ head->flag &= ~(TMD1_ENP | TMD1_STP);
+ if(head->flag & (TMD1_ONE | TMD1_MORE))
+ dev->stats.collisions++;
+
+ dev->stats.tx_packets++;
+ DPRINTK(3, ("cleared tx ring %d\n", old_tx));
+ }
+ old_tx = (old_tx +1) & TX_RING_MOD_MASK;
+ }
+
+ lp->old_tx = old_tx;
+ }
+
+
+ if (netif_queue_stopped(dev)) {
+ /* The ring is no longer full, clear tbusy. */
+ netif_start_queue(dev);
+ netif_wake_queue(dev);
+ }
+
+ if (csr0 & CSR0_RINT) /* Rx interrupt */
+ lance_rx( dev );
+
+ /* Log misc errors. */
+ if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */
+ if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */
+ if (csr0 & CSR0_MERR) {
+ DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), "
+ "status %04x.\n", dev->name, csr0 ));
+ /* Restart the chip. */
+ REGA(CSR0) = CSR0_STOP;
+ REGA(CSR3) = CSR3_BSWP;
+ lance_init_ring(dev);
+ REGA(CSR0) = CSR0_STRT | CSR0_INEA;
+ }
+
+
+ /* Clear any other interrupt, and set interrupt enable. */
+// DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR |
+// CSR0_IDON | CSR0_INEA;
+
+ REGA(CSR0) = CSR0_INEA;
+
+ if(DREG & (CSR0_RINT | CSR0_TINT)) {
+ DPRINTK(2, ("restarting interrupt, csr0=%#04x\n", DREG));
+ goto still_more;
+ }
+
+ DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n",
+ dev->name, DREG ));
+ in_interrupt = 0;
+ return IRQ_HANDLED;
+}
+
+/* get packet, toss into skbuff */
+static int lance_rx( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int entry = lp->new_rx;
+
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) {
+ struct lance_rx_head *head = &(MEM->rx_head[entry]);
+ int status = head->flag;
+
+ if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */
+ /* There is a tricky error noted by John Murphy,
+ <murf@perftech.com> to Russ Nelson: Even with
+ full-sized buffers it's possible for a jabber packet to use two
+ buffers, with only the last correctly noting the error. */
+ if (status & RMD1_ENP) /* Only count a general error at the */
+ dev->stats.rx_errors++; /* end of a packet.*/
+ if (status & RMD1_FRAM) dev->stats.rx_frame_errors++;
+ if (status & RMD1_OFLO) dev->stats.rx_over_errors++;
+ if (status & RMD1_CRC) dev->stats.rx_crc_errors++;
+ if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++;
+ head->flag &= (RMD1_ENP|RMD1_STP);
+ } else {
+ /* Malloc up new buffer, compatible with net-3. */
+// short pkt_len = head->msg_length;// & 0xfff;
+ short pkt_len = (head->msg_length & 0xfff) - 4;
+ struct sk_buff *skb;
+
+ if (pkt_len < 60) {
+ printk( "%s: Runt packet!\n", dev->name );
+ dev->stats.rx_errors++;
+ }
+ else {
+				skb = netdev_alloc_skb(dev, pkt_len + 2);
+				if (skb == NULL) {
+					dev->stats.rx_dropped++;
+					head->msg_length = 0;
+					head->flag |= RMD1_OWN_CHIP;
+					lp->new_rx = (lp->new_rx+1) &
+						RX_RING_MOD_MASK;
+					/* the buffer is back with the chip;
+					 * bail out before dereferencing the
+					 * NULL skb below */
+					break;
+				}
+
+#if 0
+ if (lance_debug >= 3) {
+ u_char *data = PKTBUF_ADDR(head);
+ printk("%s: RX pkt %d type 0x%04x"
+ " from %pM to %pM",
+ dev->name, lp->new_tx, ((u_short *)data)[6],
+ &data[6], data);
+
+ printk(" data %02x %02x %02x %02x %02x %02x %02x %02x "
+ "len %d at %08x\n",
+ data[15], data[16], data[17], data[18],
+ data[19], data[20], data[21], data[22],
+ pkt_len, data);
+ }
+#endif
+ if (lance_debug >= 3) {
+ u_char *data = PKTBUF_ADDR(head);
+ printk( "%s: RX pkt %d type 0x%04x len %d\n ", dev->name, entry, ((u_short *)data)[6], pkt_len);
+ }
+
+
+ skb_reserve( skb, 2 ); /* 16 byte align */
+ skb_put( skb, pkt_len ); /* Make room */
+ skb_copy_to_linear_data(skb,
+ PKTBUF_ADDR(head),
+ pkt_len);
+
+ skb->protocol = eth_type_trans( skb, dev );
+ netif_rx( skb );
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ }
+ }
+
+// head->buf_length = -PKT_BUF_SZ | 0xf000;
+ head->msg_length = 0;
+ head->flag = RMD1_OWN_CHIP;
+
+ entry = lp->new_rx = (lp->new_rx +1) & RX_RING_MOD_MASK;
+ }
+
+ /* From lance.c (Donald Becker): */
+ /* We should check that at least two ring entries are free.
+ If not, we should free one and mark stats->rx_dropped++. */
+
+ return 0;
+}
+
+
+static int lance_close( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ AREG = CSR0;
+
+ DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n",
+ dev->name, DREG ));
+
+ /* We stop the LANCE here -- it occasionally polls
+ memory if we don't. */
+ DREG = CSR0_STOP;
+ return 0;
+}
+
+
+/* Set or clear the multicast filter for this adaptor.
+ num_addrs == -1 Promiscuous mode, receive all packets
+ num_addrs == 0 Normal mode, clear multicast list
+ num_addrs > 0 Multicast mode, receive normal and MC packets, and do
+ best-effort filtering.
+ */
+
+/* completely untested on a sun3 */
+static void set_multicast_list( struct net_device *dev )
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ if(netif_queue_stopped(dev))
+ /* Only possible if board is already started */
+ return;
+
+ /* We take the simple way out and always enable promiscuous mode. */
+ DREG = CSR0_STOP; /* Temporarily stop the lance. */
+
+ if (dev->flags & IFF_PROMISC) {
+ /* Log any net taps. */
+ DPRINTK( 3, ( "%s: Promiscuous mode enabled.\n", dev->name ));
+ REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
+ } else {
+ short multicast_table[4];
+ int num_addrs = netdev_mc_count(dev);
+ int i;
+ /* We don't use the multicast table, but rely on upper-layer
+ * filtering. */
+ memset( multicast_table, (num_addrs == 0) ? 0 : -1,
+ sizeof(multicast_table) );
+ for( i = 0; i < 4; i++ )
+ REGA( CSR8+i ) = multicast_table[i];
+ REGA( CSR15 ) = 0; /* Unset promiscuous mode */
+ }
+
+ /*
+ * Always set BSWP after a STOP as STOP puts it back into
+ * little endian mode.
+ */
+ REGA( CSR3 ) = CSR3_BSWP;
+
+ /* Resume normal operation and reset AREG to CSR0 */
+ REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT;
+}
+
+
+#ifdef MODULE
+
+static struct net_device *sun3lance_dev;
+
+int __init init_module(void)
+{
+ sun3lance_dev = sun3lance_probe(-1);
+ return PTR_ERR_OR_ZERO(sun3lance_dev);
+}
+
+void __exit cleanup_module(void)
+{
+ unregister_netdev(sun3lance_dev);
+#ifdef CONFIG_SUN3
+ iounmap((void __iomem *)sun3lance_dev->base_addr);
+#endif
+ free_netdev(sun3lance_dev);
+}
+
+#endif /* MODULE */
+
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
new file mode 100644
index 000000000..7847638bd
--- /dev/null
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -0,0 +1,1533 @@
+/* $Id: sunlance.c,v 1.112 2002/01/15 06:48:55 davem Exp $
+ * lance.c: Linux/Sparc/Lance driver
+ *
+ * Written 1995, 1996 by Miguel de Icaza
+ * Sources:
+ * The Linux depca driver
+ * The Linux lance driver.
+ * The Linux skeleton driver.
+ * The NetBSD Sparc/Lance driver.
+ * Theo de Raadt (deraadt@openbsd.org)
+ * NCR92C990 Lan Controller manual
+ *
+ * 1.4:
+ * Added support to run with a ledma on the Sun4m
+ *
+ * 1.5:
+ * Added multiple card detection.
+ *
+ * 4/17/96: Burst sizes and tpe selection on sun4m by Eddie C. Dost
+ * (ecd@skynet.be)
+ *
+ * 5/15/96: auto carrier detection on sun4m by Eddie C. Dost
+ * (ecd@skynet.be)
+ *
+ * 5/17/96: lebuffer on scsi/ether cards now work David S. Miller
+ * (davem@caip.rutgers.edu)
+ *
+ * 5/29/96: override option 'tpe-link-test?', if it is 'false', as
+ * this disables auto carrier detection on sun4m. Eddie C. Dost
+ * (ecd@skynet.be)
+ *
+ * 1.7:
+ * 6/26/96: Bug fix for multiple ledmas, miguel.
+ *
+ * 1.8:
+ * Stole multicast code from depca.c, fixed lance_tx.
+ *
+ * 1.9:
+ * 8/21/96: Fixed the multicast code (Pedro Roque)
+ *
+ * 8/28/96: Send fake packet in lance_open() if auto_select is true,
+ * so we can detect the carrier loss condition in time.
+ * Eddie C. Dost (ecd@skynet.be)
+ *
+ * 9/15/96: Align rx_buf so that eth_copy_and_sum() won't cause an
+ * MNA trap during chksum_partial_copy(). (ecd@skynet.be)
+ *
+ * 11/17/96: Handle LE_C0_MERR in lance_interrupt(). (ecd@skynet.be)
+ *
+ * 12/22/96: Don't loop forever in lance_rx() on incomplete packets.
+ * This was the sun4c killer. Shit, stupid bug.
+ * (ecd@skynet.be)
+ *
+ * 1.10:
+ * 1/26/97: Modularize driver. (ecd@skynet.be)
+ *
+ * 1.11:
+ * 12/27/97: Added sun4d support. (jj@sunsite.mff.cuni.cz)
+ *
+ * 1.12:
+ * 11/3/99: Fixed SMP race in lance_start_xmit found by davem.
+ * Anton Blanchard (anton@progsoc.uts.edu.au)
+ * 2.00: 11/9/99: Massive overhaul and port to new SBUS driver interfaces.
+ * David S. Miller (davem@redhat.com)
+ * 2.01:
+ * 11/08/01: Use library crc32 functions (Matt_Domsch@dell.com)
+ *
+ */
+
+#undef DEBUG_DRIVER
+
+static char lancestr[] = "LANCE";
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/errno.h>
+#include <linux/socket.h> /* Used for the temporal inet entries and routing */
+#include <linux/route.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/gfp.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/pgtable.h>
+#include <asm/byteorder.h> /* Used by the checksum routines */
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#include <asm/auxio.h> /* For tpe-link-test? setting */
+#include <asm/irq.h>
+
+#define DRV_NAME "sunlance"
+#define DRV_VERSION "2.02"
+#define DRV_RELDATE "8/24/03"
+#define DRV_AUTHOR "Miguel de Icaza (miguel@nuclecu.unam.mx)"
+
+static char version[] =
+ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION("Sun Lance ethernet driver");
+MODULE_LICENSE("GPL");
+
+/* Define: 2^4 Tx buffers and 2^4 Rx buffers */
+#ifndef LANCE_LOG_TX_BUFFERS
+#define LANCE_LOG_TX_BUFFERS 4
+#define LANCE_LOG_RX_BUFFERS 4
+#endif
+
+#define LE_CSR0 0
+#define LE_CSR1 1
+#define LE_CSR2 2
+#define LE_CSR3 3
+
+#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */
+
+#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */
+#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */
+#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */
+#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */
+#define LE_C0_MERR 0x0800 /* ME: Memory error */
+#define LE_C0_RINT 0x0400 /* Received interrupt */
+#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */
+#define LE_C0_IDON 0x0100 /* IFIN: Init finished. */
+#define LE_C0_INTR 0x0080 /* Interrupt or error */
+#define LE_C0_INEA 0x0040 /* Interrupt enable */
+#define LE_C0_RXON 0x0020 /* Receiver on */
+#define LE_C0_TXON 0x0010 /* Transmitter on */
+#define LE_C0_TDMD 0x0008 /* Transmitter demand */
+#define LE_C0_STOP 0x0004 /* Stop the card */
+#define LE_C0_STRT 0x0002 /* Start the card */
+#define LE_C0_INIT 0x0001 /* Init the card */
+
+#define LE_C3_BSWP 0x4 /* SWAP */
+#define LE_C3_ACON 0x2 /* ALE Control */
+#define LE_C3_BCON 0x1 /* Byte control */
+
+/* Receive message descriptor 1 */
+#define LE_R1_OWN 0x80 /* Who owns the entry */
+#define LE_R1_ERR 0x40 /* Error: if FRA, OFL, CRC or BUF is set */
+#define LE_R1_FRA 0x20 /* FRA: Frame error */
+#define LE_R1_OFL 0x10 /* OFL: Frame overflow */
+#define LE_R1_CRC 0x08 /* CRC error */
+#define LE_R1_BUF 0x04 /* BUF: Buffer error */
+#define LE_R1_SOP 0x02 /* Start of packet */
+#define LE_R1_EOP 0x01 /* End of packet */
+#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+#define LE_T1_OWN 0x80 /* Lance owns the packet */
+#define LE_T1_ERR 0x40 /* Error summary */
+#define LE_T1_EMORE 0x10 /* Error: more than one retry needed */
+#define LE_T1_EONE 0x08 /* Error: one retry needed */
+#define LE_T1_EDEF 0x04 /* Error: deferred */
+#define LE_T1_SOP 0x02 /* Start of packet */
+#define LE_T1_EOP 0x01 /* End of packet */
+#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
+
+#define LE_T3_BUF 0x8000 /* Buffer error */
+#define LE_T3_UFL 0x4000 /* Error underflow */
+#define LE_T3_LCOL 0x1000 /* Error late collision */
+#define LE_T3_CLOS 0x0800 /* Error carrier loss */
+#define LE_T3_RTY 0x0400 /* Error retry */
+#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */
+
+#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
+#define TX_NEXT(__x) (((__x)+1) & TX_RING_MOD_MASK)
+
+#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
+#define RX_NEXT(__x) (((__x)+1) & RX_RING_MOD_MASK)
+
+#define PKT_BUF_SZ 1544
+#define RX_BUFF_SIZE PKT_BUF_SZ
+#define TX_BUFF_SIZE PKT_BUF_SZ
+
+struct lance_rx_desc {
+ u16 rmd0; /* low address of packet */
+ u8 rmd1_bits; /* descriptor bits */
+ u8 rmd1_hadr; /* high address of packet */
+ s16 length; /* This length is 2s complement (negative)!
+ * Buffer length
+ */
+ u16 mblength; /* This is the actual number of bytes received */
+};
+
+struct lance_tx_desc {
+ u16 tmd0; /* low address of packet */
+ u8 tmd1_bits; /* descriptor bits */
+ u8 tmd1_hadr; /* high address of packet */
+ s16 length; /* Length is 2s complement (negative)! */
+ u16 misc;
+};
+
+/* The LANCE initialization block, described in databook. */
+/* On the Sparc, this block should be on a DMA region */
+struct lance_init_block {
+ u16 mode; /* Pre-set mode (reg. 15) */
+ u8 phys_addr[6]; /* Physical ethernet address */
+ u32 filter[2]; /* Multicast filter. */
+
+ /* Receive and transmit ring base, along with extra bits. */
+ u16 rx_ptr; /* receive descriptor addr */
+ u16 rx_len; /* receive len and high addr */
+ u16 tx_ptr; /* transmit descriptor addr */
+ u16 tx_len; /* transmit len and high addr */
+
+	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
+ struct lance_rx_desc brx_ring[RX_RING_SIZE];
+ struct lance_tx_desc btx_ring[TX_RING_SIZE];
+
+ u8 tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
+ u8 pad[2]; /* align rx_buf for copy_and_sum(). */
+ u8 rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
+};
+
+#define libdesc_offset(rt, elem) \
+((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem])))))
+
+#define libbuff_offset(rt, elem) \
+((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem][0])))))
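+
+/* These macros are offsetof() spelled out by hand: the byte offset of a
+ * ring descriptor or packet buffer within the init block, later combined
+ * with the block's base to form the 24-bit addresses the LANCE uses. */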
+
+struct lance_private {
+ void __iomem *lregs; /* Lance RAP/RDP regs. */
+ void __iomem *dregs; /* DMA controller regs. */
+ struct lance_init_block __iomem *init_block_iomem;
+ struct lance_init_block *init_block_mem;
+
+ spinlock_t lock;
+
+ int rx_new, tx_new;
+ int rx_old, tx_old;
+
+ struct platform_device *ledma; /* If set this points to ledma */
+ char tpe; /* cable-selection is TPE */
+ char auto_select; /* cable-selection by carrier */
+ char burst_sizes; /* ledma SBus burst sizes */
+ char pio_buffer; /* init block in PIO space? */
+
+ unsigned short busmaster_regval;
+
+ void (*init_ring)(struct net_device *);
+ void (*rx)(struct net_device *);
+ void (*tx)(struct net_device *);
+
+ char *name;
+ dma_addr_t init_block_dvma;
+ struct net_device *dev; /* Backpointer */
+ struct platform_device *op;
+ struct platform_device *lebuffer;
+ struct timer_list multicast_timer;
+};
+
+#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
+ lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
+ lp->tx_old - lp->tx_new-1)
+
+/* Lance registers. */
+#define RDP 0x00UL /* register data port */
+#define RAP 0x02UL /* register address port */
+#define LANCE_REG_SIZE 0x04UL
+
+#define STOP_LANCE(__lp) \
+do { void __iomem *__base = (__lp)->lregs; \
+ sbus_writew(LE_CSR0, __base + RAP); \
+ sbus_writew(LE_C0_STOP, __base + RDP); \
+} while (0)
+
+int sparc_lance_debug = 2;
+
+/* The Lance uses 24 bit addresses */
+/* On the Sun4c the DVMA will provide the remaining bytes for us */
+/* On the Sun4m we have to instruct the ledma to provide them */
+/* Even worse, on scsi/ether SBUS cards, the init block and the
+ * transmit/receive buffers are addresses as offsets from absolute
+ * zero on the lebuffer PIO area. -DaveM
+ */
+
+#define LANCE_ADDR(x) ((long)(x) & ~0xff000000)
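+
+/* Only the low 24 bits reach the LANCE's address pins; the masked-off
+ * high byte is supplied by the DVMA/ledma as described above. */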
+
+/* Load the CSR registers */
+static void load_csrs(struct lance_private *lp)
+{
+ u32 leptr;
+
+ if (lp->pio_buffer)
+ leptr = 0;
+ else
+ leptr = LANCE_ADDR(lp->init_block_dvma);
+
+ sbus_writew(LE_CSR1, lp->lregs + RAP);
+ sbus_writew(leptr & 0xffff, lp->lregs + RDP);
+ sbus_writew(LE_CSR2, lp->lregs + RAP);
+ sbus_writew(leptr >> 16, lp->lregs + RDP);
+ sbus_writew(LE_CSR3, lp->lregs + RAP);
+ sbus_writew(lp->busmaster_regval, lp->lregs + RDP);
+
+ /* Point back to csr0 */
+ sbus_writew(LE_CSR0, lp->lregs + RAP);
+}
+
+/* Setup the Lance Rx and Tx rings */
+static void lance_init_ring_dvma(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block *ib = lp->init_block_mem;
+ dma_addr_t aib = lp->init_block_dvma;
+ __u32 leptr;
+ int i;
+
+ /* Lock out other processes while setting up hardware */
+ netif_stop_queue(dev);
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ /* Copy the ethernet address to the lance init block
+ * Note that on the sparc you need to swap the ethernet address.
+ */
+ ib->phys_addr [0] = dev->dev_addr [1];
+ ib->phys_addr [1] = dev->dev_addr [0];
+ ib->phys_addr [2] = dev->dev_addr [3];
+ ib->phys_addr [3] = dev->dev_addr [2];
+ ib->phys_addr [4] = dev->dev_addr [5];
+ ib->phys_addr [5] = dev->dev_addr [4];
+
+ /* Setup the Tx ring entries */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i));
+ ib->btx_ring [i].tmd0 = leptr;
+ ib->btx_ring [i].tmd1_hadr = leptr >> 16;
+ ib->btx_ring [i].tmd1_bits = 0;
+ ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
+ ib->btx_ring [i].misc = 0;
+ }
+
+ /* Setup the Rx ring entries */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ leptr = LANCE_ADDR(aib + libbuff_offset(rx_buf, i));
+
+ ib->brx_ring [i].rmd0 = leptr;
+ ib->brx_ring [i].rmd1_hadr = leptr >> 16;
+ ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
+ ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
+ ib->brx_ring [i].mblength = 0;
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = LANCE_ADDR(aib + libdesc_offset(brx_ring, 0));
+ ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16);
+ ib->rx_ptr = leptr;
+
+ /* Setup tx descriptor pointer */
+ leptr = LANCE_ADDR(aib + libdesc_offset(btx_ring, 0));
+ ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16);
+ ib->tx_ptr = leptr;
+}
+
+static void lance_init_ring_pio(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ u32 leptr;
+ int i;
+
+ /* Lock out other processes while setting up hardware */
+ netif_stop_queue(dev);
+ lp->rx_new = lp->tx_new = 0;
+ lp->rx_old = lp->tx_old = 0;
+
+ /* Copy the ethernet address to the lance init block
+ * Note that on the sparc you need to swap the ethernet address.
+ */
+ sbus_writeb(dev->dev_addr[1], &ib->phys_addr[0]);
+ sbus_writeb(dev->dev_addr[0], &ib->phys_addr[1]);
+ sbus_writeb(dev->dev_addr[3], &ib->phys_addr[2]);
+ sbus_writeb(dev->dev_addr[2], &ib->phys_addr[3]);
+ sbus_writeb(dev->dev_addr[5], &ib->phys_addr[4]);
+ sbus_writeb(dev->dev_addr[4], &ib->phys_addr[5]);
+
+ /* Setup the Tx ring entries */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ leptr = libbuff_offset(tx_buf, i);
+ sbus_writew(leptr, &ib->btx_ring [i].tmd0);
+ sbus_writeb(leptr >> 16,&ib->btx_ring [i].tmd1_hadr);
+ sbus_writeb(0, &ib->btx_ring [i].tmd1_bits);
+
+ /* The ones required by tmd2 */
+ sbus_writew(0xf000, &ib->btx_ring [i].length);
+ sbus_writew(0, &ib->btx_ring [i].misc);
+ }
+
+ /* Setup the Rx ring entries */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ leptr = libbuff_offset(rx_buf, i);
+
+ sbus_writew(leptr, &ib->brx_ring [i].rmd0);
+		sbus_writeb(leptr >> 16, &ib->brx_ring [i].rmd1_hadr);
+		sbus_writeb(LE_R1_OWN, &ib->brx_ring [i].rmd1_bits);
+		sbus_writew(-RX_BUFF_SIZE | 0xf000,
+ &ib->brx_ring [i].length);
+ sbus_writew(0, &ib->brx_ring [i].mblength);
+ }
+
+ /* Setup the initialization block */
+
+ /* Setup rx descriptor pointer */
+ leptr = libdesc_offset(brx_ring, 0);
+ sbus_writew((LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16),
+ &ib->rx_len);
+ sbus_writew(leptr, &ib->rx_ptr);
+
+ /* Setup tx descriptor pointer */
+ leptr = libdesc_offset(btx_ring, 0);
+ sbus_writew((LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16),
+ &ib->tx_len);
+ sbus_writew(leptr, &ib->tx_ptr);
+}
+
+static void init_restart_ledma(struct lance_private *lp)
+{
+ u32 csr = sbus_readl(lp->dregs + DMA_CSR);
+
+ if (!(csr & DMA_HNDL_ERROR)) {
+ /* E-Cache draining */
+ while (sbus_readl(lp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN)
+ barrier();
+ }
+
+ csr = sbus_readl(lp->dregs + DMA_CSR);
+ csr &= ~DMA_E_BURSTS;
+ if (lp->burst_sizes & DMA_BURST32)
+ csr |= DMA_E_BURST32;
+ else
+ csr |= DMA_E_BURST16;
+
+ csr |= (DMA_DSBL_RD_DRN | DMA_DSBL_WR_INV | DMA_FIFO_INV);
+
+ if (lp->tpe)
+ csr |= DMA_EN_ENETAUI;
+ else
+ csr &= ~DMA_EN_ENETAUI;
+ udelay(20);
+ sbus_writel(csr, lp->dregs + DMA_CSR);
+ udelay(200);
+}
+
+static int init_restart_lance(struct lance_private *lp)
+{
+ u16 regval = 0;
+ int i;
+
+ if (lp->dregs)
+ init_restart_ledma(lp);
+
+ sbus_writew(LE_CSR0, lp->lregs + RAP);
+ sbus_writew(LE_C0_INIT, lp->lregs + RDP);
+
+ /* Wait for the lance to complete initialization */
+ for (i = 0; i < 100; i++) {
+ regval = sbus_readw(lp->lregs + RDP);
+
+ if (regval & (LE_C0_ERR | LE_C0_IDON))
+ break;
+ barrier();
+ }
+ if (i == 100 || (regval & LE_C0_ERR)) {
+ printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n",
+ i, regval);
+ if (lp->dregs)
+ printk("dcsr=%8.8x\n", sbus_readl(lp->dregs + DMA_CSR));
+ return -1;
+ }
+
+ /* Clear IDON by writing a "1", enable interrupts and start lance */
+ sbus_writew(LE_C0_IDON, lp->lregs + RDP);
+ sbus_writew(LE_C0_INEA | LE_C0_STRT, lp->lregs + RDP);
+
+ if (lp->dregs) {
+ u32 csr = sbus_readl(lp->dregs + DMA_CSR);
+
+ csr |= DMA_INT_ENAB;
+ sbus_writel(csr, lp->dregs + DMA_CSR);
+ }
+
+ return 0;
+}
+
+static void lance_rx_dvma(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block *ib = lp->init_block_mem;
+ struct lance_rx_desc *rd;
+ u8 bits;
+ int len, entry = lp->rx_new;
+ struct sk_buff *skb;
+
+ for (rd = &ib->brx_ring [entry];
+ !((bits = rd->rmd1_bits) & LE_R1_OWN);
+ rd = &ib->brx_ring [entry]) {
+
+		/* Did we get an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP) dev->stats.rx_errors++;
+ } else {
+ len = (rd->mblength & 0xfff) - 4;
+ skb = netdev_alloc_skb(dev, len + 2);
+
+ if (skb == NULL) {
+ dev->stats.rx_dropped++;
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ lp->rx_new = RX_NEXT(entry);
+ return;
+ }
+
+ dev->stats.rx_bytes += len;
+
+ skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+ skb_copy_to_linear_data(skb,
+ (unsigned char *)&(ib->rx_buf [entry][0]),
+ len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ }
+
+ /* Return the packet to the pool */
+ rd->mblength = 0;
+ rd->rmd1_bits = LE_R1_OWN;
+ entry = RX_NEXT(entry);
+ }
+
+ lp->rx_new = entry;
+}
+
+static void lance_tx_dvma(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block *ib = lp->init_block_mem;
+ int i, j;
+
+ spin_lock(&lp->lock);
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ struct lance_tx_desc *td = &ib->btx_ring [i];
+ u8 bits = td->tmd1_bits;
+
+ /* If we hit a packet not owned by us, stop */
+ if (bits & LE_T1_OWN)
+ break;
+
+ if (bits & LE_T1_ERR) {
+ u16 status = td->misc;
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n",
+					       dev->name, lp->tpe ? "TPE" : "AUI");
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ }
+
+ /* Buffer errors and underflows turn off the
+			 * transmitter, so restart the adapter.
+ */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ } else if ((bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ td->tmd1_bits = bits & ~(LE_T1_POK);
+
+ /* One collision before packet was sent. */
+ if (bits & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (bits & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+
+ j = TX_NEXT(j);
+ }
+ lp->tx_old = j;
+out:
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL > 0)
+ netif_wake_queue(dev);
+
+ spin_unlock(&lp->lock);
+}
+
+static void lance_piocopy_to_skb(struct sk_buff *skb, void __iomem *piobuf, int len)
+{
+ u16 *p16 = (u16 *) skb->data;
+ u32 *p32;
+ u8 *p8;
+ void __iomem *pbuf = piobuf;
+
+	/* We know here that both src and dest are on a 16-bit boundary. */
+ *p16++ = sbus_readw(pbuf);
+ p32 = (u32 *) p16;
+ pbuf += 2;
+ len -= 2;
+
+ while (len >= 4) {
+ *p32++ = sbus_readl(pbuf);
+ pbuf += 4;
+ len -= 4;
+ }
+ p8 = (u8 *) p32;
+ if (len >= 2) {
+ p16 = (u16 *) p32;
+ *p16++ = sbus_readw(pbuf);
+ pbuf += 2;
+ len -= 2;
+ p8 = (u8 *) p16;
+ }
+ if (len >= 1)
+ *p8 = sbus_readb(pbuf);
+}
+
+static void lance_rx_pio(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ struct lance_rx_desc __iomem *rd;
+ unsigned char bits;
+ int len, entry;
+ struct sk_buff *skb;
+
+ entry = lp->rx_new;
+ for (rd = &ib->brx_ring [entry];
+ !((bits = sbus_readb(&rd->rmd1_bits)) & LE_R1_OWN);
+ rd = &ib->brx_ring [entry]) {
+
+		/* Did we get an incomplete frame? */
+ if ((bits & LE_R1_POK) != LE_R1_POK) {
+ dev->stats.rx_over_errors++;
+ dev->stats.rx_errors++;
+ } else if (bits & LE_R1_ERR) {
+ /* Count only the end frame as a rx error,
+ * not the beginning
+ */
+ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
+ if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
+ if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
+ if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
+ if (bits & LE_R1_EOP) dev->stats.rx_errors++;
+ } else {
+ len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
+ skb = netdev_alloc_skb(dev, len + 2);
+
+ if (skb == NULL) {
+ dev->stats.rx_dropped++;
+ sbus_writew(0, &rd->mblength);
+ sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
+ lp->rx_new = RX_NEXT(entry);
+ return;
+ }
+
+ dev->stats.rx_bytes += len;
+
+			skb_reserve(skb, 2); /* 16 byte align */
+ skb_put(skb, len); /* make room */
+ lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ }
+
+ /* Return the packet to the pool */
+ sbus_writew(0, &rd->mblength);
+ sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
+ entry = RX_NEXT(entry);
+ }
+
+ lp->rx_new = entry;
+}
+
+static void lance_tx_pio(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ int i, j;
+
+ spin_lock(&lp->lock);
+
+ j = lp->tx_old;
+ for (i = j; i != lp->tx_new; i = j) {
+ struct lance_tx_desc __iomem *td = &ib->btx_ring [i];
+ u8 bits = sbus_readb(&td->tmd1_bits);
+
+ /* If we hit a packet not owned by us, stop */
+ if (bits & LE_T1_OWN)
+ break;
+
+ if (bits & LE_T1_ERR) {
+ u16 status = sbus_readw(&td->misc);
+
+ dev->stats.tx_errors++;
+ if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
+ if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
+
+ if (status & LE_T3_CLOS) {
+ dev->stats.tx_carrier_errors++;
+ if (lp->auto_select) {
+ lp->tpe = 1 - lp->tpe;
+ printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n",
+					       dev->name, lp->tpe ? "TPE" : "AUI");
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ }
+
+ /* Buffer errors and underflows turn off the
+			 * transmitter, so restart the adapter.
+ */
+ if (status & (LE_T3_BUF|LE_T3_UFL)) {
+ dev->stats.tx_fifo_errors++;
+
+ printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
+ dev->name);
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ goto out;
+ }
+ } else if ((bits & LE_T1_POK) == LE_T1_POK) {
+ /*
+ * So we don't count the packet more than once.
+ */
+ sbus_writeb(bits & ~(LE_T1_POK), &td->tmd1_bits);
+
+ /* One collision before packet was sent. */
+ if (bits & LE_T1_EONE)
+ dev->stats.collisions++;
+
+ /* More than one collision, be optimistic. */
+ if (bits & LE_T1_EMORE)
+ dev->stats.collisions += 2;
+
+ dev->stats.tx_packets++;
+ }
+
+ j = TX_NEXT(j);
+ }
+ lp->tx_old = j;
+
+ if (netif_queue_stopped(dev) &&
+ TX_BUFFS_AVAIL > 0)
+ netif_wake_queue(dev);
+out:
+ spin_unlock(&lp->lock);
+}
+
+static irqreturn_t lance_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct lance_private *lp = netdev_priv(dev);
+ int csr0;
+
+ sbus_writew(LE_CSR0, lp->lregs + RAP);
+ csr0 = sbus_readw(lp->lregs + RDP);
+
+ /* Acknowledge all the interrupt sources ASAP */
+ sbus_writew(csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT),
+ lp->lregs + RDP);
+
+ if ((csr0 & LE_C0_ERR) != 0) {
+ /* Clear the error condition */
+ sbus_writew((LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
+ LE_C0_CERR | LE_C0_MERR),
+ lp->lregs + RDP);
+ }
+
+ if (csr0 & LE_C0_RINT)
+ lp->rx(dev);
+
+ if (csr0 & LE_C0_TINT)
+ lp->tx(dev);
+
+ if (csr0 & LE_C0_BABL)
+ dev->stats.tx_errors++;
+
+ if (csr0 & LE_C0_MISS)
+ dev->stats.rx_errors++;
+
+ if (csr0 & LE_C0_MERR) {
+ if (lp->dregs) {
+ u32 addr = sbus_readl(lp->dregs + DMA_ADDR);
+
+ printk(KERN_ERR "%s: Memory error, status %04x, addr %06x\n",
+ dev->name, csr0, addr & 0xffffff);
+ } else {
+ printk(KERN_ERR "%s: Memory error, status %04x\n",
+ dev->name, csr0);
+ }
+
+ sbus_writew(LE_C0_STOP, lp->lregs + RDP);
+
+ if (lp->dregs) {
+ u32 dma_csr = sbus_readl(lp->dregs + DMA_CSR);
+
+ dma_csr |= DMA_FIFO_INV;
+ sbus_writel(dma_csr, lp->dregs + DMA_CSR);
+ }
+
+ lp->init_ring(dev);
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+ }
+
+ sbus_writew(LE_C0_INEA, lp->lregs + RDP);
+
+ return IRQ_HANDLED;
+}
+
+/* Build a fake network packet and send it to ourselves. */
+static void build_fake_packet(struct lance_private *lp)
+{
+ struct net_device *dev = lp->dev;
+ int i, entry;
+
+ entry = lp->tx_new & TX_RING_MOD_MASK;
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ u16 __iomem *packet = (u16 __iomem *) &(ib->tx_buf[entry][0]);
+ struct ethhdr __iomem *eth = (struct ethhdr __iomem *) packet;
+ for (i = 0; i < (ETH_ZLEN / sizeof(u16)); i++)
+ sbus_writew(0, &packet[i]);
+ for (i = 0; i < 6; i++) {
+ sbus_writeb(dev->dev_addr[i], &eth->h_dest[i]);
+ sbus_writeb(dev->dev_addr[i], &eth->h_source[i]);
+ }
+ sbus_writew((-ETH_ZLEN) | 0xf000, &ib->btx_ring[entry].length);
+ sbus_writew(0, &ib->btx_ring[entry].misc);
+ sbus_writeb(LE_T1_POK|LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ u16 *packet = (u16 *) &(ib->tx_buf[entry][0]);
+ struct ethhdr *eth = (struct ethhdr *) packet;
+ memset(packet, 0, ETH_ZLEN);
+ for (i = 0; i < 6; i++) {
+ eth->h_dest[i] = dev->dev_addr[i];
+ eth->h_source[i] = dev->dev_addr[i];
+ }
+ ib->btx_ring[entry].length = (-ETH_ZLEN) | 0xf000;
+ ib->btx_ring[entry].misc = 0;
+ ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
+ }
+ lp->tx_new = TX_NEXT(entry);
+}
+
+static int lance_open(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int status = 0;
+
+ STOP_LANCE(lp);
+
+ if (request_irq(dev->irq, lance_interrupt, IRQF_SHARED,
+ lancestr, (void *) dev)) {
+ printk(KERN_ERR "Lance: Can't get irq %d\n", dev->irq);
+ return -EAGAIN;
+ }
+
+	/* On the 4m, set up the ledma to provide the upper bits for buffers */
+ if (lp->dregs) {
+ u32 regval = lp->init_block_dvma & 0xff000000;
+
+ sbus_writel(regval, lp->dregs + DMA_TEST);
+ }
+
+ /* Set mode and clear multicast filter only at device open,
+ * so that lance_init_ring() called at any error will not
+ * forget multicast filters.
+ *
+	 * BTW it is a common bug in all lance drivers! --ANK
+ */
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ sbus_writew(0, &ib->mode);
+ sbus_writel(0, &ib->filter[0]);
+ sbus_writel(0, &ib->filter[1]);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ ib->mode = 0;
+ ib->filter [0] = 0;
+ ib->filter [1] = 0;
+ }
+
+ lp->init_ring(dev);
+ load_csrs(lp);
+
+ netif_start_queue(dev);
+
+ status = init_restart_lance(lp);
+ if (!status && lp->auto_select) {
+ build_fake_packet(lp);
+ sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP);
+ }
+
+ return status;
+}
+
+static int lance_close(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->multicast_timer);
+
+ STOP_LANCE(lp);
+
+ free_irq(dev->irq, (void *) dev);
+ return 0;
+}
+
+static int lance_reset(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int status;
+
+ STOP_LANCE(lp);
+
+ /* On the 4m, reset the dma too */
+ if (lp->dregs) {
+ u32 csr, addr;
+
+ printk(KERN_ERR "resetting ledma\n");
+ csr = sbus_readl(lp->dregs + DMA_CSR);
+ sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR);
+ udelay(200);
+ sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR);
+
+ addr = lp->init_block_dvma & 0xff000000;
+ sbus_writel(addr, lp->dregs + DMA_TEST);
+ }
+ lp->init_ring(dev);
+ load_csrs(lp);
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ status = init_restart_lance(lp);
+ return status;
+}
+
+static void lance_piocopy_from_skb(void __iomem *dest, unsigned char *src, int len)
+{
+ void __iomem *piobuf = dest;
+ u32 *p32;
+ u16 *p16;
+ u8 *p8;
+
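+	/* Dispatch on the source buffer's alignment so the bulk of the
+	 * copy can go out as 32-bit PIO writes; the unaligned cases
+	 * assemble each big-endian word by hand first.
+	 */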
+ switch ((unsigned long)src & 0x3) {
+ case 0:
+ p32 = (u32 *) src;
+ while (len >= 4) {
+ sbus_writel(*p32, piobuf);
+ p32++;
+ piobuf += 4;
+ len -= 4;
+ }
+ src = (char *) p32;
+ break;
+ case 1:
+ case 3:
+ p8 = (u8 *) src;
+ while (len >= 4) {
+ u32 val;
+
+ val = p8[0] << 24;
+ val |= p8[1] << 16;
+ val |= p8[2] << 8;
+ val |= p8[3];
+ sbus_writel(val, piobuf);
+ p8 += 4;
+ piobuf += 4;
+ len -= 4;
+ }
+ src = (char *) p8;
+ break;
+ case 2:
+ p16 = (u16 *) src;
+ while (len >= 4) {
+ u32 val = p16[0]<<16 | p16[1];
+ sbus_writel(val, piobuf);
+ p16 += 2;
+ piobuf += 4;
+ len -= 4;
+ }
+ src = (char *) p16;
+ break;
+ }
+ if (len >= 2) {
+ u16 val = src[0] << 8 | src[1];
+ sbus_writew(val, piobuf);
+ src += 2;
+ piobuf += 2;
+ len -= 2;
+ }
+ if (len >= 1)
+ sbus_writeb(src[0], piobuf);
+}
+
+static void lance_piozero(void __iomem *dest, int len)
+{
+ void __iomem *piobuf = dest;
+
+ if ((unsigned long)piobuf & 1) {
+ sbus_writeb(0, piobuf);
+ piobuf += 1;
+ len -= 1;
+ if (len == 0)
+ return;
+ }
+ if (len == 1) {
+ sbus_writeb(0, piobuf);
+ return;
+ }
+ if ((unsigned long)piobuf & 2) {
+ sbus_writew(0, piobuf);
+ piobuf += 2;
+ len -= 2;
+ if (len == 0)
+ return;
+ }
+ while (len >= 4) {
+ sbus_writel(0, piobuf);
+ piobuf += 4;
+ len -= 4;
+ }
+ if (len >= 2) {
+ sbus_writew(0, piobuf);
+ piobuf += 2;
+ len -= 2;
+ }
+ if (len >= 1)
+ sbus_writeb(0, piobuf);
+}
+
+static void lance_tx_timeout(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+
+ printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
+ dev->name, sbus_readw(lp->lregs + RDP));
+ lance_reset(dev);
+ netif_wake_queue(dev);
+}
+
+static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ int entry, skblen, len;
+
+ skblen = skb->len;
+
+ len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
+
+ spin_lock_irq(&lp->lock);
+
+ dev->stats.tx_bytes += len;
+
+ entry = lp->tx_new & TX_RING_MOD_MASK;
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ sbus_writew((-len) | 0xf000, &ib->btx_ring[entry].length);
+ sbus_writew(0, &ib->btx_ring[entry].misc);
+ lance_piocopy_from_skb(&ib->tx_buf[entry][0], skb->data, skblen);
+ if (len != skblen)
+ lance_piozero(&ib->tx_buf[entry][skblen], len - skblen);
+ sbus_writeb(LE_T1_POK | LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ ib->btx_ring [entry].length = (-len) | 0xf000;
+ ib->btx_ring [entry].misc = 0;
+ skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen);
+ if (len != skblen)
+ memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen);
+ ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
+ }
+
+ lp->tx_new = TX_NEXT(entry);
+
+ if (TX_BUFFS_AVAIL <= 0)
+ netif_stop_queue(dev);
+
+ /* Kick the lance: transmit now */
+ sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP);
+
+ /* Read back CSR to invalidate the E-Cache.
+	 * This is needed because DMA_DSBL_WR_INV is set.
+ */
+ if (lp->dregs)
+ sbus_readw(lp->lregs + RDP);
+
+ spin_unlock_irq(&lp->lock);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/* taken from the depca driver */
+static void lance_load_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ u32 crc;
+ u32 val;
+
+ /* set all multicast bits */
+ if (dev->flags & IFF_ALLMULTI)
+ val = ~0;
+ else
+ val = 0;
+
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ sbus_writel(val, &ib->filter[0]);
+ sbus_writel(val, &ib->filter[1]);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ ib->filter [0] = val;
+ ib->filter [1] = val;
+ }
+
+ if (dev->flags & IFF_ALLMULTI)
+ return;
+
+ /* Add addresses */
+ netdev_for_each_mc_addr(ha, dev) {
+ crc = ether_crc_le(6, ha->addr);
+ crc = crc >> 26;
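+		/* The upper six bits of the little-endian CRC select one
+		 * of the 64 hash filter bits: crc >> 4 picks the 16-bit
+		 * filter word and crc & 0xf the bit within it.
+		 */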
+ if (lp->pio_buffer) {
+ struct lance_init_block __iomem *ib = lp->init_block_iomem;
+ u16 __iomem *mcast_table = (u16 __iomem *) &ib->filter;
+ u16 tmp = sbus_readw(&mcast_table[crc>>4]);
+ tmp |= 1 << (crc & 0xf);
+ sbus_writew(tmp, &mcast_table[crc>>4]);
+ } else {
+ struct lance_init_block *ib = lp->init_block_mem;
+ u16 *mcast_table = (u16 *) &ib->filter;
+ mcast_table [crc >> 4] |= 1 << (crc & 0xf);
+ }
+ }
+}
+
+static void lance_set_multicast(struct net_device *dev)
+{
+ struct lance_private *lp = netdev_priv(dev);
+ struct lance_init_block *ib_mem = lp->init_block_mem;
+ struct lance_init_block __iomem *ib_iomem = lp->init_block_iomem;
+ u16 mode;
+
+ if (!netif_running(dev))
+ return;
+
+ if (lp->tx_old != lp->tx_new) {
+ mod_timer(&lp->multicast_timer, jiffies + 4);
+ netif_wake_queue(dev);
+ return;
+ }
+
+ netif_stop_queue(dev);
+
+ STOP_LANCE(lp);
+ lp->init_ring(dev);
+
+ if (lp->pio_buffer)
+ mode = sbus_readw(&ib_iomem->mode);
+ else
+ mode = ib_mem->mode;
+ if (dev->flags & IFF_PROMISC) {
+ mode |= LE_MO_PROM;
+ if (lp->pio_buffer)
+ sbus_writew(mode, &ib_iomem->mode);
+ else
+ ib_mem->mode = mode;
+ } else {
+ mode &= ~LE_MO_PROM;
+ if (lp->pio_buffer)
+ sbus_writew(mode, &ib_iomem->mode);
+ else
+ ib_mem->mode = mode;
+ lance_load_multicast(dev);
+ }
+ load_csrs(lp);
+ init_restart_lance(lp);
+ netif_wake_queue(dev);
+}
+
+static void lance_set_multicast_retry(unsigned long _opaque)
+{
+ struct net_device *dev = (struct net_device *) _opaque;
+
+ lance_set_multicast(dev);
+}
+
+static void lance_free_hwresources(struct lance_private *lp)
+{
+ if (lp->lregs)
+ of_iounmap(&lp->op->resource[0], lp->lregs, LANCE_REG_SIZE);
+ if (lp->dregs) {
+ struct platform_device *ledma = lp->ledma;
+
+ of_iounmap(&ledma->resource[0], lp->dregs,
+ resource_size(&ledma->resource[0]));
+ }
+ if (lp->init_block_iomem) {
+ of_iounmap(&lp->lebuffer->resource[0], lp->init_block_iomem,
+ sizeof(struct lance_init_block));
+ } else if (lp->init_block_mem) {
+ dma_free_coherent(&lp->op->dev,
+ sizeof(struct lance_init_block),
+ lp->init_block_mem,
+ lp->init_block_dvma);
+ }
+}
+
+/* Ethtool support... */
+static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, "sunlance", sizeof(info->driver));
+ strlcpy(info->version, "2.02", sizeof(info->version));
+}
+
+static const struct ethtool_ops sparc_lance_ethtool_ops = {
+ .get_drvinfo = sparc_lance_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+static const struct net_device_ops sparc_lance_ops = {
+ .ndo_open = lance_open,
+ .ndo_stop = lance_close,
+ .ndo_start_xmit = lance_start_xmit,
+ .ndo_set_rx_mode = lance_set_multicast,
+ .ndo_tx_timeout = lance_tx_timeout,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static int sparc_lance_probe_one(struct platform_device *op,
+ struct platform_device *ledma,
+ struct platform_device *lebuffer)
+{
+ struct device_node *dp = op->dev.of_node;
+ static unsigned version_printed;
+ struct lance_private *lp;
+ struct net_device *dev;
+ int i;
+
+ dev = alloc_etherdev(sizeof(struct lance_private) + 8);
+ if (!dev)
+ return -ENOMEM;
+
+ lp = netdev_priv(dev);
+
+		printk(KERN_INFO "%s", version);
+ printk (KERN_INFO "%s", version);
+
+ spin_lock_init(&lp->lock);
+
+	/* Copy the IDPROM ethernet address to the device structure; later we
+ * will copy the address in the device structure to the lance
+ * initialization block.
+ */
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = idprom->id_ethaddr[i];
+
+ /* Get the IO region */
+ lp->lregs = of_ioremap(&op->resource[0], 0,
+ LANCE_REG_SIZE, lancestr);
+ if (!lp->lregs) {
+ printk(KERN_ERR "SunLance: Cannot map registers.\n");
+ goto fail;
+ }
+
+ lp->ledma = ledma;
+ if (lp->ledma) {
+ lp->dregs = of_ioremap(&ledma->resource[0], 0,
+ resource_size(&ledma->resource[0]),
+ "ledma");
+ if (!lp->dregs) {
+ printk(KERN_ERR "SunLance: Cannot map "
+ "ledma registers.\n");
+ goto fail;
+ }
+ }
+
+ lp->op = op;
+ lp->lebuffer = lebuffer;
+ if (lebuffer) {
+ /* sanity check */
+ if (lebuffer->resource[0].start & 7) {
+ printk(KERN_ERR "SunLance: ERROR: Rx and Tx rings not on even boundary.\n");
+ goto fail;
+ }
+ lp->init_block_iomem =
+ of_ioremap(&lebuffer->resource[0], 0,
+ sizeof(struct lance_init_block), "lebuffer");
+ if (!lp->init_block_iomem) {
+ printk(KERN_ERR "SunLance: Cannot map PIO buffer.\n");
+ goto fail;
+ }
+ lp->init_block_dvma = 0;
+ lp->pio_buffer = 1;
+ lp->init_ring = lance_init_ring_pio;
+ lp->rx = lance_rx_pio;
+ lp->tx = lance_tx_pio;
+ } else {
+ lp->init_block_mem =
+ dma_alloc_coherent(&op->dev,
+ sizeof(struct lance_init_block),
+ &lp->init_block_dvma, GFP_ATOMIC);
+ if (!lp->init_block_mem)
+ goto fail;
+
+ lp->pio_buffer = 0;
+ lp->init_ring = lance_init_ring_dvma;
+ lp->rx = lance_rx_dvma;
+ lp->tx = lance_tx_dvma;
+ }
+ lp->busmaster_regval = of_getintprop_default(dp, "busmaster-regval",
+ (LE_C3_BSWP |
+ LE_C3_ACON |
+ LE_C3_BCON));
+
+ lp->name = lancestr;
+
+ lp->burst_sizes = 0;
+ if (lp->ledma) {
+ struct device_node *ledma_dp = ledma->dev.of_node;
+ struct device_node *sbus_dp;
+ unsigned int sbmask;
+ const char *prop;
+ u32 csr;
+
+ /* Find burst-size property for ledma */
+ lp->burst_sizes = of_getintprop_default(ledma_dp,
+ "burst-sizes", 0);
+
+ /* ledma may be capable of fast bursts, but sbus may not. */
+ sbus_dp = ledma_dp->parent;
+ sbmask = of_getintprop_default(sbus_dp, "burst-sizes",
+ DMA_BURSTBITS);
+ lp->burst_sizes &= sbmask;
+
+ /* Get the cable-selection property */
+ prop = of_get_property(ledma_dp, "cable-selection", NULL);
+ if (!prop || prop[0] == '\0') {
+ struct device_node *nd;
+
+ printk(KERN_INFO "SunLance: using "
+ "auto-carrier-detection.\n");
+
+ nd = of_find_node_by_path("/options");
+ if (!nd)
+ goto no_link_test;
+
+ prop = of_get_property(nd, "tpe-link-test?", NULL);
+ if (!prop)
+ goto no_link_test;
+
+ if (strcmp(prop, "true")) {
+ printk(KERN_NOTICE "SunLance: warning: overriding option "
+ "'tpe-link-test?'\n");
+ printk(KERN_NOTICE "SunLance: warning: mail any problems "
+ "to ecd@skynet.be\n");
+ auxio_set_lte(AUXIO_LTE_ON);
+ }
+no_link_test:
+ lp->auto_select = 1;
+ lp->tpe = 0;
+ } else if (!strcmp(prop, "aui")) {
+ lp->auto_select = 0;
+ lp->tpe = 0;
+ } else {
+ lp->auto_select = 0;
+ lp->tpe = 1;
+ }
+
+ /* Reset ledma */
+ csr = sbus_readl(lp->dregs + DMA_CSR);
+ sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR);
+ udelay(200);
+ sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR);
+ } else
+ lp->dregs = NULL;
+
+ lp->dev = dev;
+ SET_NETDEV_DEV(dev, &op->dev);
+ dev->watchdog_timeo = 5*HZ;
+ dev->ethtool_ops = &sparc_lance_ethtool_ops;
+ dev->netdev_ops = &sparc_lance_ops;
+
+ dev->irq = op->archdata.irqs[0];
+
+ /* We cannot sleep if the chip is busy during a
+ * multicast list update event, because such events
+	 * can occur from interrupts (e.g. IPv6). So we
+ * use a timer to try again later when necessary. -DaveM
+ */
+ init_timer(&lp->multicast_timer);
+ lp->multicast_timer.data = (unsigned long) dev;
+ lp->multicast_timer.function = lance_set_multicast_retry;
+
+ if (register_netdev(dev)) {
+ printk(KERN_ERR "SunLance: Cannot register device.\n");
+ goto fail;
+ }
+
+ platform_set_drvdata(op, lp);
+
+ printk(KERN_INFO "%s: LANCE %pM\n",
+ dev->name, dev->dev_addr);
+
+ return 0;
+
+fail:
+ lance_free_hwresources(lp);
+ free_netdev(dev);
+ return -ENODEV;
+}
+
+static int sunlance_sbus_probe(struct platform_device *op)
+{
+ struct platform_device *parent = to_platform_device(op->dev.parent);
+ struct device_node *parent_dp = parent->dev.of_node;
+ int err;
+
+ if (!strcmp(parent_dp->name, "ledma")) {
+ err = sparc_lance_probe_one(op, parent, NULL);
+ } else if (!strcmp(parent_dp->name, "lebuffer")) {
+ err = sparc_lance_probe_one(op, NULL, parent);
+ } else
+ err = sparc_lance_probe_one(op, NULL, NULL);
+
+ return err;
+}
+
+static int sunlance_sbus_remove(struct platform_device *op)
+{
+ struct lance_private *lp = platform_get_drvdata(op);
+ struct net_device *net_dev = lp->dev;
+
+ unregister_netdev(net_dev);
+
+ lance_free_hwresources(lp);
+
+ free_netdev(net_dev);
+
+ return 0;
+}
+
+static const struct of_device_id sunlance_sbus_match[] = {
+ {
+ .name = "le",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, sunlance_sbus_match);
+
+static struct platform_driver sunlance_sbus_driver = {
+ .driver = {
+ .name = "sunlance",
+ .of_match_table = sunlance_sbus_match,
+ },
+ .probe = sunlance_sbus_probe,
+ .remove = sunlance_sbus_remove,
+};
+
+module_platform_driver(sunlance_sbus_driver);
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
new file mode 100644
index 000000000..171a7e680
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
+
+amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
+ xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
+ xgbe-ptp.o
+
+amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o
+amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
new file mode 100644
index 000000000..34c28aac7
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -0,0 +1,1145 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XGBE_COMMON_H__
+#define __XGBE_COMMON_H__
+
+/* DMA register offsets */
+#define DMA_MR 0x3000
+#define DMA_SBMR 0x3004
+#define DMA_ISR 0x3008
+#define DMA_AXIARCR 0x3010
+#define DMA_AXIAWCR 0x3018
+#define DMA_DSR0 0x3020
+#define DMA_DSR1 0x3024
+
+/* DMA register entry bit positions and sizes */
+#define DMA_AXIARCR_DRC_INDEX 0
+#define DMA_AXIARCR_DRC_WIDTH 4
+#define DMA_AXIARCR_DRD_INDEX 4
+#define DMA_AXIARCR_DRD_WIDTH 2
+#define DMA_AXIARCR_TEC_INDEX 8
+#define DMA_AXIARCR_TEC_WIDTH 4
+#define DMA_AXIARCR_TED_INDEX 12
+#define DMA_AXIARCR_TED_WIDTH 2
+#define DMA_AXIARCR_THC_INDEX 16
+#define DMA_AXIARCR_THC_WIDTH 4
+#define DMA_AXIARCR_THD_INDEX 20
+#define DMA_AXIARCR_THD_WIDTH 2
+#define DMA_AXIAWCR_DWC_INDEX 0
+#define DMA_AXIAWCR_DWC_WIDTH 4
+#define DMA_AXIAWCR_DWD_INDEX 4
+#define DMA_AXIAWCR_DWD_WIDTH 2
+#define DMA_AXIAWCR_RPC_INDEX 8
+#define DMA_AXIAWCR_RPC_WIDTH 4
+#define DMA_AXIAWCR_RPD_INDEX 12
+#define DMA_AXIAWCR_RPD_WIDTH 2
+#define DMA_AXIAWCR_RHC_INDEX 16
+#define DMA_AXIAWCR_RHC_WIDTH 4
+#define DMA_AXIAWCR_RHD_INDEX 20
+#define DMA_AXIAWCR_RHD_WIDTH 2
+#define DMA_AXIAWCR_TDC_INDEX 24
+#define DMA_AXIAWCR_TDC_WIDTH 4
+#define DMA_AXIAWCR_TDD_INDEX 28
+#define DMA_AXIAWCR_TDD_WIDTH 2
+#define DMA_ISR_MACIS_INDEX 17
+#define DMA_ISR_MACIS_WIDTH 1
+#define DMA_ISR_MTLIS_INDEX 16
+#define DMA_ISR_MTLIS_WIDTH 1
+#define DMA_MR_SWR_INDEX 0
+#define DMA_MR_SWR_WIDTH 1
+#define DMA_SBMR_EAME_INDEX 11
+#define DMA_SBMR_EAME_WIDTH 1
+#define DMA_SBMR_BLEN_256_INDEX 7
+#define DMA_SBMR_BLEN_256_WIDTH 1
+#define DMA_SBMR_UNDEF_INDEX 0
+#define DMA_SBMR_UNDEF_WIDTH 1
+
+/* DMA register values */
+#define DMA_DSR_RPS_WIDTH 4
+#define DMA_DSR_TPS_WIDTH 4
+#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH)
+#define DMA_DSR0_RPS_START 8
+#define DMA_DSR0_TPS_START 12
+#define DMA_DSRX_FIRST_QUEUE 3
+#define DMA_DSRX_INC 4
+#define DMA_DSRX_QPR 4
+#define DMA_DSRX_RPS_START 0
+#define DMA_DSRX_TPS_START 4
+#define DMA_TPS_STOPPED 0x00
+#define DMA_TPS_SUSPENDED 0x06
+
+/* DMA channel register offsets
+ * Multiple channels can be active. The first channel has registers
+ * that begin at 0x3100. Each subsequent channel has registers that
+ * are accessed using an offset of 0x80 from the previous channel.
+ */
+#define DMA_CH_BASE 0x3100
+#define DMA_CH_INC 0x80
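+/* Channel n's copy of a register thus sits at
+ * DMA_CH_BASE + n * DMA_CH_INC + reg, e.g. channel 2's DMA_CH_IER is
+ * 0x3100 + 2 * 0x80 + 0x38 = 0x3238.
+ */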
+
+#define DMA_CH_CR 0x00
+#define DMA_CH_TCR 0x04
+#define DMA_CH_RCR 0x08
+#define DMA_CH_TDLR_HI 0x10
+#define DMA_CH_TDLR_LO 0x14
+#define DMA_CH_RDLR_HI 0x18
+#define DMA_CH_RDLR_LO 0x1c
+#define DMA_CH_TDTR_LO 0x24
+#define DMA_CH_RDTR_LO 0x2c
+#define DMA_CH_TDRLR 0x30
+#define DMA_CH_RDRLR 0x34
+#define DMA_CH_IER 0x38
+#define DMA_CH_RIWT 0x3c
+#define DMA_CH_CATDR_LO 0x44
+#define DMA_CH_CARDR_LO 0x4c
+#define DMA_CH_CATBR_HI 0x50
+#define DMA_CH_CATBR_LO 0x54
+#define DMA_CH_CARBR_HI 0x58
+#define DMA_CH_CARBR_LO 0x5c
+#define DMA_CH_SR 0x60
+
+/* DMA channel register entry bit positions and sizes */
+#define DMA_CH_CR_PBLX8_INDEX 16
+#define DMA_CH_CR_PBLX8_WIDTH 1
+#define DMA_CH_CR_SPH_INDEX 24
+#define DMA_CH_CR_SPH_WIDTH 1
+#define DMA_CH_IER_AIE_INDEX 15
+#define DMA_CH_IER_AIE_WIDTH 1
+#define DMA_CH_IER_FBEE_INDEX 12
+#define DMA_CH_IER_FBEE_WIDTH 1
+#define DMA_CH_IER_NIE_INDEX 16
+#define DMA_CH_IER_NIE_WIDTH 1
+#define DMA_CH_IER_RBUE_INDEX 7
+#define DMA_CH_IER_RBUE_WIDTH 1
+#define DMA_CH_IER_RIE_INDEX 6
+#define DMA_CH_IER_RIE_WIDTH 1
+#define DMA_CH_IER_RSE_INDEX 8
+#define DMA_CH_IER_RSE_WIDTH 1
+#define DMA_CH_IER_TBUE_INDEX 2
+#define DMA_CH_IER_TBUE_WIDTH 1
+#define DMA_CH_IER_TIE_INDEX 0
+#define DMA_CH_IER_TIE_WIDTH 1
+#define DMA_CH_IER_TXSE_INDEX 1
+#define DMA_CH_IER_TXSE_WIDTH 1
+#define DMA_CH_RCR_PBL_INDEX 16
+#define DMA_CH_RCR_PBL_WIDTH 6
+#define DMA_CH_RCR_RBSZ_INDEX 1
+#define DMA_CH_RCR_RBSZ_WIDTH 14
+#define DMA_CH_RCR_SR_INDEX 0
+#define DMA_CH_RCR_SR_WIDTH 1
+#define DMA_CH_RIWT_RWT_INDEX 0
+#define DMA_CH_RIWT_RWT_WIDTH 8
+#define DMA_CH_SR_FBE_INDEX 12
+#define DMA_CH_SR_FBE_WIDTH 1
+#define DMA_CH_SR_RBU_INDEX 7
+#define DMA_CH_SR_RBU_WIDTH 1
+#define DMA_CH_SR_RI_INDEX 6
+#define DMA_CH_SR_RI_WIDTH 1
+#define DMA_CH_SR_RPS_INDEX 8
+#define DMA_CH_SR_RPS_WIDTH 1
+#define DMA_CH_SR_TBU_INDEX 2
+#define DMA_CH_SR_TBU_WIDTH 1
+#define DMA_CH_SR_TI_INDEX 0
+#define DMA_CH_SR_TI_WIDTH 1
+#define DMA_CH_SR_TPS_INDEX 1
+#define DMA_CH_SR_TPS_WIDTH 1
+#define DMA_CH_TCR_OSP_INDEX 4
+#define DMA_CH_TCR_OSP_WIDTH 1
+#define DMA_CH_TCR_PBL_INDEX 16
+#define DMA_CH_TCR_PBL_WIDTH 6
+#define DMA_CH_TCR_ST_INDEX 0
+#define DMA_CH_TCR_ST_WIDTH 1
+#define DMA_CH_TCR_TSE_INDEX 12
+#define DMA_CH_TCR_TSE_WIDTH 1
+
+/* DMA channel register values */
+#define DMA_OSP_DISABLE 0x00
+#define DMA_OSP_ENABLE 0x01
+#define DMA_PBL_1 1
+#define DMA_PBL_2 2
+#define DMA_PBL_4 4
+#define DMA_PBL_8 8
+#define DMA_PBL_16 16
+#define DMA_PBL_32 32
+#define DMA_PBL_64 64 /* 8 x 8 */
+#define DMA_PBL_128 128 /* 8 x 16 */
+#define DMA_PBL_256 256 /* 8 x 32 */
+#define DMA_PBL_X8_DISABLE 0x00
+#define DMA_PBL_X8_ENABLE 0x01
+
+/* MAC register offsets */
+#define MAC_TCR 0x0000
+#define MAC_RCR 0x0004
+#define MAC_PFR 0x0008
+#define MAC_WTR 0x000c
+#define MAC_HTR0 0x0010
+#define MAC_VLANTR 0x0050
+#define MAC_VLANHTR 0x0058
+#define MAC_VLANIR 0x0060
+#define MAC_IVLANIR 0x0064
+#define MAC_RETMR 0x006c
+#define MAC_Q0TFCR 0x0070
+#define MAC_RFCR 0x0090
+#define MAC_RQC0R 0x00a0
+#define MAC_RQC1R 0x00a4
+#define MAC_RQC2R 0x00a8
+#define MAC_RQC3R 0x00ac
+#define MAC_ISR 0x00b0
+#define MAC_IER 0x00b4
+#define MAC_RTSR 0x00b8
+#define MAC_PMTCSR 0x00c0
+#define MAC_RWKPFR 0x00c4
+#define MAC_LPICSR 0x00d0
+#define MAC_LPITCR 0x00d4
+#define MAC_VR 0x0110
+#define MAC_DR 0x0114
+#define MAC_HWF0R 0x011c
+#define MAC_HWF1R 0x0120
+#define MAC_HWF2R 0x0124
+#define MAC_GPIOCR 0x0278
+#define MAC_GPIOSR 0x027c
+#define MAC_MACA0HR 0x0300
+#define MAC_MACA0LR 0x0304
+#define MAC_MACA1HR 0x0308
+#define MAC_MACA1LR 0x030c
+#define MAC_RSSCR 0x0c80
+#define MAC_RSSAR 0x0c88
+#define MAC_RSSDR 0x0c8c
+#define MAC_TSCR 0x0d00
+#define MAC_SSIR 0x0d04
+#define MAC_STSR 0x0d08
+#define MAC_STNR 0x0d0c
+#define MAC_STSUR 0x0d10
+#define MAC_STNUR 0x0d14
+#define MAC_TSAR 0x0d18
+#define MAC_TSSR 0x0d20
+#define MAC_TXSNR 0x0d30
+#define MAC_TXSSR 0x0d34
+
+#define MAC_QTFCR_INC 4
+#define MAC_MACA_INC 4
+#define MAC_HTR_INC 4
+
+#define MAC_RQC2_INC 4
+#define MAC_RQC2_Q_PER_REG 4
+
+/* MAC register entry bit positions and sizes */
+#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
+#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
+#define MAC_HWF0R_ARPOFFSEL_INDEX 9
+#define MAC_HWF0R_ARPOFFSEL_WIDTH 1
+#define MAC_HWF0R_EEESEL_INDEX 13
+#define MAC_HWF0R_EEESEL_WIDTH 1
+#define MAC_HWF0R_GMIISEL_INDEX 1
+#define MAC_HWF0R_GMIISEL_WIDTH 1
+#define MAC_HWF0R_MGKSEL_INDEX 7
+#define MAC_HWF0R_MGKSEL_WIDTH 1
+#define MAC_HWF0R_MMCSEL_INDEX 8
+#define MAC_HWF0R_MMCSEL_WIDTH 1
+#define MAC_HWF0R_RWKSEL_INDEX 6
+#define MAC_HWF0R_RWKSEL_WIDTH 1
+#define MAC_HWF0R_RXCOESEL_INDEX 16
+#define MAC_HWF0R_RXCOESEL_WIDTH 1
+#define MAC_HWF0R_SAVLANINS_INDEX 27
+#define MAC_HWF0R_SAVLANINS_WIDTH 1
+#define MAC_HWF0R_SMASEL_INDEX 5
+#define MAC_HWF0R_SMASEL_WIDTH 1
+#define MAC_HWF0R_TSSEL_INDEX 12
+#define MAC_HWF0R_TSSEL_WIDTH 1
+#define MAC_HWF0R_TSSTSSEL_INDEX 25
+#define MAC_HWF0R_TSSTSSEL_WIDTH 2
+#define MAC_HWF0R_TXCOESEL_INDEX 14
+#define MAC_HWF0R_TXCOESEL_WIDTH 1
+#define MAC_HWF0R_VLHASH_INDEX 4
+#define MAC_HWF0R_VLHASH_WIDTH 1
+#define MAC_HWF1R_ADDR64_INDEX 14
+#define MAC_HWF1R_ADDR64_WIDTH 2
+#define MAC_HWF1R_ADVTHWORD_INDEX 13
+#define MAC_HWF1R_ADVTHWORD_WIDTH 1
+#define MAC_HWF1R_DBGMEMA_INDEX 19
+#define MAC_HWF1R_DBGMEMA_WIDTH 1
+#define MAC_HWF1R_DCBEN_INDEX 16
+#define MAC_HWF1R_DCBEN_WIDTH 1
+#define MAC_HWF1R_HASHTBLSZ_INDEX 24
+#define MAC_HWF1R_HASHTBLSZ_WIDTH 3
+#define MAC_HWF1R_L3L4FNUM_INDEX 27
+#define MAC_HWF1R_L3L4FNUM_WIDTH 4
+#define MAC_HWF1R_NUMTC_INDEX 21
+#define MAC_HWF1R_NUMTC_WIDTH 3
+#define MAC_HWF1R_RSSEN_INDEX 20
+#define MAC_HWF1R_RSSEN_WIDTH 1
+#define MAC_HWF1R_RXFIFOSIZE_INDEX 0
+#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5
+#define MAC_HWF1R_SPHEN_INDEX 17
+#define MAC_HWF1R_SPHEN_WIDTH 1
+#define MAC_HWF1R_TSOEN_INDEX 18
+#define MAC_HWF1R_TSOEN_WIDTH 1
+#define MAC_HWF1R_TXFIFOSIZE_INDEX 6
+#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5
+#define MAC_HWF2R_AUXSNAPNUM_INDEX 28
+#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3
+#define MAC_HWF2R_PPSOUTNUM_INDEX 24
+#define MAC_HWF2R_PPSOUTNUM_WIDTH 3
+#define MAC_HWF2R_RXCHCNT_INDEX 12
+#define MAC_HWF2R_RXCHCNT_WIDTH 4
+#define MAC_HWF2R_RXQCNT_INDEX 0
+#define MAC_HWF2R_RXQCNT_WIDTH 4
+#define MAC_HWF2R_TXCHCNT_INDEX 18
+#define MAC_HWF2R_TXCHCNT_WIDTH 4
+#define MAC_HWF2R_TXQCNT_INDEX 6
+#define MAC_HWF2R_TXQCNT_WIDTH 4
+#define MAC_IER_TSIE_INDEX 12
+#define MAC_IER_TSIE_WIDTH 1
+#define MAC_ISR_MMCRXIS_INDEX 9
+#define MAC_ISR_MMCRXIS_WIDTH 1
+#define MAC_ISR_MMCTXIS_INDEX 10
+#define MAC_ISR_MMCTXIS_WIDTH 1
+#define MAC_ISR_PMTIS_INDEX 4
+#define MAC_ISR_PMTIS_WIDTH 1
+#define MAC_ISR_TSIS_INDEX 12
+#define MAC_ISR_TSIS_WIDTH 1
+#define MAC_MACA1HR_AE_INDEX 31
+#define MAC_MACA1HR_AE_WIDTH 1
+#define MAC_PFR_HMC_INDEX 2
+#define MAC_PFR_HMC_WIDTH 1
+#define MAC_PFR_HPF_INDEX 10
+#define MAC_PFR_HPF_WIDTH 1
+#define MAC_PFR_HUC_INDEX 1
+#define MAC_PFR_HUC_WIDTH 1
+#define MAC_PFR_PM_INDEX 4
+#define MAC_PFR_PM_WIDTH 1
+#define MAC_PFR_PR_INDEX 0
+#define MAC_PFR_PR_WIDTH 1
+#define MAC_PFR_VTFE_INDEX 16
+#define MAC_PFR_VTFE_WIDTH 1
+#define MAC_PMTCSR_MGKPKTEN_INDEX 1
+#define MAC_PMTCSR_MGKPKTEN_WIDTH 1
+#define MAC_PMTCSR_PWRDWN_INDEX 0
+#define MAC_PMTCSR_PWRDWN_WIDTH 1
+#define MAC_PMTCSR_RWKFILTRST_INDEX 31
+#define MAC_PMTCSR_RWKFILTRST_WIDTH 1
+#define MAC_PMTCSR_RWKPKTEN_INDEX 2
+#define MAC_PMTCSR_RWKPKTEN_WIDTH 1
+#define MAC_Q0TFCR_PT_INDEX 16
+#define MAC_Q0TFCR_PT_WIDTH 16
+#define MAC_Q0TFCR_TFE_INDEX 1
+#define MAC_Q0TFCR_TFE_WIDTH 1
+#define MAC_RCR_ACS_INDEX 1
+#define MAC_RCR_ACS_WIDTH 1
+#define MAC_RCR_CST_INDEX 2
+#define MAC_RCR_CST_WIDTH 1
+#define MAC_RCR_DCRCC_INDEX 3
+#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_HDSMS_INDEX 12
+#define MAC_RCR_HDSMS_WIDTH 3
+#define MAC_RCR_IPC_INDEX 9
+#define MAC_RCR_IPC_WIDTH 1
+#define MAC_RCR_JE_INDEX 8
+#define MAC_RCR_JE_WIDTH 1
+#define MAC_RCR_LM_INDEX 10
+#define MAC_RCR_LM_WIDTH 1
+#define MAC_RCR_RE_INDEX 0
+#define MAC_RCR_RE_WIDTH 1
+#define MAC_RFCR_PFCE_INDEX 8
+#define MAC_RFCR_PFCE_WIDTH 1
+#define MAC_RFCR_RFE_INDEX 0
+#define MAC_RFCR_RFE_WIDTH 1
+#define MAC_RFCR_UP_INDEX 1
+#define MAC_RFCR_UP_WIDTH 1
+#define MAC_RQC0R_RXQ0EN_INDEX 0
+#define MAC_RQC0R_RXQ0EN_WIDTH 2
+#define MAC_RSSAR_ADDRT_INDEX 2
+#define MAC_RSSAR_ADDRT_WIDTH 1
+#define MAC_RSSAR_CT_INDEX 1
+#define MAC_RSSAR_CT_WIDTH 1
+#define MAC_RSSAR_OB_INDEX 0
+#define MAC_RSSAR_OB_WIDTH 1
+#define MAC_RSSAR_RSSIA_INDEX 8
+#define MAC_RSSAR_RSSIA_WIDTH 8
+#define MAC_RSSCR_IP2TE_INDEX 1
+#define MAC_RSSCR_IP2TE_WIDTH 1
+#define MAC_RSSCR_RSSE_INDEX 0
+#define MAC_RSSCR_RSSE_WIDTH 1
+#define MAC_RSSCR_TCP4TE_INDEX 2
+#define MAC_RSSCR_TCP4TE_WIDTH 1
+#define MAC_RSSCR_UDP4TE_INDEX 3
+#define MAC_RSSCR_UDP4TE_WIDTH 1
+#define MAC_RSSDR_DMCH_INDEX 0
+#define MAC_RSSDR_DMCH_WIDTH 4
+#define MAC_SSIR_SNSINC_INDEX 8
+#define MAC_SSIR_SNSINC_WIDTH 8
+#define MAC_SSIR_SSINC_INDEX 16
+#define MAC_SSIR_SSINC_WIDTH 8
+#define MAC_TCR_SS_INDEX 29
+#define MAC_TCR_SS_WIDTH 2
+#define MAC_TCR_TE_INDEX 0
+#define MAC_TCR_TE_WIDTH 1
+#define MAC_TSCR_AV8021ASMEN_INDEX 28
+#define MAC_TSCR_AV8021ASMEN_WIDTH 1
+#define MAC_TSCR_SNAPTYPSEL_INDEX 16
+#define MAC_TSCR_SNAPTYPSEL_WIDTH 2
+#define MAC_TSCR_TSADDREG_INDEX 5
+#define MAC_TSCR_TSADDREG_WIDTH 1
+#define MAC_TSCR_TSCFUPDT_INDEX 1
+#define MAC_TSCR_TSCFUPDT_WIDTH 1
+#define MAC_TSCR_TSCTRLSSR_INDEX 9
+#define MAC_TSCR_TSCTRLSSR_WIDTH 1
+#define MAC_TSCR_TSENA_INDEX 0
+#define MAC_TSCR_TSENA_WIDTH 1
+#define MAC_TSCR_TSENALL_INDEX 8
+#define MAC_TSCR_TSENALL_WIDTH 1
+#define MAC_TSCR_TSEVNTENA_INDEX 14
+#define MAC_TSCR_TSEVNTENA_WIDTH 1
+#define MAC_TSCR_TSINIT_INDEX 2
+#define MAC_TSCR_TSINIT_WIDTH 1
+#define MAC_TSCR_TSIPENA_INDEX 11
+#define MAC_TSCR_TSIPENA_WIDTH 1
+#define MAC_TSCR_TSIPV4ENA_INDEX 13
+#define MAC_TSCR_TSIPV4ENA_WIDTH 1
+#define MAC_TSCR_TSIPV6ENA_INDEX 12
+#define MAC_TSCR_TSIPV6ENA_WIDTH 1
+#define MAC_TSCR_TSMSTRENA_INDEX 15
+#define MAC_TSCR_TSMSTRENA_WIDTH 1
+#define MAC_TSCR_TSVER2ENA_INDEX 10
+#define MAC_TSCR_TSVER2ENA_WIDTH 1
+#define MAC_TSCR_TXTSSTSM_INDEX 24
+#define MAC_TSCR_TXTSSTSM_WIDTH 1
+#define MAC_TSSR_TXTSC_INDEX 15
+#define MAC_TSSR_TXTSC_WIDTH 1
+#define MAC_TXSNR_TXTSSTSMIS_INDEX 31
+#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1
+#define MAC_VLANHTR_VLHT_INDEX 0
+#define MAC_VLANHTR_VLHT_WIDTH 16
+#define MAC_VLANIR_VLTI_INDEX 20
+#define MAC_VLANIR_VLTI_WIDTH 1
+#define MAC_VLANIR_CSVL_INDEX 19
+#define MAC_VLANIR_CSVL_WIDTH 1
+#define MAC_VLANTR_DOVLTC_INDEX 20
+#define MAC_VLANTR_DOVLTC_WIDTH 1
+#define MAC_VLANTR_ERSVLM_INDEX 19
+#define MAC_VLANTR_ERSVLM_WIDTH 1
+#define MAC_VLANTR_ESVL_INDEX 18
+#define MAC_VLANTR_ESVL_WIDTH 1
+#define MAC_VLANTR_ETV_INDEX 16
+#define MAC_VLANTR_ETV_WIDTH 1
+#define MAC_VLANTR_EVLS_INDEX 21
+#define MAC_VLANTR_EVLS_WIDTH 2
+#define MAC_VLANTR_EVLRXS_INDEX 24
+#define MAC_VLANTR_EVLRXS_WIDTH 1
+#define MAC_VLANTR_VL_INDEX 0
+#define MAC_VLANTR_VL_WIDTH 16
+#define MAC_VLANTR_VTHM_INDEX 25
+#define MAC_VLANTR_VTHM_WIDTH 1
+#define MAC_VLANTR_VTIM_INDEX 17
+#define MAC_VLANTR_VTIM_WIDTH 1
+#define MAC_VR_DEVID_INDEX 8
+#define MAC_VR_DEVID_WIDTH 8
+#define MAC_VR_SNPSVER_INDEX 0
+#define MAC_VR_SNPSVER_WIDTH 8
+#define MAC_VR_USERVER_INDEX 16
+#define MAC_VR_USERVER_WIDTH 8
+
+/* MMC register offsets */
+#define MMC_CR 0x0800
+#define MMC_RISR 0x0804
+#define MMC_TISR 0x0808
+#define MMC_RIER 0x080c
+#define MMC_TIER 0x0810
+#define MMC_TXOCTETCOUNT_GB_LO 0x0814
+#define MMC_TXOCTETCOUNT_GB_HI 0x0818
+#define MMC_TXFRAMECOUNT_GB_LO 0x081c
+#define MMC_TXFRAMECOUNT_GB_HI 0x0820
+#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
+#define MMC_TXBROADCASTFRAMES_G_HI 0x0828
+#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
+#define MMC_TXMULTICASTFRAMES_G_HI 0x0830
+#define MMC_TX64OCTETS_GB_LO 0x0834
+#define MMC_TX64OCTETS_GB_HI 0x0838
+#define MMC_TX65TO127OCTETS_GB_LO 0x083c
+#define MMC_TX65TO127OCTETS_GB_HI 0x0840
+#define MMC_TX128TO255OCTETS_GB_LO 0x0844
+#define MMC_TX128TO255OCTETS_GB_HI 0x0848
+#define MMC_TX256TO511OCTETS_GB_LO 0x084c
+#define MMC_TX256TO511OCTETS_GB_HI 0x0850
+#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
+#define MMC_TX512TO1023OCTETS_GB_HI 0x0858
+#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
+#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860
+#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
+#define MMC_TXUNICASTFRAMES_GB_HI 0x0868
+#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
+#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870
+#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
+#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878
+#define MMC_TXUNDERFLOWERROR_LO 0x087c
+#define MMC_TXUNDERFLOWERROR_HI 0x0880
+#define MMC_TXOCTETCOUNT_G_LO 0x0884
+#define MMC_TXOCTETCOUNT_G_HI 0x0888
+#define MMC_TXFRAMECOUNT_G_LO 0x088c
+#define MMC_TXFRAMECOUNT_G_HI 0x0890
+#define MMC_TXPAUSEFRAMES_LO 0x0894
+#define MMC_TXPAUSEFRAMES_HI 0x0898
+#define MMC_TXVLANFRAMES_G_LO 0x089c
+#define MMC_TXVLANFRAMES_G_HI 0x08a0
+#define MMC_RXFRAMECOUNT_GB_LO 0x0900
+#define MMC_RXFRAMECOUNT_GB_HI 0x0904
+#define MMC_RXOCTETCOUNT_GB_LO 0x0908
+#define MMC_RXOCTETCOUNT_GB_HI 0x090c
+#define MMC_RXOCTETCOUNT_G_LO 0x0910
+#define MMC_RXOCTETCOUNT_G_HI 0x0914
+#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
+#define MMC_RXBROADCASTFRAMES_G_HI 0x091c
+#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
+#define MMC_RXMULTICASTFRAMES_G_HI 0x0924
+#define MMC_RXCRCERROR_LO 0x0928
+#define MMC_RXCRCERROR_HI 0x092c
+#define MMC_RXRUNTERROR 0x0930
+#define MMC_RXJABBERERROR 0x0934
+#define MMC_RXUNDERSIZE_G 0x0938
+#define MMC_RXOVERSIZE_G 0x093c
+#define MMC_RX64OCTETS_GB_LO 0x0940
+#define MMC_RX64OCTETS_GB_HI 0x0944
+#define MMC_RX65TO127OCTETS_GB_LO 0x0948
+#define MMC_RX65TO127OCTETS_GB_HI 0x094c
+#define MMC_RX128TO255OCTETS_GB_LO 0x0950
+#define MMC_RX128TO255OCTETS_GB_HI 0x0954
+#define MMC_RX256TO511OCTETS_GB_LO 0x0958
+#define MMC_RX256TO511OCTETS_GB_HI 0x095c
+#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
+#define MMC_RX512TO1023OCTETS_GB_HI 0x0964
+#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
+#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c
+#define MMC_RXUNICASTFRAMES_G_LO 0x0970
+#define MMC_RXUNICASTFRAMES_G_HI 0x0974
+#define MMC_RXLENGTHERROR_LO 0x0978
+#define MMC_RXLENGTHERROR_HI 0x097c
+#define MMC_RXOUTOFRANGETYPE_LO 0x0980
+#define MMC_RXOUTOFRANGETYPE_HI 0x0984
+#define MMC_RXPAUSEFRAMES_LO 0x0988
+#define MMC_RXPAUSEFRAMES_HI 0x098c
+#define MMC_RXFIFOOVERFLOW_LO 0x0990
+#define MMC_RXFIFOOVERFLOW_HI 0x0994
+#define MMC_RXVLANFRAMES_GB_LO 0x0998
+#define MMC_RXVLANFRAMES_GB_HI 0x099c
+#define MMC_RXWATCHDOGERROR 0x09a0
+
+/* MMC register entry bit positions and sizes */
+#define MMC_CR_CR_INDEX 0
+#define MMC_CR_CR_WIDTH 1
+#define MMC_CR_CSR_INDEX 1
+#define MMC_CR_CSR_WIDTH 1
+#define MMC_CR_ROR_INDEX 2
+#define MMC_CR_ROR_WIDTH 1
+#define MMC_CR_MCF_INDEX 3
+#define MMC_CR_MCF_WIDTH 1
+#define MMC_CR_MCT_INDEX 4
+#define MMC_CR_MCT_WIDTH 2
+#define MMC_RIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23
+#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0
+#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1
+#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2
+#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1
+#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3
+#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4
+#define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXCRCERROR_INDEX 5
+#define MMC_RISR_RXCRCERROR_WIDTH 1
+#define MMC_RISR_RXRUNTERROR_INDEX 6
+#define MMC_RISR_RXRUNTERROR_WIDTH 1
+#define MMC_RISR_RXJABBERERROR_INDEX 7
+#define MMC_RISR_RXJABBERERROR_WIDTH 1
+#define MMC_RISR_RXUNDERSIZE_G_INDEX 8
+#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1
+#define MMC_RISR_RXOVERSIZE_G_INDEX 9
+#define MMC_RISR_RXOVERSIZE_G_WIDTH 1
+#define MMC_RISR_RX64OCTETS_GB_INDEX 10
+#define MMC_RISR_RX64OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11
+#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12
+#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13
+#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14
+#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16
+#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXLENGTHERROR_INDEX 17
+#define MMC_RISR_RXLENGTHERROR_WIDTH 1
+#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18
+#define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1
+#define MMC_RISR_RXPAUSEFRAMES_INDEX 19
+#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1
+#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20
+#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1
+#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21
+#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1
+#define MMC_RISR_RXWATCHDOGERROR_INDEX 22
+#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1
+#define MMC_TIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18
+#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0
+#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1
+#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2
+#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3
+#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TX64OCTETS_GB_INDEX 4
+#define MMC_TISR_TX64OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5
+#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6
+#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7
+#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8
+#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10
+#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11
+#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12
+#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13
+#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1
+#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14
+#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15
+#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1
+#define MMC_TISR_TXPAUSEFRAMES_INDEX 16
+#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1
+#define MMC_TISR_TXVLANFRAMES_G_INDEX 17
+#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1
+
+/* MTL register offsets */
+#define MTL_OMR 0x1000
+#define MTL_FDCR 0x1008
+#define MTL_FDSR 0x100c
+#define MTL_FDDR 0x1010
+#define MTL_ISR 0x1020
+#define MTL_RQDCM0R 0x1030
+#define MTL_TCPM0R 0x1040
+#define MTL_TCPM1R 0x1044
+
+#define MTL_RQDCM_INC 4
+#define MTL_RQDCM_Q_PER_REG 4
+#define MTL_TCPM_INC 4
+#define MTL_TCPM_TC_PER_REG 4
+
+/* MTL register entry bit positions and sizes */
+#define MTL_OMR_ETSALG_INDEX 5
+#define MTL_OMR_ETSALG_WIDTH 2
+#define MTL_OMR_RAA_INDEX 2
+#define MTL_OMR_RAA_WIDTH 1
+
+/* MTL queue register offsets
+ * Multiple queues can be active. The first queue has registers
+ * that begin at 0x1100. Each subsequent queue has registers that
+ * are accessed using an offset of 0x80 from the previous queue.
+ */
+#define MTL_Q_BASE 0x1100
+#define MTL_Q_INC 0x80
+
+#define MTL_Q_TQOMR 0x00
+#define MTL_Q_TQUR 0x04
+#define MTL_Q_TQDR 0x08
+#define MTL_Q_RQOMR 0x40
+#define MTL_Q_RQMPOCR 0x44
+#define MTL_Q_RQDR 0x4c
+#define MTL_Q_RQFCR 0x50
+#define MTL_Q_IER 0x70
+#define MTL_Q_ISR 0x74
+
+/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQFCR_RFA_INDEX 1
+#define MTL_Q_RQFCR_RFA_WIDTH 6
+#define MTL_Q_RQFCR_RFD_INDEX 17
+#define MTL_Q_RQFCR_RFD_WIDTH 6
+#define MTL_Q_RQOMR_EHFC_INDEX 7
+#define MTL_Q_RQOMR_EHFC_WIDTH 1
+#define MTL_Q_RQOMR_RQS_INDEX 16
+#define MTL_Q_RQOMR_RQS_WIDTH 9
+#define MTL_Q_RQOMR_RSF_INDEX 5
+#define MTL_Q_RQOMR_RSF_WIDTH 1
+#define MTL_Q_RQOMR_RTC_INDEX 0
+#define MTL_Q_RQOMR_RTC_WIDTH 2
+#define MTL_Q_TQOMR_FTQ_INDEX 0
+#define MTL_Q_TQOMR_FTQ_WIDTH 1
+#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8
+#define MTL_Q_TQOMR_Q2TCMAP_WIDTH 3
+#define MTL_Q_TQOMR_TQS_INDEX 16
+#define MTL_Q_TQOMR_TQS_WIDTH 10
+#define MTL_Q_TQOMR_TSF_INDEX 1
+#define MTL_Q_TQOMR_TSF_WIDTH 1
+#define MTL_Q_TQOMR_TTC_INDEX 4
+#define MTL_Q_TQOMR_TTC_WIDTH 3
+#define MTL_Q_TQOMR_TXQEN_INDEX 2
+#define MTL_Q_TQOMR_TXQEN_WIDTH 2
+
+/* MTL queue register value */
+#define MTL_RSF_DISABLE 0x00
+#define MTL_RSF_ENABLE 0x01
+#define MTL_TSF_DISABLE 0x00
+#define MTL_TSF_ENABLE 0x01
+
+#define MTL_RX_THRESHOLD_64 0x00
+#define MTL_RX_THRESHOLD_96 0x02
+#define MTL_RX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_32 0x01
+#define MTL_TX_THRESHOLD_64 0x00
+#define MTL_TX_THRESHOLD_96 0x02
+#define MTL_TX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_192 0x04
+#define MTL_TX_THRESHOLD_256 0x05
+#define MTL_TX_THRESHOLD_384 0x06
+#define MTL_TX_THRESHOLD_512 0x07
+
+#define MTL_ETSALG_WRR 0x00
+#define MTL_ETSALG_WFQ 0x01
+#define MTL_ETSALG_DWRR 0x02
+#define MTL_RAA_SP 0x00
+#define MTL_RAA_WSP 0x01
+
+#define MTL_Q_DISABLED 0x00
+#define MTL_Q_ENABLED 0x02
+
+/* MTL traffic class register offsets
+ * Multiple traffic classes can be active. The first class has registers
+ * that begin at 0x1100. Each subsequent class has registers that
+ * are accessed using an offset of 0x80 from the previous class.
+ */
+#define MTL_TC_BASE MTL_Q_BASE
+#define MTL_TC_INC MTL_Q_INC
+
+#define MTL_TC_ETSCR 0x10
+#define MTL_TC_ETSSR 0x14
+#define MTL_TC_QWR 0x18
+
+/* MTL traffic class register entry bit positions and sizes */
+#define MTL_TC_ETSCR_TSA_INDEX 0
+#define MTL_TC_ETSCR_TSA_WIDTH 2
+#define MTL_TC_QWR_QW_INDEX 0
+#define MTL_TC_QWR_QW_WIDTH 21
+
+/* MTL traffic class register value */
+#define MTL_TSA_SP 0x00
+#define MTL_TSA_ETS 0x02
+
+/* PCS MMD select register offset
+ * The MMD select register is used for accessing PCS registers
+ * when the underlying APB3 interface is using indirect addressing.
+ * Indirect addressing requires accessing registers in two phases,
+ * an address phase and a data phase. The address phase requires
+ * writing an address selection value to the MMD select register.
+ */
+#define PCS_MMD_SELECT 0xff
+
+/* Descriptor/Packet entry bit positions and sizes */
+#define RX_PACKET_ERRORS_CRC_INDEX 2
+#define RX_PACKET_ERRORS_CRC_WIDTH 1
+#define RX_PACKET_ERRORS_FRAME_INDEX 3
+#define RX_PACKET_ERRORS_FRAME_WIDTH 1
+#define RX_PACKET_ERRORS_LENGTH_INDEX 0
+#define RX_PACKET_ERRORS_LENGTH_WIDTH 1
+#define RX_PACKET_ERRORS_OVERRUN_INDEX 1
+#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1
+
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4
+#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
+
+#define RX_NORMAL_DESC0_OVT_INDEX 0
+#define RX_NORMAL_DESC0_OVT_WIDTH 16
+#define RX_NORMAL_DESC2_HL_INDEX 0
+#define RX_NORMAL_DESC2_HL_WIDTH 10
+#define RX_NORMAL_DESC3_CDA_INDEX 27
+#define RX_NORMAL_DESC3_CDA_WIDTH 1
+#define RX_NORMAL_DESC3_CTXT_INDEX 30
+#define RX_NORMAL_DESC3_CTXT_WIDTH 1
+#define RX_NORMAL_DESC3_ES_INDEX 15
+#define RX_NORMAL_DESC3_ES_WIDTH 1
+#define RX_NORMAL_DESC3_ETLT_INDEX 16
+#define RX_NORMAL_DESC3_ETLT_WIDTH 4
+#define RX_NORMAL_DESC3_FD_INDEX 29
+#define RX_NORMAL_DESC3_FD_WIDTH 1
+#define RX_NORMAL_DESC3_INTE_INDEX 30
+#define RX_NORMAL_DESC3_INTE_WIDTH 1
+#define RX_NORMAL_DESC3_L34T_INDEX 20
+#define RX_NORMAL_DESC3_L34T_WIDTH 4
+#define RX_NORMAL_DESC3_LD_INDEX 28
+#define RX_NORMAL_DESC3_LD_WIDTH 1
+#define RX_NORMAL_DESC3_OWN_INDEX 31
+#define RX_NORMAL_DESC3_OWN_WIDTH 1
+#define RX_NORMAL_DESC3_PL_INDEX 0
+#define RX_NORMAL_DESC3_PL_WIDTH 14
+#define RX_NORMAL_DESC3_RSV_INDEX 26
+#define RX_NORMAL_DESC3_RSV_WIDTH 1
+
+#define RX_DESC3_L34T_IPV4_TCP 1
+#define RX_DESC3_L34T_IPV4_UDP 2
+#define RX_DESC3_L34T_IPV4_ICMP 3
+#define RX_DESC3_L34T_IPV6_TCP 9
+#define RX_DESC3_L34T_IPV6_UDP 10
+#define RX_DESC3_L34T_IPV6_ICMP 11
+
+#define RX_CONTEXT_DESC3_TSA_INDEX 4
+#define RX_CONTEXT_DESC3_TSA_WIDTH 1
+#define RX_CONTEXT_DESC3_TSD_INDEX 6
+#define RX_CONTEXT_DESC3_TSD_WIDTH 1
+
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_PTP_INDEX 3
+#define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1
+
+#define TX_CONTEXT_DESC2_MSS_INDEX 0
+#define TX_CONTEXT_DESC2_MSS_WIDTH 15
+#define TX_CONTEXT_DESC3_CTXT_INDEX 30
+#define TX_CONTEXT_DESC3_CTXT_WIDTH 1
+#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26
+#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1
+#define TX_CONTEXT_DESC3_VLTV_INDEX 16
+#define TX_CONTEXT_DESC3_VLTV_WIDTH 1
+#define TX_CONTEXT_DESC3_VT_INDEX 0
+#define TX_CONTEXT_DESC3_VT_WIDTH 16
+
+#define TX_NORMAL_DESC2_HL_B1L_INDEX 0
+#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
+#define TX_NORMAL_DESC2_IC_INDEX 31
+#define TX_NORMAL_DESC2_IC_WIDTH 1
+#define TX_NORMAL_DESC2_TTSE_INDEX 30
+#define TX_NORMAL_DESC2_TTSE_WIDTH 1
+#define TX_NORMAL_DESC2_VTIR_INDEX 14
+#define TX_NORMAL_DESC2_VTIR_WIDTH 2
+#define TX_NORMAL_DESC3_CIC_INDEX 16
+#define TX_NORMAL_DESC3_CIC_WIDTH 2
+#define TX_NORMAL_DESC3_CPC_INDEX 26
+#define TX_NORMAL_DESC3_CPC_WIDTH 2
+#define TX_NORMAL_DESC3_CTXT_INDEX 30
+#define TX_NORMAL_DESC3_CTXT_WIDTH 1
+#define TX_NORMAL_DESC3_FD_INDEX 29
+#define TX_NORMAL_DESC3_FD_WIDTH 1
+#define TX_NORMAL_DESC3_FL_INDEX 0
+#define TX_NORMAL_DESC3_FL_WIDTH 15
+#define TX_NORMAL_DESC3_LD_INDEX 28
+#define TX_NORMAL_DESC3_LD_WIDTH 1
+#define TX_NORMAL_DESC3_OWN_INDEX 31
+#define TX_NORMAL_DESC3_OWN_WIDTH 1
+#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19
+#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4
+#define TX_NORMAL_DESC3_TCPPL_INDEX 0
+#define TX_NORMAL_DESC3_TCPPL_WIDTH 18
+#define TX_NORMAL_DESC3_TSE_INDEX 18
+#define TX_NORMAL_DESC3_TSE_WIDTH 1
+
+#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
+
+/* MDIO undefined or vendor specific registers */
+#ifndef MDIO_AN_COMP_STAT
+#define MDIO_AN_COMP_STAT 0x0030
+#endif
+
+/* Bit setting and getting macros
+ * The get macro will extract the current bit field value from within
+ * the variable
+ *
+ * The set macro will clear the current bit field value within the
+ * variable and then set the bit field of the variable to the
+ * specified value
+ */
+#define GET_BITS(_var, _index, _width) \
+ (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS(_var, _index, _width, _val) \
+do { \
+ (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
+ (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
+} while (0)
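+
+/* Illustrative use with the field definitions above: read and bump the
+ * 9-bit receive queue size field of a cached MTL_Q_RQOMR value.
+ *
+ *   u32 rqomr = 0;
+ *   unsigned int rqs;
+ *
+ *   rqs = GET_BITS(rqomr, MTL_Q_RQOMR_RQS_INDEX, MTL_Q_RQOMR_RQS_WIDTH);
+ *   SET_BITS(rqomr, MTL_Q_RQOMR_RQS_INDEX, MTL_Q_RQOMR_RQS_WIDTH,
+ *            rqs + 1);
+ */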
+
+#define GET_BITS_LE(_var, _index, _width) \
+ ((le32_to_cpu((_var)) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS_LE(_var, _index, _width, _val) \
+do { \
+ (_var) &= cpu_to_le32(~(((0x1 << (_width)) - 1) << (_index))); \
+ (_var) |= cpu_to_le32((((_val) & \
+ ((0x1 << (_width)) - 1)) << (_index))); \
+} while (0)
+
+/* Bit setting and getting macros based on register fields
+ * The get macro uses the bit field definitions formed using the input
+ * names to extract the current bit field value from within the
+ * variable
+ *
+ * The set macro uses the bit field definitions formed using the input
+ * names to set the bit field of the variable to the specified value
+ */
+#define XGMAC_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XGMAC_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XGMAC_GET_BITS_LE(_var, _prefix, _field) \
+ GET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \
+ SET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
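+
+/* Illustrative equivalence: the token-pasting form on the first line
+ * expands to the explicit GET_BITS() call on the second.
+ *
+ *   XGMAC_GET_BITS(attr, TX_PACKET_ATTRIBUTES, TSO_ENABLE);
+ *   GET_BITS(attr, TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX,
+ *            TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH);
+ */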
+
+/* Macros for reading or writing registers
+ * The ioread macros will get bit fields or full values using the
+ * register definitions formed using the input names
+ *
+ * The iowrite macros will set bit fields or full values using the
+ * register definitions formed using the input names
+ */
+#define XGMAC_IOREAD(_pdata, _reg) \
+ ioread32((_pdata)->xgmac_regs + _reg)
+
+#define XGMAC_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XGMAC_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_IOWRITE(_pdata, _reg, _val) \
+ iowrite32((_val), (_pdata)->xgmac_regs + _reg)
+
+#define XGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
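+
+/* Illustrative read-modify-write: select the WRR ETS algorithm by
+ * updating only the two-bit ETSALG field of the MTL operation mode
+ * register, leaving all other bits untouched.
+ *
+ *   XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
+ */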
+
+/* Macros for reading or writing MTL queue or traffic class registers
+ * Similar to the standard read and write macros except that the
+ * base register value is calculated by the queue or traffic class number
+ */
+#define XGMAC_MTL_IOREAD(_pdata, _n, _reg) \
+ ioread32((_pdata)->xgmac_regs + \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
+
+#define XGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
+ GET_BITS(XGMAC_MTL_IOREAD((_pdata), (_n), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
+ iowrite32((_val), (_pdata)->xgmac_regs + \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
+
+#define XGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_MTL_IOREAD((_pdata), (_n), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \
+} while (0)
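+
+/* Illustrative use: enable store-and-forward mode on the Tx queue
+ * belonging to queue number i.
+ *
+ *   XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, MTL_TSF_ENABLE);
+ */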
+
+/* Macros for reading or writing DMA channel registers
+ * Similar to the standard read and write macros except that the
+ * base register value is obtained from the ring
+ */
+#define XGMAC_DMA_IOREAD(_channel, _reg) \
+ ioread32((_channel)->dma_regs + _reg)
+
+#define XGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
+ GET_BITS(XGMAC_DMA_IOREAD((_channel), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_DMA_IOWRITE(_channel, _reg, _val) \
+ iowrite32((_val), (_channel)->dma_regs + _reg)
+
+#define XGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_DMA_IOREAD((_channel), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \
+} while (0)
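+
+/* Illustrative use, assuming the DMA_CH_RIWT register and its RWT field
+ * defined earlier in this header: program a channel's Rx interrupt
+ * watchdog timer (rwt is a hypothetical timer count).
+ *
+ *   XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT, rwt);
+ */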
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of XPCS registers.
+ */
+#define XPCS_IOWRITE(_pdata, _off, _val) \
+ iowrite32(_val, (_pdata)->xpcs_regs + (_off))
+
+#define XPCS_IOREAD(_pdata, _off) \
+ ioread32((_pdata)->xpcs_regs + (_off))
+
+/* Macros for building, reading or writing register values or bits
+ * using MDIO. Different from above because of the use of standardized
+ * Linux include values. No shifting is performed with the bit
+ * operations, everything works on mask values.
+ */
+#define XMDIO_READ(_pdata, _mmd, _reg) \
+ ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
+
+#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
+ (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
+
+#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
+ ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
+
+#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
+do { \
+ u32 mmd_val = XMDIO_READ((_pdata), _mmd, _reg); \
+ mmd_val &= ~_mask; \
+ mmd_val |= (_val); \
+ XMDIO_WRITE((_pdata), _mmd, _reg, mmd_val); \
+} while (0)
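+
+/* Illustrative use with the standard definitions from <linux/mdio.h>:
+ * test the PCS link status bit. Because no shifting is performed, the
+ * result is zero or the unshifted mask value, not 0 or 1.
+ *
+ *   if (XMDIO_READ_BITS(pdata, MDIO_MMD_PCS, MDIO_STAT1,
+ *                       MDIO_STAT1_LSTATUS))
+ *           ...;
+ */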
+
+#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
new file mode 100644
index 000000000..8a50b01c2
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
@@ -0,0 +1,269 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/netdevice.h>
+#include <net/dcbnl.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static int xgbe_dcb_ieee_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ /* Set number of supported traffic classes */
+ ets->ets_cap = pdata->hw_feat.tc_cnt;
+
+ if (pdata->ets) {
+ ets->cbs = pdata->ets->cbs;
+ memcpy(ets->tc_tx_bw, pdata->ets->tc_tx_bw,
+ sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_tsa, pdata->ets->tc_tsa,
+ sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, pdata->ets->prio_tc,
+ sizeof(ets->prio_tc));
+ }
+
+ return 0;
+}
+
+static int xgbe_dcb_ieee_setets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int i, tc_ets, tc_ets_weight;
+
+ tc_ets = 0;
+ tc_ets_weight = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ DBGPR(" TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
+ ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]);
+ DBGPR(" PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]);
+
+ if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) &&
+ (i >= pdata->hw_feat.tc_cnt))
+ return -EINVAL;
+
+ if (ets->prio_tc[i] >= pdata->hw_feat.tc_cnt)
+ return -EINVAL;
+
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ tc_ets = 1;
+ tc_ets_weight += ets->tc_tx_bw[i];
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Weights must add up to 100% */
+ if (tc_ets && (tc_ets_weight != 100))
+ return -EINVAL;
+
+ if (!pdata->ets) {
+ pdata->ets = devm_kzalloc(pdata->dev, sizeof(*pdata->ets),
+ GFP_KERNEL);
+ if (!pdata->ets)
+ return -ENOMEM;
+ }
+
+ memcpy(pdata->ets, ets, sizeof(*pdata->ets));
+
+ pdata->hw_if.config_dcb_tc(pdata);
+
+ return 0;
+}
+
+static int xgbe_dcb_ieee_getpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ /* Set number of supported PFC traffic classes */
+ pfc->pfc_cap = pdata->hw_feat.tc_cnt;
+
+ if (pdata->pfc) {
+ pfc->pfc_en = pdata->pfc->pfc_en;
+ pfc->mbc = pdata->pfc->mbc;
+ pfc->delay = pdata->pfc->delay;
+ }
+
+ return 0;
+}
+
+static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ DBGPR(" cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n",
+ pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
+
+ if (!pdata->pfc) {
+ pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc),
+ GFP_KERNEL);
+ if (!pdata->pfc)
+ return -ENOMEM;
+ }
+
+ memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
+
+ pdata->hw_if.config_dcb_pfc(pdata);
+
+ return 0;
+}
+
+static u8 xgbe_dcb_getdcbx(struct net_device *netdev)
+{
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx)
+{
+ u8 support = xgbe_dcb_getdcbx(netdev);
+
+ DBGPR(" DCBX=%#hhx\n", dcbx);
+
+ if (dcbx & ~support)
+ return 1;
+
+ if ((dcbx & support) != support)
+ return 1;
+
+ return 0;
+}
+
+static const struct dcbnl_rtnl_ops xgbe_dcbnl_ops = {
+ /* IEEE 802.1Qaz std */
+ .ieee_getets = xgbe_dcb_ieee_getets,
+ .ieee_setets = xgbe_dcb_ieee_setets,
+ .ieee_getpfc = xgbe_dcb_ieee_getpfc,
+ .ieee_setpfc = xgbe_dcb_ieee_setpfc,
+
+ /* DCBX configuration */
+ .getdcbx = xgbe_dcb_getdcbx,
+ .setdcbx = xgbe_dcb_setdcbx,
+};
+
+const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void)
+{
+ return &xgbe_dcbnl_ops;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
new file mode 100644
index 000000000..2c063b60d
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -0,0 +1,373 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static ssize_t xgbe_common_read(char __user *buffer, size_t count,
+ loff_t *ppos, unsigned int value)
+{
+ char *buf;
+ ssize_t len;
+
+ if (*ppos != 0)
+ return 0;
+
+ buf = kasprintf(GFP_KERNEL, "0x%08x\n", value);
+ if (!buf)
+ return -ENOMEM;
+
+ if (count < strlen(buf)) {
+ kfree(buf);
+ return -ENOSPC;
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ kfree(buf);
+
+ return len;
+}
+
+static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
+ loff_t *ppos, unsigned int *value)
+{
+ char workarea[32];
+ ssize_t len;
+ int ret;
+
+ if (*ppos != 0)
+ return 0;
+
+ if (count >= sizeof(workarea))
+ return -ENOSPC;
+
+ len = simple_write_to_buffer(workarea, sizeof(workarea) - 1, ppos,
+ buffer, count);
+ if (len < 0)
+ return len;
+
+ workarea[len] = '\0';
+ ret = kstrtouint(workarea, 16, value);
+ if (ret)
+ return -EIO;
+
+ return len;
+}
+
+static ssize_t xgmac_reg_addr_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xgmac_reg);
+}
+
+static ssize_t xgmac_reg_addr_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xgmac_reg);
+}
+
+static ssize_t xgmac_reg_value_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+
+ value = XGMAC_IOREAD(pdata, pdata->debugfs_xgmac_reg);
+
+ return xgbe_common_read(buffer, count, ppos, value);
+}
+
+static ssize_t xgmac_reg_value_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+ ssize_t len;
+
+ len = xgbe_common_write(buffer, count, ppos, &value);
+ if (len < 0)
+ return len;
+
+ XGMAC_IOWRITE(pdata, pdata->debugfs_xgmac_reg, value);
+
+ return len;
+}
+
+static const struct file_operations xgmac_reg_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xgmac_reg_addr_read,
+ .write = xgmac_reg_addr_write,
+};
+
+static const struct file_operations xgmac_reg_value_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xgmac_reg_value_read,
+ .write = xgmac_reg_value_write,
+};
+
+static ssize_t xpcs_mmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_mmd);
+}
+
+static ssize_t xpcs_mmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xpcs_mmd);
+}
+
+static ssize_t xpcs_reg_addr_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_reg);
+}
+
+static ssize_t xpcs_reg_addr_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xpcs_reg);
+}
+
+static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+
+ value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd,
+ pdata->debugfs_xpcs_reg);
+
+ return xgbe_common_read(buffer, count, ppos, value);
+}
+
+static ssize_t xpcs_reg_value_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+ ssize_t len;
+
+ len = xgbe_common_write(buffer, count, ppos, &value);
+ if (len < 0)
+ return len;
+
+ XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg,
+ value);
+
+ return len;
+}
+
+static const struct file_operations xpcs_mmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_mmd_read,
+ .write = xpcs_mmd_write,
+};
+
+static const struct file_operations xpcs_reg_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_reg_addr_read,
+ .write = xpcs_reg_addr_write,
+};
+
+static const struct file_operations xpcs_reg_value_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_reg_value_read,
+ .write = xpcs_reg_value_write,
+};
+
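+/* Example userspace interaction (illustrative, assuming debugfs is
+ * mounted at /sys/kernel/debug and the interface is named eth0):
+ *
+ *   echo 0x1000 > /sys/kernel/debug/amd-xgbe-eth0/xgmac_register
+ *   cat /sys/kernel/debug/amd-xgbe-eth0/xgmac_register_value
+ *
+ * The first write latches the register address; the read then returns
+ * the current value of that register via XGMAC_IOREAD().
+ */
+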
+void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
+{
+ struct dentry *pfile;
+ char *buf;
+
+ /* Set defaults */
+ pdata->debugfs_xgmac_reg = 0;
+ pdata->debugfs_xpcs_mmd = 1;
+ pdata->debugfs_xpcs_reg = 0;
+
+	buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
+	if (!buf)
+		return;
+
+	pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
+ if (!pdata->xgbe_debugfs) {
+ netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
+ return;
+ }
+
+ pfile = debugfs_create_file("xgmac_register", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xgmac_reg_addr_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xgmac_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xgmac_reg_value_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xpcs_mmd", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xpcs_mmd_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xpcs_register", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xpcs_reg_addr_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xpcs_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xpcs_reg_value_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ kfree(buf);
+}
+
+void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
+{
+ debugfs_remove_recursive(pdata->xgbe_debugfs);
+ pdata->xgbe_debugfs = NULL;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
new file mode 100644
index 000000000..5c92fb71b
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -0,0 +1,636 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
+
+static void xgbe_free_ring(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring)
+{
+ struct xgbe_ring_data *rdata;
+ unsigned int i;
+
+ if (!ring)
+ return;
+
+ if (ring->rdata) {
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = XGBE_GET_DESC_DATA(ring, i);
+ xgbe_unmap_rdata(pdata, rdata);
+ }
+
+ kfree(ring->rdata);
+ ring->rdata = NULL;
+ }
+
+ if (ring->rx_hdr_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
+ ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_hdr_pa.pages);
+
+ ring->rx_hdr_pa.pages = NULL;
+ ring->rx_hdr_pa.pages_len = 0;
+ ring->rx_hdr_pa.pages_offset = 0;
+ ring->rx_hdr_pa.pages_dma = 0;
+ }
+
+ if (ring->rx_buf_pa.pages) {
+ dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
+ ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
+ put_page(ring->rx_buf_pa.pages);
+
+ ring->rx_buf_pa.pages = NULL;
+ ring->rx_buf_pa.pages_len = 0;
+ ring->rx_buf_pa.pages_offset = 0;
+ ring->rx_buf_pa.pages_dma = 0;
+ }
+
+ if (ring->rdesc) {
+ dma_free_coherent(pdata->dev,
+ (sizeof(struct xgbe_ring_desc) *
+ ring->rdesc_count),
+ ring->rdesc, ring->rdesc_dma);
+ ring->rdesc = NULL;
+ }
+}
+
+static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_free_ring_resources\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ xgbe_free_ring(pdata, channel->tx_ring);
+ xgbe_free_ring(pdata, channel->rx_ring);
+ }
+
+ DBGPR("<--xgbe_free_ring_resources\n");
+}
+
+static int xgbe_init_ring(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring, unsigned int rdesc_count)
+{
+ DBGPR("-->xgbe_init_ring\n");
+
+ if (!ring)
+ return 0;
+
+ /* Descriptors */
+ ring->rdesc_count = rdesc_count;
+ ring->rdesc = dma_alloc_coherent(pdata->dev,
+ (sizeof(struct xgbe_ring_desc) *
+ rdesc_count), &ring->rdesc_dma,
+ GFP_KERNEL);
+ if (!ring->rdesc)
+ return -ENOMEM;
+
+ /* Descriptor information */
+ ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
+ GFP_KERNEL);
+ if (!ring->rdata)
+ return -ENOMEM;
+
+ DBGPR(" rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
+ ring->rdesc, ring->rdesc_dma, ring->rdata);
+
+ DBGPR("<--xgbe_init_ring\n");
+
+ return 0;
+}
+
+static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+ int ret;
+
+ DBGPR("-->xgbe_alloc_ring_resources\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ DBGPR(" %s - tx_ring:\n", channel->name);
+ ret = xgbe_init_ring(pdata, channel->tx_ring,
+ pdata->tx_desc_count);
+ if (ret) {
+ netdev_alert(pdata->netdev,
+ "error initializing Tx ring\n");
+ goto err_ring;
+ }
+
+ DBGPR(" %s - rx_ring:\n", channel->name);
+ ret = xgbe_init_ring(pdata, channel->rx_ring,
+ pdata->rx_desc_count);
+ if (ret) {
+ netdev_alert(pdata->netdev,
+				     "error initializing Rx ring\n");
+ goto err_ring;
+ }
+ }
+
+ DBGPR("<--xgbe_alloc_ring_resources\n");
+
+ return 0;
+
+err_ring:
+ xgbe_free_ring_resources(pdata);
+
+ return ret;
+}
+
+static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+ struct xgbe_page_alloc *pa, gfp_t gfp, int order)
+{
+ struct page *pages = NULL;
+ dma_addr_t pages_dma;
+ int ret;
+
+ /* Try to obtain pages, decreasing order if necessary */
+ gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+ while (order >= 0) {
+ pages = alloc_pages(gfp, order);
+ if (pages)
+ break;
+
+ order--;
+ }
+ if (!pages)
+ return -ENOMEM;
+
+ /* Map the pages */
+ pages_dma = dma_map_page(pdata->dev, pages, 0,
+ PAGE_SIZE << order, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(pdata->dev, pages_dma);
+ if (ret) {
+ put_page(pages);
+ return ret;
+ }
+
+ pa->pages = pages;
+ pa->pages_len = PAGE_SIZE << order;
+ pa->pages_offset = 0;
+ pa->pages_dma = pages_dma;
+
+ return 0;
+}
+
+static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
+ struct xgbe_page_alloc *pa,
+ unsigned int len)
+{
+ get_page(pa->pages);
+ bd->pa = *pa;
+
+ bd->dma = pa->pages_dma + pa->pages_offset;
+ bd->dma_len = len;
+
+ pa->pages_offset += len;
+ if ((pa->pages_offset + len) > pa->pages_len) {
+ /* This data descriptor is responsible for unmapping page(s) */
+ bd->pa_unmap = *pa;
+
+ /* Get a new allocation next time */
+ pa->pages = NULL;
+ pa->pages_len = 0;
+ pa->pages_offset = 0;
+ pa->pages_dma = 0;
+ }
+}
+
+static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring,
+ struct xgbe_ring_data *rdata)
+{
+ int order, ret;
+
+ if (!ring->rx_hdr_pa.pages) {
+ ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (!ring->rx_buf_pa.pages) {
+ order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
+ ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
+ order);
+ if (ret)
+ return ret;
+ }
+
+ /* Set up the header page info */
+ xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
+ XGBE_SKB_ALLOC_SIZE);
+
+ /* Set up the buffer page info */
+ xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
+ pdata->rx_buf_size);
+
+ return 0;
+}
+
+static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ dma_addr_t rdesc_dma;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+
+ rdesc = ring->rdesc;
+ rdesc_dma = ring->rdesc_dma;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = XGBE_GET_DESC_DATA(ring, j);
+
+ rdata->rdesc = rdesc;
+ rdata->rdesc_dma = rdesc_dma;
+
+ rdesc++;
+ rdesc_dma += sizeof(struct xgbe_ring_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+ memset(&ring->tx, 0, sizeof(ring->tx));
+
+ hw_if->tx_desc_init(channel);
+ }
+
+ DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
+}
+
+static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_ring_data *rdata;
+ dma_addr_t rdesc_dma;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+
+ rdesc = ring->rdesc;
+ rdesc_dma = ring->rdesc_dma;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = XGBE_GET_DESC_DATA(ring, j);
+
+ rdata->rdesc = rdesc;
+ rdata->rdesc_dma = rdesc_dma;
+
+ if (xgbe_map_rx_buffer(pdata, ring, rdata))
+ break;
+
+ rdesc++;
+ rdesc_dma += sizeof(struct xgbe_ring_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+
+ hw_if->rx_desc_init(channel);
+ }
+
+ DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
+}
+
+static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
+ struct xgbe_ring_data *rdata)
+{
+ if (rdata->skb_dma) {
+ if (rdata->mapped_as_page) {
+ dma_unmap_page(pdata->dev, rdata->skb_dma,
+ rdata->skb_dma_len, DMA_TO_DEVICE);
+ } else {
+ dma_unmap_single(pdata->dev, rdata->skb_dma,
+ rdata->skb_dma_len, DMA_TO_DEVICE);
+ }
+ rdata->skb_dma = 0;
+ rdata->skb_dma_len = 0;
+ }
+
+ if (rdata->skb) {
+ dev_kfree_skb_any(rdata->skb);
+ rdata->skb = NULL;
+ }
+
+ if (rdata->rx.hdr.pa.pages)
+ put_page(rdata->rx.hdr.pa.pages);
+
+ if (rdata->rx.hdr.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
+ rdata->rx.hdr.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(rdata->rx.hdr.pa_unmap.pages);
+ }
+
+ if (rdata->rx.buf.pa.pages)
+ put_page(rdata->rx.buf.pa.pages);
+
+ if (rdata->rx.buf.pa_unmap.pages) {
+ dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
+ rdata->rx.buf.pa_unmap.pages_len,
+ DMA_FROM_DEVICE);
+ put_page(rdata->rx.buf.pa_unmap.pages);
+ }
+
+ memset(&rdata->tx, 0, sizeof(rdata->tx));
+ memset(&rdata->rx, 0, sizeof(rdata->rx));
+
+ rdata->mapped_as_page = 0;
+
+ if (rdata->state_saved) {
+ rdata->state_saved = 0;
+ rdata->state.incomplete = 0;
+ rdata->state.context_next = 0;
+ rdata->state.skb = NULL;
+ rdata->state.len = 0;
+ rdata->state.error = 0;
+ }
+}
+
+static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_packet_data *packet;
+ struct skb_frag_struct *frag;
+ dma_addr_t skb_dma;
+ unsigned int start_index, cur_index;
+ unsigned int offset, tso, vlan, datalen, len;
+ unsigned int i;
+
+ DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);
+
+ offset = 0;
+ start_index = ring->cur;
+ cur_index = ring->cur;
+
+ packet = &ring->packet_data;
+ packet->rdesc_count = 0;
+ packet->length = 0;
+
+ tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE);
+ vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG);
+
+ /* Save space for a context descriptor if needed */
+ if ((tso && (packet->mss != ring->tx.cur_mss)) ||
+ (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+
+ if (tso) {
+ DBGPR(" TSO packet\n");
+
+ /* Map the TSO header */
+ skb_dma = dma_map_single(pdata->dev, skb->data,
+ packet->header_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = packet->header_len;
+
+ offset = packet->header_len;
+
+ packet->length += packet->header_len;
+
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ }
+
+ /* Map the (remainder of the) packet */
+ for (datalen = skb_headlen(skb) - offset; datalen; ) {
+ len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);
+
+ skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = len;
+ DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
+ cur_index, skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ packet->length += len;
+
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ DBGPR(" mapping frag %u\n", i);
+
+ frag = &skb_shinfo(skb)->frags[i];
+ offset = 0;
+
+ for (datalen = skb_frag_size(frag); datalen; ) {
+ len = min_t(unsigned int, datalen,
+ XGBE_TX_MAX_BUF_SIZE);
+
+ skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev,
+ "skb_frag_dma_map failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = len;
+ rdata->mapped_as_page = 1;
+ DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
+ cur_index, skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ packet->length += len;
+
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ }
+ }
+
+ /* Save the skb address in the last entry. We always have some data
+ * that has been mapped so rdata is always advanced past the last
+ * piece of mapped data - use the entry pointed to by cur_index - 1.
+ */
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
+ rdata->skb = skb;
+
+ /* Save the number of descriptor entries used */
+ packet->rdesc_count = cur_index - start_index;
+
+ DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);
+
+ return packet->rdesc_count;
+
+err_out:
+ while (start_index < cur_index) {
+ rdata = XGBE_GET_DESC_DATA(ring, start_index++);
+ xgbe_unmap_rdata(pdata, rdata);
+ }
+
+ DBGPR("<--xgbe_map_tx_skb: count=0\n");
+
+ return 0;
+}
+
+void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
+{
+ DBGPR("-->xgbe_init_function_ptrs_desc\n");
+
+ desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
+ desc_if->free_ring_resources = xgbe_free_ring_resources;
+ desc_if->map_tx_skb = xgbe_map_tx_skb;
+ desc_if->map_rx_buffer = xgbe_map_rx_buffer;
+ desc_if->unmap_rdata = xgbe_unmap_rdata;
+ desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
+ desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
+
+ DBGPR("<--xgbe_init_function_ptrs_desc\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
new file mode 100644
index 000000000..21d949751
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -0,0 +1,2943 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/clk.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
+ unsigned int usec)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ DBGPR("-->xgbe_usec_to_riwt\n");
+
+ rate = pdata->sysclk_rate;
+
+ /*
+ * Convert the input usec value to the watchdog timer value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+ *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
+ */
+ ret = (usec * (rate / 1000000)) / 256;
+
+ DBGPR("<--xgbe_usec_to_riwt\n");
+
+ return ret;
+}
+
+static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
+ unsigned int riwt)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ DBGPR("-->xgbe_riwt_to_usec\n");
+
+ rate = pdata->sysclk_rate;
+
+ /*
+ * Convert the input watchdog timer value to the usec value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+ *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
+ */
+ ret = (riwt * 256) / (rate / 1000000);
+
+ DBGPR("<--xgbe_riwt_to_usec\n");
+
+ return ret;
+}
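+
+/* Worked example (illustrative figures, not hardware defaults): with a
+ * sysclk_rate of 125000000 Hz, rate / 10^6 gives 125 cycles per usec,
+ * so one watchdog tick spans 256 / 125 = ~2 usec.  xgbe_usec_to_riwt()
+ * maps 100 usec to (100 * 125) / 256 = 48 ticks, and
+ * xgbe_riwt_to_usec() maps 48 ticks back to (48 * 256) / 125 = 98 usec;
+ * the 2 usec difference is truncation from the integer divisions.
+ */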
+
+static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
+ pdata->pblx8);
+
+ return 0;
+}
+
+static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
+}
+
+static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
+ pdata->tx_pbl);
+ }
+
+ return 0;
+}
+
+static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
+}
+
+static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
+ pdata->rx_pbl);
+ }
+
+ return 0;
+}
+
+static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
+ pdata->tx_osp_mode);
+ }
+
+ return 0;
+}
+
+static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
+
+ return 0;
+}
+
+static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
+
+ return 0;
+}
+
+static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
+
+ return 0;
+}
+
+static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
+
+ return 0;
+}
+
+static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
+ pdata->rx_riwt);
+ }
+
+ return 0;
+}
+
+static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
+{
+ return 0;
+}
+
+static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
+ pdata->rx_buf_size);
+ }
+}
+
+static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
+ }
+}
+
+static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
+ }
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+}
+
+static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
+ unsigned int index, unsigned int val)
+{
+ unsigned int wait;
+ int ret = 0;
+
+ mutex_lock(&pdata->rss_mutex);
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ XGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
+ wait = 1000;
+ while (wait--) {
+ if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+ goto unlock;
+
+ usleep_range(1000, 1500);
+ }
+
+ ret = -EBUSY;
+
+unlock:
+ mutex_unlock(&pdata->rss_mutex);
+
+ return ret;
+}
+
+static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
+{
+ unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+ unsigned int *key = (unsigned int *)&pdata->rss_key;
+ int ret;
+
+ while (key_regs--) {
+ ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
+ key_regs, *key++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
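+
+/* Note on the loop above: the post-decrement walks the register index
+ * down from key_regs - 1 to 0 while the key pointer walks up, so the
+ * first 32-bit word of rss_key lands in the highest-numbered hash key
+ * register, each word going through the MAC_RSSAR/MAC_RSSDR indirect
+ * write handshake in xgbe_write_rss_reg().
+ */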
+
+static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ ret = xgbe_write_rss_reg(pdata,
+ XGBE_RSS_LOOKUP_TABLE_TYPE, i,
+ pdata->rss_table[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
+{
+ memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
+
+ return xgbe_write_rss_hash_key(pdata);
+}
+
+static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
+ const u32 *table)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
+ XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
+
+ return xgbe_write_rss_lookup_table(pdata);
+}
+
+static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ /* Program the hash key */
+ ret = xgbe_write_rss_hash_key(pdata);
+ if (ret)
+ return ret;
+
+ /* Program the lookup table */
+ ret = xgbe_write_rss_lookup_table(pdata);
+ if (ret)
+ return ret;
+
+ /* Set the RSS options */
+ XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+ /* Enable RSS */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+ return 0;
+}
+
+static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
+
+ return 0;
+}
+
+static void xgbe_config_rss(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return;
+
+ if (pdata->netdev->features & NETIF_F_RXHASH)
+ ret = xgbe_enable_rss(pdata);
+ else
+ ret = xgbe_disable_rss(pdata);
+
+ if (ret)
+ netdev_err(pdata->netdev,
+ "error configuring RSS, RSS disabled\n");
+}
+
+static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Clear MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
+
+ /* Clear MAC flow control */
+ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = XGMAC_IOREAD(pdata, reg);
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Set MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
+
+ /* Set MAC flow control */
+ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = XGMAC_IOREAD(pdata, reg);
+
+ /* Enable transmit flow control */
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
+ /* Set pause time */
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
+
+ return 0;
+}
+
+static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ struct ieee_pfc *pfc = pdata->pfc;
+
+ if (pdata->tx_pause || (pfc && pfc->pfc_en))
+ xgbe_enable_tx_flow_control(pdata);
+ else
+ xgbe_disable_tx_flow_control(pdata);
+
+ return 0;
+}
+
+static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ struct ieee_pfc *pfc = pdata->pfc;
+
+ if (pdata->rx_pause || (pfc && pfc->pfc_en))
+ xgbe_enable_rx_flow_control(pdata);
+ else
+ xgbe_disable_rx_flow_control(pdata);
+
+ return 0;
+}
+
+static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
+{
+ struct ieee_pfc *pfc = pdata->pfc;
+
+ xgbe_config_tx_flow_control(pdata);
+ xgbe_config_rx_flow_control(pdata);
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
+ (pfc && pfc->pfc_en) ? 1 : 0);
+}
+
+static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int dma_ch_isr, dma_ch_ier;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ /* Clear all the interrupts which are set */
+ dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
+
+ /* Clear all interrupt enable bits */
+ dma_ch_ier = 0;
+
+		/* Enable the following interrupts
+ * NIE - Normal Interrupt Summary Enable
+ * AIE - Abnormal Interrupt Summary Enable
+ * FBEE - Fatal Bus Error Enable
+ */
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
+
+ if (channel->tx_ring) {
+ /* Enable the following Tx interrupts
+ * TIE - Transmit Interrupt Enable (unless using
+ * per channel interrupts)
+ */
+ if (!pdata->per_channel_irq)
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+ }
+ if (channel->rx_ring) {
+			/* Enable the following Rx interrupts
+ * RBUE - Receive Buffer Unavailable Enable
+ * RIE - Receive Interrupt Enable (unless using
+ * per channel interrupts)
+ */
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
+ if (!pdata->per_channel_irq)
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+ }
+
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+ }
+}
+
+static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
+{
+ unsigned int mtl_q_isr;
+ unsigned int q_count, i;
+
+ q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+ for (i = 0; i < q_count; i++) {
+ /* Clear all the interrupts which are set */
+ mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
+ XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
+
+ /* No MTL interrupts to be enabled */
+ XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
+ }
+}
+
+static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
+{
+ unsigned int mac_ier = 0;
+
+ /* Enable Timestamp interrupt */
+ XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);
+
+ XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);
+
+ /* Enable all counter interrupts */
+ XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
+ XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
+}
+
+static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
+{
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3)
+ return 0;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
+
+ return 0;
+}
+
+static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
+{
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2)
+ return 0;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
+
+ return 0;
+}
+
+static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
+{
+ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0)
+ return 0;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
+
+ return 0;
+}
+
+static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
+ return 0;
+
+ DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving");
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
+
+ return 0;
+}
+
+static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
+ return 0;
+
+ DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving");
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
+
+ return 0;
+}
+
+static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
+ struct netdev_hw_addr *ha, unsigned int *mac_reg)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+ u8 *mac_addr;
+
+ mac_addr_lo = 0;
+ mac_addr_hi = 0;
+
+ if (ha) {
+ mac_addr = (u8 *)&mac_addr_lo;
+ mac_addr[0] = ha->addr[0];
+ mac_addr[1] = ha->addr[1];
+ mac_addr[2] = ha->addr[2];
+ mac_addr[3] = ha->addr[3];
+ mac_addr = (u8 *)&mac_addr_hi;
+ mac_addr[0] = ha->addr[4];
+ mac_addr[1] = ha->addr[5];
+
+ DBGPR(" adding mac address %pM at 0x%04x\n", ha->addr,
+ *mac_reg);
+
+ XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
+ }
+
+ XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
+ *mac_reg += MAC_MACA_INC;
+ XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
+ *mac_reg += MAC_MACA_INC;
+}
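+
+/* Worked example (illustrative): on a little-endian host, which is what
+ * the byte-pointer packing above assumes, the address 00:11:22:33:44:55
+ * produces mac_addr_lo = 0x33221100 and mac_addr_hi = 0x00005544 before
+ * the AE (address enable) bit is set in the high word.
+ */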
+
+static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int mac_reg;
+ unsigned int addn_macs;
+
+ mac_reg = MAC_MACA1HR;
+ addn_macs = pdata->hw_feat.addn_mac;
+
+ if (netdev_uc_count(netdev) > addn_macs) {
+ xgbe_set_promiscuous_mode(pdata, 1);
+ } else {
+ netdev_for_each_uc_addr(ha, netdev) {
+ xgbe_set_mac_reg(pdata, ha, &mac_reg);
+ addn_macs--;
+ }
+
+ if (netdev_mc_count(netdev) > addn_macs) {
+ xgbe_set_all_multicast_mode(pdata, 1);
+ } else {
+ netdev_for_each_mc_addr(ha, netdev) {
+ xgbe_set_mac_reg(pdata, ha, &mac_reg);
+ addn_macs--;
+ }
+ }
+ }
+
+ /* Clear remaining additional MAC address entries */
+ while (addn_macs--)
+ xgbe_set_mac_reg(pdata, NULL, &mac_reg);
+}
+
+static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_hw_addr *ha;
+ unsigned int hash_reg;
+ unsigned int hash_table_shift, hash_table_count;
+ u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
+ u32 crc;
+ unsigned int i;
+
+ hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
+ hash_table_count = pdata->hw_feat.hash_table_size / 32;
+ memset(hash_table, 0, sizeof(hash_table));
+
+ /* Build the MAC Hash Table register values */
+ netdev_for_each_uc_addr(ha, netdev) {
+ crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+ crc >>= hash_table_shift;
+ hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+ }
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+ crc >>= hash_table_shift;
+ hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+ }
+
+ /* Set the MAC Hash Table registers */
+ hash_reg = MAC_HTR0;
+ for (i = 0; i < hash_table_count; i++) {
+ XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
+ hash_reg += MAC_HTR_INC;
+ }
+}
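+
+/* Worked example (illustrative): with a 256-entry hash table,
+ * hash_table_shift = 26 - (256 >> 7) = 24, so the bit-reversed CRC is
+ * reduced to its top 8 bits.  That value selects one of 256 bits spread
+ * across hash_table_count = 256 / 32 = 8 MAC_HTRx registers: word
+ * crc >> 5, bit crc & 0x1f.
+ */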
+
+static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
+{
+ if (pdata->hw_feat.hash_table_size)
+ xgbe_set_mac_hash_table(pdata);
+ else
+ xgbe_set_mac_addn_addrs(pdata);
+
+ return 0;
+}
+
+static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+
+ mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
+ mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
+ (addr[1] << 8) | (addr[0] << 0);
+
+ XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
+ XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
+
+ return 0;
+}
+
+static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ unsigned int pr_mode, am_mode;
+
+ pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
+ am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+
+ xgbe_set_promiscuous_mode(pdata, pr_mode);
+ xgbe_set_all_multicast_mode(pdata, am_mode);
+
+ xgbe_add_mac_addresses(pdata);
+
+ return 0;
+}
+
+static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned int mmd_address;
+ int mmd_data;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying APB3
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 32-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 2 bits and reading 32 bits of data.
+ */
+ mutex_lock(&pdata->xpcs_mutex);
+ XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
+ mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
+ mutex_unlock(&pdata->xpcs_mutex);
+
+ return mmd_data;
+}
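+
+/* Worked example (illustrative): a C45 read of register 0x0007 in the
+ * PCS MMD gives mmd_address = (MDIO_MMD_PCS << 16) | 0x0007 = 0x30007.
+ * The address phase writes 0x30007 >> 8 = 0x300 to the select register
+ * at PCS_MMD_SELECT << 2, and the data phase reads 32 bits from
+ * (0x30007 & 0xff) << 2 = 0x1c.
+ */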
+
+static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ unsigned int mmd_address;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* If the PCS is changing modes, match the MAC speed to it */
+ if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
+ ((mmd_address & 0xffff) == MDIO_CTRL2)) {
+ struct phy_device *phydev = pdata->phydev;
+
+ if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
+ /* KX mode */
+ if (phydev->supported & SUPPORTED_1000baseKX_Full)
+ xgbe_set_gmii_speed(pdata);
+ else
+ xgbe_set_gmii_2500_speed(pdata);
+ } else {
+ /* KR mode */
+ xgbe_set_xgmii_speed(pdata);
+ }
+ }
+
+ /* The PCS registers are accessed using mmio. The underlying APB3
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing of the PCS register in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 32-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 2 bits and reading 32 bits of data.
+ */
+ mutex_lock(&pdata->xpcs_mutex);
+ XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
+ XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
+ mutex_unlock(&pdata->xpcs_mutex);
+}
+
+static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
+{
+ return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
+}
+
+static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+{
+ /* Put the VLAN tag in the Rx descriptor */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
+
+ /* Don't check the VLAN type */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
+
+ /* Check only C-TAG (0x8100) packets */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
+
+ /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
+
+ /* Enable VLAN tag stripping */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
+
+ return 0;
+}
+
+static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+ /* Enable VLAN filtering */
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
+
+ /* Enable VLAN Hash Table filtering */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);
+
+ /* Disable VLAN tag inverse matching */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);
+
+ /* Only filter on the lower 12-bits of the VLAN tag */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
+
+ /* In order for the VLAN Hash Table filtering to be effective,
+ * the VLAN tag identifier in the VLAN Tag Register must not
+ * be zero. Set the VLAN tag identifier to "1" to enable the
+ * VLAN Hash Table filtering. This implies that a VLAN tag of
+ * 1 will always pass filtering.
+ */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
+
+ return 0;
+}
+
+static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+ /* Disable VLAN filtering */
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
+
+ return 0;
+}
+
+#ifndef CRCPOLY_LE
+#define CRCPOLY_LE 0xedb88320
+#endif
+static u32 xgbe_vid_crc32_le(__le16 vid_le)
+{
+ u32 poly = CRCPOLY_LE;
+ u32 crc = ~0;
+ u32 temp = 0;
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ int i, bits;
+
+ bits = get_bitmask_order(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= poly;
+ }
+
+ return crc;
+}
+
+static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
+{
+ u32 crc;
+ u16 vid;
+ __le16 vid_le;
+ u16 vlan_hash_table = 0;
+
+ /* Generate the VLAN Hash Table value */
+ for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
+ /* Get the CRC32 value of the VLAN ID */
+ vid_le = cpu_to_le16(vid);
+ crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
+
+ vlan_hash_table |= (1 << crc);
+ }
+
+ /* Set the VLAN Hash Table filtering register */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
+
+ return 0;
+}
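+
+/* Note: xgbe_vid_crc32_le() runs its bit-serial CRC over only the low
+ * 12 bits of the VLAN ID (get_bitmask_order(VLAN_VID_MASK) is 12), and
+ * the ">> 28" above keeps the top four bits of the bit-reversed CRC, so
+ * each active VID sets exactly one of the 16 bits in MAC_VLANHTR.VLHT.
+ * Distinct VIDs can alias to the same bit, so the hash filter is
+ * approximate and may pass VLANs that were never registered.
+ */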
+
+static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
+{
+ struct xgbe_ring_desc *rdesc = rdata->rdesc;
+
+ /* Reset the Tx descriptor
+ * Set buffer 1 (lo) address to zero
+ * Set buffer 1 (hi) address to zero
+	 * Reset the desc2 control bits (IC, TTSE, B2L & B1L)
+	 * Reset the desc3 control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
+ */
+ rdesc->desc0 = 0;
+ rdesc->desc1 = 0;
+ rdesc->desc2 = 0;
+ rdesc->desc3 = 0;
+
+ /* Make sure ownership is written to the descriptor */
+ dma_wmb();
+}
+
+static void xgbe_tx_desc_init(struct xgbe_channel *channel)
+{
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ int i;
+ int start_index = ring->cur;
+
+ DBGPR("-->tx_desc_init\n");
+
+	/* Initialize all descriptors */
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = XGBE_GET_DESC_DATA(ring, i);
+
+ /* Initialize Tx descriptor */
+ xgbe_tx_desc_reset(rdata);
+ }
+
+ /* Update the total number of Tx descriptors */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
+
+ /* Update the starting address of descriptor ring */
+ rdata = XGBE_GET_DESC_DATA(ring, start_index);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
+ upper_32_bits(rdata->rdesc_dma));
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ DBGPR("<--tx_desc_init\n");
+}
+
+static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
+ struct xgbe_ring_data *rdata, unsigned int index)
+{
+ struct xgbe_ring_desc *rdesc = rdata->rdesc;
+ unsigned int rx_usecs = pdata->rx_usecs;
+ unsigned int rx_frames = pdata->rx_frames;
+ unsigned int inte;
+
+ if (!rx_usecs && !rx_frames) {
+ /* No coalescing, interrupt for every descriptor */
+ inte = 1;
+ } else {
+ /* Set interrupt based on Rx frame coalescing setting */
+ if (rx_frames && !((index + 1) % rx_frames))
+ inte = 1;
+ else
+ inte = 0;
+ }
+
+ /* Reset the Rx descriptor
+ * Set buffer 1 (lo) address to header dma address (lo)
+ * Set buffer 1 (hi) address to header dma address (hi)
+ * Set buffer 2 (lo) address to buffer dma address (lo)
+ * Set buffer 2 (hi) address to buffer dma address (hi) and
+ * set control bits OWN and INTE
+ */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma));
+ rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
+ rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
+
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
+
+ /* Since the Rx DMA engine is likely running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the descriptor
+ */
+ dma_wmb();
+
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
+
+ /* Make sure ownership is written to the descriptor */
+ dma_wmb();
+}
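+
+/* Worked example (illustrative): with rx-frames set to 16, the
+ * (index + 1) % rx_frames test above sets INTE only on every 16th
+ * descriptor (indices 15, 31, ...), so the DMA raises at most one Rx
+ * interrupt per 16 received buffers; the Rx watchdog timer (RIWT)
+ * covers any tail when fewer frames arrive.
+ */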
+
+static void xgbe_rx_desc_init(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int start_index = ring->cur;
+ unsigned int i;
+
+ DBGPR("-->rx_desc_init\n");
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = XGBE_GET_DESC_DATA(ring, i);
+
+ /* Initialize Rx descriptor */
+ xgbe_rx_desc_reset(pdata, rdata, i);
+ }
+
+ /* Update the total number of Rx descriptors */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
+
+ /* Update the starting address of descriptor ring */
+ rdata = XGBE_GET_DESC_DATA(ring, start_index);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
+ upper_32_bits(rdata->rdesc_dma));
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ /* Update the Rx Descriptor Tail Pointer */
+ rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ DBGPR("<--rx_desc_init\n");
+}
+
+static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+ unsigned int addend)
+{
+ /* Set the addend register value and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
+
+ /* Wait for addend update to complete */
+ while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
+ udelay(5);
+}
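+
+/* Background (typical fine-update behaviour for this class of MAC, not
+ * spelled out in this file): with TSCFUPDT set, the device adds the
+ * addend to a 32-bit accumulator every clock cycle and advances the
+ * sub-second counter by SSINC on each accumulator overflow, so clock
+ * frequency trimming reduces to rescaling the addend written here.
+ */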
+
+static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec)
+{
+ /* Set the time values and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+ XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
+
+ /* Wait for time update to complete */
+ while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
+ udelay(5);
+}
+
+static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
+{
+ u64 nsec;
+
+ nsec = XGMAC_IOREAD(pdata, MAC_STSR);
+ nsec *= NSEC_PER_SEC;
+ nsec += XGMAC_IOREAD(pdata, MAC_STNR);
+
+ return nsec;
+}
+
+static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
+{
+ unsigned int tx_snr;
+ u64 nsec;
+
+ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+ if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
+ return 0;
+
+ nsec = XGMAC_IOREAD(pdata, MAC_TXSSR);
+ nsec *= NSEC_PER_SEC;
+ nsec += tx_snr;
+
+ return nsec;
+}
+
+static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+ struct xgbe_ring_desc *rdesc)
+{
+ u64 nsec;
+
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
+ !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
+ nsec = le32_to_cpu(rdesc->desc1);
+ nsec <<= 32;
+ nsec |= le32_to_cpu(rdesc->desc0);
+ if (nsec != 0xffffffffffffffffULL) {
+ packet->rx_tstamp = nsec;
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RX_TSTAMP, 1);
+ }
+ }
+}
+
+static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
+ unsigned int mac_tscr)
+{
+ /* Set one nano-second accuracy */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
+
+ /* Set fine timestamp update */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
+
+ /* Overwrite earlier timestamps */
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
+
+ XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
+
+ /* Exit if timestamping is not enabled */
+ if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
+ return 0;
+
+ /* Initialize time registers */
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
+ XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
+ xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
+ xgbe_set_tstamp_time(pdata, 0, 0);
+
+ /* Initialize the timecounter */
+ timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
+ ktime_to_ns(ktime_get_real()));
+
+ return 0;
+}
+
+static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
+{
+ struct ieee_ets *ets = pdata->ets;
+ unsigned int total_weight, min_weight, weight;
+ unsigned int i;
+
+ if (!ets)
+ return;
+
+ /* Set Tx to deficit weighted round robin scheduling algorithm (when
+ * traffic class is using ETS algorithm)
+ */
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
+
+ /* Set Traffic Class algorithms */
+ total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
+ min_weight = total_weight / 100;
+ if (!min_weight)
+ min_weight = 1;
+
+ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ DBGPR(" TC%u using SP\n", i);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_SP);
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ weight = total_weight * ets->tc_tx_bw[i] / 100;
+ weight = clamp(weight, min_weight, total_weight);
+
+ DBGPR(" TC%u using DWRR (weight %u)\n", i, weight);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_ETS);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
+ weight);
+ break;
+ }
+ }
+}
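+
+/* Worked example (illustrative): with an MTU of 1500 and 4 traffic
+ * classes, total_weight is 6000 and min_weight is 60.  An ETS class
+ * granted 25% of the bandwidth gets weight = 6000 * 25 / 100 = 1500,
+ * while a class granted 0% is clamped up to the 60-credit minimum so
+ * the DWRR scheduler never starves it completely.
+ */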
+
+static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
+{
+ struct ieee_pfc *pfc = pdata->pfc;
+ struct ieee_ets *ets = pdata->ets;
+ unsigned int mask, reg, reg_val;
+ unsigned int tc, prio;
+
+ if (!pfc || !ets)
+ return;
+
+ for (tc = 0; tc < pdata->hw_feat.tc_cnt; tc++) {
+ mask = 0;
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+ if ((pfc->pfc_en & (1 << prio)) &&
+ (ets->prio_tc[prio] == tc))
+ mask |= (1 << prio);
+ }
+ mask &= 0xff;
+
+ DBGPR(" TC%u PFC mask=%#x\n", tc, mask);
+ reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
+ reg_val = XGMAC_IOREAD(pdata, reg);
+
+ reg_val &= ~(0xff << ((tc % MTL_TCPM_TC_PER_REG) << 3));
+ reg_val |= (mask << ((tc % MTL_TCPM_TC_PER_REG) << 3));
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+ }
+
+ xgbe_config_flow_control(pdata);
+}
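+
+/* Worked example (illustrative, assuming MTL_TCPM_TC_PER_REG is 4 as
+ * defined in xgbe-common.h): each traffic class owns an 8-bit priority
+ * mask, so four classes pack into one 32-bit MTL_TCPMxR register.  For
+ * tc = 5 the mask lands in MTL_TCPM1R at bit offset (5 % 4) << 3 = 8,
+ * the second byte lane of the second register.
+ */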
+
+static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
+ struct xgbe_ring *ring)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring_data *rdata;
+
+ /* Make sure everything is written before the register write */
+ wmb();
+
+ /* Issue a poll command to Tx DMA by writing address
+	 * of the next free descriptor
+	 */
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ /* Start the Tx timer */
+ if (pdata->tx_usecs && !channel->tx_timer_active) {
+ channel->tx_timer_active = 1;
+ mod_timer(&channel->tx_timer,
+ jiffies + usecs_to_jiffies(pdata->tx_usecs));
+ }
+
+ ring->tx.xmit_more = 0;
+}
+
+static void xgbe_dev_xmit(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_packet_data *packet = &ring->packet_data;
+ unsigned int csum, tso, vlan;
+ unsigned int tso_context, vlan_context;
+ unsigned int tx_set_ic;
+ int start_index = ring->cur;
+ int cur_index = ring->cur;
+ int i;
+
+ DBGPR("-->xgbe_dev_xmit\n");
+
+ csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE);
+ tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE);
+ vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG);
+
+ if (tso && (packet->mss != ring->tx.cur_mss))
+ tso_context = 1;
+ else
+ tso_context = 0;
+
+ if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
+ vlan_context = 1;
+ else
+ vlan_context = 0;
+
+ /* Determine if an interrupt should be generated for this Tx:
+ * Interrupt:
+ * - Tx frame count exceeds the frame count setting
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set exceeds the frame count setting
+ * No interrupt:
+ * - No frame count setting specified (ethtool -C ethX tx-frames 0)
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set does not exceed the frame count setting
+ */
+ ring->coalesce_count += packet->tx_packets;
+ if (!pdata->tx_frames)
+ tx_set_ic = 0;
+ else if (packet->tx_packets > pdata->tx_frames)
+ tx_set_ic = 1;
+ else if ((ring->coalesce_count % pdata->tx_frames) <
+ packet->tx_packets)
+ tx_set_ic = 1;
+ else
+ tx_set_ic = 0;
+
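+	/* Worked example (illustrative): with tx-frames set to 25 and a
+	 * 10-packet send, a coalesce_count moving from 20 to 30 crosses a
+	 * multiple of 25, so (30 % 25) = 5 < 10 and tx_set_ic is set above;
+	 * a move from 30 to 40 gives (40 % 25) = 15 >= 10, and no interrupt
+	 * is requested for that descriptor chain.
+	 */
+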
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ rdesc = rdata->rdesc;
+
+ /* Create a context descriptor if this is a TSO packet */
+ if (tso_context || vlan_context) {
+ if (tso_context) {
+ DBGPR(" TSO context descriptor, mss=%u\n",
+ packet->mss);
+
+ /* Set the MSS size */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
+ MSS, packet->mss);
+
+ /* Mark it as a CONTEXT descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ CTXT, 1);
+
+ /* Indicate this descriptor contains the MSS */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ TCMSSV, 1);
+
+ ring->tx.cur_mss = packet->mss;
+ }
+
+ if (vlan_context) {
+ DBGPR(" VLAN context descriptor, ctag=%u\n",
+ packet->vlan_ctag);
+
+ /* Mark it as a CONTEXT descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ CTXT, 1);
+
+ /* Set the VLAN tag */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ VT, packet->vlan_ctag);
+
+ /* Indicate this descriptor contains the VLAN tag */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ VLTV, 1);
+
+ ring->tx.cur_vlan_ctag = packet->vlan_ctag;
+ }
+
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ rdesc = rdata->rdesc;
+ }
+
+ /* Update buffer address (for TSO this is the header) */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+
+ /* Update the buffer length */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ rdata->skb_dma_len);
+
+ /* VLAN tag insertion check */
+ if (vlan)
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
+ TX_NORMAL_DESC2_VLAN_INSERT);
+
+ /* Timestamp enablement check */
+ if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);
+
+ /* Mark it as First Descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
+
+ /* Mark it as a NORMAL descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+
+ /* Set OWN bit if not the first descriptor */
+ if (cur_index != start_index)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+ if (tso) {
+ /* Enable TSO */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
+ packet->tcp_payload_len);
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
+ packet->tcp_header_len / 4);
+ } else {
+ /* Enable CRC and Pad Insertion */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
+ CIC, 0x3);
+
+ /* Set the total length to be transmitted */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
+ packet->length);
+ }
+
+ for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+ rdesc = rdata->rdesc;
+
+ /* Update buffer address */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+
+ /* Update the buffer length */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ rdata->skb_dma_len);
+
+ /* Set OWN bit */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+ /* Mark it as NORMAL descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
+ CIC, 0x3);
+ }
+
+ /* Set LAST bit for the last descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
+
+ /* Set IC bit based on Tx coalescing settings */
+ if (tx_set_ic)
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
+
+ /* Save the Tx info to report back during cleanup */
+ rdata->tx.packets = packet->tx_packets;
+ rdata->tx.bytes = packet->tx_bytes;
+
+ /* In case the Tx DMA engine is running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the first descriptor
+ */
+ dma_wmb();
+
+ /* Set OWN bit for the first descriptor */
+ rdata = XGBE_GET_DESC_DATA(ring, start_index);
+ rdesc = rdata->rdesc;
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+#ifdef XGMAC_ENABLE_TX_DESC_DUMP
+ xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
+#endif
+
+ /* Make sure ownership is written to the descriptor */
+ dma_wmb();
+
+ ring->cur = cur_index + 1;
+ if (!packet->skb->xmit_more ||
+ netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
+ channel->queue_index)))
+ xgbe_tx_start_xmit(channel, ring);
+ else
+ ring->tx.xmit_more = 1;
+
+ DBGPR(" %s: descriptors %u to %u written\n",
+ channel->name, start_index & (ring->rdesc_count - 1),
+ (ring->cur - 1) & (ring->rdesc_count - 1));
+
+ DBGPR("<--xgbe_dev_xmit\n");
+}
+
+static int xgbe_dev_read(struct xgbe_channel *channel)
+{
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_packet_data *packet = &ring->packet_data;
+ struct net_device *netdev = channel->pdata->netdev;
+ unsigned int err, etlt, l34t;
+
+ DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
+
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ rdesc = rdata->rdesc;
+
+ /* Check for data availability */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
+ return 1;
+
+ /* Make sure descriptor fields are read after reading the OWN bit */
+ dma_rmb();
+
+#ifdef XGMAC_ENABLE_RX_DESC_DUMP
+ xgbe_dump_rx_desc(ring, rdesc, ring->cur);
+#endif
+
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
+ /* Timestamp Context Descriptor */
+ xgbe_get_rx_tstamp(packet, rdesc);
+
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CONTEXT, 1);
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CONTEXT_NEXT, 0);
+ return 0;
+ }
+
+ /* Normal Descriptor, be sure Context Descriptor bit is off */
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);
+
+ /* Indicate if a Context Descriptor is next */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CONTEXT_NEXT, 1);
+
+ /* Get the header length */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+ rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
+ RX_NORMAL_DESC2, HL);
+
+ /* Get the RSS hash */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RSS_HASH, 1);
+
+ packet->rss_hash = le32_to_cpu(rdesc->desc1);
+
+ l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
+ switch (l34t) {
+ case RX_DESC3_L34T_IPV4_TCP:
+ case RX_DESC3_L34T_IPV4_UDP:
+ case RX_DESC3_L34T_IPV6_TCP:
+ case RX_DESC3_L34T_IPV6_UDP:
+ packet->rss_hash_type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ packet->rss_hash_type = PKT_HASH_TYPE_L3;
+ }
+ }
+
+ /* Get the packet length */
+ rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
+
+ if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
+ /* Not all the data has been transferred for this packet */
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ INCOMPLETE, 1);
+ return 0;
+ }
+
+ /* This is the last of the data for this packet */
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ INCOMPLETE, 0);
+
+ /* Set checksum done indicator as appropriate */
+ if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 1);
+
+ /* Check for errors (only valid in last descriptor) */
+ err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
+ etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
+ DBGPR(" err=%u, etlt=%#x\n", err, etlt);
+
+ if (!err || !etlt) {
+ /* No error if err is 0 or etlt is 0 */
+ if ((etlt == 0x09) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ VLAN_CTAG, 1);
+ packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
+ RX_NORMAL_DESC0,
+ OVT);
+ DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag);
+ }
+ } else {
+ if ((etlt == 0x05) || (etlt == 0x06))
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 0);
+ else
+ XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
+ FRAME, 1);
+ }
+
+ DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
+ ring->cur & (ring->rdesc_count - 1), ring->cur);
+
+ return 0;
+}
+
+static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
+{
+ /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
+ return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
+}
+
+static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
+{
+ /* Rx and Tx share LD bit, so check TDES3.LD bit */
+ return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
+}
+
+static int xgbe_enable_int(struct xgbe_channel *channel,
+ enum xgbe_int int_id)
+{
+ unsigned int dma_ch_ier;
+
+ dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+
+ switch (int_id) {
+ case XGMAC_INT_DMA_CH_SR_TI:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TPS:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TBU:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RI:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RBU:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RPS:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TI_RI:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_FBE:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
+ break;
+ case XGMAC_INT_DMA_ALL:
+ dma_ch_ier |= channel->saved_ier;
+ break;
+ default:
+ return -1;
+ }
+
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+
+ return 0;
+}
+
+static int xgbe_disable_int(struct xgbe_channel *channel,
+ enum xgbe_int int_id)
+{
+ unsigned int dma_ch_ier;
+
+ dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+
+ switch (int_id) {
+ case XGMAC_INT_DMA_CH_SR_TI:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TPS:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TBU:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RI:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RBU:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RPS:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TI_RI:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_FBE:
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
+ break;
+ case XGMAC_INT_DMA_ALL:
+ channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
+ dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
+ break;
+ default:
+ return -1;
+ }
+
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+
+ return 0;
+}
+
+static int xgbe_exit(struct xgbe_prv_data *pdata)
+{
+ unsigned int count = 2000;
+
+ DBGPR("-->xgbe_exit\n");
+
+ /* Issue a software reset */
+ XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
+ usleep_range(10, 15);
+
+	/* Poll until the software reset completes */
+ while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
+ usleep_range(500, 600);
+
+ if (!count)
+ return -EBUSY;
+
+ DBGPR("<--xgbe_exit\n");
+
+ return 0;
+}
+
+static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
+{
+ unsigned int i, count;
+
+ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+ return 0;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
+
+	/* Poll until each Tx queue flush completes */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ count = 2000;
+ while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
+ MTL_Q_TQOMR, FTQ))
+ usleep_range(500, 600);
+
+ if (!count)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
+{
+ /* Set enhanced addressing mode */
+ XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
+
+ /* Set the System Bus mode */
+ XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+ XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
+}
+
+static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
+{
+ unsigned int arcache, awcache;
+
+ arcache = 0;
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
+ XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
+
+ awcache = 0;
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
+ XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
+}
+
+static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ /* Set Tx to weighted round robin scheduling algorithm */
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
+
+ /* Set Tx traffic classes to use WRR algorithm with equal weights */
+ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_ETS);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
+ }
+
+ /* Set Rx to strict priority algorithm */
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
+}
+
+static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
+ unsigned int queue_count)
+{
+ unsigned int q_fifo_size = 0;
+ enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+
+ /* Calculate Tx/Rx fifo share per queue */
+ switch (fifo_size) {
+ case 0:
+ q_fifo_size = XGBE_FIFO_SIZE_B(128);
+ break;
+ case 1:
+ q_fifo_size = XGBE_FIFO_SIZE_B(256);
+ break;
+ case 2:
+ q_fifo_size = XGBE_FIFO_SIZE_B(512);
+ break;
+ case 3:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(1);
+ break;
+ case 4:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(2);
+ break;
+ case 5:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(4);
+ break;
+ case 6:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(8);
+ break;
+ case 7:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(16);
+ break;
+ case 8:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(32);
+ break;
+ case 9:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(64);
+ break;
+ case 10:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(128);
+ break;
+ case 11:
+ q_fifo_size = XGBE_FIFO_SIZE_KB(256);
+ break;
+ }
+
+	/* The configured value may exceed the usable fifo RAM, so cap it */
+ q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
+
+ q_fifo_size = q_fifo_size / queue_count;
+
+ /* Set the queue fifo size programmable value */
+ if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_512;
+ else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+
+ return p_fifo;
+}
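+
+/* Worked example (illustrative): a hardware-reported fifo_size of 7
+ * decodes to XGBE_FIFO_SIZE_KB(16) above; split across four queues
+ * that is 4 KB each, so XGMAC_MTL_FIFO_SIZE_4K is returned.  The enum
+ * values encode (bytes / 256) - 1, which is why the callers below can
+ * report the per-queue size as (fifo_size + 1) * 256 bytes.
+ */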
+
+static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mtl_fifo_size fifo_size;
+ unsigned int i;
+
+ fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
+ pdata->tx_q_count);
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
+
+ netdev_notice(pdata->netdev,
+ "%d Tx hardware queues, %d byte fifo per queue\n",
+ pdata->tx_q_count, ((fifo_size + 1) * 256));
+}
+
+static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mtl_fifo_size fifo_size;
+ unsigned int i;
+
+ fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
+ pdata->rx_q_count);
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
+
+ netdev_notice(pdata->netdev,
+ "%d Rx hardware queues, %d byte fifo per queue\n",
+ pdata->rx_q_count, ((fifo_size + 1) * 256));
+}
+
+static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
+{
+ unsigned int qptc, qptc_extra, queue;
+ unsigned int prio_queues;
+ unsigned int ppq, ppq_extra, prio;
+ unsigned int mask;
+ unsigned int i, j, reg, reg_val;
+
+ /* Map the MTL Tx Queues to Traffic Classes
+ * Note: Tx Queues >= Traffic Classes
+ */
+ qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
+ qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
+
+ for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ for (j = 0; j < qptc; j++) {
+ DBGPR(" TXq%u mapped to TC%u\n", queue, i);
+ XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+ Q2TCMAP, i);
+ pdata->q2tc_map[queue++] = i;
+ }
+
+ if (i < qptc_extra) {
+ DBGPR(" TXq%u mapped to TC%u\n", queue, i);
+ XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+ Q2TCMAP, i);
+ pdata->q2tc_map[queue++] = i;
+ }
+ }
+
+ /* Map the 8 VLAN priority values to available MTL Rx queues */
+ prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
+ pdata->rx_q_count);
+ ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
+ ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
+
+ reg = MAC_RQC2R;
+ reg_val = 0;
+ for (i = 0, prio = 0; i < prio_queues;) {
+ mask = 0;
+ for (j = 0; j < ppq; j++) {
+ DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
+ mask |= (1 << prio);
+ pdata->prio2q_map[prio++] = i;
+ }
+
+ if (i < ppq_extra) {
+ DBGPR(" PRIO%u mapped to RXq%u\n", prio, i);
+ mask |= (1 << prio);
+ pdata->prio2q_map[prio++] = i;
+ }
+
+ reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
+
+ if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
+ continue;
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+ reg += MAC_RQC2_INC;
+ reg_val = 0;
+ }
+
+ /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
+ reg = MTL_RQDCM0R;
+ reg_val = 0;
+ for (i = 0; i < pdata->rx_q_count;) {
+ reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
+
+ if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
+ continue;
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MTL_RQDCM_INC;
+ reg_val = 0;
+ }
+}
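+
+/* Worked example (illustrative): with 3 Rx queues, ppq = 8 / 3 = 2 and
+ * ppq_extra = 8 % 3 = 2, so queues 0 and 1 each take three priorities
+ * (0-2 and 3-5) and queue 2 takes the remaining two (6-7).  The same
+ * quotient/remainder split maps Tx queues onto traffic classes above.
+ */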
+
+static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ /* Activate flow control when less than 4k left in fifo */
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
+
+ /* De-activate flow control when more than 6k left in fifo */
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
+ }
+}
+
+static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
+{
+ xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
+
+ /* Filtering is done using perfect filtering and hash filtering */
+ if (pdata->hw_feat.hash_table_size) {
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
+ }
+}
+
+static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
+{
+ unsigned int val;
+
+ val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+}
+
+static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
+{
+ switch (pdata->phy_speed) {
+ case SPEED_10000:
+ xgbe_set_xgmii_speed(pdata);
+ break;
+
+ case SPEED_2500:
+ xgbe_set_gmii_2500_speed(pdata);
+ break;
+
+ case SPEED_1000:
+ xgbe_set_gmii_speed(pdata);
+ break;
+ }
+}
+
+static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
+{
+ if (pdata->netdev->features & NETIF_F_RXCSUM)
+ xgbe_enable_rx_csum(pdata);
+ else
+ xgbe_disable_rx_csum(pdata);
+}
+
+static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
+{
+ /* Indicate that VLAN Tx CTAGs come from context descriptors */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
+
+ /* Set the current VLAN Hash Table register value */
+ xgbe_update_vlan_hash_table(pdata);
+
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ xgbe_enable_rx_vlan_filtering(pdata);
+ else
+ xgbe_disable_rx_vlan_filtering(pdata);
+
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ xgbe_enable_rx_vlan_stripping(pdata);
+ else
+ xgbe_disable_rx_vlan_stripping(pdata);
+}
+
+static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
+{
+ bool read_hi;
+ u64 val;
+
+ switch (reg_lo) {
+ /* These registers are always 64 bit */
+ case MMC_TXOCTETCOUNT_GB_LO:
+ case MMC_TXOCTETCOUNT_G_LO:
+ case MMC_RXOCTETCOUNT_GB_LO:
+ case MMC_RXOCTETCOUNT_G_LO:
+ read_hi = true;
+ break;
+
+ default:
+ read_hi = false;
+	}
+
+ val = XGMAC_IOREAD(pdata, reg_lo);
+
+ if (read_hi)
+ val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
+
+ return val;
+}
+
+static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+ unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
+ stats->txoctetcount_gb +=
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
+ stats->txframecount_gb +=
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
+ stats->txbroadcastframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
+ stats->txmulticastframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
+ stats->tx64octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
+ stats->tx65to127octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
+ stats->tx128to255octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
+ stats->tx256to511octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
+ stats->tx512to1023octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
+ stats->tx1024tomaxoctets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
+ stats->txunicastframes_gb +=
+ xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
+ stats->txmulticastframes_gb +=
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
+		stats->txbroadcastframes_gb +=
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
+ stats->txunderflowerror +=
+ xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
+ stats->txoctetcount_g +=
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
+ stats->txframecount_g +=
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
+ stats->txpauseframes +=
+ xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
+ stats->txvlanframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
+}
+
+static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+ unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
+ stats->rxframecount_gb +=
+ xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
+ stats->rxoctetcount_gb +=
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
+ stats->rxoctetcount_g +=
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
+ stats->rxbroadcastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
+ stats->rxmulticastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
+ stats->rxcrcerror +=
+ xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
+ stats->rxrunterror +=
+ xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
+ stats->rxjabbererror +=
+ xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
+ stats->rxundersize_g +=
+ xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
+ stats->rxoversize_g +=
+ xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
+ stats->rx64octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
+ stats->rx65to127octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
+ stats->rx128to255octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
+ stats->rx256to511octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
+ stats->rx512to1023octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
+ stats->rx1024tomaxoctets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
+ stats->rxunicastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
+ stats->rxlengtherror +=
+ xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
+ stats->rxoutofrangetype +=
+ xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
+ stats->rxpauseframes +=
+ xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
+ stats->rxfifooverflow +=
+ xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
+ stats->rxvlanframes_gb +=
+ xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
+ stats->rxwatchdogerror +=
+ xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+}
+
+static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+
+ /* Freeze counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
+
+ stats->txoctetcount_gb +=
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ stats->txframecount_gb +=
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ stats->txbroadcastframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ stats->txmulticastframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ stats->tx64octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
+
+ stats->tx65to127octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ stats->tx128to255octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ stats->tx256to511octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ stats->tx512to1023octets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ stats->tx1024tomaxoctets_gb +=
+ xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ stats->txunicastframes_gb +=
+ xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ stats->txmulticastframes_gb +=
+ xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+	stats->txbroadcastframes_gb +=
+ xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ stats->txunderflowerror +=
+ xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ stats->txoctetcount_g +=
+ xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ stats->txframecount_g +=
+ xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ stats->txpauseframes +=
+ xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ stats->txvlanframes_g +=
+ xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
+
+ stats->rxframecount_gb +=
+ xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ stats->rxoctetcount_gb +=
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ stats->rxoctetcount_g +=
+ xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ stats->rxbroadcastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ stats->rxmulticastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ stats->rxcrcerror +=
+ xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
+
+ stats->rxrunterror +=
+ xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
+
+ stats->rxjabbererror +=
+ xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
+
+ stats->rxundersize_g +=
+ xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
+
+ stats->rxoversize_g +=
+ xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
+
+ stats->rx64octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
+
+ stats->rx65to127octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ stats->rx128to255octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ stats->rx256to511octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ stats->rx512to1023octets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ stats->rx1024tomaxoctets_gb +=
+ xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ stats->rxunicastframes_g +=
+ xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ stats->rxlengtherror +=
+ xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
+
+ stats->rxoutofrangetype +=
+ xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ stats->rxpauseframes +=
+ xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ stats->rxfifooverflow +=
+ xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ stats->rxvlanframes_gb +=
+ xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ stats->rxwatchdogerror +=
+ xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
+
+ /* Un-freeze counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
+}
+
+static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
+{
+ /* Set counters to reset on read */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
+
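+	/* With reset-on-read set, every xgbe_mmc_read() returns the delta
+	 * since the previous read, which is why the stats handlers above
+	 * accumulate with '+=' rather than overwriting.
+	 */
+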
+ /* Reset the counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
+}
+
+static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
+ struct xgbe_channel *channel)
+{
+ unsigned int tx_dsr, tx_pos, tx_qidx;
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
+	/* Calculate which debug status register to read and the position
+	 * within it: DSR0 covers the first DMA_DSRX_FIRST_QUEUE channels,
+	 * and each DSRx register after it packs DMA_DSRX_QPR channels of
+	 * DMA_DSR_Q_WIDTH bits each.
+	 */
+ if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
+ tx_dsr = DMA_DSR0;
+ tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) +
+ DMA_DSR0_TPS_START;
+ } else {
+ tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;
+
+ tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
+ tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
+ DMA_DSRX_TPS_START;
+ }
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * descriptors. Wait for the Tx engine to enter the stopped or
+ * suspended state. Don't wait forever though...
+ */
+ tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
+ while (time_before(jiffies, tx_timeout)) {
+ tx_status = XGMAC_IOREAD(pdata, tx_dsr);
+ tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
+ if ((tx_status == DMA_TPS_STOPPED) ||
+ (tx_status == DMA_TPS_SUSPENDED))
+ break;
+
+ usleep_range(500, 1000);
+ }
+
+ if (!time_before(jiffies, tx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Tx DMA channel %u to stop\n",
+ channel->queue_index);
+}
+
+static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
+ }
+
+ /* Enable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
+ MTL_Q_ENABLED);
+
+ /* Enable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
+}
+
+static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Prepare for Tx DMA channel stop */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ xgbe_prepare_tx_stop(pdata, channel);
+ }
+
+ /* Disable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+
+ /* Disable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
+
+ /* Disable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
+ }
+}
+
+static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int reg_val, i;
+
+ /* Enable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
+ }
+
+ /* Enable each Rx queue */
+ reg_val = 0;
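+	/* RXQEN is a two-bit field per queue (hence the i << 1 shift);
+	 * 0x02 marks the queue enabled for generic traffic.
+	 */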
+ for (i = 0; i < pdata->rx_q_count; i++)
+ reg_val |= (0x02 << (i << 1));
+ XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
+
+ /* Enable MAC Rx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
+}
+
+static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable MAC Rx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
+
+ /* Disable each Rx queue */
+ XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
+
+ /* Disable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
+ }
+}
+
+static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
+ }
+
+ /* Enable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
+}
+
+static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Prepare for Tx DMA channel stop */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ xgbe_prepare_tx_stop(pdata, channel);
+ }
+
+ /* Disable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+
+ /* Disable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
+ }
+}
+
+static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
+ }
+}
+
+static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
+ }
+}
+
+static int xgbe_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ int ret;
+
+ DBGPR("-->xgbe_init\n");
+
+ /* Flush Tx queues */
+ ret = xgbe_flush_tx_queues(pdata);
+ if (ret)
+ return ret;
+
+ /*
+ * Initialize DMA related features
+ */
+ xgbe_config_dma_bus(pdata);
+ xgbe_config_dma_cache(pdata);
+ xgbe_config_osp_mode(pdata);
+ xgbe_config_pblx8(pdata);
+ xgbe_config_tx_pbl_val(pdata);
+ xgbe_config_rx_pbl_val(pdata);
+ xgbe_config_rx_coalesce(pdata);
+ xgbe_config_tx_coalesce(pdata);
+ xgbe_config_rx_buffer_size(pdata);
+ xgbe_config_tso_mode(pdata);
+ xgbe_config_sph_mode(pdata);
+ xgbe_config_rss(pdata);
+ desc_if->wrapper_tx_desc_init(pdata);
+ desc_if->wrapper_rx_desc_init(pdata);
+ xgbe_enable_dma_interrupts(pdata);
+
+ /*
+ * Initialize MTL related features
+ */
+ xgbe_config_mtl_mode(pdata);
+ xgbe_config_queue_mapping(pdata);
+ xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
+ xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
+ xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
+ xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
+ xgbe_config_tx_fifo_size(pdata);
+ xgbe_config_rx_fifo_size(pdata);
+ xgbe_config_flow_control_threshold(pdata);
+	/* TODO: Error Packet and undersized good Packet forwarding enable
+	 * (FEP and FUP)
+	 */
+ xgbe_config_dcb_tc(pdata);
+ xgbe_config_dcb_pfc(pdata);
+ xgbe_enable_mtl_interrupts(pdata);
+
+ /*
+ * Initialize MAC related features
+ */
+ xgbe_config_mac_address(pdata);
+ xgbe_config_rx_mode(pdata);
+ xgbe_config_jumbo_enable(pdata);
+ xgbe_config_flow_control(pdata);
+ xgbe_config_mac_speed(pdata);
+ xgbe_config_checksum_offload(pdata);
+ xgbe_config_vlan_support(pdata);
+ xgbe_config_mmc(pdata);
+ xgbe_enable_mac_interrupts(pdata);
+
+ DBGPR("<--xgbe_init\n");
+
+ return 0;
+}
+
+void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
+{
+	DBGPR("-->xgbe_init_function_ptrs_dev\n");
+
+ hw_if->tx_complete = xgbe_tx_complete;
+
+ hw_if->set_mac_address = xgbe_set_mac_address;
+ hw_if->config_rx_mode = xgbe_config_rx_mode;
+
+ hw_if->enable_rx_csum = xgbe_enable_rx_csum;
+ hw_if->disable_rx_csum = xgbe_disable_rx_csum;
+
+ hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
+ hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
+ hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
+ hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
+ hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
+
+ hw_if->read_mmd_regs = xgbe_read_mmd_regs;
+ hw_if->write_mmd_regs = xgbe_write_mmd_regs;
+
+ hw_if->set_gmii_speed = xgbe_set_gmii_speed;
+ hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
+ hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
+
+ hw_if->enable_tx = xgbe_enable_tx;
+ hw_if->disable_tx = xgbe_disable_tx;
+ hw_if->enable_rx = xgbe_enable_rx;
+ hw_if->disable_rx = xgbe_disable_rx;
+
+ hw_if->powerup_tx = xgbe_powerup_tx;
+ hw_if->powerdown_tx = xgbe_powerdown_tx;
+ hw_if->powerup_rx = xgbe_powerup_rx;
+ hw_if->powerdown_rx = xgbe_powerdown_rx;
+
+ hw_if->dev_xmit = xgbe_dev_xmit;
+ hw_if->dev_read = xgbe_dev_read;
+ hw_if->enable_int = xgbe_enable_int;
+ hw_if->disable_int = xgbe_disable_int;
+ hw_if->init = xgbe_init;
+ hw_if->exit = xgbe_exit;
+
+ /* Descriptor related Sequences have to be initialized here */
+ hw_if->tx_desc_init = xgbe_tx_desc_init;
+ hw_if->rx_desc_init = xgbe_rx_desc_init;
+ hw_if->tx_desc_reset = xgbe_tx_desc_reset;
+ hw_if->rx_desc_reset = xgbe_rx_desc_reset;
+ hw_if->is_last_desc = xgbe_is_last_desc;
+ hw_if->is_context_desc = xgbe_is_context_desc;
+ hw_if->tx_start_xmit = xgbe_tx_start_xmit;
+
+ /* For FLOW ctrl */
+ hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
+ hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
+
+ /* For RX coalescing */
+ hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
+ hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
+ hw_if->usec_to_riwt = xgbe_usec_to_riwt;
+ hw_if->riwt_to_usec = xgbe_riwt_to_usec;
+
+ /* For RX and TX threshold config */
+ hw_if->config_rx_threshold = xgbe_config_rx_threshold;
+ hw_if->config_tx_threshold = xgbe_config_tx_threshold;
+
+ /* For RX and TX Store and Forward Mode config */
+ hw_if->config_rsf_mode = xgbe_config_rsf_mode;
+ hw_if->config_tsf_mode = xgbe_config_tsf_mode;
+
+ /* For TX DMA Operating on Second Frame config */
+ hw_if->config_osp_mode = xgbe_config_osp_mode;
+
+ /* For RX and TX PBL config */
+ hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
+ hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
+ hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
+ hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
+ hw_if->config_pblx8 = xgbe_config_pblx8;
+
+ /* For MMC statistics support */
+ hw_if->tx_mmc_int = xgbe_tx_mmc_int;
+ hw_if->rx_mmc_int = xgbe_rx_mmc_int;
+ hw_if->read_mmc_stats = xgbe_read_mmc_stats;
+
+ /* For PTP config */
+ hw_if->config_tstamp = xgbe_config_tstamp;
+ hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
+ hw_if->set_tstamp_time = xgbe_set_tstamp_time;
+ hw_if->get_tstamp_time = xgbe_get_tstamp_time;
+ hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
+
+ /* For Data Center Bridging config */
+ hw_if->config_dcb_tc = xgbe_config_dcb_tc;
+ hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
+
+ /* For Receive Side Scaling */
+ hw_if->enable_rss = xgbe_enable_rss;
+ hw_if->disable_rss = xgbe_disable_rss;
+ hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
+ hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
+
+	DBGPR("<--xgbe_init_function_ptrs_dev\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
new file mode 100644
index 000000000..9fd6c69a8
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -0,0 +1,2227 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <net/busy_poll.h>
+#include <linux/clk.h>
+#include <linux/if_ether.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static int xgbe_one_poll(struct napi_struct *, int);
+static int xgbe_all_poll(struct napi_struct *, int);
+
+static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel_mem, *channel;
+ struct xgbe_ring *tx_ring, *rx_ring;
+ unsigned int count, i;
+ int ret = -ENOMEM;
+
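+	/* One channel exists per DMA ring; Tx and Rx ring counts can
+	 * differ, so allocate enough channels to cover the larger of the
+	 * two and let trailing channels own only one ring type.
+	 */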
+ count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+
+ channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
+ if (!channel_mem)
+ goto err_channel;
+
+ tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
+ GFP_KERNEL);
+ if (!tx_ring)
+ goto err_tx_ring;
+
+ rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
+ GFP_KERNEL);
+ if (!rx_ring)
+ goto err_rx_ring;
+
+ for (i = 0, channel = channel_mem; i < count; i++, channel++) {
+		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
+ channel->pdata = pdata;
+ channel->queue_index = i;
+ channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * i);
+
+ if (pdata->per_channel_irq) {
+ /* Get the DMA interrupt (offset 1) */
+ ret = platform_get_irq(pdata->pdev, i + 1);
+ if (ret < 0) {
+ netdev_err(pdata->netdev,
+ "platform_get_irq %u failed\n",
+ i + 1);
+ goto err_irq;
+ }
+
+ channel->dma_irq = ret;
+ }
+
+ if (i < pdata->tx_ring_count) {
+ spin_lock_init(&tx_ring->lock);
+ channel->tx_ring = tx_ring++;
+ }
+
+ if (i < pdata->rx_ring_count) {
+ spin_lock_init(&rx_ring->lock);
+ channel->rx_ring = rx_ring++;
+ }
+
+ DBGPR(" %s: queue=%u, dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
+ channel->name, channel->queue_index, channel->dma_regs,
+ channel->dma_irq, channel->tx_ring, channel->rx_ring);
+ }
+
+ pdata->channel = channel_mem;
+ pdata->channel_count = count;
+
+ return 0;
+
+err_irq:
+ kfree(rx_ring);
+
+err_rx_ring:
+ kfree(tx_ring);
+
+err_tx_ring:
+ kfree(channel_mem);
+
+err_channel:
+ return ret;
+}
+
+static void xgbe_free_channels(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->channel)
+ return;
+
+ kfree(pdata->channel->rx_ring);
+ kfree(pdata->channel->tx_ring);
+ kfree(pdata->channel);
+
+ pdata->channel = NULL;
+ pdata->channel_count = 0;
+}
+
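+/* The ring's 'cur' and 'dirty' indices are free-running counters, so
+ * their difference gives the number of in-flight descriptors even after
+ * the indices wrap.
+ */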
+static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
+{
+ return (ring->rdesc_count - (ring->cur - ring->dirty));
+}
+
+static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
+{
+ return (ring->cur - ring->dirty);
+}
+
+static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
+ struct xgbe_ring *ring, unsigned int count)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+
+ if (count > xgbe_tx_avail_desc(ring)) {
+ DBGPR(" Tx queue stopped, not enough descriptors available\n");
+ netif_stop_subqueue(pdata->netdev, channel->queue_index);
+ ring->tx.queue_stopped = 1;
+
+ /* If we haven't notified the hardware because of xmit_more
+ * support, tell it now
+ */
+ if (ring->tx.xmit_more)
+ pdata->hw_if.tx_start_xmit(channel, ring);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return 0;
+}
+
+static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
+{
+ unsigned int rx_buf_size;
+
+ if (mtu > XGMAC_JUMBO_PACKET_MTU) {
+ netdev_alert(netdev, "MTU exceeds maximum supported value\n");
+ return -EINVAL;
+ }
+
+ rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
+
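+	/* Round up to the next XGBE_RX_BUF_ALIGN multiple using the usual
+	 * (x + a - 1) & ~(a - 1) idiom for a power-of-two alignment.
+	 */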
+ rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
+ ~(XGBE_RX_BUF_ALIGN - 1);
+
+ return rx_buf_size;
+}
+
+static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ enum xgbe_int int_id;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_RI;
+ else
+ continue;
+
+ hw_if->enable_int(channel, int_id);
+ }
+}
+
+static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ enum xgbe_int int_id;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (channel->tx_ring && channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+ else if (channel->tx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_TI;
+ else if (channel->rx_ring)
+ int_id = XGMAC_INT_DMA_CH_SR_RI;
+ else
+ continue;
+
+ hw_if->disable_int(channel, int_id);
+ }
+}
+
+static irqreturn_t xgbe_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = data;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ unsigned int dma_isr, dma_ch_isr;
+ unsigned int mac_isr, mac_tssr;
+ unsigned int i;
+
+	/* The DMA interrupt status register also reports MAC and MTL
+	 * interrupts, so a single non-zero check on this register tells
+	 * us whether anything needs servicing
+	 */
+ dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
+ if (!dma_isr)
+ goto isr_done;
+
+ DBGPR(" DMA_ISR = %08x\n", dma_isr);
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!(dma_isr & (1 << i)))
+ continue;
+
+ channel = pdata->channel + i;
+
+ dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+ DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
+
+		/* The TI or RI interrupt bits may still be set even when
+		 * per channel DMA interrupts are in use; schedule the
+		 * shared napi instance only when they are not, since the
+		 * per channel case is handled by xgbe_dma_isr
+		 */
+ if (!pdata->per_channel_irq &&
+ (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
+ XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
+ if (napi_schedule_prep(&pdata->napi)) {
+ /* Disable Tx and Rx interrupts */
+ xgbe_disable_rx_tx_ints(pdata);
+
+ /* Turn on polling */
+ __napi_schedule(&pdata->napi);
+ }
+ }
+
+ /* Restart the device on a Fatal Bus Error */
+ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
+ schedule_work(&pdata->restart_work);
+
+		/* Clear all interrupt signals (the status bits are
+		 * write-one-to-clear, so writing back the value just read
+		 * acknowledges exactly what was observed)
+		 */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
+ }
+
+ if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
+ mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
+ hw_if->tx_mmc_int(pdata);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
+ hw_if->rx_mmc_int(pdata);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
+ mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);
+
+ if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
+ /* Read Tx Timestamp to clear interrupt */
+ pdata->tx_tstamp =
+ hw_if->get_tx_tstamp(pdata);
+ schedule_work(&pdata->tx_tstamp_work);
+ }
+ }
+ }
+
+ DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
+
+isr_done:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xgbe_dma_isr(int irq, void *data)
+{
+ struct xgbe_channel *channel = data;
+
+ /* Per channel DMA interrupts are enabled, so we use the per
+ * channel napi structure and not the private data napi structure
+ */
+ if (napi_schedule_prep(&channel->napi)) {
+ /* Disable Tx and Rx interrupts */
+ disable_irq_nosync(channel->dma_irq);
+
+ /* Turn on polling */
+ __napi_schedule(&channel->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void xgbe_tx_timer(unsigned long data)
+{
+ struct xgbe_channel *channel = (struct xgbe_channel *)data;
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct napi_struct *napi;
+
+ DBGPR("-->xgbe_tx_timer\n");
+
+ napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
+ if (napi_schedule_prep(napi)) {
+ /* Disable Tx and Rx interrupts */
+ if (pdata->per_channel_irq)
+ disable_irq_nosync(channel->dma_irq);
+ else
+ xgbe_disable_rx_tx_ints(pdata);
+
+ /* Turn on polling */
+ __napi_schedule(napi);
+ }
+
+ channel->tx_timer_active = 0;
+
+ DBGPR("<--xgbe_tx_timer\n");
+}
+
+static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_init_tx_timers\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ DBGPR(" %s adding tx timer\n", channel->name);
+ setup_timer(&channel->tx_timer, xgbe_tx_timer,
+ (unsigned long)channel);
+ }
+
+ DBGPR("<--xgbe_init_tx_timers\n");
+}
+
+static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_stop_tx_timers\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ DBGPR(" %s deleting tx timer\n", channel->name);
+ del_timer_sync(&channel->tx_timer);
+ }
+
+ DBGPR("<--xgbe_stop_tx_timers\n");
+}
+
+void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
+{
+ unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+ struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
+
+ DBGPR("-->xgbe_get_all_hw_features\n");
+
+ mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
+ mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
+ mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);
+
+ memset(hw_feat, 0, sizeof(*hw_feat));
+
+ hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);
+
+ /* Hardware feature register 0 */
+ hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
+ hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
+ hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
+ hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
+ hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
+ hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
+ hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
+ hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
+ hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
+ hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
+ hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
+ hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
+ ADDMACADRSEL);
+ hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
+ hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
+
+ /* Hardware feature register 1 */
+ hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ RXFIFOSIZE);
+ hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ TXFIFOSIZE);
+ hw_feat->dma_width = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
+ hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
+ hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
+ hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
+ hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+ hw_feat->rss = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
+ hw_feat->tc_cnt = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
+ hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ HASHTBLSZ);
+ hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ L3L4FNUM);
+
+ /* Hardware feature register 2 */
+ hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
+ hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
+ hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
+ hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
+ hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
+ hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
+
+ /* Translate the Hash Table size into actual number */
+ switch (hw_feat->hash_table_size) {
+ case 0:
+ break;
+ case 1:
+ hw_feat->hash_table_size = 64;
+ break;
+ case 2:
+ hw_feat->hash_table_size = 128;
+ break;
+ case 3:
+ hw_feat->hash_table_size = 256;
+ break;
+ }
+
+ /* Translate the address width setting into actual number */
+ switch (hw_feat->dma_width) {
+ case 0:
+ hw_feat->dma_width = 32;
+ break;
+ case 1:
+ hw_feat->dma_width = 40;
+ break;
+ case 2:
+ hw_feat->dma_width = 48;
+ break;
+ default:
+ hw_feat->dma_width = 32;
+ }
+
+ /* The Queue, Channel and TC counts are zero based so increment them
+ * to get the actual number
+ */
+ hw_feat->rx_q_cnt++;
+ hw_feat->tx_q_cnt++;
+ hw_feat->rx_ch_cnt++;
+ hw_feat->tx_ch_cnt++;
+ hw_feat->tc_cnt++;
+
+ DBGPR("<--xgbe_get_all_hw_features\n");
+}
+
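+/* The 'add'/'del' flags distinguish full napi registration at start/stop
+ * from the lighter enable/disable done on powerup/powerdown, where the
+ * napi instances already exist.
+ */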
+static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (add)
+ netif_napi_add(pdata->netdev, &channel->napi,
+ xgbe_one_poll, NAPI_POLL_WEIGHT);
+
+ napi_enable(&channel->napi);
+ }
+ } else {
+ if (add)
+ netif_napi_add(pdata->netdev, &pdata->napi,
+ xgbe_all_poll, NAPI_POLL_WEIGHT);
+
+ napi_enable(&pdata->napi);
+ }
+}
+
+static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ napi_disable(&channel->napi);
+
+ if (del)
+ netif_napi_del(&channel->napi);
+ }
+ } else {
+ napi_disable(&pdata->napi);
+
+ if (del)
+ netif_napi_del(&pdata->napi);
+ }
+}
+
+static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ struct net_device *netdev = pdata->netdev;
+ unsigned int i;
+ int ret;
+
+ ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+ netdev->name, pdata);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ pdata->dev_irq);
+ return ret;
+ }
+
+ if (!pdata->per_channel_irq)
+ return 0;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ snprintf(channel->dma_irq_name,
+ sizeof(channel->dma_irq_name) - 1,
+ "%s-TxRx-%u", netdev_name(netdev),
+ channel->queue_index);
+
+ ret = devm_request_irq(pdata->dev, channel->dma_irq,
+ xgbe_dma_isr, 0,
+ channel->dma_irq_name, channel);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ channel->dma_irq);
+ goto err_irq;
+ }
+ }
+
+ return 0;
+
+err_irq:
+	/* 'i' is unsigned, so decrementing past zero wraps to UINT_MAX,
+	 * failing the 'i < pdata->channel_count' test and ending the
+	 * unwind loop
+	 */
+ for (i--, channel--; i < pdata->channel_count; i--, channel--)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ return ret;
+}
+
+static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ if (!pdata->per_channel_irq)
+ return;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+}
+
+void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->xgbe_init_tx_coalesce\n");
+
+ pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
+ pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;
+
+ hw_if->config_tx_coalesce(pdata);
+
+ DBGPR("<--xgbe_init_tx_coalesce\n");
+}
+
+void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->xgbe_init_rx_coalesce\n");
+
+ pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
+ pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
+ pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
+
+ hw_if->config_rx_coalesce(pdata);
+
+ DBGPR("<--xgbe_init_rx_coalesce\n");
+}
+
+static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_free_tx_data\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = XGBE_GET_DESC_DATA(ring, j);
+ desc_if->unmap_rdata(pdata, rdata);
+ }
+ }
+
+ DBGPR("<--xgbe_free_tx_data\n");
+}
+
+static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_free_rx_data\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = XGBE_GET_DESC_DATA(ring, j);
+ desc_if->unmap_rdata(pdata, rdata);
+ }
+ }
+
+ DBGPR("<--xgbe_free_rx_data\n");
+}
+
+static void xgbe_adjust_link(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct phy_device *phydev = pdata->phydev;
+ int new_state = 0;
+
+ if (!phydev)
+ return;
+
+ if (phydev->link) {
+ /* Flow control support */
+ if (pdata->pause_autoneg) {
+ if (phydev->pause || phydev->asym_pause) {
+ pdata->tx_pause = 1;
+ pdata->rx_pause = 1;
+ } else {
+ pdata->tx_pause = 0;
+ pdata->rx_pause = 0;
+ }
+ }
+
+ if (pdata->tx_pause != pdata->phy_tx_pause) {
+ hw_if->config_tx_flow_control(pdata);
+ pdata->phy_tx_pause = pdata->tx_pause;
+ }
+
+ if (pdata->rx_pause != pdata->phy_rx_pause) {
+ hw_if->config_rx_flow_control(pdata);
+ pdata->phy_rx_pause = pdata->rx_pause;
+ }
+
+ /* Speed support */
+ if (phydev->speed != pdata->phy_speed) {
+ new_state = 1;
+
+ switch (phydev->speed) {
+ case SPEED_10000:
+ hw_if->set_xgmii_speed(pdata);
+ break;
+
+ case SPEED_2500:
+ hw_if->set_gmii_2500_speed(pdata);
+ break;
+
+ case SPEED_1000:
+ hw_if->set_gmii_speed(pdata);
+ break;
+ }
+ pdata->phy_speed = phydev->speed;
+ }
+
+ if (phydev->link != pdata->phy_link) {
+ new_state = 1;
+ pdata->phy_link = 1;
+ }
+ } else if (pdata->phy_link) {
+ new_state = 1;
+ pdata->phy_link = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ }
+
+ if (new_state)
+ phy_print_status(phydev);
+}
+
+static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct phy_device *phydev = pdata->phydev;
+ int ret;
+
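+	/* Start with the link state unknown (-1) so the first adjust_link
+	 * callback always sees a change and reports the initial state.
+	 */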
+ pdata->phy_link = -1;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ pdata->phy_tx_pause = pdata->tx_pause;
+ pdata->phy_rx_pause = pdata->rx_pause;
+
+ ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
+ pdata->phy_mode);
+ if (ret) {
+ netdev_err(netdev, "phy_connect_direct failed\n");
+ return ret;
+ }
+
+ if (!phydev->drv || (phydev->drv->phy_id == 0)) {
+ netdev_err(netdev, "phy_id not valid\n");
+ ret = -ENODEV;
+ goto err_phy_connect;
+ }
+ DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n",
+ dev_name(&phydev->dev), phydev->link);
+
+ return 0;
+
+err_phy_connect:
+ phy_disconnect(phydev);
+
+ return ret;
+}
+
+static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->phydev)
+ return;
+
+ phy_disconnect(pdata->phydev);
+}
+
+int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned long flags;
+
+ DBGPR("-->xgbe_powerdown\n");
+
+ if (!netif_running(netdev) ||
+ (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
+ netdev_alert(netdev, "Device is already powered down\n");
+ DBGPR("<--xgbe_powerdown\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&pdata->lock, flags);
+
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_detach(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+
+ hw_if->powerdown_tx(pdata);
+ hw_if->powerdown_rx(pdata);
+
+ xgbe_napi_disable(pdata, 0);
+
+ phy_stop(pdata->phydev);
+
+ pdata->power_down = 1;
+
+ spin_unlock_irqrestore(&pdata->lock, flags);
+
+ DBGPR("<--xgbe_powerdown\n");
+
+ return 0;
+}
+
+int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned long flags;
+
+ DBGPR("-->xgbe_powerup\n");
+
+ if (!netif_running(netdev) ||
+ (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
+ netdev_alert(netdev, "Device is already powered up\n");
+ DBGPR("<--xgbe_powerup\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&pdata->lock, flags);
+
+ pdata->power_down = 0;
+
+ phy_start(pdata->phydev);
+
+ xgbe_napi_enable(pdata, 0);
+
+ hw_if->powerup_tx(pdata);
+ hw_if->powerup_rx(pdata);
+
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_attach(netdev);
+
+ netif_tx_start_all_queues(netdev);
+
+ spin_unlock_irqrestore(&pdata->lock, flags);
+
+ DBGPR("<--xgbe_powerup\n");
+
+ return 0;
+}
+
+static int xgbe_start(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct net_device *netdev = pdata->netdev;
+ int ret;
+
+ DBGPR("-->xgbe_start\n");
+
+ hw_if->init(pdata);
+
+ phy_start(pdata->phydev);
+
+ xgbe_napi_enable(pdata, 1);
+
+ ret = xgbe_request_irqs(pdata);
+ if (ret)
+ goto err_napi;
+
+ hw_if->enable_tx(pdata);
+ hw_if->enable_rx(pdata);
+
+ xgbe_init_tx_timers(pdata);
+
+ netif_tx_start_all_queues(netdev);
+
+ DBGPR("<--xgbe_start\n");
+
+ return 0;
+
+err_napi:
+ xgbe_napi_disable(pdata, 1);
+
+ phy_stop(pdata->phydev);
+
+ hw_if->exit(pdata);
+
+ return ret;
+}
+
+static void xgbe_stop(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_queue *txq;
+ unsigned int i;
+
+ DBGPR("-->xgbe_stop\n");
+
+ netif_tx_stop_all_queues(netdev);
+
+ xgbe_stop_tx_timers(pdata);
+
+ hw_if->disable_tx(pdata);
+ hw_if->disable_rx(pdata);
+
+ xgbe_free_irqs(pdata);
+
+ xgbe_napi_disable(pdata, 1);
+
+ phy_stop(pdata->phydev);
+
+ hw_if->exit(pdata);
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ continue;
+
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ netdev_tx_reset_queue(txq);
+ }
+
+ DBGPR("<--xgbe_stop\n");
+}
+
+static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
+{
+ DBGPR("-->xgbe_restart_dev\n");
+
+ /* If not running, "restart" will happen on open */
+ if (!netif_running(pdata->netdev))
+ return;
+
+ xgbe_stop(pdata);
+
+ xgbe_free_tx_data(pdata);
+ xgbe_free_rx_data(pdata);
+
+ xgbe_start(pdata);
+
+ DBGPR("<--xgbe_restart_dev\n");
+}
+
+static void xgbe_restart(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ restart_work);
+
+ rtnl_lock();
+
+ xgbe_restart_dev(pdata);
+
+ rtnl_unlock();
+}
+
+static void xgbe_tx_tstamp(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ tx_tstamp_work);
+ struct skb_shared_hwtstamps hwtstamps;
+ u64 nsec;
+ unsigned long flags;
+
+ if (pdata->tx_tstamp) {
+ nsec = timecounter_cyc2time(&pdata->tstamp_tc,
+ pdata->tx_tstamp);
+
+ memset(&hwtstamps, 0, sizeof(hwtstamps));
+ hwtstamps.hwtstamp = ns_to_ktime(nsec);
+ skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
+ }
+
+ dev_kfree_skb_any(pdata->tx_tstamp_skb);
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ pdata->tx_tstamp_skb = NULL;
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+}
+
+static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
+ struct ifreq *ifreq)
+{
+ if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
+ sizeof(pdata->tstamp_config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
+ struct ifreq *ifreq)
+{
+ struct hwtstamp_config config;
+ unsigned int mac_tscr;
+
+ if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.flags)
+ return -EINVAL;
+
+ mac_tscr = 0;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ break;
+
+ case HWTSTAMP_TX_ON:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
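+	/* Note: the PTP v2 UDP cases below fall through into the matching
+	 * PTP v1 cases on purpose, since v2 needs the v1 settings plus
+	 * TSVER2ENA.
+	 */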
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+
+ case HWTSTAMP_FILTER_ALL:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ /* PTP v1, UDP, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ /* PTP v1, UDP, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ /* PTP v2, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ /* PTP v1, UDP, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* 802.1AS, Ethernet, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* 802.1AS, Ethernet, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* 802.1AS, Ethernet, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* PTP v2/802.1AS, any layer, any kind of event packet */
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* PTP v2/802.1AS, any layer, Sync packet */
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+	/* PTP v2/802.1AS, any layer, Delay_req packet */
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+ XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+ break;
+
+ default:
+ return -ERANGE;
+ }
+
+ pdata->hw_if.config_tstamp(pdata, mac_tscr);
+
+ memcpy(&pdata->tstamp_config, &config, sizeof(config));
+
+ return 0;
+}
+
+static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+ struct sk_buff *skb,
+ struct xgbe_packet_data *packet)
+{
+ unsigned long flags;
+
+ if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ if (pdata->tx_tstamp_skb) {
+ /* Another timestamp in progress, ignore this one */
+ XGMAC_SET_BITS(packet->attributes,
+ TX_PACKET_ATTRIBUTES, PTP, 0);
+ } else {
+ pdata->tx_tstamp_skb = skb_get(skb);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+ }
+
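+	/* Fall back to a software timestamp when no hardware timestamp
+	 * was claimed for this packet
+	 */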
+ if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
+ skb_tx_timestamp(skb);
+}
+
+static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
+{
+ if (skb_vlan_tag_present(skb))
+ packet->vlan_ctag = skb_vlan_tag_get(skb);
+}
+
+static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
+{
+ int ret;
+
+ if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE))
+ return 0;
+
+ ret = skb_cow_head(skb, 0);
+ if (ret)
+ return ret;
+
+ packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ packet->tcp_header_len = tcp_hdrlen(skb);
+ packet->tcp_payload_len = skb->len - packet->header_len;
+ packet->mss = skb_shinfo(skb)->gso_size;
+ DBGPR(" packet->header_len=%u\n", packet->header_len);
+ DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
+ packet->tcp_header_len, packet->tcp_payload_len);
+ DBGPR(" packet->mss=%u\n", packet->mss);
+
+	/* Record the number of packets that will ultimately be transmitted
+	 * (one per GSO segment) along with the header bytes repeated for
+	 * each additional segment
+	 */
+ packet->tx_packets = skb_shinfo(skb)->gso_segs;
+ packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;
+
+ return 0;
+}
+
+static int xgbe_is_tso(struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ DBGPR(" TSO packet to be processed\n");
+
+ return 1;
+}
+
+static void xgbe_packet_info(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring, struct sk_buff *skb,
+ struct xgbe_packet_data *packet)
+{
+ struct skb_frag_struct *frag;
+ unsigned int context_desc;
+ unsigned int len;
+ unsigned int i;
+
+ packet->skb = skb;
+
+ context_desc = 0;
+ packet->rdesc_count = 0;
+
+ packet->tx_packets = 1;
+ packet->tx_bytes = skb->len;
+
+ if (xgbe_is_tso(skb)) {
+		/* TSO requires an extra descriptor if the MSS differs from
+		 * the last one used
+		 */
+ if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
+ context_desc = 1;
+ packet->rdesc_count++;
+ }
+
+		/* TSO requires an extra descriptor for the TSO header */
+ packet->rdesc_count++;
+
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE, 1);
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE, 1);
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL)
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE, 1);
+
+ if (skb_vlan_tag_present(skb)) {
+		/* VLAN requires an extra descriptor if the tag differs from
+		 * the last one used
+		 */
+ if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
+ /* We can share with the TSO context descriptor */
+ if (!context_desc) {
+ context_desc = 1;
+ packet->rdesc_count++;
+ }
+
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG, 1);
+ }
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ PTP, 1);
+
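+	/* One descriptor is needed for each XGBE_TX_MAX_BUF_SIZE chunk of
+	 * the linear data and of every fragment
+	 */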
+ for (len = skb_headlen(skb); len;) {
+ packet->rdesc_count++;
+ len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ for (len = skb_frag_size(frag); len; ) {
+ packet->rdesc_count++;
+ len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
+ }
+ }
+}
+
+static int xgbe_open(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ int ret;
+
+ DBGPR("-->xgbe_open\n");
+
+ /* Initialize the phy */
+ ret = xgbe_phy_init(pdata);
+ if (ret)
+ return ret;
+
+ /* Enable the clocks */
+ ret = clk_prepare_enable(pdata->sysclk);
+ if (ret) {
+ netdev_alert(netdev, "dma clk_prepare_enable failed\n");
+ goto err_phy_init;
+ }
+
+ ret = clk_prepare_enable(pdata->ptpclk);
+ if (ret) {
+ netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
+ goto err_sysclk;
+ }
+
+ /* Calculate the Rx buffer size before allocating rings */
+ ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
+ if (ret < 0)
+ goto err_ptpclk;
+ pdata->rx_buf_size = ret;
+
+ /* Allocate the channel and ring structures */
+ ret = xgbe_alloc_channels(pdata);
+ if (ret)
+ goto err_ptpclk;
+
+ /* Allocate the ring descriptors and buffers */
+ ret = desc_if->alloc_ring_resources(pdata);
+ if (ret)
+ goto err_channels;
+
+	/* Initialize the device restart and Tx timestamp work structs */
+ INIT_WORK(&pdata->restart_work, xgbe_restart);
+ INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+
+ ret = xgbe_start(pdata);
+ if (ret)
+ goto err_rings;
+
+ DBGPR("<--xgbe_open\n");
+
+ return 0;
+
+err_rings:
+ desc_if->free_ring_resources(pdata);
+
+err_channels:
+ xgbe_free_channels(pdata);
+
+err_ptpclk:
+ clk_disable_unprepare(pdata->ptpclk);
+
+err_sysclk:
+ clk_disable_unprepare(pdata->sysclk);
+
+err_phy_init:
+ xgbe_phy_exit(pdata);
+
+ return ret;
+}
+
+static int xgbe_close(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+
+ DBGPR("-->xgbe_close\n");
+
+ /* Stop the device */
+ xgbe_stop(pdata);
+
+ /* Free the ring descriptors and buffers */
+ desc_if->free_ring_resources(pdata);
+
+ /* Free the channel and ring structures */
+ xgbe_free_channels(pdata);
+
+ /* Disable the clocks */
+ clk_disable_unprepare(pdata->ptpclk);
+ clk_disable_unprepare(pdata->sysclk);
+
+ /* Release the phy */
+ xgbe_phy_exit(pdata);
+
+ DBGPR("<--xgbe_close\n");
+
+ return 0;
+}
+
+static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_packet_data *packet;
+ struct netdev_queue *txq;
+ int ret;
+
+ DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
+
+ channel = pdata->channel + skb->queue_mapping;
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ ring = channel->tx_ring;
+ packet = &ring->packet_data;
+
+ ret = NETDEV_TX_OK;
+
+ if (skb->len == 0) {
+ netdev_err(netdev, "empty skb received from stack\n");
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+
+ /* Calculate preliminary packet info */
+ memset(packet, 0, sizeof(*packet));
+ xgbe_packet_info(pdata, ring, skb, packet);
+
+ /* Check that there are enough descriptors available */
+ ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
+ if (ret)
+ goto tx_netdev_return;
+
+ ret = xgbe_prep_tso(skb, packet);
+ if (ret) {
+ netdev_err(netdev, "error processing TSO packet\n");
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+ xgbe_prep_vlan(skb, packet);
+
+ if (!desc_if->map_tx_skb(channel, skb)) {
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+
+ xgbe_prep_tx_tstamp(pdata, skb, packet);
+
+	/* Report the actual number of bytes (to be) sent */
+ netdev_tx_sent_queue(txq, packet->tx_bytes);
+
+ /* Configure required descriptor fields for transmission */
+ hw_if->dev_xmit(channel);
+
+#ifdef XGMAC_ENABLE_TX_PKT_DUMP
+ xgbe_print_pkt(netdev, skb, true);
+#endif
+
+ /* Stop the queue in advance if there may not be enough descriptors */
+ xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
+
+ ret = NETDEV_TX_OK;
+
+tx_netdev_return:
+ return ret;
+}
+
+static void xgbe_set_rx_mode(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->xgbe_set_rx_mode\n");
+
+ hw_if->config_rx_mode(pdata);
+
+ DBGPR("<--xgbe_set_rx_mode\n");
+}
+
+static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct sockaddr *saddr = addr;
+
+ DBGPR("-->xgbe_set_mac_address\n");
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+
+ hw_if->set_mac_address(pdata, netdev->dev_addr);
+
+ DBGPR("<--xgbe_set_mac_address\n");
+
+ return 0;
+}
+
+static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
+ break;
+
+ case SIOCSHWTSTAMP:
+ ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int xgbe_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ DBGPR("-->xgbe_change_mtu\n");
+
+ ret = xgbe_calc_rx_buf_size(netdev, mtu);
+ if (ret < 0)
+ return ret;
+
+ pdata->rx_buf_size = ret;
+ netdev->mtu = mtu;
+
+ xgbe_restart_dev(pdata);
+
+ DBGPR("<--xgbe_change_mtu\n");
+
+ return 0;
+}
+
+static void xgbe_tx_timeout(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ netdev_warn(netdev, "tx timeout, device restarting\n");
+ schedule_work(&pdata->restart_work);
+}
+
+static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *s)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
+
+ DBGPR("-->%s\n", __func__);
+
+ pdata->hw_if.read_mmc_stats(pdata);
+
+ s->rx_packets = pstats->rxframecount_gb;
+ s->rx_bytes = pstats->rxoctetcount_gb;
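+	/* Errors are all frames received (good or bad) less those counted
+	 * as good unicast, multicast or broadcast
+	 */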
+ s->rx_errors = pstats->rxframecount_gb -
+ pstats->rxbroadcastframes_g -
+ pstats->rxmulticastframes_g -
+ pstats->rxunicastframes_g;
+ s->multicast = pstats->rxmulticastframes_g;
+ s->rx_length_errors = pstats->rxlengtherror;
+ s->rx_crc_errors = pstats->rxcrcerror;
+ s->rx_fifo_errors = pstats->rxfifooverflow;
+
+ s->tx_packets = pstats->txframecount_gb;
+ s->tx_bytes = pstats->txoctetcount_gb;
+ s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
+ s->tx_dropped = netdev->stats.tx_dropped;
+
+ DBGPR("<--%s\n", __func__);
+
+ return s;
+}
+
+static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->%s\n", __func__);
+
+ set_bit(vid, pdata->active_vlans);
+ hw_if->update_vlan_hash_table(pdata);
+
+ DBGPR("<--%s\n", __func__);
+
+ return 0;
+}
+
+static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
+ u16 vid)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->%s\n", __func__);
+
+ clear_bit(vid, pdata->active_vlans);
+ hw_if->update_vlan_hash_table(pdata);
+
+ DBGPR("<--%s\n", __func__);
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xgbe_poll_controller(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_poll_controller\n");
+
+ if (pdata->per_channel_irq) {
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ xgbe_dma_isr(channel->dma_irq, channel);
+ } else {
+ disable_irq(pdata->dev_irq);
+ xgbe_isr(pdata->dev_irq, pdata);
+ enable_irq(pdata->dev_irq);
+ }
+
+ DBGPR("<--xgbe_poll_controller\n");
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
+static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int offset, queue;
+ u8 i;
+
+ if (tc && (tc != pdata->hw_feat.tc_cnt))
+ return -EINVAL;
+
+ if (tc) {
+ netdev_set_num_tc(netdev, tc);
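+		/* Assign each traffic class its contiguous range of Tx queues */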
+ for (i = 0, queue = 0, offset = 0; i < tc; i++) {
+ while ((queue < pdata->tx_q_count) &&
+ (pdata->q2tc_map[queue] == i))
+ queue++;
+
+ DBGPR(" TC%u using TXq%u-%u\n", i, offset, queue - 1);
+ netdev_set_tc_queue(netdev, i, queue - offset, offset);
+ offset = queue;
+ }
+ } else {
+ netdev_reset_tc(netdev);
+ }
+
+ return 0;
+}
+
+static int xgbe_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
+ int ret = 0;
+
+ rxhash = pdata->netdev_features & NETIF_F_RXHASH;
+ rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
+ rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
+ rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if ((features & NETIF_F_RXHASH) && !rxhash)
+ ret = hw_if->enable_rss(pdata);
+ else if (!(features & NETIF_F_RXHASH) && rxhash)
+ ret = hw_if->disable_rss(pdata);
+ if (ret)
+ return ret;
+
+ if ((features & NETIF_F_RXCSUM) && !rxcsum)
+ hw_if->enable_rx_csum(pdata);
+ else if (!(features & NETIF_F_RXCSUM) && rxcsum)
+ hw_if->disable_rx_csum(pdata);
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
+ hw_if->enable_rx_vlan_stripping(pdata);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
+ hw_if->disable_rx_vlan_stripping(pdata);
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
+ hw_if->enable_rx_vlan_filtering(pdata);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
+ hw_if->disable_rx_vlan_filtering(pdata);
+
+ pdata->netdev_features = features;
+
+ DBGPR("<--xgbe_set_features\n");
+
+ return 0;
+}
+
+static const struct net_device_ops xgbe_netdev_ops = {
+ .ndo_open = xgbe_open,
+ .ndo_stop = xgbe_close,
+ .ndo_start_xmit = xgbe_xmit,
+ .ndo_set_rx_mode = xgbe_set_rx_mode,
+ .ndo_set_mac_address = xgbe_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = xgbe_ioctl,
+ .ndo_change_mtu = xgbe_change_mtu,
+ .ndo_tx_timeout = xgbe_tx_timeout,
+ .ndo_get_stats64 = xgbe_get_stats64,
+ .ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = xgbe_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xgbe_poll_controller,
+#endif
+ .ndo_setup_tc = xgbe_setup_tc,
+ .ndo_set_features = xgbe_set_features,
+};
+
+struct net_device_ops *xgbe_get_netdev_ops(void)
+{
+ return (struct net_device_ops *)&xgbe_netdev_ops;
+}
+
+static void xgbe_rx_refresh(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+
+ while (ring->dirty != ring->cur) {
+ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
+
+ /* Reset rdata values */
+ desc_if->unmap_rdata(pdata, rdata);
+
+ if (desc_if->map_rx_buffer(pdata, ring, rdata))
+ break;
+
+ hw_if->rx_desc_reset(pdata, rdata, ring->dirty);
+
+ ring->dirty++;
+ }
+
+ /* Make sure everything is written before the register write */
+ wmb();
+
+	/* Update the Rx Tail Pointer Register with the address of
+	 * the last cleaned entry */
+ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+}
+
+static struct sk_buff *xgbe_create_skb(struct napi_struct *napi,
+ struct xgbe_ring_data *rdata,
+ unsigned int *len)
+{
+ struct sk_buff *skb;
+ u8 *packet;
+ unsigned int copy_len;
+
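+	/* Copy the packet header (or the whole packet if the hardware did
+	 * not split it) into the skb linear area; the caller attaches any
+	 * remaining data as page fragments
+	 */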
+ skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
+ if (!skb)
+ return NULL;
+
+ packet = page_address(rdata->rx.hdr.pa.pages) +
+ rdata->rx.hdr.pa.pages_offset;
+ copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : *len;
+ copy_len = min(rdata->rx.hdr.dma_len, copy_len);
+ skb_copy_to_linear_data(skb, packet, copy_len);
+ skb_put(skb, copy_len);
+
+ *len -= copy_len;
+
+ return skb;
+}
+
+static int xgbe_tx_poll(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct net_device *netdev = pdata->netdev;
+ struct netdev_queue *txq;
+ int processed = 0;
+ unsigned int tx_packets = 0, tx_bytes = 0;
+
+ DBGPR("-->xgbe_tx_poll\n");
+
+ /* Nothing to do if there isn't a Tx ring for this channel */
+ if (!ring)
+ return 0;
+
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+
+ while ((processed < XGBE_TX_DESC_MAX_PROC) &&
+ (ring->dirty != ring->cur)) {
+ rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
+ rdesc = rdata->rdesc;
+
+ if (!hw_if->tx_complete(rdesc))
+ break;
+
+ /* Make sure descriptor fields are read after reading the OWN
+ * bit */
+ dma_rmb();
+
+#ifdef XGMAC_ENABLE_TX_DESC_DUMP
+ xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
+#endif
+
+ if (hw_if->is_last_desc(rdesc)) {
+ tx_packets += rdata->tx.packets;
+ tx_bytes += rdata->tx.bytes;
+ }
+
+ /* Free the SKB and reset the descriptor for re-use */
+ desc_if->unmap_rdata(pdata, rdata);
+ hw_if->tx_desc_reset(rdata);
+
+ processed++;
+ ring->dirty++;
+ }
+
+ if (!processed)
+ return 0;
+
+ netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
+
+ if ((ring->tx.queue_stopped == 1) &&
+ (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
+ ring->tx.queue_stopped = 0;
+ netif_tx_wake_queue(txq);
+ }
+
+ DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
+
+ return processed;
+}
+
+static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_packet_data *packet;
+ struct net_device *netdev = pdata->netdev;
+ struct napi_struct *napi;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *hwtstamps;
+ unsigned int incomplete, error, context_next, context;
+ unsigned int len, put_len, max_len;
+ unsigned int received = 0;
+ int packet_count = 0;
+
+ DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
+
+ /* Nothing to do if there isn't a Rx ring for this channel */
+ if (!ring)
+ return 0;
+
+ napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
+
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ packet = &ring->packet_data;
+ while (packet_count < budget) {
+ DBGPR(" cur = %d\n", ring->cur);
+
+		/* On the first pass, see if there is saved state to restore */
+ if (!received && rdata->state_saved) {
+ incomplete = rdata->state.incomplete;
+ context_next = rdata->state.context_next;
+ skb = rdata->state.skb;
+ error = rdata->state.error;
+ len = rdata->state.len;
+ } else {
+ memset(packet, 0, sizeof(*packet));
+ incomplete = 0;
+ context_next = 0;
+ skb = NULL;
+ error = 0;
+ len = 0;
+ }
+
+read_again:
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+
+ if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
+ xgbe_rx_refresh(channel);
+
+ if (hw_if->dev_read(channel))
+ break;
+
+ received++;
+ ring->cur++;
+
+ incomplete = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES,
+ INCOMPLETE);
+ context_next = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES,
+ CONTEXT_NEXT);
+ context = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES,
+ CONTEXT);
+
+ /* Earlier error, just drain the remaining data */
+ if ((incomplete || context_next) && error)
+ goto read_again;
+
+ if (error || packet->errors) {
+ if (packet->errors)
+ DBGPR("Error in received packet\n");
+ dev_kfree_skb(skb);
+ goto next_packet;
+ }
+
+ if (!context) {
+ put_len = rdata->rx.len - len;
+ len += put_len;
+
+ if (!skb) {
+ dma_sync_single_for_cpu(pdata->dev,
+ rdata->rx.hdr.dma,
+ rdata->rx.hdr.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb = xgbe_create_skb(napi, rdata, &put_len);
+ if (!skb) {
+ error = 1;
+ goto skip_data;
+ }
+ }
+
+ if (put_len) {
+ dma_sync_single_for_cpu(pdata->dev,
+ rdata->rx.buf.dma,
+ rdata->rx.buf.dma_len,
+ DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ rdata->rx.buf.pa.pages,
+ rdata->rx.buf.pa.pages_offset,
+ put_len, rdata->rx.buf.dma_len);
+ rdata->rx.buf.pa.pages = NULL;
+ }
+ }
+
+skip_data:
+ if (incomplete || context_next)
+ goto read_again;
+
+ if (!skb)
+ goto next_packet;
+
+ /* Be sure we don't exceed the configured MTU */
+ max_len = netdev->mtu + ETH_HLEN;
+ if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (skb->protocol == htons(ETH_P_8021Q)))
+ max_len += VLAN_HLEN;
+
+ if (skb->len > max_len) {
+ DBGPR("packet length exceeds configured MTU\n");
+ dev_kfree_skb(skb);
+ goto next_packet;
+ }
+
+#ifdef XGMAC_ENABLE_RX_PKT_DUMP
+ xgbe_print_pkt(netdev, skb, false);
+#endif
+
+ skb_checksum_none_assert(skb);
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, CSUM_DONE))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, VLAN_CTAG))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ packet->vlan_ctag);
+
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
+ u64 nsec;
+
+ nsec = timecounter_cyc2time(&pdata->tstamp_tc,
+ packet->rx_tstamp);
+ hwtstamps = skb_hwtstamps(skb);
+ hwtstamps->hwtstamp = ns_to_ktime(nsec);
+ }
+
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, RSS_HASH))
+ skb_set_hash(skb, packet->rss_hash,
+ packet->rss_hash_type);
+
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, netdev);
+ skb_record_rx_queue(skb, channel->queue_index);
+ skb_mark_napi_id(skb, napi);
+
+ netdev->last_rx = jiffies;
+ napi_gro_receive(napi, skb);
+
+next_packet:
+ packet_count++;
+ }
+
+ /* Check if we need to save state before leaving */
+ if (received && (incomplete || context_next)) {
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ rdata->state_saved = 1;
+ rdata->state.incomplete = incomplete;
+ rdata->state.context_next = context_next;
+ rdata->state.skb = skb;
+ rdata->state.len = len;
+ rdata->state.error = error;
+ }
+
+ DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);
+
+ return packet_count;
+}
+
+static int xgbe_one_poll(struct napi_struct *napi, int budget)
+{
+ struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
+ napi);
+ int processed = 0;
+
+ DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
+
+ /* Cleanup Tx ring first */
+ xgbe_tx_poll(channel);
+
+ /* Process Rx ring next */
+ processed = xgbe_rx_poll(channel, budget);
+
+ /* If we processed everything, we are done */
+ if (processed < budget) {
+ /* Turn off polling */
+ napi_complete(napi);
+
+ /* Enable Tx and Rx interrupts */
+ enable_irq(channel->dma_irq);
+ }
+
+ DBGPR("<--xgbe_one_poll: received = %d\n", processed);
+
+ return processed;
+}
+
+static int xgbe_all_poll(struct napi_struct *napi, int budget)
+{
+ struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
+ napi);
+ struct xgbe_channel *channel;
+ int ring_budget;
+ int processed, last_processed;
+ unsigned int i;
+
+ DBGPR("-->xgbe_all_poll: budget=%d\n", budget);
+
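+	/* Split the budget evenly across the Rx rings and keep polling
+	 * until the budget is consumed or no channel makes progress
+	 */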
+ processed = 0;
+ ring_budget = budget / pdata->rx_ring_count;
+ do {
+ last_processed = processed;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ /* Cleanup Tx ring first */
+ xgbe_tx_poll(channel);
+
+ /* Process Rx ring next */
+ if (ring_budget > (budget - processed))
+ ring_budget = budget - processed;
+ processed += xgbe_rx_poll(channel, ring_budget);
+ }
+ } while ((processed < budget) && (processed != last_processed));
+
+ /* If we processed everything, we are done */
+ if (processed < budget) {
+ /* Turn off polling */
+ napi_complete(napi);
+
+ /* Enable Tx and Rx interrupts */
+ xgbe_enable_rx_tx_ints(pdata);
+ }
+
+ DBGPR("<--xgbe_all_poll: received = %d\n", processed);
+
+ return processed;
+}
+
+void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
+ unsigned int count, unsigned int flag)
+{
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+
+ while (count--) {
+ rdata = XGBE_GET_DESC_DATA(ring, idx);
+ rdesc = rdata->rdesc;
+ pr_alert("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+ (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+ le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
+ le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
+ idx++;
+ }
+}
+
+void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
+ unsigned int idx)
+{
+ pr_alert("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
+ le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
+ le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
+}
+
+void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ unsigned char *buf = skb->data;
+ unsigned char buffer[128];
+ unsigned int i, j;
+
+ netdev_alert(netdev, "\n************** SKB dump ****************\n");
+
+ netdev_alert(netdev, "%s packet of %d bytes\n",
+ (tx_rx ? "TX" : "RX"), skb->len);
+
+ netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
+ netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
+ netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
+
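+	/* Dump the payload 32 bytes per line, grouping every 4 bytes and
+	 * double-spacing every 16 bytes
+	 */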
+ for (i = 0, j = 0; i < skb->len;) {
+ j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
+ buf[i++]);
+
+ if ((i % 32) == 0) {
+ netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer);
+ j = 0;
+ } else if ((i % 16) == 0) {
+ buffer[j++] = ' ';
+ buffer[j++] = ' ';
+ } else if ((i % 4) == 0) {
+ buffer[j++] = ' ';
+ }
+ }
+	if (i % 32) {
+		/* NUL-terminate any trailing group spacing before printing */
+		buffer[j] = '\0';
+		netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer);
+	}
+
+ netdev_alert(netdev, "\n************** SKB dump ****************\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
new file mode 100644
index 000000000..5f149e8ee
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -0,0 +1,601 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/phy.h>
+#include <linux/net_tstamp.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+struct xgbe_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_size;
+ int stat_offset;
+};
+
+#define XGMAC_MMC_STAT(_string, _var) \
+ { _string, \
+ FIELD_SIZEOF(struct xgbe_mmc_stats, _var), \
+ offsetof(struct xgbe_prv_data, mmc_stats._var), \
+ }
+
+static const struct xgbe_stats xgbe_gstring_stats[] = {
+ XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
+ XGMAC_MMC_STAT("tx_packets", txframecount_gb),
+ XGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
+ XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
+ XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
+ XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
+ XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
+ XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
+ XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
+ XGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
+ XGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
+ XGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
+ XGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
+ XGMAC_MMC_STAT("tx_pause_frames", txpauseframes),
+
+ XGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
+ XGMAC_MMC_STAT("rx_packets", rxframecount_gb),
+ XGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
+ XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
+ XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
+ XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
+ XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
+ XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
+ XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
+ XGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
+ XGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
+ XGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
+ XGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
+ XGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
+ XGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
+ XGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
+ XGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
+ XGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
+ XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
+ XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
+ XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
+ XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
+};
+
+#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)
+
+static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ int i;
+
+ DBGPR("-->%s\n", __func__);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < XGBE_STATS_COUNT; i++) {
+ memcpy(data, xgbe_gstring_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+
+ DBGPR("<--%s\n", __func__);
+}
+
+static void xgbe_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ u8 *stat;
+ int i;
+
+ DBGPR("-->%s\n", __func__);
+
+ pdata->hw_if.read_mmc_stats(pdata);
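+	/* Each stat is a u64 located at stat_offset within the private data */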
+ for (i = 0; i < XGBE_STATS_COUNT; i++) {
+ stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
+ *data++ = *(u64 *)stat;
+ }
+
+ DBGPR("<--%s\n", __func__);
+}
+
+static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
+{
+ int ret;
+
+ DBGPR("-->%s\n", __func__);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ ret = XGBE_STATS_COUNT;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ DBGPR("<--%s\n", __func__);
+
+ return ret;
+}
+
+static void xgbe_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ DBGPR("-->xgbe_get_pauseparam\n");
+
+ pause->autoneg = pdata->pause_autoneg;
+ pause->tx_pause = pdata->tx_pause;
+ pause->rx_pause = pdata->rx_pause;
+
+ DBGPR("<--xgbe_get_pauseparam\n");
+}
+
+static int xgbe_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct phy_device *phydev = pdata->phydev;
+ int ret = 0;
+
+ DBGPR("-->xgbe_set_pauseparam\n");
+
+ DBGPR(" autoneg = %d, tx_pause = %d, rx_pause = %d\n",
+ pause->autoneg, pause->tx_pause, pause->rx_pause);
+
+ pdata->pause_autoneg = pause->autoneg;
+ if (pause->autoneg) {
+ phydev->advertising |= ADVERTISED_Pause;
+ phydev->advertising |= ADVERTISED_Asym_Pause;
+
+ } else {
+ phydev->advertising &= ~ADVERTISED_Pause;
+ phydev->advertising &= ~ADVERTISED_Asym_Pause;
+
+ pdata->tx_pause = pause->tx_pause;
+ pdata->rx_pause = pause->rx_pause;
+ }
+
+ if (netif_running(netdev))
+ ret = phy_start_aneg(phydev);
+
+ DBGPR("<--xgbe_set_pauseparam\n");
+
+ return ret;
+}
+
+static int xgbe_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ DBGPR("-->xgbe_get_settings\n");
+
+ if (!pdata->phydev)
+ return -ENODEV;
+
+ ret = phy_ethtool_gset(pdata->phydev, cmd);
+
+ DBGPR("<--xgbe_get_settings\n");
+
+ return ret;
+}
+
+static int xgbe_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct phy_device *phydev = pdata->phydev;
+ u32 speed;
+ int ret;
+
+ DBGPR("-->xgbe_set_settings\n");
+
+ if (!pdata->phydev)
+ return -ENODEV;
+
+ speed = ethtool_cmd_speed(cmd);
+
+ if (cmd->phy_address != phydev->addr)
+ return -EINVAL;
+
+ if ((cmd->autoneg != AUTONEG_ENABLE) &&
+ (cmd->autoneg != AUTONEG_DISABLE))
+ return -EINVAL;
+
+ if (cmd->autoneg == AUTONEG_DISABLE) {
+ switch (speed) {
+ case SPEED_10000:
+ case SPEED_2500:
+ case SPEED_1000:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (cmd->duplex != DUPLEX_FULL)
+ return -EINVAL;
+ }
+
+ cmd->advertising &= phydev->supported;
+ if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
+ return -EINVAL;
+
+ ret = 0;
+ phydev->autoneg = cmd->autoneg;
+ phydev->speed = speed;
+ phydev->duplex = cmd->duplex;
+ phydev->advertising = cmd->advertising;
+
+ if (cmd->autoneg == AUTONEG_ENABLE)
+ phydev->advertising |= ADVERTISED_Autoneg;
+ else
+ phydev->advertising &= ~ADVERTISED_Autoneg;
+
+ if (netif_running(netdev))
+ ret = phy_start_aneg(phydev);
+
+ DBGPR("<--xgbe_set_settings\n");
+
+ return ret;
+}
+
+static void xgbe_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
+
+ strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
+ sizeof(drvinfo->bus_info));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
+ XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
+ drvinfo->n_stats = XGBE_STATS_COUNT;
+}
+
+static int xgbe_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ DBGPR("-->xgbe_get_coalesce\n");
+
+ memset(ec, 0, sizeof(struct ethtool_coalesce));
+
+ ec->rx_coalesce_usecs = pdata->rx_usecs;
+ ec->rx_max_coalesced_frames = pdata->rx_frames;
+
+ ec->tx_max_coalesced_frames = pdata->tx_frames;
+
+ DBGPR("<--xgbe_get_coalesce\n");
+
+ return 0;
+}
+
+static int xgbe_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int rx_frames, rx_riwt, rx_usecs;
+ unsigned int tx_frames;
+
+ DBGPR("-->xgbe_set_coalesce\n");
+
+	/* Check for unsupported parameters */
+ if ((ec->rx_coalesce_usecs_irq) ||
+ (ec->rx_max_coalesced_frames_irq) ||
+ (ec->tx_coalesce_usecs) ||
+ (ec->tx_coalesce_usecs_irq) ||
+ (ec->tx_max_coalesced_frames_irq) ||
+ (ec->stats_block_coalesce_usecs) ||
+ (ec->use_adaptive_rx_coalesce) ||
+ (ec->use_adaptive_tx_coalesce) ||
+ (ec->pkt_rate_low) ||
+ (ec->rx_coalesce_usecs_low) ||
+ (ec->rx_max_coalesced_frames_low) ||
+ (ec->tx_coalesce_usecs_low) ||
+ (ec->tx_max_coalesced_frames_low) ||
+ (ec->pkt_rate_high) ||
+ (ec->rx_coalesce_usecs_high) ||
+ (ec->rx_max_coalesced_frames_high) ||
+ (ec->tx_coalesce_usecs_high) ||
+ (ec->tx_max_coalesced_frames_high) ||
+ (ec->rate_sample_interval))
+ return -EOPNOTSUPP;
+
+ rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
+ rx_usecs = ec->rx_coalesce_usecs;
+ rx_frames = ec->rx_max_coalesced_frames;
+
+	/* Use the smallest possible value if the conversion resulted in zero */
+ if (rx_usecs && !rx_riwt)
+ rx_riwt = 1;
+
+ /* Check the bounds of values for Rx */
+ if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
+ netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
+ hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
+ return -EINVAL;
+ }
+ if (rx_frames > pdata->rx_desc_count) {
+ netdev_alert(netdev, "rx-frames is limited to %d frames\n",
+ pdata->rx_desc_count);
+ return -EINVAL;
+ }
+
+ tx_frames = ec->tx_max_coalesced_frames;
+
+ /* Check the bounds of values for Tx */
+ if (tx_frames > pdata->tx_desc_count) {
+ netdev_alert(netdev, "tx-frames is limited to %d frames\n",
+ pdata->tx_desc_count);
+ return -EINVAL;
+ }
+
+ pdata->rx_riwt = rx_riwt;
+ pdata->rx_usecs = rx_usecs;
+ pdata->rx_frames = rx_frames;
+ hw_if->config_rx_coalesce(pdata);
+
+ pdata->tx_frames = tx_frames;
+ hw_if->config_tx_coalesce(pdata);
+
+ DBGPR("<--xgbe_set_coalesce\n");
+
+ return 0;
+}
+
+static int xgbe_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = pdata->rx_ring_count;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static u32 xgbe_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ return sizeof(pdata->rss_key);
+}
+
+static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ return ARRAY_SIZE(pdata->rss_table);
+}
+
+static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ unsigned int i;
+
+ if (indir) {
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
+ indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
+ MAC_RSSDR, DMCH);
+ }
+
+ if (key)
+ memcpy(key, pdata->rss_key, sizeof(pdata->rss_key));
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+	int ret;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ ret = hw_if->set_rss_lookup_table(pdata, indir);
+ if (ret)
+ return ret;
+ }
+
+ if (key) {
+ ret = hw_if->set_rss_hash_key(pdata, key);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgbe_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *ts_info)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (pdata->ptp_clock)
+ ts_info->phc_index = ptp_clock_index(pdata->ptp_clock);
+ else
+ ts_info->phc_index = -1;
+
+ ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+ ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static const struct ethtool_ops xgbe_ethtool_ops = {
+ .get_settings = xgbe_get_settings,
+ .set_settings = xgbe_set_settings,
+ .get_drvinfo = xgbe_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = xgbe_get_coalesce,
+ .set_coalesce = xgbe_set_coalesce,
+ .get_pauseparam = xgbe_get_pauseparam,
+ .set_pauseparam = xgbe_set_pauseparam,
+ .get_strings = xgbe_get_strings,
+ .get_ethtool_stats = xgbe_get_ethtool_stats,
+ .get_sset_count = xgbe_get_sset_count,
+ .get_rxnfc = xgbe_get_rxnfc,
+ .get_rxfh_key_size = xgbe_get_rxfh_key_size,
+ .get_rxfh_indir_size = xgbe_get_rxfh_indir_size,
+ .get_rxfh = xgbe_get_rxfh,
+ .set_rxfh = xgbe_set_rxfh,
+ .get_ts_info = xgbe_get_ts_info,
+};
+
+struct ethtool_ops *xgbe_get_ethtool_ops(void)
+{
+ return (struct ethtool_ops *)&xgbe_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
new file mode 100644
index 000000000..714905384
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -0,0 +1,631 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/clk.h>
+#include <linux/property.h>
+#include <linux/acpi.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(XGBE_DRV_VERSION);
+MODULE_DESCRIPTION(XGBE_DRV_DESC);
+
+static void xgbe_default_config(struct xgbe_prv_data *pdata)
+{
+ DBGPR("-->xgbe_default_config\n");
+
+ pdata->pblx8 = DMA_PBL_X8_ENABLE;
+ pdata->tx_sf_mode = MTL_TSF_ENABLE;
+ pdata->tx_threshold = MTL_TX_THRESHOLD_64;
+ pdata->tx_pbl = DMA_PBL_16;
+ pdata->tx_osp_mode = DMA_OSP_ENABLE;
+ pdata->rx_sf_mode = MTL_RSF_DISABLE;
+ pdata->rx_threshold = MTL_RX_THRESHOLD_64;
+ pdata->rx_pbl = DMA_PBL_16;
+ pdata->pause_autoneg = 1;
+ pdata->tx_pause = 1;
+ pdata->rx_pause = 1;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ pdata->power_down = 0;
+ pdata->default_autoneg = AUTONEG_ENABLE;
+ pdata->default_speed = SPEED_10000;
+
+ DBGPR("<--xgbe_default_config\n");
+}
+
+static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
+{
+ xgbe_init_function_ptrs_dev(&pdata->hw_if);
+ xgbe_init_function_ptrs_desc(&pdata->desc_if);
+}
+
+#ifdef CONFIG_ACPI
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+ struct acpi_device *adev = pdata->adev;
+ struct device *dev = pdata->dev;
+ u32 property;
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long data;
+ int cca;
+ int ret;
+
+ /* Obtain the system clock setting */
+ ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
+ if (ret) {
+ dev_err(dev, "unable to obtain %s property\n",
+ XGBE_ACPI_DMA_FREQ);
+ return ret;
+ }
+ pdata->sysclk_rate = property;
+
+ /* Obtain the PTP clock setting */
+ ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
+ if (ret) {
+ dev_err(dev, "unable to obtain %s property\n",
+ XGBE_ACPI_PTP_FREQ);
+ return ret;
+ }
+ pdata->ptpclk_rate = property;
+
+ /* Retrieve the device cache coherency value */
+ handle = adev->handle;
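+	/* Walk up the ACPI namespace until a _CCA method is found */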
+ do {
+ status = acpi_evaluate_integer(handle, "_CCA", NULL, &data);
+ if (!ACPI_FAILURE(status)) {
+ cca = data;
+ break;
+ }
+
+ status = acpi_get_parent(handle, &handle);
+ } while (!ACPI_FAILURE(status));
+
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "error obtaining acpi coherency value\n");
+ return -EINVAL;
+ }
+ pdata->coherent = !!cca;
+
+ return 0;
+}
+#else /* CONFIG_ACPI */
+static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_ACPI */
+
+#ifdef CONFIG_OF
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+
+ /* Obtain the system clock setting */
+ pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
+ if (IS_ERR(pdata->sysclk)) {
+ dev_err(dev, "dma devm_clk_get failed\n");
+ return PTR_ERR(pdata->sysclk);
+ }
+ pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
+
+ /* Obtain the PTP clock setting */
+ pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
+ if (IS_ERR(pdata->ptpclk)) {
+ dev_err(dev, "ptp devm_clk_get failed\n");
+ return PTR_ERR(pdata->ptpclk);
+ }
+ pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
+
+ /* Retrieve the device cache coherency value */
+ pdata->coherent = of_dma_is_coherent(dev->of_node);
+
+ return 0;
+}
+#else /* CONFIG_OF */
+static int xgbe_of_support(struct xgbe_prv_data *pdata)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
+static int xgbe_probe(struct platform_device *pdev)
+{
+ struct xgbe_prv_data *pdata;
+ struct xgbe_hw_if *hw_if;
+ struct xgbe_desc_if *desc_if;
+ struct net_device *netdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ const char *phy_mode;
+ unsigned int i;
+ int ret;
+
+ DBGPR("--> xgbe_probe\n");
+
+ netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
+ XGBE_MAX_DMA_CHANNELS);
+ if (!netdev) {
+ dev_err(dev, "alloc_etherdev failed\n");
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+ SET_NETDEV_DEV(netdev, dev);
+ pdata = netdev_priv(netdev);
+ pdata->netdev = netdev;
+ pdata->pdev = pdev;
+ pdata->adev = ACPI_COMPANION(dev);
+ pdata->dev = dev;
+ platform_set_drvdata(pdev, netdev);
+
+ spin_lock_init(&pdata->lock);
+ mutex_init(&pdata->xpcs_mutex);
+ mutex_init(&pdata->rss_mutex);
+ spin_lock_init(&pdata->tstamp_lock);
+
+ /* Check if we should use ACPI or DT */
+ pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;
+
+ /* Set and validate the number of descriptors for a ring */
+ BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
+ pdata->tx_desc_count = XGBE_TX_DESC_CNT;
+ if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
+ dev_err(dev, "tx descriptor count (%d) is not valid\n",
+ pdata->tx_desc_count);
+ ret = -EINVAL;
+ goto err_io;
+ }
+ BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
+ pdata->rx_desc_count = XGBE_RX_DESC_CNT;
+ if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
+ dev_err(dev, "rx descriptor count (%d) is not valid\n",
+ pdata->rx_desc_count);
+ ret = -EINVAL;
+ goto err_io;
+ }
+
+ /* Obtain the mmio areas for the device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdata->xgmac_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->xgmac_regs)) {
+ dev_err(dev, "xgmac ioremap failed\n");
+ ret = PTR_ERR(pdata->xgmac_regs);
+ goto err_io;
+ }
+ DBGPR(" xgmac_regs = %p\n", pdata->xgmac_regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ pdata->xpcs_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->xpcs_regs)) {
+ dev_err(dev, "xpcs ioremap failed\n");
+ ret = PTR_ERR(pdata->xpcs_regs);
+ goto err_io;
+ }
+ DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
+
+ /* Retrieve the MAC address */
+ ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
+ pdata->mac_addr,
+ sizeof(pdata->mac_addr));
+ if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
+ dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
+ if (!ret)
+ ret = -EINVAL;
+ goto err_io;
+ }
+
+ /* Retrieve the PHY mode - it must be "xgmii" */
+ ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
+ &phy_mode);
+ if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
+ dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
+ if (!ret)
+ ret = -EINVAL;
+ goto err_io;
+ }
+ pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
+
+ /* Check for per channel interrupt support */
+ if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
+ pdata->per_channel_irq = 1;
+
+ /* Obtain device settings unique to ACPI/OF */
+ if (pdata->use_acpi)
+ ret = xgbe_acpi_support(pdata);
+ else
+ ret = xgbe_of_support(pdata);
+ if (ret)
+ goto err_io;
+
+ /* Set the DMA coherency values */
+ if (pdata->coherent) {
+ pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
+ pdata->arcache = XGBE_DMA_OS_ARCACHE;
+ pdata->awcache = XGBE_DMA_OS_AWCACHE;
+ } else {
+ pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
+ pdata->arcache = XGBE_DMA_SYS_ARCACHE;
+ pdata->awcache = XGBE_DMA_SYS_AWCACHE;
+ }
+
+ /* Get the device interrupt */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "platform_get_irq 0 failed\n");
+ goto err_io;
+ }
+ pdata->dev_irq = ret;
+
+ netdev->irq = pdata->dev_irq;
+ netdev->base_addr = (unsigned long)pdata->xgmac_regs;
+ memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+
+ /* Set all the function pointers */
+ xgbe_init_all_fptrs(pdata);
+ hw_if = &pdata->hw_if;
+ desc_if = &pdata->desc_if;
+
+ /* Issue software reset to device */
+ hw_if->exit(pdata);
+
+ /* Populate the hardware features */
+ xgbe_get_all_hw_features(pdata);
+
+ /* Set default configuration data */
+ xgbe_default_config(pdata);
+
+ /* Set the DMA mask */
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+ ret = dma_set_mask_and_coherent(dev,
+ DMA_BIT_MASK(pdata->hw_feat.dma_width));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed\n");
+ goto err_io;
+ }
+
+ /* Calculate the number of Tx and Rx rings to be created
+ * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
+ * the number of Tx queues to the number of Tx channels
+ * enabled
+ * -Rx (DMA) Channels do not map 1-to-1 so use the actual
+ * number of Rx queues
+ */
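+ /* For example (illustrative values): with 8 online CPUs and 4 Tx
+ * DMA channels, tx_ring_count = tx_q_count = 4; with 4 Rx DMA
+ * channels, rx_ring_count = 4 while rx_q_count follows the MTL
+ * Rx queue count reported by the hardware.
+ */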
+ pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
+ pdata->hw_feat.tx_ch_cnt);
+ pdata->tx_q_count = pdata->tx_ring_count;
+ ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+ if (ret) {
+ dev_err(dev, "error setting real tx queue count\n");
+ goto err_io;
+ }
+
+ pdata->rx_ring_count = min_t(unsigned int,
+ netif_get_num_default_rss_queues(),
+ pdata->hw_feat.rx_ch_cnt);
+ pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
+ ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
+ if (ret) {
+ dev_err(dev, "error setting real rx queue count\n");
+ goto err_io;
+ }
+
+ /* Initialize RSS hash key and lookup table */
+ netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
+
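+ /* Spread the Rx DMA channels round-robin across the RSS lookup
+ * table; e.g. with rx_ring_count = 4 the 256 entries map to
+ * channels 0, 1, 2, 3, 0, 1, ...
+ */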
+ for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
+ XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+ i % pdata->rx_ring_count);
+
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
+
+ /* Prepare to register with MDIO */
+ pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
+ if (!pdata->mii_bus_id) {
+ dev_err(dev, "failed to allocate mii bus id\n");
+ ret = -ENOMEM;
+ goto err_io;
+ }
+ ret = xgbe_mdio_register(pdata);
+ if (ret)
+ goto err_bus_id;
+
+ /* Set device operations */
+ netdev->netdev_ops = xgbe_get_netdev_ops();
+ netdev->ethtool_ops = xgbe_get_ethtool_ops();
+#ifdef CONFIG_AMD_XGBE_DCB
+ netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
+#endif
+
+ /* Set device features */
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GRO |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (pdata->hw_feat.rss)
+ netdev->hw_features |= NETIF_F_RXHASH;
+
+ netdev->vlan_features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
+
+ netdev->features |= netdev->hw_features;
+ pdata->netdev_features = netdev->features;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Use default watchdog timeout */
+ netdev->watchdog_timeo = 0;
+
+ xgbe_init_rx_coalesce(pdata);
+ xgbe_init_tx_coalesce(pdata);
+
+ netif_carrier_off(netdev);
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(dev, "net device registration failed\n");
+ goto err_reg_netdev;
+ }
+
+ xgbe_ptp_register(pdata);
+
+ xgbe_debugfs_init(pdata);
+
+ netdev_notice(netdev, "net device enabled\n");
+
+ DBGPR("<-- xgbe_probe\n");
+
+ return 0;
+
+err_reg_netdev:
+ xgbe_mdio_unregister(pdata);
+
+err_bus_id:
+ kfree(pdata->mii_bus_id);
+
+err_io:
+ free_netdev(netdev);
+
+err_alloc:
+ dev_notice(dev, "net device not enabled\n");
+
+ return ret;
+}
+
+static int xgbe_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ DBGPR("-->xgbe_remove\n");
+
+ xgbe_debugfs_exit(pdata);
+
+ xgbe_ptp_unregister(pdata);
+
+ unregister_netdev(netdev);
+
+ xgbe_mdio_unregister(pdata);
+
+ kfree(pdata->mii_bus_id);
+
+ free_netdev(netdev);
+
+ DBGPR("<--xgbe_remove\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int xgbe_suspend(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ int ret;
+
+ DBGPR("-->xgbe_suspend\n");
+
+ if (!netif_running(netdev)) {
+ DBGPR("<--xgbe_dev_suspend\n");
+ return -EINVAL;
+ }
+
+ ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
+
+ DBGPR("<--xgbe_suspend\n");
+
+ return ret;
+}
+
+static int xgbe_resume(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ int ret;
+
+ DBGPR("-->xgbe_resume\n");
+
+ if (!netif_running(netdev)) {
+ DBGPR("<--xgbe_dev_resume\n");
+ return -EINVAL;
+ }
+
+ ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
+
+ DBGPR("<--xgbe_resume\n");
+
+ return ret;
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgbe_acpi_match[] = {
+ { "AMDI8001", 0 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id xgbe_of_match[] = {
+ { .compatible = "amd,xgbe-seattle-v1a", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, xgbe_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
+
+static struct platform_driver xgbe_driver = {
+ .driver = {
+ .name = "amd-xgbe",
+#ifdef CONFIG_ACPI
+ .acpi_match_table = xgbe_acpi_match,
+#endif
+#ifdef CONFIG_OF
+ .of_match_table = xgbe_of_match,
+#endif
+ .pm = &xgbe_pm_ops,
+ },
+ .probe = xgbe_probe,
+ .remove = xgbe_remove,
+};
+
+module_platform_driver(xgbe_driver);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
new file mode 100644
index 000000000..59e267f3f
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -0,0 +1,312 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ int mmd_data;
+
+ DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
+ prtad, mmd_reg);
+
+ mmd_data = hw_if->read_mmd_regs(pdata, prtad, mmd_reg);
+
+ DBGPR_MDIO("<--xgbe_mdio_read: mmd_data=%#x\n", mmd_data);
+
+ return mmd_data;
+}
+
+static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
+ u16 mmd_val)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ int mmd_data = mmd_val;
+
+ DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
+ prtad, mmd_reg, mmd_data);
+
+ hw_if->write_mmd_regs(pdata, prtad, mmd_reg, mmd_data);
+
+ DBGPR_MDIO("<--xgbe_mdio_write\n");
+
+ return 0;
+}
+
+void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+ struct phy_device *phydev = pdata->mii->phy_map[XGBE_PRTAD];
+ int i;
+
+ dev_alert(dev, "\n************* PHY Reg dump **********************\n");
+
+ dev_alert(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
+ dev_alert(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
+ dev_alert(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
+ dev_alert(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
+ dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
+ dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
+
+ dev_alert(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
+ dev_alert(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
+ dev_alert(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
+ dev_alert(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE + 1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
+ dev_alert(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE + 2,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
+ dev_alert(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
+ MDIO_AN_COMP_STAT,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
+
+ dev_alert(dev, "MMD Device Mask = %#x\n",
+ phydev->c45_ids.devices_in_package);
+ for (i = 0; i < ARRAY_SIZE(phydev->c45_ids.device_ids); i++)
+ dev_alert(dev, " MMD %d: ID = %#08x\n", i,
+ phydev->c45_ids.device_ids[i]);
+
+ dev_alert(dev, "\n*************************************************\n");
+}
+
+int xgbe_mdio_register(struct xgbe_prv_data *pdata)
+{
+ struct mii_bus *mii;
+ struct phy_device *phydev;
+ int ret = 0;
+
+ DBGPR("-->xgbe_mdio_register\n");
+
+ mii = mdiobus_alloc();
+ if (!mii) {
+ dev_err(pdata->dev, "mdiobus_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ /* Register on the MDIO bus (don't probe any PHYs) */
+ mii->name = XGBE_PHY_NAME;
+ mii->read = xgbe_mdio_read;
+ mii->write = xgbe_mdio_write;
+ snprintf(mii->id, sizeof(mii->id), "%s", pdata->mii_bus_id);
+ mii->priv = pdata;
+ mii->phy_mask = ~0;
+ mii->parent = pdata->dev;
+ ret = mdiobus_register(mii);
+ if (ret) {
+ dev_err(pdata->dev, "mdiobus_register failed\n");
+ goto err_mdiobus_alloc;
+ }
+ DBGPR(" mdiobus_register succeeded for %s\n", pdata->mii_bus_id);
+
+ /* Probe the PCS using Clause 45 */
+ phydev = get_phy_device(mii, XGBE_PRTAD, true);
+ if (IS_ERR(phydev) || !phydev ||
+ !phydev->c45_ids.device_ids[MDIO_MMD_PCS]) {
+ dev_err(pdata->dev, "get_phy_device failed\n");
+ ret = phydev ? PTR_ERR(phydev) : -ENOLINK;
+ goto err_mdiobus_register;
+ }
+ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
+ MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
+
+ ret = phy_device_register(phydev);
+ if (ret) {
+ dev_err(pdata->dev, "phy_device_register failed\n");
+ goto err_phy_device;
+ }
+ if (!phydev->dev.driver) {
+ dev_err(pdata->dev, "phy driver probe failed\n");
+ ret = -EIO;
+ goto err_phy_device;
+ }
+
+ /* Add a reference to the PHY driver so it can't be unloaded */
+ pdata->phy_module = phydev->dev.driver->owner;
+ if (!try_module_get(pdata->phy_module)) {
+ dev_err(pdata->dev, "try_module_get failed\n");
+ ret = -EIO;
+ goto err_phy_device;
+ }
+
+ pdata->mii = mii;
+ pdata->mdio_mmd = MDIO_MMD_PCS;
+
+ phydev->autoneg = pdata->default_autoneg;
+ if (phydev->autoneg == AUTONEG_DISABLE) {
+ phydev->speed = pdata->default_speed;
+ phydev->duplex = DUPLEX_FULL;
+
+ phydev->advertising &= ~ADVERTISED_Autoneg;
+ }
+
+ pdata->phydev = phydev;
+
+ DBGPHY_REGS(pdata);
+
+ DBGPR("<--xgbe_mdio_register\n");
+
+ return 0;
+
+err_phy_device:
+ phy_device_free(phydev);
+
+err_mdiobus_register:
+ mdiobus_unregister(mii);
+
+err_mdiobus_alloc:
+ mdiobus_free(mii);
+
+ return ret;
+}
+
+void xgbe_mdio_unregister(struct xgbe_prv_data *pdata)
+{
+ DBGPR("-->xgbe_mdio_unregister\n");
+
+ pdata->phydev = NULL;
+
+ module_put(pdata->phy_module);
+ pdata->phy_module = NULL;
+
+ mdiobus_unregister(pdata->mii);
+ pdata->mii->priv = NULL;
+
+ mdiobus_free(pdata->mii);
+ pdata->mii = NULL;
+
+ DBGPR("<--xgbe_mdio_unregister\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
new file mode 100644
index 000000000..b03e4f58d
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
@@ -0,0 +1,279 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
+{
+ struct xgbe_prv_data *pdata = container_of(cc,
+ struct xgbe_prv_data,
+ tstamp_cc);
+ u64 nsec;
+
+ nsec = pdata->hw_if.get_tstamp_time(pdata);
+
+ return nsec;
+}
+
+static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 adjust;
+ u32 addend, diff;
+ unsigned int neg_adjust = 0;
+
+ if (delta < 0) {
+ neg_adjust = 1;
+ delta = -delta;
+ }
+
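+ /* Scale the current addend by |delta| parts-per-billion:
+ * diff = addend * |delta| / 10^9. For example, with
+ * addend = 0x80000000 and delta = 1000 ppb, diff = 2147 and
+ * the new addend becomes 0x80000863.
+ */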
+ adjust = pdata->tstamp_addend;
+ adjust *= delta;
+ diff = div_u64(adjust, 1000000000UL);
+
+ addend = (neg_adjust) ? pdata->tstamp_addend - diff :
+ pdata->tstamp_addend + diff;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+ pdata->hw_if.update_tstamp_addend(pdata, addend);
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return 0;
+}
+
+static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ timecounter_adjtime(&pdata->tstamp_tc, delta);
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return 0;
+}
+
+static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 nsec;
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+ nsec = timecounter_read(&pdata->tstamp_tc);
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int xgbe_settime(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct xgbe_prv_data *pdata = container_of(info,
+ struct xgbe_prv_data,
+ ptp_clock_info);
+ unsigned long flags;
+ u64 nsec;
+
+ nsec = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+ timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
+
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+ return 0;
+}
+
+static int xgbe_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *request, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+void xgbe_ptp_register(struct xgbe_prv_data *pdata)
+{
+ struct ptp_clock_info *info = &pdata->ptp_clock_info;
+ struct ptp_clock *clock;
+ struct cyclecounter *cc = &pdata->tstamp_cc;
+ u64 dividend;
+
+ snprintf(info->name, sizeof(info->name), "%s",
+ netdev_name(pdata->netdev));
+ info->owner = THIS_MODULE;
+ info->max_adj = pdata->ptpclk_rate;
+ info->adjfreq = xgbe_adjfreq;
+ info->adjtime = xgbe_adjtime;
+ info->gettime64 = xgbe_gettime;
+ info->settime64 = xgbe_settime;
+ info->enable = xgbe_enable;
+
+ clock = ptp_clock_register(info, pdata->dev);
+ if (IS_ERR(clock)) {
+ dev_err(pdata->dev, "ptp_clock_register failed\n");
+ return;
+ }
+
+ pdata->ptp_clock = clock;
+
+ /* Calculate the addend:
+ * addend = 2^32 / (PTP ref clock / 50MHz)
+ * = (2^32 * 50MHz) / PTP ref clock
+ */
+ dividend = 50000000;
+ dividend <<= 32;
+ pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
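+ /* e.g. a 250 MHz PTP reference clock (illustrative) gives
+ * addend = (2^32 * 50000000) / 250000000 = 0x33333333
+ */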
+
+ /* Setup the timecounter */
+ cc->read = xgbe_cc_read;
+ cc->mask = CLOCKSOURCE_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
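+ /* No scaling is needed since get_tstamp_time already returns
+ * nanoseconds (hence mult = 1, shift = 0)
+ */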
+
+ timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
+ ktime_to_ns(ktime_get_real()));
+
+ /* Disable all timestamping to start */
+ XGMAC_IOWRITE(pdata, MAC_TCR, 0);
+ pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ pdata->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+}
+
+void xgbe_ptp_unregister(struct xgbe_prv_data *pdata)
+{
+ if (pdata->ptp_clock)
+ ptp_clock_unregister(pdata->ptp_clock);
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
new file mode 100644
index 000000000..e62dfa2de
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -0,0 +1,867 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XGBE_H__
+#define __XGBE_H__
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/bitops.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <linux/net_tstamp.h>
+#include <net/dcbnl.h>
+
+#define XGBE_DRV_NAME "amd-xgbe"
+#define XGBE_DRV_VERSION "1.0.0-a"
+#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
+
+/* Descriptor related defines */
+#define XGBE_TX_DESC_CNT 512
+#define XGBE_TX_DESC_MIN_FREE (XGBE_TX_DESC_CNT >> 3)
+#define XGBE_TX_DESC_MAX_PROC (XGBE_TX_DESC_CNT >> 1)
+#define XGBE_RX_DESC_CNT 512
+
+#define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+
+/* Descriptors required for maximum contiguous TSO/GSO packet */
+#define XGBE_TX_MAX_SPLIT ((GSO_MAX_SIZE / XGBE_TX_MAX_BUF_SIZE) + 1)
+
+/* Maximum possible descriptors needed for an SKB:
+ * - Maximum number of SKB frags
+ * - Maximum descriptors for contiguous TSO/GSO packet
+ * - Possible context descriptor
+ * - Possible TSO header descriptor
+ */
+#define XGBE_TX_MAX_DESCS (MAX_SKB_FRAGS + XGBE_TX_MAX_SPLIT + 2)
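+
+/* With a typical 4K-page build (MAX_SKB_FRAGS = 17, GSO_MAX_SIZE = 64KB),
+ * XGBE_TX_MAX_SPLIT works out to 5 and XGBE_TX_MAX_DESCS to 24.
+ */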
+
+#define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define XGBE_RX_BUF_ALIGN 64
+#define XGBE_SKB_ALLOC_SIZE 256
+#define XGBE_SPH_HDSMS_SIZE 2 /* Keep in sync with XGBE_SKB_ALLOC_SIZE */
+
+#define XGBE_MAX_DMA_CHANNELS 16
+#define XGBE_MAX_QUEUES 16
+#define XGBE_DMA_STOP_TIMEOUT 5
+
+/* DMA cache settings - Outer shareable, write-back, write-allocate */
+#define XGBE_DMA_OS_AXDOMAIN 0x2
+#define XGBE_DMA_OS_ARCACHE 0xb
+#define XGBE_DMA_OS_AWCACHE 0xf
+
+/* DMA cache settings - System, no caches used */
+#define XGBE_DMA_SYS_AXDOMAIN 0x3
+#define XGBE_DMA_SYS_ARCACHE 0x0
+#define XGBE_DMA_SYS_AWCACHE 0x0
+
+#define XGBE_DMA_INTERRUPT_MASK 0x31c7
+
+#define XGMAC_MIN_PACKET 60
+#define XGMAC_STD_PACKET_MTU 1500
+#define XGMAC_MAX_STD_PACKET 1518
+#define XGMAC_JUMBO_PACKET_MTU 9000
+#define XGMAC_MAX_JUMBO_PACKET 9018
+
+/* MDIO bus phy name */
+#define XGBE_PHY_NAME "amd_xgbe_phy"
+#define XGBE_PRTAD 0
+
+/* Common property names */
+#define XGBE_MAC_ADDR_PROPERTY "mac-address"
+#define XGBE_PHY_MODE_PROPERTY "phy-mode"
+#define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
+
+/* Device-tree clock names */
+#define XGBE_DMA_CLOCK "dma_clk"
+#define XGBE_PTP_CLOCK "ptp_clk"
+
+/* ACPI property names */
+#define XGBE_ACPI_DMA_FREQ "amd,dma-freq"
+#define XGBE_ACPI_PTP_FREQ "amd,ptp-freq"
+
+/* Timestamp support - values based on 50MHz PTP clock
+ * 50MHz => 20 nsec
+ */
+#define XGBE_TSTAMP_SSINC 20
+#define XGBE_TSTAMP_SNSINC 0
+
+/* Driver PMT macros */
+#define XGMAC_DRIVER_CONTEXT 1
+#define XGMAC_IOCTL_CONTEXT 2
+
+#define XGBE_FIFO_MAX 81920
+#define XGBE_FIFO_SIZE_B(x) (x)
+#define XGBE_FIFO_SIZE_KB(x) ((x) * 1024)
+
+#define XGBE_TC_MIN_QUANTUM 10
+
+/* Helper macro for descriptor handling
+ * Always use XGBE_GET_DESC_DATA to access the descriptor data
+ * since the index is free-running and needs to be ANDed
+ * with the descriptor count value of the ring to index to
+ * the proper descriptor data.
+ */
+#define XGBE_GET_DESC_DATA(_ring, _idx) \
+ ((_ring)->rdata + \
+ ((_idx) & ((_ring)->rdesc_count - 1)))
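+
+/* e.g. with rdesc_count = 512, a free-running index of 515 selects
+ * ring entry 515 & 511 = 3
+ */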
+
+/* Default coalescing parameters */
+#define XGMAC_INIT_DMA_TX_USECS 1000
+#define XGMAC_INIT_DMA_TX_FRAMES 25
+
+#define XGMAC_MAX_DMA_RIWT 0xff
+#define XGMAC_INIT_DMA_RX_USECS 30
+#define XGMAC_INIT_DMA_RX_FRAMES 25
+
+/* Flow control queue count */
+#define XGMAC_MAX_FLOW_CONTROL_QUEUES 8
+
+/* Maximum MAC address hash table size (256 bits = 8 32-bit words) */
+#define XGBE_MAC_HASH_TABLE_SIZE 8
+
+/* Receive Side Scaling */
+#define XGBE_RSS_HASH_KEY_SIZE 40
+#define XGBE_RSS_MAX_TABLE_SIZE 256
+#define XGBE_RSS_LOOKUP_TABLE_TYPE 0
+#define XGBE_RSS_HASH_KEY_TYPE 1
+
+struct xgbe_prv_data;
+
+struct xgbe_packet_data {
+ struct sk_buff *skb;
+
+ unsigned int attributes;
+
+ unsigned int errors;
+
+ unsigned int rdesc_count;
+ unsigned int length;
+
+ unsigned int header_len;
+ unsigned int tcp_header_len;
+ unsigned int tcp_payload_len;
+ unsigned short mss;
+
+ unsigned short vlan_ctag;
+
+ u64 rx_tstamp;
+
+ u32 rss_hash;
+ enum pkt_hash_types rss_hash_type;
+
+ unsigned int tx_packets;
+ unsigned int tx_bytes;
+};
+
+/* Common Rx and Tx descriptor mapping */
+struct xgbe_ring_desc {
+ __le32 desc0;
+ __le32 desc1;
+ __le32 desc2;
+ __le32 desc3;
+};
+
+/* Page allocation related values */
+struct xgbe_page_alloc {
+ struct page *pages;
+ unsigned int pages_len;
+ unsigned int pages_offset;
+
+ dma_addr_t pages_dma;
+};
+
+/* Ring entry buffer data */
+struct xgbe_buffer_data {
+ struct xgbe_page_alloc pa;
+ struct xgbe_page_alloc pa_unmap;
+
+ dma_addr_t dma;
+ unsigned int dma_len;
+};
+
+/* Tx-related ring data */
+struct xgbe_tx_ring_data {
+ unsigned int packets; /* BQL packet count */
+ unsigned int bytes; /* BQL byte count */
+};
+
+/* Rx-related ring data */
+struct xgbe_rx_ring_data {
+ struct xgbe_buffer_data hdr; /* Header locations */
+ struct xgbe_buffer_data buf; /* Payload locations */
+
+ unsigned short hdr_len; /* Length of received header */
+ unsigned short len; /* Length of received packet */
+};
+
+/* Structure used to hold information related to the descriptor
+ * and the packet associated with the descriptor (always use
+ * the XGBE_GET_DESC_DATA macro to access this data from the ring)
+ */
+struct xgbe_ring_data {
+ struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
+ dma_addr_t rdesc_dma; /* DMA address of descriptor */
+
+ struct sk_buff *skb; /* Virtual address of SKB */
+ dma_addr_t skb_dma; /* DMA address of SKB data */
+ unsigned int skb_dma_len; /* Length of SKB DMA area */
+
+ struct xgbe_tx_ring_data tx; /* Tx-related data */
+ struct xgbe_rx_ring_data rx; /* Rx-related data */
+
+ unsigned int mapped_as_page;
+
+ /* Incomplete receive save location. If the budget is exhausted
+ * or the last descriptor (last normal descriptor or a following
+ * context descriptor) has not been DMA'd yet the current state
+ * of the receive processing needs to be saved.
+ */
+ unsigned int state_saved;
+ struct {
+ unsigned int incomplete;
+ unsigned int context_next;
+ struct sk_buff *skb;
+ unsigned int len;
+ unsigned int error;
+ } state;
+};
+
+struct xgbe_ring {
+ /* Ring lock - used just for TX rings at the moment */
+ spinlock_t lock;
+
+ /* Per packet related information */
+ struct xgbe_packet_data packet_data;
+
+ /* Virtual/DMA addresses and count of allocated descriptor memory */
+ struct xgbe_ring_desc *rdesc;
+ dma_addr_t rdesc_dma;
+ unsigned int rdesc_count;
+
+ /* Array of descriptor data corresponding to the descriptor memory
+ * (always use the XGBE_GET_DESC_DATA macro to access this data)
+ */
+ struct xgbe_ring_data *rdata;
+
+ /* Page allocation for RX buffers */
+ struct xgbe_page_alloc rx_hdr_pa;
+ struct xgbe_page_alloc rx_buf_pa;
+
+ /* Ring index values
+ * cur - Tx: index of descriptor to be used for current transfer
+ * Rx: index of descriptor to check for packet availability
+ * dirty - Tx: index of descriptor to check for transfer complete
+ * Rx: index of descriptor to check for buffer reallocation
+ */
+ unsigned int cur;
+ unsigned int dirty;
+
+ /* Coalesce frame count used for interrupt bit setting */
+ unsigned int coalesce_count;
+
+ union {
+ struct {
+ unsigned int queue_stopped;
+ unsigned int xmit_more;
+ unsigned short cur_mss;
+ unsigned short cur_vlan_ctag;
+ } tx;
+ };
+} ____cacheline_aligned;
+
+/* Structure used to describe the descriptor rings associated with
+ * a DMA channel.
+ */
+struct xgbe_channel {
+ char name[16];
+
+ /* Address of private data area for device */
+ struct xgbe_prv_data *pdata;
+
+ /* Queue index and base address of queue's DMA registers */
+ unsigned int queue_index;
+ void __iomem *dma_regs;
+
+ /* Per channel interrupt irq number */
+ int dma_irq;
+ char dma_irq_name[IFNAMSIZ + 32];
+
+ /* Netdev related settings */
+ struct napi_struct napi;
+
+ unsigned int saved_ier;
+
+ unsigned int tx_timer_active;
+ struct timer_list tx_timer;
+
+ struct xgbe_ring *tx_ring;
+ struct xgbe_ring *rx_ring;
+} ____cacheline_aligned;
+
+enum xgbe_int {
+ XGMAC_INT_DMA_CH_SR_TI,
+ XGMAC_INT_DMA_CH_SR_TPS,
+ XGMAC_INT_DMA_CH_SR_TBU,
+ XGMAC_INT_DMA_CH_SR_RI,
+ XGMAC_INT_DMA_CH_SR_RBU,
+ XGMAC_INT_DMA_CH_SR_RPS,
+ XGMAC_INT_DMA_CH_SR_TI_RI,
+ XGMAC_INT_DMA_CH_SR_FBE,
+ XGMAC_INT_DMA_ALL,
+};
+
+enum xgbe_int_state {
+ XGMAC_INT_STATE_SAVE,
+ XGMAC_INT_STATE_RESTORE,
+};
+
+enum xgbe_mtl_fifo_size {
+ XGMAC_MTL_FIFO_SIZE_256 = 0x00,
+ XGMAC_MTL_FIFO_SIZE_512 = 0x01,
+ XGMAC_MTL_FIFO_SIZE_1K = 0x03,
+ XGMAC_MTL_FIFO_SIZE_2K = 0x07,
+ XGMAC_MTL_FIFO_SIZE_4K = 0x0f,
+ XGMAC_MTL_FIFO_SIZE_8K = 0x1f,
+ XGMAC_MTL_FIFO_SIZE_16K = 0x3f,
+ XGMAC_MTL_FIFO_SIZE_32K = 0x7f,
+ XGMAC_MTL_FIFO_SIZE_64K = 0xff,
+ XGMAC_MTL_FIFO_SIZE_128K = 0x1ff,
+ XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
+};
+
+struct xgbe_mmc_stats {
+ /* Tx Stats */
+ u64 txoctetcount_gb;
+ u64 txframecount_gb;
+ u64 txbroadcastframes_g;
+ u64 txmulticastframes_g;
+ u64 tx64octets_gb;
+ u64 tx65to127octets_gb;
+ u64 tx128to255octets_gb;
+ u64 tx256to511octets_gb;
+ u64 tx512to1023octets_gb;
+ u64 tx1024tomaxoctets_gb;
+ u64 txunicastframes_gb;
+ u64 txmulticastframes_gb;
+ u64 txbroadcastframes_gb;
+ u64 txunderflowerror;
+ u64 txoctetcount_g;
+ u64 txframecount_g;
+ u64 txpauseframes;
+ u64 txvlanframes_g;
+
+ /* Rx Stats */
+ u64 rxframecount_gb;
+ u64 rxoctetcount_gb;
+ u64 rxoctetcount_g;
+ u64 rxbroadcastframes_g;
+ u64 rxmulticastframes_g;
+ u64 rxcrcerror;
+ u64 rxrunterror;
+ u64 rxjabbererror;
+ u64 rxundersize_g;
+ u64 rxoversize_g;
+ u64 rx64octets_gb;
+ u64 rx65to127octets_gb;
+ u64 rx128to255octets_gb;
+ u64 rx256to511octets_gb;
+ u64 rx512to1023octets_gb;
+ u64 rx1024tomaxoctets_gb;
+ u64 rxunicastframes_g;
+ u64 rxlengtherror;
+ u64 rxoutofrangetype;
+ u64 rxpauseframes;
+ u64 rxfifooverflow;
+ u64 rxvlanframes_gb;
+ u64 rxwatchdogerror;
+};
+
+struct xgbe_hw_if {
+ int (*tx_complete)(struct xgbe_ring_desc *);
+
+ int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
+ int (*config_rx_mode)(struct xgbe_prv_data *);
+
+ int (*enable_rx_csum)(struct xgbe_prv_data *);
+ int (*disable_rx_csum)(struct xgbe_prv_data *);
+
+ int (*enable_rx_vlan_stripping)(struct xgbe_prv_data *);
+ int (*disable_rx_vlan_stripping)(struct xgbe_prv_data *);
+ int (*enable_rx_vlan_filtering)(struct xgbe_prv_data *);
+ int (*disable_rx_vlan_filtering)(struct xgbe_prv_data *);
+ int (*update_vlan_hash_table)(struct xgbe_prv_data *);
+
+ int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
+ void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
+ int (*set_gmii_speed)(struct xgbe_prv_data *);
+ int (*set_gmii_2500_speed)(struct xgbe_prv_data *);
+ int (*set_xgmii_speed)(struct xgbe_prv_data *);
+
+ void (*enable_tx)(struct xgbe_prv_data *);
+ void (*disable_tx)(struct xgbe_prv_data *);
+ void (*enable_rx)(struct xgbe_prv_data *);
+ void (*disable_rx)(struct xgbe_prv_data *);
+
+ void (*powerup_tx)(struct xgbe_prv_data *);
+ void (*powerdown_tx)(struct xgbe_prv_data *);
+ void (*powerup_rx)(struct xgbe_prv_data *);
+ void (*powerdown_rx)(struct xgbe_prv_data *);
+
+ int (*init)(struct xgbe_prv_data *);
+ int (*exit)(struct xgbe_prv_data *);
+
+ int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
+ int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
+ void (*dev_xmit)(struct xgbe_channel *);
+ int (*dev_read)(struct xgbe_channel *);
+ void (*tx_desc_init)(struct xgbe_channel *);
+ void (*rx_desc_init)(struct xgbe_channel *);
+ void (*tx_desc_reset)(struct xgbe_ring_data *);
+ void (*rx_desc_reset)(struct xgbe_prv_data *, struct xgbe_ring_data *,
+ unsigned int);
+ int (*is_last_desc)(struct xgbe_ring_desc *);
+ int (*is_context_desc)(struct xgbe_ring_desc *);
+ void (*tx_start_xmit)(struct xgbe_channel *, struct xgbe_ring *);
+
+ /* For FLOW ctrl */
+ int (*config_tx_flow_control)(struct xgbe_prv_data *);
+ int (*config_rx_flow_control)(struct xgbe_prv_data *);
+
+ /* For RX coalescing */
+ int (*config_rx_coalesce)(struct xgbe_prv_data *);
+ int (*config_tx_coalesce)(struct xgbe_prv_data *);
+ unsigned int (*usec_to_riwt)(struct xgbe_prv_data *, unsigned int);
+ unsigned int (*riwt_to_usec)(struct xgbe_prv_data *, unsigned int);
+
+ /* For RX and TX threshold config */
+ int (*config_rx_threshold)(struct xgbe_prv_data *, unsigned int);
+ int (*config_tx_threshold)(struct xgbe_prv_data *, unsigned int);
+
+ /* For RX and TX Store and Forward Mode config */
+ int (*config_rsf_mode)(struct xgbe_prv_data *, unsigned int);
+ int (*config_tsf_mode)(struct xgbe_prv_data *, unsigned int);
+
+ /* For TX DMA Operate on Second Frame config */
+ int (*config_osp_mode)(struct xgbe_prv_data *);
+
+ /* For RX and TX PBL config */
+ int (*config_rx_pbl_val)(struct xgbe_prv_data *);
+ int (*get_rx_pbl_val)(struct xgbe_prv_data *);
+ int (*config_tx_pbl_val)(struct xgbe_prv_data *);
+ int (*get_tx_pbl_val)(struct xgbe_prv_data *);
+ int (*config_pblx8)(struct xgbe_prv_data *);
+
+ /* For MMC statistics */
+ void (*rx_mmc_int)(struct xgbe_prv_data *);
+ void (*tx_mmc_int)(struct xgbe_prv_data *);
+ void (*read_mmc_stats)(struct xgbe_prv_data *);
+
+ /* For Timestamp config */
+ int (*config_tstamp)(struct xgbe_prv_data *, unsigned int);
+ void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int);
+ void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec,
+ unsigned int nsec);
+ u64 (*get_tstamp_time)(struct xgbe_prv_data *);
+ u64 (*get_tx_tstamp)(struct xgbe_prv_data *);
+
+ /* For Data Center Bridging config */
+ void (*config_dcb_tc)(struct xgbe_prv_data *);
+ void (*config_dcb_pfc)(struct xgbe_prv_data *);
+
+ /* For Receive Side Scaling */
+ int (*enable_rss)(struct xgbe_prv_data *);
+ int (*disable_rss)(struct xgbe_prv_data *);
+ int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *);
+ int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
+};
+
+struct xgbe_desc_if {
+ int (*alloc_ring_resources)(struct xgbe_prv_data *);
+ void (*free_ring_resources)(struct xgbe_prv_data *);
+ int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
+ int (*map_rx_buffer)(struct xgbe_prv_data *, struct xgbe_ring *,
+ struct xgbe_ring_data *);
+ void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
+ void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
+ void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
+};
+
+/* This structure contains flags that indicate what hardware features
+ * or configurations are present in the device.
+ */
+struct xgbe_hw_features {
+ /* HW Version */
+ unsigned int version;
+
+ /* HW Feature Register0 */
+ unsigned int gmii; /* 1000 Mbps support */
+ unsigned int vlhash; /* VLAN Hash Filter */
+ unsigned int sma; /* SMA(MDIO) Interface */
+ unsigned int rwk; /* PMT remote wake-up packet */
+ unsigned int mgk; /* PMT magic packet */
+ unsigned int mmc; /* RMON module */
+ unsigned int aoe; /* ARP Offload */
+ unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */
+ unsigned int eee; /* Energy Efficient Ethernet */
+ unsigned int tx_coe; /* Tx Checksum Offload */
+ unsigned int rx_coe; /* Rx Checksum Offload */
+ unsigned int addn_mac; /* Additional MAC Addresses */
+ unsigned int ts_src; /* Timestamp Source */
+ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
+
+ /* HW Feature Register1 */
+ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
+ unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
+ unsigned int adv_ts_hi; /* Advanced Timestamp High Word */
+ unsigned int dma_width; /* DMA width */
+ unsigned int dcb; /* DCB Feature */
+ unsigned int sph; /* Split Header Feature */
+ unsigned int tso; /* TCP Segmentation Offload */
+ unsigned int dma_debug; /* DMA Debug Registers */
+ unsigned int rss; /* Receive Side Scaling */
+ unsigned int tc_cnt; /* Number of Traffic Classes */
+ unsigned int hash_table_size; /* Hash Table Size */
+ unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
+
+ /* HW Feature Register2 */
+ unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
+ unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
+ unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
+ unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
+ unsigned int pps_out_num; /* Number of PPS outputs */
+ unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
+};
+
+struct xgbe_prv_data {
+ struct net_device *netdev;
+ struct platform_device *pdev;
+ struct acpi_device *adev;
+ struct device *dev;
+
+ /* ACPI or DT flag */
+ unsigned int use_acpi;
+
+ /* XGMAC/XPCS related mmio registers */
+ void __iomem *xgmac_regs; /* XGMAC CSRs */
+ void __iomem *xpcs_regs; /* XPCS MMD registers */
+
+ /* Overall device lock */
+ spinlock_t lock;
+
+ /* XPCS indirect addressing mutex */
+ struct mutex xpcs_mutex;
+
+ /* RSS addressing mutex */
+ struct mutex rss_mutex;
+
+ int dev_irq;
+ unsigned int per_channel_irq;
+
+ struct xgbe_hw_if hw_if;
+ struct xgbe_desc_if desc_if;
+
+ /* AXI DMA settings */
+ unsigned int coherent;
+ unsigned int axdomain;
+ unsigned int arcache;
+ unsigned int awcache;
+
+ /* Rings for Tx/Rx on a DMA channel */
+ struct xgbe_channel *channel;
+ unsigned int channel_count;
+ unsigned int tx_ring_count;
+ unsigned int tx_desc_count;
+ unsigned int rx_ring_count;
+ unsigned int rx_desc_count;
+
+ unsigned int tx_q_count;
+ unsigned int rx_q_count;
+
+ /* Tx/Rx common settings */
+ unsigned int pblx8;
+
+ /* Tx settings */
+ unsigned int tx_sf_mode;
+ unsigned int tx_threshold;
+ unsigned int tx_pbl;
+ unsigned int tx_osp_mode;
+
+ /* Rx settings */
+ unsigned int rx_sf_mode;
+ unsigned int rx_threshold;
+ unsigned int rx_pbl;
+
+ /* Tx coalescing settings */
+ unsigned int tx_usecs;
+ unsigned int tx_frames;
+
+ /* Rx coalescing settings */
+ unsigned int rx_riwt;
+ unsigned int rx_usecs;
+ unsigned int rx_frames;
+
+ /* Current Rx buffer size */
+ unsigned int rx_buf_size;
+
+ /* Flow control settings */
+ unsigned int pause_autoneg;
+ unsigned int tx_pause;
+ unsigned int rx_pause;
+
+ /* Receive Side Scaling settings */
+ u8 rss_key[XGBE_RSS_HASH_KEY_SIZE];
+ u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE];
+ u32 rss_options;
+
+ /* MDIO settings */
+ struct module *phy_module;
+ char *mii_bus_id;
+ struct mii_bus *mii;
+ int mdio_mmd;
+ struct phy_device *phydev;
+ int default_autoneg;
+ int default_speed;
+
+ /* Current PHY settings */
+ phy_interface_t phy_mode;
+ int phy_link;
+ int phy_speed;
+ unsigned int phy_tx_pause;
+ unsigned int phy_rx_pause;
+
+ /* Netdev related settings */
+ unsigned char mac_addr[ETH_ALEN];
+ netdev_features_t netdev_features;
+ struct napi_struct napi;
+ struct xgbe_mmc_stats mmc_stats;
+
+ /* Filtering support */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+ /* Device clocks */
+ struct clk *sysclk;
+ unsigned long sysclk_rate;
+ struct clk *ptpclk;
+ unsigned long ptpclk_rate;
+
+ /* Timestamp support */
+ spinlock_t tstamp_lock;
+ struct ptp_clock_info ptp_clock_info;
+ struct ptp_clock *ptp_clock;
+ struct hwtstamp_config tstamp_config;
+ struct cyclecounter tstamp_cc;
+ struct timecounter tstamp_tc;
+ unsigned int tstamp_addend;
+ struct work_struct tx_tstamp_work;
+ struct sk_buff *tx_tstamp_skb;
+ u64 tx_tstamp;
+
+ /* DCB support */
+ struct ieee_ets *ets;
+ struct ieee_pfc *pfc;
+ unsigned int q2tc_map[XGBE_MAX_QUEUES];
+ unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS];
+
+ /* Hardware features of the device */
+ struct xgbe_hw_features hw_feat;
+
+ /* Device restart work structure */
+ struct work_struct restart_work;
+
+ /* Keeps track of power mode */
+ unsigned int power_down;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *xgbe_debugfs;
+
+ unsigned int debugfs_xgmac_reg;
+
+ unsigned int debugfs_xpcs_mmd;
+ unsigned int debugfs_xpcs_reg;
+#endif
+};
+
+/* Function prototypes */
+
+void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
+void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
+struct net_device_ops *xgbe_get_netdev_ops(void);
+struct ethtool_ops *xgbe_get_ethtool_ops(void);
+#ifdef CONFIG_AMD_XGBE_DCB
+const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
+#endif
+
+int xgbe_mdio_register(struct xgbe_prv_data *);
+void xgbe_mdio_unregister(struct xgbe_prv_data *);
+void xgbe_dump_phy_registers(struct xgbe_prv_data *);
+void xgbe_ptp_register(struct xgbe_prv_data *);
+void xgbe_ptp_unregister(struct xgbe_prv_data *);
+void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int,
+ unsigned int);
+void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *,
+ unsigned int);
+void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool);
+void xgbe_get_all_hw_features(struct xgbe_prv_data *);
+int xgbe_powerup(struct net_device *, unsigned int);
+int xgbe_powerdown(struct net_device *, unsigned int);
+void xgbe_init_rx_coalesce(struct xgbe_prv_data *);
+void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
+
+#ifdef CONFIG_DEBUG_FS
+void xgbe_debugfs_init(struct xgbe_prv_data *);
+void xgbe_debugfs_exit(struct xgbe_prv_data *);
+#else
+static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {}
+static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
+#endif /* CONFIG_DEBUG_FS */
+
+/* NOTE: Uncomment for TX and RX DESCRIPTOR DUMP in KERNEL LOG */
+#if 0
+#define XGMAC_ENABLE_TX_DESC_DUMP
+#define XGMAC_ENABLE_RX_DESC_DUMP
+#endif
+
+/* NOTE: Uncomment for TX and RX PACKET DUMP in KERNEL LOG */
+#if 0
+#define XGMAC_ENABLE_TX_PKT_DUMP
+#define XGMAC_ENABLE_RX_PKT_DUMP
+#endif
+
+/* NOTE: Uncomment for function trace log messages in KERNEL LOG */
+#if 0
+#define YDEBUG
+#define YDEBUG_MDIO
+#endif
+
+/* For debug prints */
+#ifdef YDEBUG
+#define DBGPR(x...) pr_alert(x)
+#define DBGPHY_REGS(x...) xgbe_dump_phy_registers(x)
+#else
+#define DBGPR(x...) do { } while (0)
+#define DBGPHY_REGS(x...) do { } while (0)
+#endif
+
+#ifdef YDEBUG_MDIO
+#define DBGPR_MDIO(x...) pr_alert(x)
+#else
+#define DBGPR_MDIO(x...) do { } while (0)
+#endif
+
+#endif