Diffstat (limited to 'drivers/net/dsa')
-rw-r--r--   drivers/net/dsa/Kconfig                    12
-rw-r--r--   drivers/net/dsa/Makefile                    4
-rw-r--r--   drivers/net/dsa/b53/Kconfig                33
-rw-r--r--   drivers/net/dsa/b53/Makefile                6
-rw-r--r--   drivers/net/dsa/b53/b53_common.c         1799
-rw-r--r--   drivers/net/dsa/b53/b53_mdio.c            392
-rw-r--r--   drivers/net/dsa/b53/b53_mmap.c            273
-rw-r--r--   drivers/net/dsa/b53/b53_priv.h            388
-rw-r--r--   drivers/net/dsa/b53/b53_regs.h            434
-rw-r--r--   drivers/net/dsa/b53/b53_spi.c             331
-rw-r--r--   drivers/net/dsa/b53/b53_srab.c            442
-rw-r--r--   drivers/net/dsa/bcm_sf2.c                 702
-rw-r--r--   drivers/net/dsa/bcm_sf2.h                  16
-rw-r--r--   drivers/net/dsa/bcm_sf2_regs.h             70
-rw-r--r--   drivers/net/dsa/mv88e6xxx/Kconfig           7
-rw-r--r--   drivers/net/dsa/mv88e6xxx/Makefile          1
-rw-r--r--   drivers/net/dsa/mv88e6xxx/chip.c         4093
-rw-r--r--   drivers/net/dsa/mv88e6xxx/mv88e6xxx.h     678
18 files changed, 9483 insertions, 198 deletions
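
The new b53 core (b53_common.c) is structured as a library: each bus front-end (MDIO, SPI, MMIO, SRAB) supplies a struct b53_io_ops with page/register accessors, then allocates and registers the switch through b53_switch_alloc() and b53_switch_register(), as b53_mdio.c does in the diff below. The following is a minimal sketch of such a glue driver using only the API visible in this patch; the "demo_" names, the platform_device wrapping and the stubbed read8 accessor are illustrative assumptions, not part of the patch.

#include <linux/module.h>
#include <linux/platform_device.h>

#include "b53_priv.h"

/* Illustrative accessor: a real back-end implements the full b53_io_ops
 * set (read8..write64 and, optionally, phy_read16/phy_write16).
 */
static int demo_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
{
	/* bus-specific access to (page, reg) would go here */
	*val = 0;
	return 0;
}

static struct b53_io_ops demo_ops = {
	.read8	= demo_read8,
	/* ... remaining accessors ... */
};

static int demo_probe(struct platform_device *pdev)
{
	struct b53_device *dev;

	/* allocate the common switch state; the last argument becomes dev->priv */
	dev = b53_switch_alloc(&pdev->dev, &demo_ops, pdev);
	if (!dev)
		return -ENOMEM;

	platform_set_drvdata(pdev, dev);

	/* detects the chip (unless pdata provides chip_id) and registers with DSA */
	return b53_switch_register(dev);
}
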
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 200663c43..8f4544394 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -9,14 +9,6 @@ config NET_DSA_MV88E6060 This enables support for the Marvell 88E6060 ethernet switch chip. -config NET_DSA_MV88E6XXX - tristate "Marvell 88E6xxx Ethernet switch chip support" - depends on NET_DSA - select NET_DSA_TAG_EDSA - ---help--- - This enables support for most of the Marvell 88E6xxx models of - Ethernet switch chips, except 88E6060. - config NET_DSA_BCM_SF2 tristate "Broadcom Starfighter 2 Ethernet switch support" depends on HAS_IOMEM && NET_DSA @@ -28,4 +20,8 @@ config NET_DSA_BCM_SF2 This enables support for the Broadcom Starfighter 2 Ethernet switch chips. +source "drivers/net/dsa/b53/Kconfig" + +source "drivers/net/dsa/mv88e6xxx/Kconfig" + endmenu diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 76b751dd9..ca1e71b85 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -1,3 +1,5 @@ obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o -obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o + +obj-y += b53/ +obj-y += mv88e6xxx/ diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig new file mode 100644 index 000000000..27f32a50d --- /dev/null +++ b/drivers/net/dsa/b53/Kconfig @@ -0,0 +1,33 @@ +menuconfig B53 + tristate "Broadcom BCM53xx managed switch support" + depends on NET_DSA + help + This driver adds support for Broadcom managed switch chips. It supports + BCM5325E, BCM5365, BCM539x, BCM53115 and BCM53125 as well as BCM63XX + integrated switches. + +config B53_SPI_DRIVER + tristate "B53 SPI connected switch driver" + depends on B53 && SPI + help + Select to enable support for registering switches configured through SPI. + +config B53_MDIO_DRIVER + tristate "B53 MDIO connected switch driver" + depends on B53 + help + Select to enable support for registering switches configured through MDIO. + +config B53_MMAP_DRIVER + tristate "B53 MMAP connected switch driver" + depends on B53 && HAS_IOMEM + help + Select to enable support for memory-mapped switches like the BCM63XX + integrated switches. + +config B53_SRAB_DRIVER + tristate "B53 SRAB connected switch driver" + depends on B53 && HAS_IOMEM + help + Select to enable support for memory-mapped Switch Register Access + Bridge Registers (SRAB) like it is found on the BCM53010 diff --git a/drivers/net/dsa/b53/Makefile b/drivers/net/dsa/b53/Makefile new file mode 100644 index 000000000..7e6f9a8bf --- /dev/null +++ b/drivers/net/dsa/b53/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_B53) += b53_common.o + +obj-$(CONFIG_B53_SPI_DRIVER) += b53_spi.o +obj-$(CONFIG_B53_MDIO_DRIVER) += b53_mdio.o +obj-$(CONFIG_B53_MMAP_DRIVER) += b53_mmap.o +obj-$(CONFIG_B53_SRAB_DRIVER) += b53_srab.o diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c new file mode 100644 index 000000000..bda37d336 --- /dev/null +++ b/drivers/net/dsa/b53/b53_common.c @@ -0,0 +1,1799 @@ +/* + * B53 switch driver main logic + * + * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org> + * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/delay.h> +#include <linux/export.h> +#include <linux/gpio.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_data/b53.h> +#include <linux/phy.h> +#include <linux/etherdevice.h> +#include <linux/if_bridge.h> +#include <net/dsa.h> +#include <net/switchdev.h> + +#include "b53_regs.h" +#include "b53_priv.h" + +struct b53_mib_desc { + u8 size; + u8 offset; + const char *name; +}; + +/* BCM5365 MIB counters */ +static const struct b53_mib_desc b53_mibs_65[] = { + { 8, 0x00, "TxOctets" }, + { 4, 0x08, "TxDropPkts" }, + { 4, 0x10, "TxBroadcastPkts" }, + { 4, 0x14, "TxMulticastPkts" }, + { 4, 0x18, "TxUnicastPkts" }, + { 4, 0x1c, "TxCollisions" }, + { 4, 0x20, "TxSingleCollision" }, + { 4, 0x24, "TxMultipleCollision" }, + { 4, 0x28, "TxDeferredTransmit" }, + { 4, 0x2c, "TxLateCollision" }, + { 4, 0x30, "TxExcessiveCollision" }, + { 4, 0x38, "TxPausePkts" }, + { 8, 0x44, "RxOctets" }, + { 4, 0x4c, "RxUndersizePkts" }, + { 4, 0x50, "RxPausePkts" }, + { 4, 0x54, "Pkts64Octets" }, + { 4, 0x58, "Pkts65to127Octets" }, + { 4, 0x5c, "Pkts128to255Octets" }, + { 4, 0x60, "Pkts256to511Octets" }, + { 4, 0x64, "Pkts512to1023Octets" }, + { 4, 0x68, "Pkts1024to1522Octets" }, + { 4, 0x6c, "RxOversizePkts" }, + { 4, 0x70, "RxJabbers" }, + { 4, 0x74, "RxAlignmentErrors" }, + { 4, 0x78, "RxFCSErrors" }, + { 8, 0x7c, "RxGoodOctets" }, + { 4, 0x84, "RxDropPkts" }, + { 4, 0x88, "RxUnicastPkts" }, + { 4, 0x8c, "RxMulticastPkts" }, + { 4, 0x90, "RxBroadcastPkts" }, + { 4, 0x94, "RxSAChanges" }, + { 4, 0x98, "RxFragments" }, +}; + +#define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65) + +/* BCM63xx MIB counters */ +static const struct b53_mib_desc b53_mibs_63xx[] = { + { 8, 0x00, "TxOctets" }, + { 4, 0x08, "TxDropPkts" }, + { 4, 0x0c, "TxQoSPkts" }, + { 4, 0x10, "TxBroadcastPkts" }, + { 4, 0x14, "TxMulticastPkts" }, + { 4, 0x18, "TxUnicastPkts" }, + { 4, 0x1c, "TxCollisions" }, + { 4, 0x20, "TxSingleCollision" }, + { 4, 0x24, "TxMultipleCollision" }, + { 4, 0x28, "TxDeferredTransmit" }, + { 4, 0x2c, "TxLateCollision" }, + { 4, 0x30, "TxExcessiveCollision" }, + { 4, 0x38, "TxPausePkts" }, + { 8, 0x3c, "TxQoSOctets" }, + { 8, 0x44, "RxOctets" }, + { 4, 0x4c, "RxUndersizePkts" }, + { 4, 0x50, "RxPausePkts" }, + { 4, 0x54, "Pkts64Octets" }, + { 4, 0x58, "Pkts65to127Octets" }, + { 4, 0x5c, "Pkts128to255Octets" }, + { 4, 0x60, "Pkts256to511Octets" }, + { 4, 0x64, "Pkts512to1023Octets" }, + { 4, 0x68, "Pkts1024to1522Octets" }, + { 4, 0x6c, "RxOversizePkts" }, + { 4, 0x70, "RxJabbers" }, + { 4, 0x74, "RxAlignmentErrors" }, + { 4, 0x78, "RxFCSErrors" }, + { 8, 0x7c, "RxGoodOctets" }, + { 4, 0x84, "RxDropPkts" }, + { 4, 0x88, "RxUnicastPkts" }, + { 4, 0x8c, "RxMulticastPkts" }, + { 4, 0x90, "RxBroadcastPkts" }, + { 4, 0x94, "RxSAChanges" }, + { 4, 0x98, "RxFragments" }, + { 4, 0xa0, "RxSymbolErrors" }, + { 4, 0xa4, "RxQoSPkts" }, + { 8, 0xa8, "RxQoSOctets" }, + { 4, 0xb0, "Pkts1523to2047Octets" }, + { 4, 0xb4, 
"Pkts2048to4095Octets" }, + { 4, 0xb8, "Pkts4096to8191Octets" }, + { 4, 0xbc, "Pkts8192to9728Octets" }, + { 4, 0xc0, "RxDiscarded" }, +}; + +#define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx) + +/* MIB counters */ +static const struct b53_mib_desc b53_mibs[] = { + { 8, 0x00, "TxOctets" }, + { 4, 0x08, "TxDropPkts" }, + { 4, 0x10, "TxBroadcastPkts" }, + { 4, 0x14, "TxMulticastPkts" }, + { 4, 0x18, "TxUnicastPkts" }, + { 4, 0x1c, "TxCollisions" }, + { 4, 0x20, "TxSingleCollision" }, + { 4, 0x24, "TxMultipleCollision" }, + { 4, 0x28, "TxDeferredTransmit" }, + { 4, 0x2c, "TxLateCollision" }, + { 4, 0x30, "TxExcessiveCollision" }, + { 4, 0x38, "TxPausePkts" }, + { 8, 0x50, "RxOctets" }, + { 4, 0x58, "RxUndersizePkts" }, + { 4, 0x5c, "RxPausePkts" }, + { 4, 0x60, "Pkts64Octets" }, + { 4, 0x64, "Pkts65to127Octets" }, + { 4, 0x68, "Pkts128to255Octets" }, + { 4, 0x6c, "Pkts256to511Octets" }, + { 4, 0x70, "Pkts512to1023Octets" }, + { 4, 0x74, "Pkts1024to1522Octets" }, + { 4, 0x78, "RxOversizePkts" }, + { 4, 0x7c, "RxJabbers" }, + { 4, 0x80, "RxAlignmentErrors" }, + { 4, 0x84, "RxFCSErrors" }, + { 8, 0x88, "RxGoodOctets" }, + { 4, 0x90, "RxDropPkts" }, + { 4, 0x94, "RxUnicastPkts" }, + { 4, 0x98, "RxMulticastPkts" }, + { 4, 0x9c, "RxBroadcastPkts" }, + { 4, 0xa0, "RxSAChanges" }, + { 4, 0xa4, "RxFragments" }, + { 4, 0xa8, "RxJumboPkts" }, + { 4, 0xac, "RxSymbolErrors" }, + { 4, 0xc0, "RxDiscarded" }, +}; + +#define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs) + +static int b53_do_vlan_op(struct b53_device *dev, u8 op) +{ + unsigned int i; + + b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op); + + for (i = 0; i < 10; i++) { + u8 vta; + + b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta); + if (!(vta & VTA_START_CMD)) + return 0; + + usleep_range(100, 200); + } + + return -EIO; +} + +static void b53_set_vlan_entry(struct b53_device *dev, u16 vid, + struct b53_vlan *vlan) +{ + if (is5325(dev)) { + u32 entry = 0; + + if (vlan->members) { + entry = ((vlan->untag & VA_UNTAG_MASK_25) << + VA_UNTAG_S_25) | vlan->members; + if (dev->core_rev >= 3) + entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S; + else + entry |= VA_VALID_25; + } + + b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry); + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | + VTA_RW_STATE_WR | VTA_RW_OP_EN); + } else if (is5365(dev)) { + u16 entry = 0; + + if (vlan->members) + entry = ((vlan->untag & VA_UNTAG_MASK_65) << + VA_UNTAG_S_65) | vlan->members | VA_VALID_65; + + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry); + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | + VTA_RW_STATE_WR | VTA_RW_OP_EN); + } else { + b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); + b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], + (vlan->untag << VTE_UNTAG_S) | vlan->members); + + b53_do_vlan_op(dev, VTA_CMD_WRITE); + } + + dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n", + vid, vlan->members, vlan->untag); +} + +static void b53_get_vlan_entry(struct b53_device *dev, u16 vid, + struct b53_vlan *vlan) +{ + if (is5325(dev)) { + u32 entry = 0; + + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | + VTA_RW_STATE_RD | VTA_RW_OP_EN); + b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry); + + if (dev->core_rev >= 3) + vlan->valid = !!(entry & VA_VALID_25_R4); + else + vlan->valid = !!(entry & VA_VALID_25); + vlan->members = entry & VA_MEMBER_MASK; + vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25; + + } else if (is5365(dev)) { + u16 entry = 0; + + 
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | + VTA_RW_STATE_WR | VTA_RW_OP_EN); + b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry); + + vlan->valid = !!(entry & VA_VALID_65); + vlan->members = entry & VA_MEMBER_MASK; + vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65; + } else { + u32 entry = 0; + + b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); + b53_do_vlan_op(dev, VTA_CMD_READ); + b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry); + vlan->members = entry & VTE_MEMBERS; + vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS; + vlan->valid = true; + } +} + +static void b53_set_forwarding(struct b53_device *dev, int enable) +{ + u8 mgmt; + + b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); + + if (enable) + mgmt |= SM_SW_FWD_EN; + else + mgmt &= ~SM_SW_FWD_EN; + + b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); +} + +static void b53_enable_vlan(struct b53_device *dev, bool enable) +{ + u8 mgmt, vc0, vc1, vc4 = 0, vc5; + + b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0); + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1); + + if (is5325(dev) || is5365(dev)) { + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5); + } else if (is63xx(dev)) { + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4); + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5); + } else { + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4); + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5); + } + + mgmt &= ~SM_SW_FWD_MODE; + + if (enable) { + vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; + vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; + vc4 &= ~VC4_ING_VID_CHECK_MASK; + vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; + vc5 |= VC5_DROP_VTABLE_MISS; + + if (is5325(dev)) + vc0 &= ~VC0_RESERVED_1; + + if (is5325(dev) || is5365(dev)) + vc1 |= VC1_RX_MCST_TAG_EN; + + } else { + vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID); + vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN); + vc4 &= ~VC4_ING_VID_CHECK_MASK; + vc5 &= ~VC5_DROP_VTABLE_MISS; + + if (is5325(dev) || is5365(dev)) + vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; + else + vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S; + + if (is5325(dev) || is5365(dev)) + vc1 &= ~VC1_RX_MCST_TAG_EN; + } + + if (!is5325(dev) && !is5365(dev)) + vc5 &= ~VC5_VID_FFF_EN; + + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0); + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1); + + if (is5325(dev) || is5365(dev)) { + /* enable the high 8 bit vid check on 5325 */ + if (is5325(dev) && enable) + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, + VC3_HIGH_8BIT_EN); + else + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); + + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4); + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5); + } else if (is63xx(dev)) { + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0); + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4); + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5); + } else { + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4); + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5); + } + + b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); +} + +static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) +{ + u32 port_mask = 0; + u16 max_size = JMS_MIN_SIZE; + + if (is5325(dev) || 
is5365(dev)) + return -EINVAL; + + if (enable) { + port_mask = dev->enabled_ports; + max_size = JMS_MAX_SIZE; + if (allow_10_100) + port_mask |= JPM_10_100_JUMBO_EN; + } + + b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask); + return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size); +} + +static int b53_flush_arl(struct b53_device *dev, u8 mask) +{ + unsigned int i; + + b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, + FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask); + + for (i = 0; i < 10; i++) { + u8 fast_age_ctrl; + + b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, + &fast_age_ctrl); + + if (!(fast_age_ctrl & FAST_AGE_DONE)) + goto out; + + msleep(1); + } + + return -ETIMEDOUT; +out: + /* Only age dynamic entries (default behavior) */ + b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC); + return 0; +} + +static int b53_fast_age_port(struct b53_device *dev, int port) +{ + b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port); + + return b53_flush_arl(dev, FAST_AGE_PORT); +} + +static int b53_fast_age_vlan(struct b53_device *dev, u16 vid) +{ + b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid); + + return b53_flush_arl(dev, FAST_AGE_VLAN); +} + +static void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) +{ + struct b53_device *dev = ds_to_priv(ds); + unsigned int i; + u16 pvlan; + + /* Enable the IMP port to be in the same VLAN as the other ports + * on a per-port basis such that we only have Port i and IMP in + * the same VLAN. + */ + b53_for_each_port(dev, i) { + b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan); + pvlan |= BIT(cpu_port); + b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan); + } +} + +static int b53_enable_port(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct b53_device *dev = ds_to_priv(ds); + unsigned int cpu_port = dev->cpu_port; + u16 pvlan; + + /* Clear the Rx and Tx disable bits and set to no spanning tree */ + b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0); + + /* Set this port, and only this one to be in the default VLAN, + * if member of a bridge, restore its membership prior to + * bringing down this port. 
+ */ + b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); + pvlan &= ~0x1ff; + pvlan |= BIT(port); + pvlan |= dev->ports[port].vlan_ctl_mask; + b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); + + b53_imp_vlan_setup(ds, cpu_port); + + return 0; +} + +static void b53_disable_port(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct b53_device *dev = ds_to_priv(ds); + u8 reg; + + /* Disable Tx/Rx for the port */ + b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); + reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE; + b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); +} + +static void b53_enable_cpu_port(struct b53_device *dev) +{ + unsigned int cpu_port = dev->cpu_port; + u8 port_ctrl; + + /* BCM5325 CPU port is at 8 */ + if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25) + cpu_port = B53_CPU_PORT; + + port_ctrl = PORT_CTRL_RX_BCST_EN | + PORT_CTRL_RX_MCST_EN | + PORT_CTRL_RX_UCST_EN; + b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(cpu_port), port_ctrl); +} + +static void b53_enable_mib(struct b53_device *dev) +{ + u8 gc; + + b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); + gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN); + b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); +} + +static int b53_configure_vlan(struct b53_device *dev) +{ + struct b53_vlan vl = { 0 }; + int i; + + /* clear all vlan entries */ + if (is5325(dev) || is5365(dev)) { + for (i = 1; i < dev->num_vlans; i++) + b53_set_vlan_entry(dev, i, &vl); + } else { + b53_do_vlan_op(dev, VTA_CMD_CLEAR); + } + + b53_enable_vlan(dev, false); + + b53_for_each_port(dev, i) + b53_write16(dev, B53_VLAN_PAGE, + B53_VLAN_PORT_DEF_TAG(i), 1); + + if (!is5325(dev) && !is5365(dev)) + b53_set_jumbo(dev, dev->enable_jumbo, false); + + return 0; +} + +static void b53_switch_reset_gpio(struct b53_device *dev) +{ + int gpio = dev->reset_gpio; + + if (gpio < 0) + return; + + /* Reset sequence: RESET low(50ms)->high(20ms) + */ + gpio_set_value(gpio, 0); + mdelay(50); + + gpio_set_value(gpio, 1); + mdelay(20); + + dev->current_page = 0xff; +} + +static int b53_switch_reset(struct b53_device *dev) +{ + u8 mgmt; + + b53_switch_reset_gpio(dev); + + if (is539x(dev)) { + b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83); + b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00); + } + + b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); + + if (!(mgmt & SM_SW_FWD_EN)) { + mgmt &= ~SM_SW_FWD_MODE; + mgmt |= SM_SW_FWD_EN; + + b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); + b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); + + if (!(mgmt & SM_SW_FWD_EN)) { + dev_err(dev->dev, "Failed to enable switch!\n"); + return -EINVAL; + } + } + + b53_enable_mib(dev); + + return b53_flush_arl(dev, FAST_AGE_STATIC); +} + +static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg) +{ + struct b53_device *priv = ds_to_priv(ds); + u16 value = 0; + int ret; + + if (priv->ops->phy_read16) + ret = priv->ops->phy_read16(priv, addr, reg, &value); + else + ret = b53_read16(priv, B53_PORT_MII_PAGE(addr), + reg * 2, &value); + + return ret ? 
ret : value; +} + +static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val) +{ + struct b53_device *priv = ds_to_priv(ds); + + if (priv->ops->phy_write16) + return priv->ops->phy_write16(priv, addr, reg, val); + + return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val); +} + +static int b53_reset_switch(struct b53_device *priv) +{ + /* reset vlans */ + priv->enable_jumbo = false; + + memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans); + memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports); + + return b53_switch_reset(priv); +} + +static int b53_apply_config(struct b53_device *priv) +{ + /* disable switching */ + b53_set_forwarding(priv, 0); + + b53_configure_vlan(priv); + + /* enable switching */ + b53_set_forwarding(priv, 1); + + return 0; +} + +static void b53_reset_mib(struct b53_device *priv) +{ + u8 gc; + + b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); + + b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB); + msleep(1); + b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB); + msleep(1); +} + +static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev) +{ + if (is5365(dev)) + return b53_mibs_65; + else if (is63xx(dev)) + return b53_mibs_63xx; + else + return b53_mibs; +} + +static unsigned int b53_get_mib_size(struct b53_device *dev) +{ + if (is5365(dev)) + return B53_MIBS_65_SIZE; + else if (is63xx(dev)) + return B53_MIBS_63XX_SIZE; + else + return B53_MIBS_SIZE; +} + +static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +{ + struct b53_device *dev = ds_to_priv(ds); + const struct b53_mib_desc *mibs = b53_get_mib(dev); + unsigned int mib_size = b53_get_mib_size(dev); + unsigned int i; + + for (i = 0; i < mib_size; i++) + memcpy(data + i * ETH_GSTRING_LEN, + mibs[i].name, ETH_GSTRING_LEN); +} + +static void b53_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + struct b53_device *dev = ds_to_priv(ds); + const struct b53_mib_desc *mibs = b53_get_mib(dev); + unsigned int mib_size = b53_get_mib_size(dev); + const struct b53_mib_desc *s; + unsigned int i; + u64 val = 0; + + if (is5365(dev) && port == 5) + port = 8; + + mutex_lock(&dev->stats_mutex); + + for (i = 0; i < mib_size; i++) { + s = &mibs[i]; + + if (s->size == 8) { + b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val); + } else { + u32 val32; + + b53_read32(dev, B53_MIB_PAGE(port), s->offset, + &val32); + val = val32; + } + data[i] = (u64)val; + } + + mutex_unlock(&dev->stats_mutex); +} + +static int b53_get_sset_count(struct dsa_switch *ds) +{ + struct b53_device *dev = ds_to_priv(ds); + + return b53_get_mib_size(dev); +} + +static int b53_set_addr(struct dsa_switch *ds, u8 *addr) +{ + return 0; +} + +static int b53_setup(struct dsa_switch *ds) +{ + struct b53_device *dev = ds_to_priv(ds); + unsigned int port; + int ret; + + ret = b53_reset_switch(dev); + if (ret) { + dev_err(ds->dev, "failed to reset switch\n"); + return ret; + } + + b53_reset_mib(dev); + + ret = b53_apply_config(dev); + if (ret) + dev_err(ds->dev, "failed to apply configuration\n"); + + for (port = 0; port < dev->num_ports; port++) { + if (BIT(port) & ds->enabled_port_mask) + b53_enable_port(ds, port, NULL); + else if (dsa_is_cpu_port(ds, port)) + b53_enable_cpu_port(dev); + else + b53_disable_port(ds, port, NULL); + } + + return ret; +} + +static void b53_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct b53_device *dev = ds_to_priv(ds); + u8 rgmii_ctrl = 0, reg = 0, 
off; + + if (!phy_is_pseudo_fixed_link(phydev)) + return; + + /* Override the port settings */ + if (port == dev->cpu_port) { + off = B53_PORT_OVERRIDE_CTRL; + reg = PORT_OVERRIDE_EN; + } else { + off = B53_GMII_PORT_OVERRIDE_CTRL(port); + reg = GMII_PO_EN; + } + + /* Set the link UP */ + if (phydev->link) + reg |= PORT_OVERRIDE_LINK; + + if (phydev->duplex == DUPLEX_FULL) + reg |= PORT_OVERRIDE_FULL_DUPLEX; + + switch (phydev->speed) { + case 2000: + reg |= PORT_OVERRIDE_SPEED_2000M; + /* fallthrough */ + case SPEED_1000: + reg |= PORT_OVERRIDE_SPEED_1000M; + break; + case SPEED_100: + reg |= PORT_OVERRIDE_SPEED_100M; + break; + case SPEED_10: + reg |= PORT_OVERRIDE_SPEED_10M; + break; + default: + dev_err(ds->dev, "unknown speed: %d\n", phydev->speed); + return; + } + + /* Enable flow control on BCM5301x's CPU port */ + if (is5301x(dev) && port == dev->cpu_port) + reg |= PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW; + + if (phydev->pause) { + if (phydev->asym_pause) + reg |= PORT_OVERRIDE_TX_FLOW; + reg |= PORT_OVERRIDE_RX_FLOW; + } + + b53_write8(dev, B53_CTRL_PAGE, off, reg); + + if (is531x5(dev) && phy_interface_is_rgmii(phydev)) { + if (port == 8) + off = B53_RGMII_CTRL_IMP; + else + off = B53_RGMII_CTRL_P(port); + + /* Configure the port RGMII clock delay by DLL disabled and + * tx_clk aligned timing (restoring to reset defaults) + */ + b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl); + rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC | + RGMII_CTRL_TIMING_SEL); + + /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make + * sure that we enable the port TX clock internal delay to + * account for this internal delay that is inserted, otherwise + * the switch won't be able to receive correctly. + * + * PHY_INTERFACE_MODE_RGMII means that we are not introducing + * any delay neither on transmission nor reception, so the + * BCM53125 must also be configured accordingly to account for + * the lack of delay and introduce + * + * The BCM53125 switch has its RX clock and TX clock control + * swapped, hence the reason why we modify the TX clock path in + * the "RGMII" case + */ + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + rgmii_ctrl |= RGMII_CTRL_DLL_TXC; + if (phydev->interface == PHY_INTERFACE_MODE_RGMII) + rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC; + rgmii_ctrl |= RGMII_CTRL_TIMING_SEL; + b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl); + + dev_info(ds->dev, "Configured port %d for %s\n", port, + phy_modes(phydev->interface)); + } + + /* configure MII port if necessary */ + if (is5325(dev)) { + b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, + ®); + + /* reverse mii needs to be enabled */ + if (!(reg & PORT_OVERRIDE_RV_MII_25)) { + b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, + reg | PORT_OVERRIDE_RV_MII_25); + b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, + ®); + + if (!(reg & PORT_OVERRIDE_RV_MII_25)) { + dev_err(ds->dev, + "Failed to enable reverse MII mode\n"); + return; + } + } + } else if (is5301x(dev)) { + if (port != dev->cpu_port) { + u8 po_reg = B53_GMII_PORT_OVERRIDE_CTRL(dev->cpu_port); + u8 gmii_po; + + b53_read8(dev, B53_CTRL_PAGE, po_reg, &gmii_po); + gmii_po |= GMII_PO_LINK | + GMII_PO_RX_FLOW | + GMII_PO_TX_FLOW | + GMII_PO_EN | + GMII_PO_SPEED_2000M; + b53_write8(dev, B53_CTRL_PAGE, po_reg, gmii_po); + } + } +} + +static int b53_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering) +{ + return 0; +} + +static int b53_vlan_prepare(struct dsa_switch *ds, int port, + const struct 
switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct b53_device *dev = ds_to_priv(ds); + + if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0) + return -EOPNOTSUPP; + + if (vlan->vid_end > dev->num_vlans) + return -ERANGE; + + b53_enable_vlan(dev, true); + + return 0; +} + +static void b53_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct b53_device *dev = ds_to_priv(ds); + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + unsigned int cpu_port = dev->cpu_port; + struct b53_vlan *vl; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + vl = &dev->vlans[vid]; + + b53_get_vlan_entry(dev, vid, vl); + + vl->members |= BIT(port) | BIT(cpu_port); + if (untagged) + vl->untag |= BIT(port) | BIT(cpu_port); + else + vl->untag &= ~(BIT(port) | BIT(cpu_port)); + + b53_set_vlan_entry(dev, vid, vl); + b53_fast_age_vlan(dev, vid); + } + + if (pvid) { + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), + vlan->vid_end); + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), + vlan->vid_end); + b53_fast_age_vlan(dev, vid); + } +} + +static int b53_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct b53_device *dev = ds_to_priv(ds); + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + unsigned int cpu_port = dev->cpu_port; + struct b53_vlan *vl; + u16 vid; + u16 pvid; + + b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + vl = &dev->vlans[vid]; + + b53_get_vlan_entry(dev, vid, vl); + + vl->members &= ~BIT(port); + if ((vl->members & BIT(cpu_port)) == BIT(cpu_port)) + vl->members = 0; + + if (pvid == vid) { + if (is5325(dev) || is5365(dev)) + pvid = 1; + else + pvid = 0; + } + + if (untagged) { + vl->untag &= ~(BIT(port)); + if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port)) + vl->untag = 0; + } + + b53_set_vlan_entry(dev, vid, vl); + b53_fast_age_vlan(dev, vid); + } + + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid); + b53_fast_age_vlan(dev, pvid); + + return 0; +} + +static int b53_vlan_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_vlan *vlan, + int (*cb)(struct switchdev_obj *obj)) +{ + struct b53_device *dev = ds_to_priv(ds); + u16 vid, vid_start = 0, pvid; + struct b53_vlan *vl; + int err = 0; + + if (is5325(dev) || is5365(dev)) + vid_start = 1; + + b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); + + /* Use our software cache for dumps, since we do not have any HW + * operation returning only the used/valid VLANs + */ + for (vid = vid_start; vid < dev->num_vlans; vid++) { + vl = &dev->vlans[vid]; + + if (!vl->valid) + continue; + + if (!(vl->members & BIT(port))) + continue; + + vlan->vid_begin = vlan->vid_end = vid; + vlan->flags = 0; + + if (vl->untag & BIT(port)) + vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; + if (pvid == vid) + vlan->flags |= BRIDGE_VLAN_INFO_PVID; + + err = cb(&vlan->obj); + if (err) + break; + } + + return err; +} + +/* Address Resolution Logic routines */ +static int b53_arl_op_wait(struct b53_device *dev) +{ + unsigned int timeout = 10; + u8 reg; + + do { + b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); + if (!(reg & ARLTBL_START_DONE)) + return 0; + + usleep_range(1000, 2000); + } while 
(timeout--); + + dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg); + + return -ETIMEDOUT; +} + +static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) +{ + u8 reg; + + if (op > ARLTBL_RW) + return -EINVAL; + + b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); + reg |= ARLTBL_START_DONE; + if (op) + reg |= ARLTBL_RW; + else + reg &= ~ARLTBL_RW; + b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg); + + return b53_arl_op_wait(dev); +} + +static int b53_arl_read(struct b53_device *dev, u64 mac, + u16 vid, struct b53_arl_entry *ent, u8 *idx, + bool is_valid) +{ + unsigned int i; + int ret; + + ret = b53_arl_op_wait(dev); + if (ret) + return ret; + + /* Read the bins */ + for (i = 0; i < dev->num_arl_entries; i++) { + u64 mac_vid; + u32 fwd_entry; + + b53_read64(dev, B53_ARLIO_PAGE, + B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid); + b53_read32(dev, B53_ARLIO_PAGE, + B53_ARLTBL_DATA_ENTRY(i), &fwd_entry); + b53_arl_to_entry(ent, mac_vid, fwd_entry); + + if (!(fwd_entry & ARLTBL_VALID)) + continue; + if ((mac_vid & ARLTBL_MAC_MASK) != mac) + continue; + *idx = i; + } + + return -ENOENT; +} + +static int b53_arl_op(struct b53_device *dev, int op, int port, + const unsigned char *addr, u16 vid, bool is_valid) +{ + struct b53_arl_entry ent; + u32 fwd_entry; + u64 mac, mac_vid = 0; + u8 idx = 0; + int ret; + + /* Convert the array into a 64-bit MAC */ + mac = b53_mac_to_u64(addr); + + /* Perform a read for the given MAC and VID */ + b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac); + b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); + + /* Issue a read operation for this MAC */ + ret = b53_arl_rw_op(dev, 1); + if (ret) + return ret; + + ret = b53_arl_read(dev, mac, vid, &ent, &idx, is_valid); + /* If this is a read, just finish now */ + if (op) + return ret; + + /* We could not find a matching MAC, so reset to a new entry */ + if (ret) { + fwd_entry = 0; + idx = 1; + } + + memset(&ent, 0, sizeof(ent)); + ent.port = port; + ent.is_valid = is_valid; + ent.vid = vid; + ent.is_static = true; + memcpy(ent.mac, addr, ETH_ALEN); + b53_arl_from_entry(&mac_vid, &fwd_entry, &ent); + + b53_write64(dev, B53_ARLIO_PAGE, + B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid); + b53_write32(dev, B53_ARLIO_PAGE, + B53_ARLTBL_DATA_ENTRY(idx), fwd_entry); + + return b53_arl_rw_op(dev, 0); +} + +static int b53_fdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + struct b53_device *priv = ds_to_priv(ds); + + /* 5325 and 5365 require some more massaging, but could + * be supported eventually + */ + if (is5325(priv) || is5365(priv)) + return -EOPNOTSUPP; + + return 0; +} + +static void b53_fdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + struct b53_device *priv = ds_to_priv(ds); + + if (b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, true)) + pr_err("%s: failed to add MAC address\n", __func__); +} + +static int b53_fdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb) +{ + struct b53_device *priv = ds_to_priv(ds); + + return b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, false); +} + +static int b53_arl_search_wait(struct b53_device *dev) +{ + unsigned int timeout = 1000; + u8 reg; + + do { + b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, ®); + if (!(reg & ARL_SRCH_STDN)) + return 0; + + if (reg & ARL_SRCH_VLID) + return 0; + + usleep_range(1000, 2000); + } while (timeout--); + + return -ETIMEDOUT; +} + 
+static void b53_arl_search_rd(struct b53_device *dev, u8 idx, + struct b53_arl_entry *ent) +{ + u64 mac_vid; + u32 fwd_entry; + + b53_read64(dev, B53_ARLIO_PAGE, + B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid); + b53_read32(dev, B53_ARLIO_PAGE, + B53_ARL_SRCH_RSTL(idx), &fwd_entry); + b53_arl_to_entry(ent, mac_vid, fwd_entry); +} + +static int b53_fdb_copy(struct net_device *dev, int port, + const struct b53_arl_entry *ent, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)) +{ + if (!ent->is_valid) + return 0; + + if (port != ent->port) + return 0; + + ether_addr_copy(fdb->addr, ent->mac); + fdb->vid = ent->vid; + fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE; + + return cb(&fdb->obj); +} + +static int b53_fdb_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)) +{ + struct b53_device *priv = ds_to_priv(ds); + struct net_device *dev = ds->ports[port].netdev; + struct b53_arl_entry results[2]; + unsigned int count = 0; + int ret; + u8 reg; + + /* Start search operation */ + reg = ARL_SRCH_STDN; + b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg); + + do { + ret = b53_arl_search_wait(priv); + if (ret) + return ret; + + b53_arl_search_rd(priv, 0, &results[0]); + ret = b53_fdb_copy(dev, port, &results[0], fdb, cb); + if (ret) + return ret; + + if (priv->num_arl_entries > 2) { + b53_arl_search_rd(priv, 1, &results[1]); + ret = b53_fdb_copy(dev, port, &results[1], fdb, cb); + if (ret) + return ret; + + if (!results[0].is_valid && !results[1].is_valid) + break; + } + + } while (count++ < 1024); + + return 0; +} + +static int b53_br_join(struct dsa_switch *ds, int port, + struct net_device *bridge) +{ + struct b53_device *dev = ds_to_priv(ds); + u16 pvlan, reg; + unsigned int i; + + dev->ports[port].bridge_dev = bridge; + b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); + + b53_for_each_port(dev, i) { + if (dev->ports[i].bridge_dev != bridge) + continue; + + /* Add this local port to the remote port VLAN control + * membership and update the remote port bitmask + */ + b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); + reg |= BIT(port); + b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); + dev->ports[i].vlan_ctl_mask = reg; + + pvlan |= BIT(i); + } + + /* Configure the local port VLAN control membership to include + * remote ports and update the local port bitmask + */ + b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); + dev->ports[port].vlan_ctl_mask = pvlan; + + return 0; +} + +static void b53_br_leave(struct dsa_switch *ds, int port) +{ + struct b53_device *dev = ds_to_priv(ds); + struct net_device *bridge = dev->ports[port].bridge_dev; + struct b53_vlan *vl = &dev->vlans[0]; + unsigned int i; + u16 pvlan, reg, pvid; + + b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); + + b53_for_each_port(dev, i) { + /* Don't touch the remaining ports */ + if (dev->ports[i].bridge_dev != bridge) + continue; + + b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); + reg &= ~BIT(port); + b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); + dev->ports[port].vlan_ctl_mask = reg; + + /* Prevent self removal to preserve isolation */ + if (port != i) + pvlan &= ~BIT(i); + } + + b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); + dev->ports[port].vlan_ctl_mask = pvlan; + dev->ports[port].bridge_dev = NULL; + + if (is5325(dev) || is5365(dev)) + pvid = 1; + else + pvid = 0; + + 
b53_get_vlan_entry(dev, pvid, vl); + vl->members |= BIT(port) | BIT(dev->cpu_port); + vl->untag |= BIT(port) | BIT(dev->cpu_port); + b53_set_vlan_entry(dev, pvid, vl); +} + +static void b53_br_set_stp_state(struct dsa_switch *ds, int port, + u8 state) +{ + struct b53_device *dev = ds_to_priv(ds); + u8 hw_state, cur_hw_state; + u8 reg; + + b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); + cur_hw_state = reg & PORT_CTRL_STP_STATE_MASK; + + switch (state) { + case BR_STATE_DISABLED: + hw_state = PORT_CTRL_DIS_STATE; + break; + case BR_STATE_LISTENING: + hw_state = PORT_CTRL_LISTEN_STATE; + break; + case BR_STATE_LEARNING: + hw_state = PORT_CTRL_LEARN_STATE; + break; + case BR_STATE_FORWARDING: + hw_state = PORT_CTRL_FWD_STATE; + break; + case BR_STATE_BLOCKING: + hw_state = PORT_CTRL_BLOCK_STATE; + break; + default: + dev_err(ds->dev, "invalid STP state: %d\n", state); + return; + } + + /* Fast-age ARL entries if we are moving a port from Learning or + * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening + * state (hw_state) + */ + if (cur_hw_state != hw_state) { + if (cur_hw_state >= PORT_CTRL_LEARN_STATE && + hw_state <= PORT_CTRL_LISTEN_STATE) { + if (b53_fast_age_port(dev, port)) { + dev_err(ds->dev, "fast ageing failed\n"); + return; + } + } + } + + b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); + reg &= ~PORT_CTRL_STP_STATE_MASK; + reg |= hw_state; + b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); +} + +static struct dsa_switch_driver b53_switch_ops = { + .tag_protocol = DSA_TAG_PROTO_NONE, + .setup = b53_setup, + .set_addr = b53_set_addr, + .get_strings = b53_get_strings, + .get_ethtool_stats = b53_get_ethtool_stats, + .get_sset_count = b53_get_sset_count, + .phy_read = b53_phy_read16, + .phy_write = b53_phy_write16, + .adjust_link = b53_adjust_link, + .port_enable = b53_enable_port, + .port_disable = b53_disable_port, + .port_bridge_join = b53_br_join, + .port_bridge_leave = b53_br_leave, + .port_stp_state_set = b53_br_set_stp_state, + .port_vlan_filtering = b53_vlan_filtering, + .port_vlan_prepare = b53_vlan_prepare, + .port_vlan_add = b53_vlan_add, + .port_vlan_del = b53_vlan_del, + .port_vlan_dump = b53_vlan_dump, + .port_fdb_prepare = b53_fdb_prepare, + .port_fdb_dump = b53_fdb_dump, + .port_fdb_add = b53_fdb_add, + .port_fdb_del = b53_fdb_del, +}; + +struct b53_chip_data { + u32 chip_id; + const char *dev_name; + u16 vlans; + u16 enabled_ports; + u8 cpu_port; + u8 vta_regs[3]; + u8 arl_entries; + u8 duplex_reg; + u8 jumbo_pm_reg; + u8 jumbo_size_reg; +}; + +#define B53_VTA_REGS \ + { B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY } +#define B53_VTA_REGS_9798 \ + { B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 } +#define B53_VTA_REGS_63XX \ + { B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX } + +static const struct b53_chip_data b53_switch_chips[] = { + { + .chip_id = BCM5325_DEVICE_ID, + .dev_name = "BCM5325", + .vlans = 16, + .enabled_ports = 0x1f, + .arl_entries = 2, + .cpu_port = B53_CPU_PORT_25, + .duplex_reg = B53_DUPLEX_STAT_FE, + }, + { + .chip_id = BCM5365_DEVICE_ID, + .dev_name = "BCM5365", + .vlans = 256, + .enabled_ports = 0x1f, + .arl_entries = 2, + .cpu_port = B53_CPU_PORT_25, + .duplex_reg = B53_DUPLEX_STAT_FE, + }, + { + .chip_id = BCM5395_DEVICE_ID, + .dev_name = "BCM5395", + .vlans = 4096, + .enabled_ports = 0x1f, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, 
+ { + .chip_id = BCM5397_DEVICE_ID, + .dev_name = "BCM5397", + .vlans = 4096, + .enabled_ports = 0x1f, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS_9798, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM5398_DEVICE_ID, + .dev_name = "BCM5398", + .vlans = 4096, + .enabled_ports = 0x7f, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS_9798, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM53115_DEVICE_ID, + .dev_name = "BCM53115", + .vlans = 4096, + .enabled_ports = 0x1f, + .arl_entries = 4, + .vta_regs = B53_VTA_REGS, + .cpu_port = B53_CPU_PORT, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM53125_DEVICE_ID, + .dev_name = "BCM53125", + .vlans = 4096, + .enabled_ports = 0xff, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM53128_DEVICE_ID, + .dev_name = "BCM53128", + .vlans = 4096, + .enabled_ports = 0x1ff, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM63XX_DEVICE_ID, + .dev_name = "BCM63xx", + .vlans = 4096, + .enabled_ports = 0, /* pdata must provide them */ + .arl_entries = 4, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS_63XX, + .duplex_reg = B53_DUPLEX_STAT_63XX, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX, + }, + { + .chip_id = BCM53010_DEVICE_ID, + .dev_name = "BCM53010", + .vlans = 4096, + .enabled_ports = 0x1f, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM53011_DEVICE_ID, + .dev_name = "BCM53011", + .vlans = 4096, + .enabled_ports = 0x1bf, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM53012_DEVICE_ID, + .dev_name = "BCM53012", + .vlans = 4096, + .enabled_ports = 0x1bf, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM53018_DEVICE_ID, + .dev_name = "BCM53018", + .vlans = 4096, + .enabled_ports = 0x1f, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM53019_DEVICE_ID, + .dev_name = "BCM53019", + .vlans = 4096, + .enabled_ports = 0x1f, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM58XX_DEVICE_ID, + .dev_name = "BCM585xx/586xx/88312", + 
.vlans = 4096, + .enabled_ports = 0x1ff, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT_25, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, +}; + +static int b53_switch_init(struct b53_device *dev) +{ + struct dsa_switch *ds = dev->ds; + unsigned int i; + int ret; + + for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) { + const struct b53_chip_data *chip = &b53_switch_chips[i]; + + if (chip->chip_id == dev->chip_id) { + if (!dev->enabled_ports) + dev->enabled_ports = chip->enabled_ports; + dev->name = chip->dev_name; + dev->duplex_reg = chip->duplex_reg; + dev->vta_regs[0] = chip->vta_regs[0]; + dev->vta_regs[1] = chip->vta_regs[1]; + dev->vta_regs[2] = chip->vta_regs[2]; + dev->jumbo_pm_reg = chip->jumbo_pm_reg; + ds->drv = &b53_switch_ops; + dev->cpu_port = chip->cpu_port; + dev->num_vlans = chip->vlans; + dev->num_arl_entries = chip->arl_entries; + break; + } + } + + /* check which BCM5325x version we have */ + if (is5325(dev)) { + u8 vc4; + + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); + + /* check reserved bits */ + switch (vc4 & 3) { + case 1: + /* BCM5325E */ + break; + case 3: + /* BCM5325F - do not use port 4 */ + dev->enabled_ports &= ~BIT(4); + break; + default: +/* On the BCM47XX SoCs this is the supported internal switch.*/ +#ifndef CONFIG_BCM47XX + /* BCM5325M */ + return -EINVAL; +#else + break; +#endif + } + } else if (dev->chip_id == BCM53115_DEVICE_ID) { + u64 strap_value; + + b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value); + /* use second IMP port if GMII is enabled */ + if (strap_value & SV_GMII_CTRL_115) + dev->cpu_port = 5; + } + + /* cpu port is always last */ + dev->num_ports = dev->cpu_port + 1; + dev->enabled_ports |= BIT(dev->cpu_port); + + dev->ports = devm_kzalloc(dev->dev, + sizeof(struct b53_port) * dev->num_ports, + GFP_KERNEL); + if (!dev->ports) + return -ENOMEM; + + dev->vlans = devm_kzalloc(dev->dev, + sizeof(struct b53_vlan) * dev->num_vlans, + GFP_KERNEL); + if (!dev->vlans) + return -ENOMEM; + + dev->reset_gpio = b53_switch_get_reset_gpio(dev); + if (dev->reset_gpio >= 0) { + ret = devm_gpio_request_one(dev->dev, dev->reset_gpio, + GPIOF_OUT_INIT_HIGH, "robo_reset"); + if (ret) + return ret; + } + + return 0; +} + +struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops, + void *priv) +{ + struct dsa_switch *ds; + struct b53_device *dev; + + ds = devm_kzalloc(base, sizeof(*ds) + sizeof(*dev), GFP_KERNEL); + if (!ds) + return NULL; + + dev = (struct b53_device *)(ds + 1); + + ds->priv = dev; + ds->dev = base; + dev->dev = base; + + dev->ds = ds; + dev->priv = priv; + dev->ops = ops; + mutex_init(&dev->reg_mutex); + mutex_init(&dev->stats_mutex); + + return dev; +} +EXPORT_SYMBOL(b53_switch_alloc); + +int b53_switch_detect(struct b53_device *dev) +{ + u32 id32; + u16 tmp; + u8 id8; + int ret; + + ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8); + if (ret) + return ret; + + switch (id8) { + case 0: + /* BCM5325 and BCM5365 do not have this register so reads + * return 0. But the read operation did succeed, so assume this + * is one of them. + * + * Next check if we can write to the 5325's VTA register; for + * 5365 it is read only. 
+ */ + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf); + b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp); + + if (tmp == 0xf) + dev->chip_id = BCM5325_DEVICE_ID; + else + dev->chip_id = BCM5365_DEVICE_ID; + break; + case BCM5395_DEVICE_ID: + case BCM5397_DEVICE_ID: + case BCM5398_DEVICE_ID: + dev->chip_id = id8; + break; + default: + ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32); + if (ret) + return ret; + + switch (id32) { + case BCM53115_DEVICE_ID: + case BCM53125_DEVICE_ID: + case BCM53128_DEVICE_ID: + case BCM53010_DEVICE_ID: + case BCM53011_DEVICE_ID: + case BCM53012_DEVICE_ID: + case BCM53018_DEVICE_ID: + case BCM53019_DEVICE_ID: + dev->chip_id = id32; + break; + default: + pr_err("unsupported switch detected (BCM53%02x/BCM%x)\n", + id8, id32); + return -ENODEV; + } + } + + if (dev->chip_id == BCM5325_DEVICE_ID) + return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25, + &dev->core_rev); + else + return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID, + &dev->core_rev); +} +EXPORT_SYMBOL(b53_switch_detect); + +int b53_switch_register(struct b53_device *dev) +{ + int ret; + + if (dev->pdata) { + dev->chip_id = dev->pdata->chip_id; + dev->enabled_ports = dev->pdata->enabled_ports; + } + + if (!dev->chip_id && b53_switch_detect(dev)) + return -EINVAL; + + ret = b53_switch_init(dev); + if (ret) + return ret; + + pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev); + + return dsa_register_switch(dev->ds, dev->ds->dev->of_node); +} +EXPORT_SYMBOL(b53_switch_register); + +MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>"); +MODULE_DESCRIPTION("B53 switch library"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c new file mode 100644 index 000000000..aa87c3fff --- /dev/null +++ b/drivers/net/dsa/b53/b53_mdio.c @@ -0,0 +1,392 @@ +/* + * B53 register access through MII registers + * + * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/phy.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/brcmphy.h> +#include <linux/rtnetlink.h> +#include <net/dsa.h> + +#include "b53_priv.h" + +/* MII registers */ +#define REG_MII_PAGE 0x10 /* MII Page register */ +#define REG_MII_ADDR 0x11 /* MII Address register */ +#define REG_MII_DATA0 0x18 /* MII Data register 0 */ +#define REG_MII_DATA1 0x19 /* MII Data register 1 */ +#define REG_MII_DATA2 0x1a /* MII Data register 2 */ +#define REG_MII_DATA3 0x1b /* MII Data register 3 */ + +#define REG_MII_PAGE_ENABLE BIT(0) +#define REG_MII_ADDR_WRITE BIT(0) +#define REG_MII_ADDR_READ BIT(1) + +static int b53_mdio_op(struct b53_device *dev, u8 page, u8 reg, u16 op) +{ + int i; + u16 v; + int ret; + struct mii_bus *bus = dev->priv; + + if (dev->current_page != page) { + /* set page number */ + v = (page << 8) | REG_MII_PAGE_ENABLE; + ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_PAGE, v); + if (ret) + return ret; + dev->current_page = page; + } + + /* set register address */ + v = (reg << 8) | op; + ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_ADDR, v); + if (ret) + return ret; + + /* check if operation completed */ + for (i = 0; i < 5; ++i) { + v = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_ADDR); + if (!(v & (REG_MII_ADDR_WRITE | REG_MII_ADDR_READ))) + break; + usleep_range(10, 100); + } + + if (WARN_ON(i == 5)) + return -EIO; + + return 0; +} + +static int b53_mdio_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val) +{ + struct mii_bus *bus = dev->priv; + int ret; + + ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ); + if (ret) + return ret; + + *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA0) & 0xff; + + return 0; +} + +static int b53_mdio_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val) +{ + struct mii_bus *bus = dev->priv; + int ret; + + ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ); + if (ret) + return ret; + + *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_DATA0); + + return 0; +} + +static int b53_mdio_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val) +{ + struct mii_bus *bus = dev->priv; + int ret; + + ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ); + if (ret) + return ret; + + *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_DATA0); + *val |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA1) << 16; + + return 0; +} + +static int b53_mdio_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + struct mii_bus *bus = dev->priv; + u64 temp = 0; + int i; + int ret; + + ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ); + if (ret) + return ret; + + for (i = 2; i >= 0; i--) { + temp <<= 16; + temp |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA0 + i); + } + + *val = temp; + + return 0; +} + +static int b53_mdio_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + struct mii_bus *bus = dev->priv; + u64 temp = 0; + int i; + int ret; + + ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ); + if (ret) + return ret; + + for (i = 3; i >= 0; i--) { + temp <<= 16; + temp |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA0 + i); + } + + *val = temp; + + return 0; +} + +static int b53_mdio_write8(struct b53_device *dev, u8 page, u8 reg, u8 value) +{ + struct mii_bus *bus = dev->priv; + int ret; + + ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA0, value); + if (ret) + return ret; + + return b53_mdio_op(dev, 
page, reg, REG_MII_ADDR_WRITE); +} + +static int b53_mdio_write16(struct b53_device *dev, u8 page, u8 reg, + u16 value) +{ + struct mii_bus *bus = dev->priv; + int ret; + + ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA0, value); + if (ret) + return ret; + + return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE); +} + +static int b53_mdio_write32(struct b53_device *dev, u8 page, u8 reg, + u32 value) +{ + struct mii_bus *bus = dev->priv; + unsigned int i; + u32 temp = value; + + for (i = 0; i < 2; i++) { + int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA0 + i, + temp & 0xffff); + if (ret) + return ret; + temp >>= 16; + } + + return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE); +} + +static int b53_mdio_write48(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + struct mii_bus *bus = dev->priv; + unsigned int i; + u64 temp = value; + + for (i = 0; i < 3; i++) { + int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA0 + i, + temp & 0xffff); + if (ret) + return ret; + temp >>= 16; + } + + return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE); +} + +static int b53_mdio_write64(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + struct mii_bus *bus = dev->priv; + unsigned int i; + u64 temp = value; + + for (i = 0; i < 4; i++) { + int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA0 + i, + temp & 0xffff); + if (ret) + return ret; + temp >>= 16; + } + + return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE); +} + +static int b53_mdio_phy_read16(struct b53_device *dev, int addr, int reg, + u16 *value) +{ + struct mii_bus *bus = dev->priv; + + *value = mdiobus_read_nested(bus, addr, reg); + + return 0; +} + +static int b53_mdio_phy_write16(struct b53_device *dev, int addr, int reg, + u16 value) +{ + struct mii_bus *bus = dev->bus; + + return mdiobus_write_nested(bus, addr, reg, value); +} + +static struct b53_io_ops b53_mdio_ops = { + .read8 = b53_mdio_read8, + .read16 = b53_mdio_read16, + .read32 = b53_mdio_read32, + .read48 = b53_mdio_read48, + .read64 = b53_mdio_read64, + .write8 = b53_mdio_write8, + .write16 = b53_mdio_write16, + .write32 = b53_mdio_write32, + .write48 = b53_mdio_write48, + .write64 = b53_mdio_write64, + .phy_read16 = b53_mdio_phy_read16, + .phy_write16 = b53_mdio_phy_write16, +}; + +#define B53_BRCM_OUI_1 0x0143bc00 +#define B53_BRCM_OUI_2 0x03625c00 +#define B53_BRCM_OUI_3 0x00406000 + +static int b53_mdio_probe(struct mdio_device *mdiodev) +{ + struct b53_device *dev; + u32 phy_id; + int ret; + + /* allow the generic PHY driver to take over the non-management MDIO + * addresses + */ + if (mdiodev->addr != BRCM_PSEUDO_PHY_ADDR && mdiodev->addr != 0) { + dev_err(&mdiodev->dev, "leaving address %d to PHY\n", + mdiodev->addr); + return -ENODEV; + } + + /* read the first port's id */ + phy_id = mdiobus_read(mdiodev->bus, 0, 2) << 16; + phy_id |= mdiobus_read(mdiodev->bus, 0, 3); + + /* BCM5325, BCM539x (OUI_1) + * BCM53125, BCM53128 (OUI_2) + * BCM5365 (OUI_3) + */ + if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 && + (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 && + (phy_id & 0xfffffc00) != B53_BRCM_OUI_3) { + dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id); + return -ENODEV; + } + + /* First probe will come from SWITCH_MDIO controller on the 7445D0 + * switch, which will conflict with the 7445 integrated switch + * pseudo-phy (we end-up programming both). 
In that case, we return + * -EPROBE_DEFER for the first time we get here, and wait until we come + * back with the slave MDIO bus which has the correct indirection + * layer setup + */ + if (of_machine_is_compatible("brcm,bcm7445d0") && + strcmp(mdiodev->bus->name, "sf2 slave mii")) + return -EPROBE_DEFER; + + dev = b53_switch_alloc(&mdiodev->dev, &b53_mdio_ops, mdiodev->bus); + if (!dev) + return -ENOMEM; + + /* we don't use page 0xff, so force a page set */ + dev->current_page = 0xff; + dev->bus = mdiodev->bus; + + dev_set_drvdata(&mdiodev->dev, dev); + + ret = b53_switch_register(dev); + if (ret) { + dev_err(&mdiodev->dev, "failed to register switch: %i\n", ret); + return ret; + } + + return ret; +} + +static void b53_mdio_remove(struct mdio_device *mdiodev) +{ + struct b53_device *dev = dev_get_drvdata(&mdiodev->dev); + struct dsa_switch *ds = dev->ds; + + dsa_unregister_switch(ds); +} + +static const struct of_device_id b53_of_match[] = { + { .compatible = "brcm,bcm5325" }, + { .compatible = "brcm,bcm53115" }, + { .compatible = "brcm,bcm53125" }, + { .compatible = "brcm,bcm53128" }, + { .compatible = "brcm,bcm5365" }, + { .compatible = "brcm,bcm5395" }, + { .compatible = "brcm,bcm5397" }, + { .compatible = "brcm,bcm5398" }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, b53_of_match); + +static struct mdio_driver b53_mdio_driver = { + .probe = b53_mdio_probe, + .remove = b53_mdio_remove, + .mdiodrv.driver = { + .name = "bcm53xx", + .of_match_table = b53_of_match, + }, +}; + +static int __init b53_mdio_driver_register(void) +{ + return mdio_driver_register(&b53_mdio_driver); +} +module_init(b53_mdio_driver_register); + +static void __exit b53_mdio_driver_unregister(void) +{ + mdio_driver_unregister(&b53_mdio_driver); +} +module_exit(b53_mdio_driver_unregister); + +MODULE_DESCRIPTION("B53 MDIO access driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c new file mode 100644 index 000000000..77ffc4312 --- /dev/null +++ b/drivers/net/dsa/b53/b53_mmap.c @@ -0,0 +1,273 @@ +/* + * B53 register access through memory mapped registers + * + * Copyright (C) 2012-2013 Jonas Gorski <jogo@openwrt.org> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/kconfig.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/platform_data/b53.h> + +#include "b53_priv.h" + +struct b53_mmap_priv { + void __iomem *regs; +}; + +static int b53_mmap_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val) +{ + u8 __iomem *regs = dev->priv; + + *val = readb(regs + (page << 8) + reg); + + return 0; +} + +static int b53_mmap_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val) +{ + u8 __iomem *regs = dev->priv; + + if (WARN_ON(reg % 2)) + return -EINVAL; + + if (dev->pdata && dev->pdata->big_endian) + *val = ioread16be(regs + (page << 8) + reg); + else + *val = readw(regs + (page << 8) + reg); + + return 0; +} + +static int b53_mmap_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val) +{ + u8 __iomem *regs = dev->priv; + + if (WARN_ON(reg % 4)) + return -EINVAL; + + if (dev->pdata && dev->pdata->big_endian) + *val = ioread32be(regs + (page << 8) + reg); + else + *val = readl(regs + (page << 8) + reg); + + return 0; +} + +static int b53_mmap_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + u8 __iomem *regs = dev->priv; + + if (WARN_ON(reg % 2)) + return -EINVAL; + + if (reg % 4) { + u16 lo; + u32 hi; + + if (dev->pdata && dev->pdata->big_endian) { + lo = ioread16be(regs + (page << 8) + reg); + hi = ioread32be(regs + (page << 8) + reg + 2); + } else { + lo = readw(regs + (page << 8) + reg); + hi = readl(regs + (page << 8) + reg + 2); + } + + *val = ((u64)hi << 16) | lo; + } else { + u32 lo; + u16 hi; + + if (dev->pdata && dev->pdata->big_endian) { + lo = ioread32be(regs + (page << 8) + reg); + hi = ioread16be(regs + (page << 8) + reg + 4); + } else { + lo = readl(regs + (page << 8) + reg); + hi = readw(regs + (page << 8) + reg + 4); + } + + *val = ((u64)hi << 32) | lo; + } + + return 0; +} + +static int b53_mmap_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + u8 __iomem *regs = dev->priv; + u32 hi, lo; + + if (WARN_ON(reg % 4)) + return -EINVAL; + + if (dev->pdata && dev->pdata->big_endian) { + lo = ioread32be(regs + (page << 8) + reg); + hi = ioread32be(regs + (page << 8) + reg + 4); + } else { + lo = readl(regs + (page << 8) + reg); + hi = readl(regs + (page << 8) + reg + 4); + } + + *val = ((u64)hi << 32) | lo; + + return 0; +} + +static int b53_mmap_write8(struct b53_device *dev, u8 page, u8 reg, u8 value) +{ + u8 __iomem *regs = dev->priv; + + writeb(value, regs + (page << 8) + reg); + + return 0; +} + +static int b53_mmap_write16(struct b53_device *dev, u8 page, u8 reg, + u16 value) +{ + u8 __iomem *regs = dev->priv; + + if (WARN_ON(reg % 2)) + return -EINVAL; + + if (dev->pdata && dev->pdata->big_endian) + iowrite16be(value, regs + (page << 8) + reg); + else + writew(value, regs + (page << 8) + reg); + + return 0; +} + +static int b53_mmap_write32(struct b53_device *dev, u8 page, u8 reg, + u32 value) +{ + u8 __iomem *regs = dev->priv; + + if (WARN_ON(reg % 4)) + return -EINVAL; + + if (dev->pdata && dev->pdata->big_endian) + iowrite32be(value, regs + (page << 8) + reg); + else + writel(value, regs + (page << 8) + reg); + + return 0; +} + +static int b53_mmap_write48(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + if (WARN_ON(reg % 2)) + return -EINVAL; + + if (reg % 4) { + u32 hi = (u32)(value >> 16); + u16 lo = (u16)value; + + b53_mmap_write16(dev, page, reg, lo); + b53_mmap_write32(dev, page, reg + 2, hi); + } else { + u16 hi = (u16)(value >> 32); + u32 lo = (u32)value; + + b53_mmap_write32(dev, page, 
reg, lo); + b53_mmap_write16(dev, page, reg + 4, hi); + } + + return 0; +} + +static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + u32 hi, lo; + + hi = upper_32_bits(value); + lo = lower_32_bits(value); + + if (WARN_ON(reg % 4)) + return -EINVAL; + + b53_mmap_write32(dev, page, reg, lo); + b53_mmap_write32(dev, page, reg + 4, hi); + + return 0; +} + +static struct b53_io_ops b53_mmap_ops = { + .read8 = b53_mmap_read8, + .read16 = b53_mmap_read16, + .read32 = b53_mmap_read32, + .read48 = b53_mmap_read48, + .read64 = b53_mmap_read64, + .write8 = b53_mmap_write8, + .write16 = b53_mmap_write16, + .write32 = b53_mmap_write32, + .write48 = b53_mmap_write48, + .write64 = b53_mmap_write64, +}; + +static int b53_mmap_probe(struct platform_device *pdev) +{ + struct b53_platform_data *pdata = pdev->dev.platform_data; + struct b53_device *dev; + + if (!pdata) + return -EINVAL; + + dev = b53_switch_alloc(&pdev->dev, &b53_mmap_ops, pdata->regs); + if (!dev) + return -ENOMEM; + + dev->pdata = pdata; + + platform_set_drvdata(pdev, dev); + + return b53_switch_register(dev); +} + +static int b53_mmap_remove(struct platform_device *pdev) +{ + struct b53_device *dev = platform_get_drvdata(pdev); + + if (dev) + b53_switch_remove(dev); + + return 0; +} + +static const struct of_device_id b53_mmap_of_table[] = { + { .compatible = "brcm,bcm3384-switch" }, + { .compatible = "brcm,bcm6328-switch" }, + { .compatible = "brcm,bcm6368-switch" }, + { .compatible = "brcm,bcm63xx-switch" }, + { /* sentinel */ }, +}; + +static struct platform_driver b53_mmap_driver = { + .probe = b53_mmap_probe, + .remove = b53_mmap_remove, + .driver = { + .name = "b53-switch", + .of_match_table = b53_mmap_of_table, + }, +}; + +module_platform_driver(b53_mmap_driver); +MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>"); +MODULE_DESCRIPTION("B53 MMAP access driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h new file mode 100644 index 000000000..835a744f2 --- /dev/null +++ b/drivers/net/dsa/b53/b53_priv.h @@ -0,0 +1,388 @@ +/* + * B53 common definitions + * + * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __B53_PRIV_H +#define __B53_PRIV_H + +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/phy.h> +#include <net/dsa.h> + +#include "b53_regs.h" + +struct b53_device; +struct net_device; + +struct b53_io_ops { + int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value); + int (*read16)(struct b53_device *dev, u8 page, u8 reg, u16 *value); + int (*read32)(struct b53_device *dev, u8 page, u8 reg, u32 *value); + int (*read48)(struct b53_device *dev, u8 page, u8 reg, u64 *value); + int (*read64)(struct b53_device *dev, u8 page, u8 reg, u64 *value); + int (*write8)(struct b53_device *dev, u8 page, u8 reg, u8 value); + int (*write16)(struct b53_device *dev, u8 page, u8 reg, u16 value); + int (*write32)(struct b53_device *dev, u8 page, u8 reg, u32 value); + int (*write48)(struct b53_device *dev, u8 page, u8 reg, u64 value); + int (*write64)(struct b53_device *dev, u8 page, u8 reg, u64 value); + int (*phy_read16)(struct b53_device *dev, int addr, int reg, u16 *value); + int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value); +}; + +enum { + BCM5325_DEVICE_ID = 0x25, + BCM5365_DEVICE_ID = 0x65, + BCM5395_DEVICE_ID = 0x95, + BCM5397_DEVICE_ID = 0x97, + BCM5398_DEVICE_ID = 0x98, + BCM53115_DEVICE_ID = 0x53115, + BCM53125_DEVICE_ID = 0x53125, + BCM53128_DEVICE_ID = 0x53128, + BCM63XX_DEVICE_ID = 0x6300, + BCM53010_DEVICE_ID = 0x53010, + BCM53011_DEVICE_ID = 0x53011, + BCM53012_DEVICE_ID = 0x53012, + BCM53018_DEVICE_ID = 0x53018, + BCM53019_DEVICE_ID = 0x53019, + BCM58XX_DEVICE_ID = 0x5800, +}; + +#define B53_N_PORTS 9 +#define B53_N_PORTS_25 6 + +struct b53_port { + u16 vlan_ctl_mask; + struct net_device *bridge_dev; +}; + +struct b53_vlan { + u16 members; + u16 untag; + bool valid; +}; + +struct b53_device { + struct dsa_switch *ds; + struct b53_platform_data *pdata; + const char *name; + + struct mutex reg_mutex; + struct mutex stats_mutex; + const struct b53_io_ops *ops; + + /* chip specific data */ + u32 chip_id; + u8 core_rev; + u8 vta_regs[3]; + u8 duplex_reg; + u8 jumbo_pm_reg; + u8 jumbo_size_reg; + int reset_gpio; + u8 num_arl_entries; + + /* used ports mask */ + u16 enabled_ports; + unsigned int cpu_port; + + /* connect specific data */ + u8 current_page; + struct device *dev; + + /* Master MDIO bus we got probed from */ + struct mii_bus *bus; + + void *priv; + + /* run time configuration */ + bool enable_jumbo; + + unsigned int num_vlans; + struct b53_vlan *vlans; + unsigned int num_ports; + struct b53_port *ports; +}; + +#define b53_for_each_port(dev, i) \ + for (i = 0; i < B53_N_PORTS; i++) \ + if (dev->enabled_ports & BIT(i)) + + +static inline int is5325(struct b53_device *dev) +{ + return dev->chip_id == BCM5325_DEVICE_ID; +} + +static inline int is5365(struct b53_device *dev) +{ +#ifdef CONFIG_BCM47XX + return dev->chip_id == BCM5365_DEVICE_ID; +#else + return 0; +#endif +} + +static inline int is5397_98(struct b53_device *dev) +{ + return dev->chip_id == BCM5397_DEVICE_ID || + dev->chip_id == BCM5398_DEVICE_ID; +} + +static inline int is539x(struct b53_device *dev) +{ + return dev->chip_id == BCM5395_DEVICE_ID || + dev->chip_id == BCM5397_DEVICE_ID || + dev->chip_id == BCM5398_DEVICE_ID; +} + +static inline int is531x5(struct b53_device *dev) +{ + return dev->chip_id == BCM53115_DEVICE_ID || + dev->chip_id == BCM53125_DEVICE_ID || + dev->chip_id == BCM53128_DEVICE_ID; +} + +static inline int is63xx(struct b53_device *dev) +{ +#ifdef CONFIG_BCM63XX + return dev->chip_id == BCM63XX_DEVICE_ID; +#else + return 0; +#endif +} + 
+static inline int is5301x(struct b53_device *dev) +{ + return dev->chip_id == BCM53010_DEVICE_ID || + dev->chip_id == BCM53011_DEVICE_ID || + dev->chip_id == BCM53012_DEVICE_ID || + dev->chip_id == BCM53018_DEVICE_ID || + dev->chip_id == BCM53019_DEVICE_ID; +} + +#define B53_CPU_PORT_25 5 +#define B53_CPU_PORT 8 + +static inline int is_cpu_port(struct b53_device *dev, int port) +{ + return dev->cpu_port; +} + +struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops, + void *priv); + +int b53_switch_detect(struct b53_device *dev); + +int b53_switch_register(struct b53_device *dev); + +static inline void b53_switch_remove(struct b53_device *dev) +{ + dsa_unregister_switch(dev->ds); +} + +static inline int b53_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read8(dev, page, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read16(dev, page, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read32(dev, page, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read48(dev, page, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read64(dev, page, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_write8(struct b53_device *dev, u8 page, u8 reg, u8 value) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write8(dev, page, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_write16(struct b53_device *dev, u8 page, u8 reg, + u16 value) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write16(dev, page, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_write32(struct b53_device *dev, u8 page, u8 reg, + u32 value) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write32(dev, page, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_write48(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write48(dev, page, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_write64(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write64(dev, page, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +struct b53_arl_entry { + u8 port; + u8 mac[ETH_ALEN]; + u16 vid; + u8 is_valid:1; + u8 is_age:1; + u8 is_static:1; +}; + +static inline void b53_mac_from_u64(u64 src, u8 *dst) +{ + unsigned int i; + + for (i = 0; i < ETH_ALEN; i++) + dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff; +} + +static inline u64 b53_mac_to_u64(const u8 *src) +{ + unsigned int i; + u64 dst = 0; + + for (i = 0; i < ETH_ALEN; i++) + dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i); + + return dst; +} + +static inline void 
b53_arl_to_entry(struct b53_arl_entry *ent, + u64 mac_vid, u32 fwd_entry) +{ + memset(ent, 0, sizeof(*ent)); + ent->port = fwd_entry & ARLTBL_DATA_PORT_ID_MASK; + ent->is_valid = !!(fwd_entry & ARLTBL_VALID); + ent->is_age = !!(fwd_entry & ARLTBL_AGE); + ent->is_static = !!(fwd_entry & ARLTBL_STATIC); + b53_mac_from_u64(mac_vid, ent->mac); + ent->vid = mac_vid >> ARLTBL_VID_S; +} + +static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry, + const struct b53_arl_entry *ent) +{ + *mac_vid = b53_mac_to_u64(ent->mac); + *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S; + *fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK; + if (ent->is_valid) + *fwd_entry |= ARLTBL_VALID; + if (ent->is_static) + *fwd_entry |= ARLTBL_STATIC; + if (ent->is_age) + *fwd_entry |= ARLTBL_AGE; +} + +#ifdef CONFIG_BCM47XX + +#include <linux/version.h> +#include <linux/bcm47xx_nvram.h> +#include <bcm47xx_board.h> +static inline int b53_switch_get_reset_gpio(struct b53_device *dev) +{ + enum bcm47xx_board board = bcm47xx_board_get(); + + switch (board) { + case BCM47XX_BOARD_LINKSYS_WRT300NV11: + case BCM47XX_BOARD_LINKSYS_WRT310NV1: + return 8; + default: + return bcm47xx_nvram_gpio_pin("robo_reset"); + } +} +#else +static inline int b53_switch_get_reset_gpio(struct b53_device *dev) +{ + return -ENOENT; +} +#endif +#endif diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h new file mode 100644 index 000000000..a0b453ea3 --- /dev/null +++ b/drivers/net/dsa/b53/b53_regs.h @@ -0,0 +1,434 @@ +/* + * B53 register definitions + * + * Copyright (C) 2004 Broadcom Corporation + * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __B53_REGS_H +#define __B53_REGS_H + +/* Management Port (SMP) Page offsets */ +#define B53_CTRL_PAGE 0x00 /* Control */ +#define B53_STAT_PAGE 0x01 /* Status */ +#define B53_MGMT_PAGE 0x02 /* Management Mode */ +#define B53_MIB_AC_PAGE 0x03 /* MIB Autocast */ +#define B53_ARLCTRL_PAGE 0x04 /* ARL Control */ +#define B53_ARLIO_PAGE 0x05 /* ARL Access */ +#define B53_FRAMEBUF_PAGE 0x06 /* Management frame access */ +#define B53_MEM_ACCESS_PAGE 0x08 /* Memory access */ + +/* PHY Registers */ +#define B53_PORT_MII_PAGE(i) (0x10 + (i)) /* Port i MII Registers */ +#define B53_IM_PORT_PAGE 0x18 /* Inverse MII Port (to EMAC) */ +#define B53_ALL_PORT_PAGE 0x19 /* All ports MII (broadcast) */ + +/* MIB registers */ +#define B53_MIB_PAGE(i) (0x20 + (i)) + +/* Quality of Service (QoS) Registers */ +#define B53_QOS_PAGE 0x30 + +/* Port VLAN Page */ +#define B53_PVLAN_PAGE 0x31 + +/* VLAN Registers */ +#define B53_VLAN_PAGE 0x34 + +/* Jumbo Frame Registers */ +#define B53_JUMBO_PAGE 0x40 + +/* CFP Configuration Registers Page */ +#define B53_CFP_PAGE 0xa1 + +/************************************************************************* + * Control Page registers + *************************************************************************/ + +/* Port Control Register (8 bit) */ +#define B53_PORT_CTRL(i) (0x00 + (i)) +#define PORT_CTRL_RX_DISABLE BIT(0) +#define PORT_CTRL_TX_DISABLE BIT(1) +#define PORT_CTRL_RX_BCST_EN BIT(2) /* Broadcast RX (P8 only) */ +#define PORT_CTRL_RX_MCST_EN BIT(3) /* Multicast RX (P8 only) */ +#define PORT_CTRL_RX_UCST_EN BIT(4) /* Unicast RX (P8 only) */ +#define PORT_CTRL_STP_STATE_S 5 +#define PORT_CTRL_NO_STP (0 << PORT_CTRL_STP_STATE_S) +#define PORT_CTRL_DIS_STATE (1 << PORT_CTRL_STP_STATE_S) +#define PORT_CTRL_BLOCK_STATE (2 << PORT_CTRL_STP_STATE_S) +#define PORT_CTRL_LISTEN_STATE (3 << PORT_CTRL_STP_STATE_S) +#define PORT_CTRL_LEARN_STATE (4 << PORT_CTRL_STP_STATE_S) +#define PORT_CTRL_FWD_STATE (5 << PORT_CTRL_STP_STATE_S) +#define PORT_CTRL_STP_STATE_MASK (0x7 << PORT_CTRL_STP_STATE_S) + +/* SMP Control Register (8 bit) */ +#define B53_SMP_CTRL 0x0a + +/* Switch Mode Control Register (8 bit) */ +#define B53_SWITCH_MODE 0x0b +#define SM_SW_FWD_MODE BIT(0) /* 1 = Managed Mode */ +#define SM_SW_FWD_EN BIT(1) /* Forwarding Enable */ + +/* IMP Port state override register (8 bit) */ +#define B53_PORT_OVERRIDE_CTRL 0x0e +#define PORT_OVERRIDE_LINK BIT(0) +#define PORT_OVERRIDE_FULL_DUPLEX BIT(1) /* 0 = Half Duplex */ +#define PORT_OVERRIDE_SPEED_S 2 +#define PORT_OVERRIDE_SPEED_10M (0 << PORT_OVERRIDE_SPEED_S) +#define PORT_OVERRIDE_SPEED_100M (1 << PORT_OVERRIDE_SPEED_S) +#define PORT_OVERRIDE_SPEED_1000M (2 << PORT_OVERRIDE_SPEED_S) +#define PORT_OVERRIDE_RV_MII_25 BIT(4) /* BCM5325 only */ +#define PORT_OVERRIDE_RX_FLOW BIT(4) +#define PORT_OVERRIDE_TX_FLOW BIT(5) +#define PORT_OVERRIDE_SPEED_2000M BIT(6) /* BCM5301X only, requires setting 1000M */ +#define PORT_OVERRIDE_EN BIT(7) /* Use the register contents */ + +/* Power-down mode control */ +#define B53_PD_MODE_CTRL_25 0x0f + +/* IP Multicast control (8 bit) */ +#define B53_IP_MULTICAST_CTRL 0x21 +#define B53_IPMC_FWD_EN BIT(1) +#define B53_UC_FWD_EN BIT(6) +#define B53_MC_FWD_EN BIT(7) + +/* (16 bit) */ +#define B53_UC_FLOOD_MASK 0x32 +#define B53_MC_FLOOD_MASK 0x34 +#define B53_IPMC_FLOOD_MASK 0x36 + +/* + * Override Ports 0-7 State on devices with xMII interfaces (8 bit) + * + * For port 8 still use B53_PORT_OVERRIDE_CTRL + * Please note that not all ports are available on every hardware, e.g. 
BCM5301X + * don't include overriding port 6, BCM63xx also have some limitations. + */ +#define B53_GMII_PORT_OVERRIDE_CTRL(i) (0x58 + (i)) +#define GMII_PO_LINK BIT(0) +#define GMII_PO_FULL_DUPLEX BIT(1) /* 0 = Half Duplex */ +#define GMII_PO_SPEED_S 2 +#define GMII_PO_SPEED_10M (0 << GMII_PO_SPEED_S) +#define GMII_PO_SPEED_100M (1 << GMII_PO_SPEED_S) +#define GMII_PO_SPEED_1000M (2 << GMII_PO_SPEED_S) +#define GMII_PO_RX_FLOW BIT(4) +#define GMII_PO_TX_FLOW BIT(5) +#define GMII_PO_EN BIT(6) /* Use the register contents */ +#define GMII_PO_SPEED_2000M BIT(7) /* BCM5301X only, requires setting 1000M */ + +#define B53_RGMII_CTRL_IMP 0x60 +#define RGMII_CTRL_ENABLE_GMII BIT(7) +#define RGMII_CTRL_TIMING_SEL BIT(2) +#define RGMII_CTRL_DLL_RXC BIT(1) +#define RGMII_CTRL_DLL_TXC BIT(0) + +#define B53_RGMII_CTRL_P(i) (B53_RGMII_CTRL_IMP + (i)) + +/* Software reset register (8 bit) */ +#define B53_SOFTRESET 0x79 +#define SW_RST BIT(7) +#define EN_SW_RST BIT(4) + +/* Fast Aging Control register (8 bit) */ +#define B53_FAST_AGE_CTRL 0x88 +#define FAST_AGE_STATIC BIT(0) +#define FAST_AGE_DYNAMIC BIT(1) +#define FAST_AGE_PORT BIT(2) +#define FAST_AGE_VLAN BIT(3) +#define FAST_AGE_STP BIT(4) +#define FAST_AGE_MC BIT(5) +#define FAST_AGE_DONE BIT(7) + +/* Fast Aging Port Control register (8 bit) */ +#define B53_FAST_AGE_PORT_CTRL 0x89 + +/* Fast Aging VID Control register (16 bit) */ +#define B53_FAST_AGE_VID_CTRL 0x8a + +/************************************************************************* + * Status Page registers + *************************************************************************/ + +/* Link Status Summary Register (16bit) */ +#define B53_LINK_STAT 0x00 + +/* Link Status Change Register (16 bit) */ +#define B53_LINK_STAT_CHANGE 0x02 + +/* Port Speed Summary Register (16 bit for FE, 32 bit for GE) */ +#define B53_SPEED_STAT 0x04 +#define SPEED_PORT_FE(reg, port) (((reg) >> (port)) & 1) +#define SPEED_PORT_GE(reg, port) (((reg) >> 2 * (port)) & 3) +#define SPEED_STAT_10M 0 +#define SPEED_STAT_100M 1 +#define SPEED_STAT_1000M 2 + +/* Duplex Status Summary (16 bit) */ +#define B53_DUPLEX_STAT_FE 0x06 +#define B53_DUPLEX_STAT_GE 0x08 +#define B53_DUPLEX_STAT_63XX 0x0c + +/* Revision ID register for BCM5325 */ +#define B53_REV_ID_25 0x50 + +/* Strap Value (48 bit) */ +#define B53_STRAP_VALUE 0x70 +#define SV_GMII_CTRL_115 BIT(27) + +/************************************************************************* + * Management Mode Page Registers + *************************************************************************/ + +/* Global Management Config Register (8 bit) */ +#define B53_GLOBAL_CONFIG 0x00 +#define GC_RESET_MIB 0x01 +#define GC_RX_BPDU_EN 0x02 +#define GC_MIB_AC_HDR_EN 0x10 +#define GC_MIB_AC_EN 0x20 +#define GC_FRM_MGMT_PORT_M 0xC0 +#define GC_FRM_MGMT_PORT_04 0x00 +#define GC_FRM_MGMT_PORT_MII 0x80 + +/* Broadcom Header control register (8 bit) */ +#define B53_BRCM_HDR 0x03 +#define BRCM_HDR_P8_EN BIT(0) /* Enable tagging on port 8 */ +#define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */ + +/* Device ID register (8 or 32 bit) */ +#define B53_DEVICE_ID 0x30 + +/* Revision ID register (8 bit) */ +#define B53_REV_ID 0x40 + +/************************************************************************* + * ARL Access Page Registers + *************************************************************************/ + +/* VLAN Table Access Register (8 bit) */ +#define B53_VT_ACCESS 0x80 +#define B53_VT_ACCESS_9798 0x60 /* for BCM5397/BCM5398 */ +#define B53_VT_ACCESS_63XX 0x60 /* for 
BCM6328/62/68 */ +#define VTA_CMD_WRITE 0 +#define VTA_CMD_READ 1 +#define VTA_CMD_CLEAR 2 +#define VTA_START_CMD BIT(7) + +/* VLAN Table Index Register (16 bit) */ +#define B53_VT_INDEX 0x81 +#define B53_VT_INDEX_9798 0x61 +#define B53_VT_INDEX_63XX 0x62 + +/* VLAN Table Entry Register (32 bit) */ +#define B53_VT_ENTRY 0x83 +#define B53_VT_ENTRY_9798 0x63 +#define B53_VT_ENTRY_63XX 0x64 +#define VTE_MEMBERS 0x1ff +#define VTE_UNTAG_S 9 +#define VTE_UNTAG (0x1ff << 9) + +/************************************************************************* + * ARL I/O Registers + *************************************************************************/ + +/* ARL Table Read/Write Register (8 bit) */ +#define B53_ARLTBL_RW_CTRL 0x00 +#define ARLTBL_RW BIT(0) +#define ARLTBL_START_DONE BIT(7) + +/* MAC Address Index Register (48 bit) */ +#define B53_MAC_ADDR_IDX 0x02 + +/* VLAN ID Index Register (16 bit) */ +#define B53_VLAN_ID_IDX 0x08 + +/* ARL Table MAC/VID Entry N Registers (64 bit) + * + * BCM5325 and BCM5365 share most definitions below + */ +#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n)) +#define ARLTBL_MAC_MASK 0xffffffffffffULL +#define ARLTBL_VID_S 48 +#define ARLTBL_VID_MASK_25 0xff +#define ARLTBL_VID_MASK 0xfff +#define ARLTBL_DATA_PORT_ID_S_25 48 +#define ARLTBL_DATA_PORT_ID_MASK_25 0xf +#define ARLTBL_AGE_25 BIT(61) +#define ARLTBL_STATIC_25 BIT(62) +#define ARLTBL_VALID_25 BIT(63) + +/* ARL Table Data Entry N Registers (32 bit) */ +#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x08) +#define ARLTBL_DATA_PORT_ID_MASK 0x1ff +#define ARLTBL_TC(tc) ((3 & tc) << 11) +#define ARLTBL_AGE BIT(14) +#define ARLTBL_STATIC BIT(15) +#define ARLTBL_VALID BIT(16) + +/* ARL Search Control Register (8 bit) */ +#define B53_ARL_SRCH_CTL 0x50 +#define B53_ARL_SRCH_CTL_25 0x20 +#define ARL_SRCH_VLID BIT(0) +#define ARL_SRCH_STDN BIT(7) + +/* ARL Search Address Register (16 bit) */ +#define B53_ARL_SRCH_ADDR 0x51 +#define B53_ARL_SRCH_ADDR_25 0x22 +#define B53_ARL_SRCH_ADDR_65 0x24 +#define ARL_ADDR_MASK GENMASK(14, 0) + +/* ARL Search MAC/VID Result (64 bit) */ +#define B53_ARL_SRCH_RSTL_0_MACVID 0x60 + +/* Single register search result on 5325 */ +#define B53_ARL_SRCH_RSTL_0_MACVID_25 0x24 +/* Single register search result on 5365 */ +#define B53_ARL_SRCH_RSTL_0_MACVID_65 0x30 + +/* ARL Search Data Result (32 bit) */ +#define B53_ARL_SRCH_RSTL_0 0x68 + +#define B53_ARL_SRCH_RSTL_MACVID(x) (B53_ARL_SRCH_RSTL_0_MACVID + ((x) * 0x10)) +#define B53_ARL_SRCH_RSTL(x) (B53_ARL_SRCH_RSTL_0 + ((x) * 0x10)) + +/************************************************************************* + * Port VLAN Registers + *************************************************************************/ + +/* Port VLAN mask (16 bit) IMP port is always 8, also on 5325 & co */ +#define B53_PVLAN_PORT_MASK(i) ((i) * 2) + +/************************************************************************* + * 802.1Q Page Registers + *************************************************************************/ + +/* Global QoS Control (8 bit) */ +#define B53_QOS_GLOBAL_CTL 0x00 + +/* Enable 802.1Q for individual Ports (16 bit) */ +#define B53_802_1P_EN 0x04 + +/************************************************************************* + * VLAN Page Registers + *************************************************************************/ + +/* VLAN Control 0 (8 bit) */ +#define B53_VLAN_CTRL0 0x00 +#define VC0_8021PF_CTRL_MASK 0x3 +#define VC0_8021PF_CTRL_NONE 0x0 +#define VC0_8021PF_CTRL_CHANGE_PRI 0x1 +#define VC0_8021PF_CTRL_CHANGE_VID 0x2 +#define 
VC0_8021PF_CTRL_CHANGE_BOTH 0x3 +#define VC0_8021QF_CTRL_MASK 0xc +#define VC0_8021QF_CTRL_CHANGE_PRI 0x1 +#define VC0_8021QF_CTRL_CHANGE_VID 0x2 +#define VC0_8021QF_CTRL_CHANGE_BOTH 0x3 +#define VC0_RESERVED_1 BIT(1) +#define VC0_DROP_VID_MISS BIT(4) +#define VC0_VID_HASH_VID BIT(5) +#define VC0_VID_CHK_EN BIT(6) /* Use VID,DA or VID,SA */ +#define VC0_VLAN_EN BIT(7) /* 802.1Q VLAN Enabled */ + +/* VLAN Control 1 (8 bit) */ +#define B53_VLAN_CTRL1 0x01 +#define VC1_RX_MCST_TAG_EN BIT(1) +#define VC1_RX_MCST_FWD_EN BIT(2) +#define VC1_RX_MCST_UNTAG_EN BIT(3) + +/* VLAN Control 2 (8 bit) */ +#define B53_VLAN_CTRL2 0x02 + +/* VLAN Control 3 (8 bit when BCM5325, 16 bit else) */ +#define B53_VLAN_CTRL3 0x03 +#define B53_VLAN_CTRL3_63XX 0x04 +#define VC3_MAXSIZE_1532 BIT(6) /* 5325 only */ +#define VC3_HIGH_8BIT_EN BIT(7) /* 5325 only */ + +/* VLAN Control 4 (8 bit) */ +#define B53_VLAN_CTRL4 0x05 +#define B53_VLAN_CTRL4_25 0x04 +#define B53_VLAN_CTRL4_63XX 0x06 +#define VC4_ING_VID_CHECK_S 6 +#define VC4_ING_VID_CHECK_MASK (0x3 << VC4_ING_VID_CHECK_S) +#define VC4_ING_VID_VIO_FWD 0 /* forward, but do not learn */ +#define VC4_ING_VID_VIO_DROP 1 /* drop VID violations */ +#define VC4_NO_ING_VID_CHK 2 /* do not check */ +#define VC4_ING_VID_VIO_TO_IMP 3 /* redirect to MII port */ + +/* VLAN Control 5 (8 bit) */ +#define B53_VLAN_CTRL5 0x06 +#define B53_VLAN_CTRL5_25 0x05 +#define B53_VLAN_CTRL5_63XX 0x07 +#define VC5_VID_FFF_EN BIT(2) +#define VC5_DROP_VTABLE_MISS BIT(3) + +/* VLAN Control 6 (8 bit) */ +#define B53_VLAN_CTRL6 0x07 +#define B53_VLAN_CTRL6_63XX 0x08 + +/* VLAN Table Access Register (16 bit) */ +#define B53_VLAN_TABLE_ACCESS_25 0x06 /* BCM5325E/5350 */ +#define B53_VLAN_TABLE_ACCESS_65 0x08 /* BCM5365 */ +#define VTA_VID_LOW_MASK_25 0xf +#define VTA_VID_LOW_MASK_65 0xff +#define VTA_VID_HIGH_S_25 4 +#define VTA_VID_HIGH_S_65 8 +#define VTA_VID_HIGH_MASK_25 (0xff << VTA_VID_HIGH_S_25) +#define VTA_VID_HIGH_MASK_65 (0xf << VTA_VID_HIGH_S_65) +#define VTA_RW_STATE BIT(12) +#define VTA_RW_STATE_RD 0 +#define VTA_RW_STATE_WR BIT(12) +#define VTA_RW_OP_EN BIT(13) + +/* VLAN Read/Write Registers for (16/32 bit) */ +#define B53_VLAN_WRITE_25 0x08 +#define B53_VLAN_WRITE_65 0x0a +#define B53_VLAN_READ 0x0c +#define VA_MEMBER_MASK 0x3f +#define VA_UNTAG_S_25 6 +#define VA_UNTAG_MASK_25 0x3f +#define VA_UNTAG_S_65 7 +#define VA_UNTAG_MASK_65 0x1f +#define VA_VID_HIGH_S 12 +#define VA_VID_HIGH_MASK (0xffff << VA_VID_HIGH_S) +#define VA_VALID_25 BIT(20) +#define VA_VALID_25_R4 BIT(24) +#define VA_VALID_65 BIT(14) + +/* VLAN Port Default Tag (16 bit) */ +#define B53_VLAN_PORT_DEF_TAG(i) (0x10 + 2 * (i)) + +/************************************************************************* + * Jumbo Frame Page Registers + *************************************************************************/ + +/* Jumbo Enable Port Mask (bit i == port i enabled) (32 bit) */ +#define B53_JUMBO_PORT_MASK 0x01 +#define B53_JUMBO_PORT_MASK_63XX 0x04 +#define JPM_10_100_JUMBO_EN BIT(24) /* GigE always enabled */ + +/* Good Frame Max Size without 802.1Q TAG (16 bit) */ +#define B53_JUMBO_MAX_SIZE 0x05 +#define B53_JUMBO_MAX_SIZE_63XX 0x08 +#define JMS_MIN_SIZE 1518 +#define JMS_MAX_SIZE 9724 + +/************************************************************************* + * CFP Configuration Page Registers + *************************************************************************/ + +/* CFP Control Register with ports map (8 bit) */ +#define B53_CFP_CTRL 0x00 + +#endif /* !__B53_REGS_H */ diff --git 
a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c new file mode 100644 index 000000000..2bda0b5f1 --- /dev/null +++ b/drivers/net/dsa/b53/b53_spi.c @@ -0,0 +1,331 @@ +/* + * B53 register access through SPI + * + * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <asm/unaligned.h> + +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/spi/spi.h> +#include <linux/platform_data/b53.h> + +#include "b53_priv.h" + +#define B53_SPI_DATA 0xf0 + +#define B53_SPI_STATUS 0xfe +#define B53_SPI_CMD_SPIF BIT(7) +#define B53_SPI_CMD_RACK BIT(5) + +#define B53_SPI_CMD_READ 0x00 +#define B53_SPI_CMD_WRITE 0x01 +#define B53_SPI_CMD_NORMAL 0x60 +#define B53_SPI_CMD_FAST 0x10 + +#define B53_SPI_PAGE_SELECT 0xff + +static inline int b53_spi_read_reg(struct spi_device *spi, u8 reg, u8 *val, + unsigned int len) +{ + u8 txbuf[2]; + + txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_READ; + txbuf[1] = reg; + + return spi_write_then_read(spi, txbuf, 2, val, len); +} + +static inline int b53_spi_clear_status(struct spi_device *spi) +{ + unsigned int i; + u8 rxbuf; + int ret; + + for (i = 0; i < 10; i++) { + ret = b53_spi_read_reg(spi, B53_SPI_STATUS, &rxbuf, 1); + if (ret) + return ret; + + if (!(rxbuf & B53_SPI_CMD_SPIF)) + break; + + mdelay(1); + } + + if (i == 10) + return -EIO; + + return 0; +} + +static inline int b53_spi_set_page(struct spi_device *spi, u8 page) +{ + u8 txbuf[3]; + + txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE; + txbuf[1] = B53_SPI_PAGE_SELECT; + txbuf[2] = page; + + return spi_write(spi, txbuf, sizeof(txbuf)); +} + +static inline int b53_prepare_reg_access(struct spi_device *spi, u8 page) +{ + int ret = b53_spi_clear_status(spi); + + if (ret) + return ret; + + return b53_spi_set_page(spi, page); +} + +static int b53_spi_prepare_reg_read(struct spi_device *spi, u8 reg) +{ + u8 rxbuf; + int retry_count; + int ret; + + ret = b53_spi_read_reg(spi, reg, &rxbuf, 1); + if (ret) + return ret; + + for (retry_count = 0; retry_count < 10; retry_count++) { + ret = b53_spi_read_reg(spi, B53_SPI_STATUS, &rxbuf, 1); + if (ret) + return ret; + + if (rxbuf & B53_SPI_CMD_RACK) + break; + + mdelay(1); + } + + if (retry_count == 10) + return -EIO; + + return 0; +} + +static int b53_spi_read(struct b53_device *dev, u8 page, u8 reg, u8 *data, + unsigned int len) +{ + struct spi_device *spi = dev->priv; + int ret; + + ret = b53_prepare_reg_access(spi, page); + if (ret) + return ret; + + ret = b53_spi_prepare_reg_read(spi, reg); + if (ret) + return ret; + + return b53_spi_read_reg(spi, B53_SPI_DATA, data, len); +} + +static int b53_spi_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val) +{ + return b53_spi_read(dev, page, reg, val, 1); +} + +static int b53_spi_read16(struct b53_device *dev, u8 page, 
u8 reg, u16 *val) +{ + int ret = b53_spi_read(dev, page, reg, (u8 *)val, 2); + + if (!ret) + *val = le16_to_cpu(*val); + + return ret; +} + +static int b53_spi_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val) +{ + int ret = b53_spi_read(dev, page, reg, (u8 *)val, 4); + + if (!ret) + *val = le32_to_cpu(*val); + + return ret; +} + +static int b53_spi_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + int ret; + + *val = 0; + ret = b53_spi_read(dev, page, reg, (u8 *)val, 6); + if (!ret) + *val = le64_to_cpu(*val); + + return ret; +} + +static int b53_spi_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + int ret = b53_spi_read(dev, page, reg, (u8 *)val, 8); + + if (!ret) + *val = le64_to_cpu(*val); + + return ret; +} + +static int b53_spi_write8(struct b53_device *dev, u8 page, u8 reg, u8 value) +{ + struct spi_device *spi = dev->priv; + int ret; + u8 txbuf[3]; + + ret = b53_prepare_reg_access(spi, page); + if (ret) + return ret; + + txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE; + txbuf[1] = reg; + txbuf[2] = value; + + return spi_write(spi, txbuf, sizeof(txbuf)); +} + +static int b53_spi_write16(struct b53_device *dev, u8 page, u8 reg, u16 value) +{ + struct spi_device *spi = dev->priv; + int ret; + u8 txbuf[4]; + + ret = b53_prepare_reg_access(spi, page); + if (ret) + return ret; + + txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE; + txbuf[1] = reg; + put_unaligned_le16(value, &txbuf[2]); + + return spi_write(spi, txbuf, sizeof(txbuf)); +} + +static int b53_spi_write32(struct b53_device *dev, u8 page, u8 reg, u32 value) +{ + struct spi_device *spi = dev->priv; + int ret; + u8 txbuf[6]; + + ret = b53_prepare_reg_access(spi, page); + if (ret) + return ret; + + txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE; + txbuf[1] = reg; + put_unaligned_le32(value, &txbuf[2]); + + return spi_write(spi, txbuf, sizeof(txbuf)); +} + +static int b53_spi_write48(struct b53_device *dev, u8 page, u8 reg, u64 value) +{ + struct spi_device *spi = dev->priv; + int ret; + u8 txbuf[10]; + + ret = b53_prepare_reg_access(spi, page); + if (ret) + return ret; + + txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE; + txbuf[1] = reg; + put_unaligned_le64(value, &txbuf[2]); + + return spi_write(spi, txbuf, sizeof(txbuf) - 2); +} + +static int b53_spi_write64(struct b53_device *dev, u8 page, u8 reg, u64 value) +{ + struct spi_device *spi = dev->priv; + int ret; + u8 txbuf[10]; + + ret = b53_prepare_reg_access(spi, page); + if (ret) + return ret; + + txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE; + txbuf[1] = reg; + put_unaligned_le64(value, &txbuf[2]); + + return spi_write(spi, txbuf, sizeof(txbuf)); +} + +static struct b53_io_ops b53_spi_ops = { + .read8 = b53_spi_read8, + .read16 = b53_spi_read16, + .read32 = b53_spi_read32, + .read48 = b53_spi_read48, + .read64 = b53_spi_read64, + .write8 = b53_spi_write8, + .write16 = b53_spi_write16, + .write32 = b53_spi_write32, + .write48 = b53_spi_write48, + .write64 = b53_spi_write64, +}; + +static int b53_spi_probe(struct spi_device *spi) +{ + struct b53_device *dev; + int ret; + + dev = b53_switch_alloc(&spi->dev, &b53_spi_ops, spi); + if (!dev) + return -ENOMEM; + + if (spi->dev.platform_data) + dev->pdata = spi->dev.platform_data; + + ret = b53_switch_register(dev); + if (ret) + return ret; + + spi_set_drvdata(spi, dev); + + return 0; +} + +static int b53_spi_remove(struct spi_device *spi) +{ + struct b53_device *dev = spi_get_drvdata(spi); + + if (dev) + b53_switch_remove(dev); + + return 0; +} + +static struct spi_driver 
b53_spi_driver = { + .driver = { + .name = "b53-switch", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + .probe = b53_spi_probe, + .remove = b53_spi_remove, +}; + +module_spi_driver(b53_spi_driver); + +MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>"); +MODULE_DESCRIPTION("B53 SPI access driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c new file mode 100644 index 000000000..3e2d4a5fc --- /dev/null +++ b/drivers/net/dsa/b53/b53_srab.c @@ -0,0 +1,442 @@ +/* + * B53 register access through Switch Register Access Bridge Registers + * + * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/platform_data/b53.h> +#include <linux/of.h> + +#include "b53_priv.h" + +/* command and status register of the SRAB */ +#define B53_SRAB_CMDSTAT 0x2c +#define B53_SRAB_CMDSTAT_RST BIT(2) +#define B53_SRAB_CMDSTAT_WRITE BIT(1) +#define B53_SRAB_CMDSTAT_GORDYN BIT(0) +#define B53_SRAB_CMDSTAT_PAGE 24 +#define B53_SRAB_CMDSTAT_REG 16 + +/* high order word of write data to switch register */ +#define B53_SRAB_WD_H 0x30 + +/* low order word of write data to switch register */ +#define B53_SRAB_WD_L 0x34 + +/* high order word of read data from switch register */ +#define B53_SRAB_RD_H 0x38 + +/* low order word of read data from switch register */ +#define B53_SRAB_RD_L 0x3c + +/* command and status register of the SRAB */ +#define B53_SRAB_CTRLS 0x40 +#define B53_SRAB_CTRLS_RCAREQ BIT(3) +#define B53_SRAB_CTRLS_RCAGNT BIT(4) +#define B53_SRAB_CTRLS_SW_INIT_DONE BIT(6) + +/* the register captures interrupt pulses from the switch */ +#define B53_SRAB_INTR 0x44 +#define B53_SRAB_INTR_P(x) BIT(x) +#define B53_SRAB_SWITCH_PHY BIT(8) +#define B53_SRAB_1588_SYNC BIT(9) +#define B53_SRAB_IMP1_SLEEP_TIMER BIT(10) +#define B53_SRAB_P7_SLEEP_TIMER BIT(11) +#define B53_SRAB_IMP0_SLEEP_TIMER BIT(12) + +struct b53_srab_priv { + void __iomem *regs; +}; + +static int b53_srab_request_grant(struct b53_device *dev) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + u32 ctrls; + int i; + + ctrls = readl(regs + B53_SRAB_CTRLS); + ctrls |= B53_SRAB_CTRLS_RCAREQ; + writel(ctrls, regs + B53_SRAB_CTRLS); + + for (i = 0; i < 20; i++) { + ctrls = readl(regs + B53_SRAB_CTRLS); + if (ctrls & B53_SRAB_CTRLS_RCAGNT) + break; + usleep_range(10, 100); + } + if (WARN_ON(i == 20)) + return -EIO; + + return 0; +} + +static void b53_srab_release_grant(struct b53_device *dev) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + u32 ctrls; + + ctrls = readl(regs + B53_SRAB_CTRLS); + ctrls &= ~B53_SRAB_CTRLS_RCAREQ; + writel(ctrls, regs + B53_SRAB_CTRLS); +} + 
+static int b53_srab_op(struct b53_device *dev, u8 page, u8 reg, u32 op) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int i; + u32 cmdstat; + + /* set register address */ + cmdstat = (page << B53_SRAB_CMDSTAT_PAGE) | + (reg << B53_SRAB_CMDSTAT_REG) | + B53_SRAB_CMDSTAT_GORDYN | + op; + writel(cmdstat, regs + B53_SRAB_CMDSTAT); + + /* check if operation completed */ + for (i = 0; i < 5; ++i) { + cmdstat = readl(regs + B53_SRAB_CMDSTAT); + if (!(cmdstat & B53_SRAB_CMDSTAT_GORDYN)) + break; + usleep_range(10, 100); + } + + if (WARN_ON(i == 5)) + return -EIO; + + return 0; +} + +static int b53_srab_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + ret = b53_srab_op(dev, page, reg, 0); + if (ret) + goto err; + + *val = readl(regs + B53_SRAB_RD_L) & 0xff; + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int b53_srab_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + ret = b53_srab_op(dev, page, reg, 0); + if (ret) + goto err; + + *val = readl(regs + B53_SRAB_RD_L) & 0xffff; + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int b53_srab_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + ret = b53_srab_op(dev, page, reg, 0); + if (ret) + goto err; + + *val = readl(regs + B53_SRAB_RD_L); + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int b53_srab_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + ret = b53_srab_op(dev, page, reg, 0); + if (ret) + goto err; + + *val = readl(regs + B53_SRAB_RD_L); + *val += ((u64)readl(regs + B53_SRAB_RD_H) & 0xffff) << 32; + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int b53_srab_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + ret = b53_srab_op(dev, page, reg, 0); + if (ret) + goto err; + + *val = readl(regs + B53_SRAB_RD_L); + *val += (u64)readl(regs + B53_SRAB_RD_H) << 32; + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int b53_srab_write8(struct b53_device *dev, u8 page, u8 reg, u8 value) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + writel(value, regs + B53_SRAB_WD_L); + + ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE); + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int b53_srab_write16(struct b53_device *dev, u8 page, u8 reg, + u16 value) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + writel(value, regs + B53_SRAB_WD_L); + + ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE); + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int 
b53_srab_write32(struct b53_device *dev, u8 page, u8 reg, + u32 value) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + writel(value, regs + B53_SRAB_WD_L); + + ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE); + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int b53_srab_write48(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + writel((u32)value, regs + B53_SRAB_WD_L); + writel((u16)(value >> 32), regs + B53_SRAB_WD_H); + + ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE); + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int b53_srab_write64(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + writel((u32)value, regs + B53_SRAB_WD_L); + writel((u32)(value >> 32), regs + B53_SRAB_WD_H); + + ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE); + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static struct b53_io_ops b53_srab_ops = { + .read8 = b53_srab_read8, + .read16 = b53_srab_read16, + .read32 = b53_srab_read32, + .read48 = b53_srab_read48, + .read64 = b53_srab_read64, + .write8 = b53_srab_write8, + .write16 = b53_srab_write16, + .write32 = b53_srab_write32, + .write48 = b53_srab_write48, + .write64 = b53_srab_write64, +}; + +static const struct of_device_id b53_srab_of_match[] = { + { .compatible = "brcm,bcm53010-srab" }, + { .compatible = "brcm,bcm53011-srab" }, + { .compatible = "brcm,bcm53012-srab" }, + { .compatible = "brcm,bcm53018-srab" }, + { .compatible = "brcm,bcm53019-srab" }, + { .compatible = "brcm,bcm5301x-srab" }, + { .compatible = "brcm,bcm58522-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { .compatible = "brcm,bcm58525-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { .compatible = "brcm,bcm58535-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { .compatible = "brcm,bcm58622-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { .compatible = "brcm,bcm58623-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { .compatible = "brcm,bcm58625-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { .compatible = "brcm,bcm88312-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { .compatible = "brcm,nsp-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, b53_srab_of_match); + +static int b53_srab_probe(struct platform_device *pdev) +{ + struct b53_platform_data *pdata = pdev->dev.platform_data; + struct device_node *dn = pdev->dev.of_node; + const struct of_device_id *of_id = NULL; + struct b53_srab_priv *priv; + struct b53_device *dev; + struct resource *r; + + if (dn) + of_id = of_match_node(b53_srab_of_match, dn); + + if (of_id) { + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + pdata->chip_id = (u32)(unsigned long)of_id->data; + } + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->regs = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(priv->regs)) + return -ENOMEM; + + dev = b53_switch_alloc(&pdev->dev, &b53_srab_ops, priv); + if (!dev) + return -ENOMEM; + + if (pdata) + dev->pdata = pdata; + + 
platform_set_drvdata(pdev, dev); + + return b53_switch_register(dev); +} + +static int b53_srab_remove(struct platform_device *pdev) +{ + struct b53_device *dev = platform_get_drvdata(pdev); + + if (dev) + b53_switch_remove(dev); + + return 0; +} + +static struct platform_driver b53_srab_driver = { + .probe = b53_srab_probe, + .remove = b53_srab_remove, + .driver = { + .name = "b53-srab-switch", + .of_match_table = b53_srab_of_match, + }, +}; + +module_platform_driver(b53_srab_driver); +MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>"); +MODULE_DESCRIPTION("B53 Switch Register Access Bridge Registers (SRAB) access driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 10ddd5a5d..b2b838724 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -22,6 +22,7 @@ #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/of_net.h> +#include <linux/of_mdio.h> #include <net/dsa.h> #include <linux/ethtool.h> #include <linux/if_bridge.h> @@ -460,19 +461,13 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port, return 0; } -/* Fast-ageing of ARL entries for a given port, equivalent to an ARL - * flush for that port. - */ -static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) +static int bcm_sf2_fast_age_op(struct bcm_sf2_priv *priv) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); unsigned int timeout = 1000; u32 reg; - core_writel(priv, port, CORE_FAST_AGE_PORT); - reg = core_readl(priv, CORE_FAST_AGE_CTRL); - reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE; + reg |= EN_AGE_PORT | EN_AGE_VLAN | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE; core_writel(priv, reg, CORE_FAST_AGE_CTRL); do { @@ -491,13 +486,98 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) return 0; } +/* Fast-ageing of ARL entries for a given port, equivalent to an ARL + * flush for that port. 
+ */ +static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + + core_writel(priv, port, CORE_FAST_AGE_PORT); + + return bcm_sf2_fast_age_op(priv); +} + +static int bcm_sf2_sw_fast_age_vlan(struct bcm_sf2_priv *priv, u16 vid) +{ + core_writel(priv, vid, CORE_FAST_AGE_VID); + + return bcm_sf2_fast_age_op(priv); +} + +static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv) +{ + unsigned int timeout = 10; + u32 reg; + + do { + reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL); + if (!(reg & ARLA_VTBL_STDN)) + return 0; + + usleep_range(1000, 2000); + } while (timeout--); + + return -ETIMEDOUT; +} + +static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op) +{ + core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL); + + return bcm_sf2_vlan_op_wait(priv); +} + +static void bcm_sf2_set_vlan_entry(struct bcm_sf2_priv *priv, u16 vid, + struct bcm_sf2_vlan *vlan) +{ + int ret; + + core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR); + core_writel(priv, vlan->untag << UNTAG_MAP_SHIFT | vlan->members, + CORE_ARLA_VTBL_ENTRY); + + ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_WRITE); + if (ret) + pr_err("failed to write VLAN entry\n"); +} + +static int bcm_sf2_get_vlan_entry(struct bcm_sf2_priv *priv, u16 vid, + struct bcm_sf2_vlan *vlan) +{ + u32 entry; + int ret; + + core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR); + + ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_READ); + if (ret) + return ret; + + entry = core_readl(priv, CORE_ARLA_VTBL_ENTRY); + vlan->members = entry & FWD_MAP_MASK; + vlan->untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK; + + return 0; +} + static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port, struct net_device *bridge) { struct bcm_sf2_priv *priv = ds_to_priv(ds); + s8 cpu_port = ds->dst->cpu_port; unsigned int i; u32 reg, p_ctl; + /* Make this port leave the all VLANs join since we will have proper + * VLAN entries from now on + */ + reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN); + reg &= ~BIT(port); + if ((reg & BIT(cpu_port)) == BIT(cpu_port)) + reg &= ~BIT(cpu_port); + core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN); + priv->port_sts[port].bridge_dev = bridge; p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); @@ -529,6 +609,7 @@ static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port) { struct bcm_sf2_priv *priv = ds_to_priv(ds); struct net_device *bridge = priv->port_sts[port].bridge_dev; + s8 cpu_port = ds->dst->cpu_port; unsigned int i; u32 reg, p_ctl; @@ -552,6 +633,13 @@ static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port) core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port)); priv->port_sts[port].vlan_ctl_mask = p_ctl; priv->port_sts[port].bridge_dev = NULL; + + /* Make this port join all VLANs without VLAN entries */ + reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN); + reg |= BIT(port); + if (!(reg & BIT(cpu_port))) + reg |= BIT(cpu_port); + core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN); } static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, @@ -804,7 +892,7 @@ static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port, int (*cb)(struct switchdev_obj *obj)) { struct bcm_sf2_priv *priv = ds_to_priv(ds); - struct net_device *dev = ds->ports[port]; + struct net_device *dev = ds->ports[port].netdev; struct bcm_sf2_arl_entry results[2]; unsigned int count = 0; int ret; @@ -836,6 +924,66 @@ static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port, return 0; } +static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv 
*priv, int op, int addr, + int regnum, u16 val) +{ + int ret = 0; + u32 reg; + + reg = reg_readl(priv, REG_SWITCH_CNTRL); + reg |= MDIO_MASTER_SEL; + reg_writel(priv, reg, REG_SWITCH_CNTRL); + + /* Page << 8 | offset */ + reg = 0x70; + reg <<= 2; + core_writel(priv, addr, reg); + + /* Page << 8 | offset */ + reg = 0x80 << 8 | regnum << 1; + reg <<= 2; + + if (op) + ret = core_readl(priv, reg); + else + core_writel(priv, val, reg); + + reg = reg_readl(priv, REG_SWITCH_CNTRL); + reg &= ~MDIO_MASTER_SEL; + reg_writel(priv, reg, REG_SWITCH_CNTRL); + + return ret & 0xffff; +} + +static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum) +{ + struct bcm_sf2_priv *priv = bus->priv; + + /* Intercept reads from Broadcom pseudo-PHY address, else, send + * them to our master MDIO bus controller + */ + if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr)) + return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0); + else + return mdiobus_read(priv->master_mii_bus, addr, regnum); +} + +static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum, + u16 val) +{ + struct bcm_sf2_priv *priv = bus->priv; + + /* Intercept writes to the Broadcom pseudo-PHY address, else, + * send them to our master MDIO bus controller + */ + if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr)) + bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val); + else + mdiobus_write(priv->master_mii_bus, addr, regnum, val); + + return 0; +} + static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id) { struct bcm_sf2_priv *priv = dev_id; @@ -932,133 +1080,70 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv, } } -static int bcm_sf2_sw_setup(struct dsa_switch *ds) +static int bcm_sf2_mdio_register(struct dsa_switch *ds) { - const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; struct bcm_sf2_priv *priv = ds_to_priv(ds); struct device_node *dn; - void __iomem **base; - unsigned int port; - unsigned int i; - u32 reg, rev; - int ret; - - spin_lock_init(&priv->indir_lock); - mutex_init(&priv->stats_mutex); - - /* All the interesting properties are at the parent device_node - * level - */ - dn = ds->cd->of_node->parent; - bcm_sf2_identify_ports(priv, ds->cd->of_node); - - priv->irq0 = irq_of_parse_and_map(dn, 0); - priv->irq1 = irq_of_parse_and_map(dn, 1); - - base = &priv->core; - for (i = 0; i < BCM_SF2_REGS_NUM; i++) { - *base = of_iomap(dn, i); - if (*base == NULL) { - pr_err("unable to find register: %s\n", reg_names[i]); - ret = -ENOMEM; - goto out_unmap; - } - base++; - } - - ret = bcm_sf2_sw_rst(priv); - if (ret) { - pr_err("unable to software reset switch: %d\n", ret); - goto out_unmap; - } - - /* Disable all interrupts and request them */ - bcm_sf2_intr_disable(priv); - - ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0, - "switch_0", priv); - if (ret < 0) { - pr_err("failed to request switch_0 IRQ\n"); - goto out_unmap; - } - - ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0, - "switch_1", priv); - if (ret < 0) { - pr_err("failed to request switch_1 IRQ\n"); - goto out_free_irq0; - } - - /* Reset the MIB counters */ - reg = core_readl(priv, CORE_GMNCFGCFG); - reg |= RST_MIB_CNT; - core_writel(priv, reg, CORE_GMNCFGCFG); - reg &= ~RST_MIB_CNT; - core_writel(priv, reg, CORE_GMNCFGCFG); - - /* Get the maximum number of ports for this switch */ - priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1; - if (priv->hw_params.num_ports > DSA_MAX_PORTS) - priv->hw_params.num_ports = DSA_MAX_PORTS; - - /* Assume a single GPHY setup if we 
can't read that property */ - if (of_property_read_u32(dn, "brcm,num-gphy", - &priv->hw_params.num_gphy)) - priv->hw_params.num_gphy = 1; - - /* Enable all valid ports and disable those unused */ - for (port = 0; port < priv->hw_params.num_ports; port++) { - /* IMP port receives special treatment */ - if ((1 << port) & ds->enabled_port_mask) - bcm_sf2_port_setup(ds, port, NULL); - else if (dsa_is_cpu_port(ds, port)) - bcm_sf2_imp_setup(ds, port); - else - bcm_sf2_port_disable(ds, port, NULL); - } - - /* Include the pseudo-PHY address and the broadcast PHY address to - * divert reads towards our workaround. This is only required for - * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such - * that we can use the regular SWITCH_MDIO master controller instead. + static int index; + int err; + + /* Find our integrated MDIO bus node */ + dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio"); + priv->master_mii_bus = of_mdio_find_bus(dn); + if (!priv->master_mii_bus) + return -EPROBE_DEFER; + + get_device(&priv->master_mii_bus->dev); + priv->master_mii_dn = dn; + + priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev); + if (!priv->slave_mii_bus) + return -ENOMEM; + + priv->slave_mii_bus->priv = priv; + priv->slave_mii_bus->name = "sf2 slave mii"; + priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read; + priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write; + snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d", + index++); + priv->slave_mii_bus->dev.of_node = dn; + + /* Include the pseudo-PHY address to divert reads towards our + * workaround. This is only required for 7445D0, since 7445E0 + * disconnects the internal switch pseudo-PHY such that we can use the + * regular SWITCH_MDIO master controller instead. * - * By default, DSA initializes ds->phys_mii_mask to - * ds->enabled_port_mask to have a 1:1 mapping between Port address - * and PHY address in order to utilize the slave_mii_bus instance to - * read from Port PHYs. This is not what we want here, so we - * initialize phys_mii_mask 0 to always utilize the "master" MDIO - * bus backed by the "mdio-unimac" driver. + * Here we flag the pseudo PHY as needing special treatment and would + * otherwise make all other PHY read/writes go to the master MDIO bus + * controller that comes with this switch backed by the "mdio-unimac" + * driver. 
*/ if (of_machine_is_compatible("brcm,bcm7445d0")) - ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); + priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR); else - ds->phys_mii_mask = 0; + priv->indir_phy_mask = 0; - rev = reg_readl(priv, REG_SWITCH_REVISION); - priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & - SWITCH_TOP_REV_MASK; - priv->hw_params.core_rev = (rev & SF2_REV_MASK); + ds->phys_mii_mask = priv->indir_phy_mask; + ds->slave_mii_bus = priv->slave_mii_bus; + priv->slave_mii_bus->parent = ds->dev->parent; + priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask; - rev = reg_readl(priv, REG_PHY_REVISION); - priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK; + if (dn) + err = of_mdiobus_register(priv->slave_mii_bus, dn); + else + err = mdiobus_register(priv->slave_mii_bus); - pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n", - priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff, - priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff, - priv->core, priv->irq0, priv->irq1); + if (err) + of_node_put(dn); - return 0; + return err; +} -out_free_irq0: - free_irq(priv->irq0, priv); -out_unmap: - base = &priv->core; - for (i = 0; i < BCM_SF2_REGS_NUM; i++) { - if (*base) - iounmap(*base); - base++; - } - return ret; +static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv) +{ + mdiobus_unregister(priv->slave_mii_bus); + if (priv->master_mii_dn) + of_node_put(priv->master_mii_dn); } static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr) @@ -1078,68 +1163,6 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) return priv->hw_params.gphy_rev; } -static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr, - int regnum, u16 val) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - int ret = 0; - u32 reg; - - reg = reg_readl(priv, REG_SWITCH_CNTRL); - reg |= MDIO_MASTER_SEL; - reg_writel(priv, reg, REG_SWITCH_CNTRL); - - /* Page << 8 | offset */ - reg = 0x70; - reg <<= 2; - core_writel(priv, addr, reg); - - /* Page << 8 | offset */ - reg = 0x80 << 8 | regnum << 1; - reg <<= 2; - - if (op) - ret = core_readl(priv, reg); - else - core_writel(priv, val, reg); - - reg = reg_readl(priv, REG_SWITCH_CNTRL); - reg &= ~MDIO_MASTER_SEL; - reg_writel(priv, reg, REG_SWITCH_CNTRL); - - return ret & 0xffff; -} - -static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum) -{ - /* Intercept reads from the MDIO broadcast address or Broadcom - * pseudo-PHY address - */ - switch (addr) { - case 0: - case BRCM_PSEUDO_PHY_ADDR: - return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0); - default: - return 0xffff; - } -} - -static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum, - u16 val) -{ - /* Intercept writes to the MDIO broadcast address or Broadcom - * pseudo-PHY address - */ - switch (addr) { - case 0: - case BRCM_PSEUDO_PHY_ADDR: - bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val); - break; - } - - return 0; -} - static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phydev) { @@ -1248,7 +1271,7 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, * state machine and make it go in PHY_FORCING state instead. 
*/ if (!status->link) - netif_carrier_off(ds->ports[port]); + netif_carrier_off(ds->ports[port].netdev); status->duplex = 1; } else { status->link = 1; @@ -1370,14 +1393,310 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, return p->ethtool_ops->set_wol(p, wol); } +static void bcm_sf2_enable_vlan(struct bcm_sf2_priv *priv, bool enable) +{ + u32 mgmt, vc0, vc1, vc4, vc5; + + mgmt = core_readl(priv, CORE_SWMODE); + vc0 = core_readl(priv, CORE_VLAN_CTRL0); + vc1 = core_readl(priv, CORE_VLAN_CTRL1); + vc4 = core_readl(priv, CORE_VLAN_CTRL4); + vc5 = core_readl(priv, CORE_VLAN_CTRL5); + + mgmt &= ~SW_FWDG_MODE; + + if (enable) { + vc0 |= VLAN_EN | VLAN_LEARN_MODE_IVL; + vc1 |= EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP; + vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT); + vc4 |= INGR_VID_CHK_DROP; + vc5 |= DROP_VTABLE_MISS | EN_VID_FFF_FWD; + } else { + vc0 &= ~(VLAN_EN | VLAN_LEARN_MODE_IVL); + vc1 &= ~(EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP); + vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT); + vc5 &= ~(DROP_VTABLE_MISS | EN_VID_FFF_FWD); + vc4 |= INGR_VID_CHK_VID_VIOL_IMP; + } + + core_writel(priv, vc0, CORE_VLAN_CTRL0); + core_writel(priv, vc1, CORE_VLAN_CTRL1); + core_writel(priv, 0, CORE_VLAN_CTRL3); + core_writel(priv, vc4, CORE_VLAN_CTRL4); + core_writel(priv, vc5, CORE_VLAN_CTRL5); + core_writel(priv, mgmt, CORE_SWMODE); +} + +static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + unsigned int port; + + /* Clear all VLANs */ + bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_CLEAR); + + for (port = 0; port < priv->hw_params.num_ports; port++) { + if (!((1 << port) & ds->enabled_port_mask)) + continue; + + core_writel(priv, 1, CORE_DEFAULT_1Q_TAG_P(port)); + } +} + +static int bcm_sf2_sw_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering) +{ + return 0; +} + +static int bcm_sf2_sw_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + + bcm_sf2_enable_vlan(priv, true); + + return 0; +} + +static void bcm_sf2_sw_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + s8 cpu_port = ds->dst->cpu_port; + struct bcm_sf2_vlan *vl; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + vl = &priv->vlans[vid]; + + bcm_sf2_get_vlan_entry(priv, vid, vl); + + vl->members |= BIT(port) | BIT(cpu_port); + if (untagged) + vl->untag |= BIT(port) | BIT(cpu_port); + else + vl->untag &= ~(BIT(port) | BIT(cpu_port)); + + bcm_sf2_set_vlan_entry(priv, vid, vl); + bcm_sf2_sw_fast_age_vlan(priv, vid); + } + + if (pvid) { + core_writel(priv, vlan->vid_end, CORE_DEFAULT_1Q_TAG_P(port)); + core_writel(priv, vlan->vid_end, + CORE_DEFAULT_1Q_TAG_P(cpu_port)); + bcm_sf2_sw_fast_age_vlan(priv, vid); + } +} + +static int bcm_sf2_sw_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + s8 cpu_port = ds->dst->cpu_port; + struct bcm_sf2_vlan *vl; + u16 vid, pvid; + int ret; + + pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port)); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + vl = &priv->vlans[vid]; + + ret = 
bcm_sf2_get_vlan_entry(priv, vid, vl); + if (ret) + return ret; + + vl->members &= ~BIT(port); + if ((vl->members & BIT(cpu_port)) == BIT(cpu_port)) + vl->members = 0; + if (pvid == vid) + pvid = 0; + if (untagged) { + vl->untag &= ~BIT(port); + if ((vl->untag & BIT(port)) == BIT(cpu_port)) + vl->untag = 0; + } + + bcm_sf2_set_vlan_entry(priv, vid, vl); + bcm_sf2_sw_fast_age_vlan(priv, vid); + } + + core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(port)); + core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(cpu_port)); + bcm_sf2_sw_fast_age_vlan(priv, vid); + + return 0; +} + +static int bcm_sf2_sw_vlan_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_vlan *vlan, + int (*cb)(struct switchdev_obj *obj)) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_port_status *p = &priv->port_sts[port]; + struct bcm_sf2_vlan *vl; + u16 vid, pvid; + int err = 0; + + pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port)); + + for (vid = 0; vid < VLAN_N_VID; vid++) { + vl = &priv->vlans[vid]; + + if (!(vl->members & BIT(port))) + continue; + + vlan->vid_begin = vlan->vid_end = vid; + vlan->flags = 0; + + if (vl->untag & BIT(port)) + vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; + if (p->pvid == vid) + vlan->flags |= BRIDGE_VLAN_INFO_PVID; + + err = cb(&vlan->obj); + if (err) + break; + } + + return err; +} + +static int bcm_sf2_sw_setup(struct dsa_switch *ds) +{ + const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; + struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct device_node *dn; + void __iomem **base; + unsigned int port; + unsigned int i; + u32 reg, rev; + int ret; + + spin_lock_init(&priv->indir_lock); + mutex_init(&priv->stats_mutex); + + /* All the interesting properties are at the parent device_node + * level + */ + dn = ds->cd->of_node->parent; + bcm_sf2_identify_ports(priv, ds->cd->of_node); + + priv->irq0 = irq_of_parse_and_map(dn, 0); + priv->irq1 = irq_of_parse_and_map(dn, 1); + + base = &priv->core; + for (i = 0; i < BCM_SF2_REGS_NUM; i++) { + *base = of_iomap(dn, i); + if (*base == NULL) { + pr_err("unable to find register: %s\n", reg_names[i]); + ret = -ENOMEM; + goto out_unmap; + } + base++; + } + + ret = bcm_sf2_sw_rst(priv); + if (ret) { + pr_err("unable to software reset switch: %d\n", ret); + goto out_unmap; + } + + ret = bcm_sf2_mdio_register(ds); + if (ret) { + pr_err("failed to register MDIO bus\n"); + goto out_unmap; + } + + /* Disable all interrupts and request them */ + bcm_sf2_intr_disable(priv); + + ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0, + "switch_0", priv); + if (ret < 0) { + pr_err("failed to request switch_0 IRQ\n"); + goto out_mdio; + } + + ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0, + "switch_1", priv); + if (ret < 0) { + pr_err("failed to request switch_1 IRQ\n"); + goto out_free_irq0; + } + + /* Reset the MIB counters */ + reg = core_readl(priv, CORE_GMNCFGCFG); + reg |= RST_MIB_CNT; + core_writel(priv, reg, CORE_GMNCFGCFG); + reg &= ~RST_MIB_CNT; + core_writel(priv, reg, CORE_GMNCFGCFG); + + /* Get the maximum number of ports for this switch */ + priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1; + if (priv->hw_params.num_ports > DSA_MAX_PORTS) + priv->hw_params.num_ports = DSA_MAX_PORTS; + + /* Assume a single GPHY setup if we can't read that property */ + if (of_property_read_u32(dn, "brcm,num-gphy", + &priv->hw_params.num_gphy)) + priv->hw_params.num_gphy = 1; + + /* Enable all valid ports and disable those unused */ + for (port = 0; port < priv->hw_params.num_ports; port++) { + /* IMP port 
receives special treatment */ + if ((1 << port) & ds->enabled_port_mask) + bcm_sf2_port_setup(ds, port, NULL); + else if (dsa_is_cpu_port(ds, port)) + bcm_sf2_imp_setup(ds, port); + else + bcm_sf2_port_disable(ds, port, NULL); + } + + bcm_sf2_sw_configure_vlan(ds); + + rev = reg_readl(priv, REG_SWITCH_REVISION); + priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & + SWITCH_TOP_REV_MASK; + priv->hw_params.core_rev = (rev & SF2_REV_MASK); + + rev = reg_readl(priv, REG_PHY_REVISION); + priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK; + + pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n", + priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff, + priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff, + priv->core, priv->irq0, priv->irq1); + + return 0; + +out_free_irq0: + free_irq(priv->irq0, priv); +out_mdio: + bcm_sf2_mdio_unregister(priv); +out_unmap: + base = &priv->core; + for (i = 0; i < BCM_SF2_REGS_NUM; i++) { + if (*base) + iounmap(*base); + base++; + } + return ret; +} + static struct dsa_switch_driver bcm_sf2_switch_driver = { .tag_protocol = DSA_TAG_PROTO_BRCM, .probe = bcm_sf2_sw_drv_probe, .setup = bcm_sf2_sw_setup, .set_addr = bcm_sf2_sw_set_addr, .get_phy_flags = bcm_sf2_sw_get_phy_flags, - .phy_read = bcm_sf2_sw_phy_read, - .phy_write = bcm_sf2_sw_phy_write, .get_strings = bcm_sf2_sw_get_strings, .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats, .get_sset_count = bcm_sf2_sw_get_sset_count, @@ -1398,6 +1717,11 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = { .port_fdb_add = bcm_sf2_sw_fdb_add, .port_fdb_del = bcm_sf2_sw_fdb_del, .port_fdb_dump = bcm_sf2_sw_fdb_dump, + .port_vlan_filtering = bcm_sf2_sw_vlan_filtering, + .port_vlan_prepare = bcm_sf2_sw_vlan_prepare, + .port_vlan_add = bcm_sf2_sw_vlan_add, + .port_vlan_del = bcm_sf2_sw_vlan_del, + .port_vlan_dump = bcm_sf2_sw_vlan_dump, }; static int __init bcm_sf2_init(void) diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 71b1e5298..dd446e466 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -21,6 +21,7 @@ #include <linux/ethtool.h> #include <linux/types.h> #include <linux/bitops.h> +#include <linux/if_vlan.h> #include <net/dsa.h> @@ -50,6 +51,7 @@ struct bcm_sf2_port_status { struct ethtool_eee eee; u32 vlan_ctl_mask; + u16 pvid; struct net_device *bridge_dev; }; @@ -63,6 +65,11 @@ struct bcm_sf2_arl_entry { u8 is_static:1; }; +struct bcm_sf2_vlan { + u16 members; + u16 untag; +}; + static inline void bcm_sf2_mac_from_u64(u64 src, u8 *dst) { unsigned int i; @@ -142,6 +149,15 @@ struct bcm_sf2_priv { /* Bitmask of ports having an integrated PHY */ unsigned int int_phy_mask; + + /* Master and slave MDIO bus controller */ + unsigned int indir_phy_mask; + struct device_node *master_mii_dn; + struct mii_bus *slave_mii_bus; + struct mii_bus *master_mii_bus; + + /* Cache of programmed VLANs */ + struct bcm_sf2_vlan vlans[VLAN_N_VID]; }; struct bcm_sf2_hw_stats { diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 97780d43b..9f2a9cb42 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -274,6 +274,23 @@ #define CORE_ARLA_SRCH_RSLT_MACVID(x) (CORE_ARLA_SRCH_RSLT_0_MACVID + ((x) * 0x40)) #define CORE_ARLA_SRCH_RSLT(x) (CORE_ARLA_SRCH_RSLT_0 + ((x) * 0x40)) +#define CORE_ARLA_VTBL_RWCTRL 0x1600 +#define ARLA_VTBL_CMD_WRITE 0 +#define ARLA_VTBL_CMD_READ 1 +#define ARLA_VTBL_CMD_CLEAR 2 +#define ARLA_VTBL_STDN (1 << 7) + +#define CORE_ARLA_VTBL_ADDR 0x1604 +#define 
VTBL_ADDR_INDEX_MASK 0xfff + +#define CORE_ARLA_VTBL_ENTRY 0x160c +#define FWD_MAP_MASK 0x1ff +#define UNTAG_MAP_MASK 0x1ff +#define UNTAG_MAP_SHIFT 9 +#define MSTP_INDEX_MASK 0x7 +#define MSTP_INDEX_SHIFT 18 +#define FWD_MODE (1 << 21) + #define CORE_MEM_PSM_VDD_CTRL 0x2380 #define P_TXQ_PSM_VDD_SHIFT 2 #define P_TXQ_PSM_VDD_MASK 0x3 @@ -287,6 +304,59 @@ #define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8)) #define PORT_VLAN_CTRL_MASK 0x1ff +#define CORE_VLAN_CTRL0 0xd000 +#define CHANGE_1P_VID_INNER (1 << 0) +#define CHANGE_1P_VID_OUTER (1 << 1) +#define CHANGE_1Q_VID (1 << 3) +#define VLAN_LEARN_MODE_SVL (0 << 5) +#define VLAN_LEARN_MODE_IVL (3 << 5) +#define VLAN_EN (1 << 7) + +#define CORE_VLAN_CTRL1 0xd004 +#define EN_RSV_MCAST_FWDMAP (1 << 2) +#define EN_RSV_MCAST_UNTAG (1 << 3) +#define EN_IPMC_BYPASS_FWDMAP (1 << 5) +#define EN_IPMC_BYPASS_UNTAG (1 << 6) + +#define CORE_VLAN_CTRL2 0xd008 +#define EN_MIIM_BYPASS_V_FWDMAP (1 << 2) +#define EN_GMRP_GVRP_V_FWDMAP (1 << 5) +#define EN_GMRP_GVRP_UNTAG_MAP (1 << 6) + +#define CORE_VLAN_CTRL3 0xd00c +#define EN_DROP_NON1Q_MASK 0x1ff + +#define CORE_VLAN_CTRL4 0xd014 +#define RESV_MCAST_FLOOD (1 << 1) +#define EN_DOUBLE_TAG_MASK 0x3 +#define EN_DOUBLE_TAG_SHIFT 2 +#define EN_MGE_REV_GMRP (1 << 4) +#define EN_MGE_REV_GVRP (1 << 5) +#define INGR_VID_CHK_SHIFT 6 +#define INGR_VID_CHK_MASK 0x3 +#define INGR_VID_CHK_FWD (0 << INGR_VID_CHK_SHIFT) +#define INGR_VID_CHK_DROP (1 << INGR_VID_CHK_SHIFT) +#define INGR_VID_CHK_NO_CHK (2 << INGR_VID_CHK_SHIFT) +#define INGR_VID_CHK_VID_VIOL_IMP (3 << INGR_VID_CHK_SHIFT) + +#define CORE_VLAN_CTRL5 0xd018 +#define EN_CPU_RX_BYP_INNER_CRCCHCK (1 << 0) +#define EN_VID_FFF_FWD (1 << 2) +#define DROP_VTABLE_MISS (1 << 3) +#define EGRESS_DIR_FRM_BYP_TRUNK_EN (1 << 4) +#define PRESV_NON1Q (1 << 6) + +#define CORE_VLAN_CTRL6 0xd01c +#define STRICT_SFD_DETECT (1 << 0) +#define DIS_ARL_BUST_LMIT (1 << 4) + +#define CORE_DEFAULT_1Q_TAG_P(x) (0xd040 + ((x) * 8)) +#define CFI_SHIFT 12 +#define PRI_SHIFT 13 +#define PRI_MASK 0x7 + +#define CORE_JOIN_ALL_VLAN_EN 0xd140 + #define CORE_EEE_EN_CTRL 0x24800 #define CORE_EEE_LPI_INDICATE 0x24810 diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig new file mode 100644 index 000000000..490bc06f9 --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/Kconfig @@ -0,0 +1,7 @@ +config NET_DSA_MV88E6XXX + tristate "Marvell 88E6xxx Ethernet switch fabric support" + depends on NET_DSA + select NET_DSA_TAG_EDSA + help + This driver adds support for most of the Marvell 88E6xxx models of + Ethernet switch chips, except 88E6060. diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile new file mode 100644 index 000000000..6e29a75ee --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_NET_DSA_MV88E6XXX) += chip.o diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c new file mode 100644 index 000000000..710679067 --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -0,0 +1,4093 @@ +/* + * Marvell 88e6xxx Ethernet switch single-chip support + * + * Copyright (c) 2008 Marvell Semiconductor + * + * Copyright (c) 2015 CMC Electronics, Inc. 
+ * Added support for VLAN Table Unit operations + * + * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/if_bridge.h> +#include <linux/jiffies.h> +#include <linux/list.h> +#include <linux/mdio.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/netdevice.h> +#include <linux/gpio/consumer.h> +#include <linux/phy.h> +#include <net/dsa.h> +#include <net/switchdev.h> +#include "mv88e6xxx.h" + +static void assert_reg_lock(struct mv88e6xxx_chip *chip) +{ + if (unlikely(!mutex_is_locked(&chip->reg_lock))) { + dev_err(chip->dev, "Switch registers lock not held!\n"); + dump_stack(); + } +} + +/* The switch ADDR[4:1] configuration pins define the chip SMI device address + * (ADDR[0] is always zero, thus only even SMI addresses can be strapped). + * + * When ADDR is all zero, the chip uses Single-chip Addressing Mode, assuming it + * is the only device connected to the SMI master. In this mode it responds to + * all 32 possible SMI addresses, and thus maps directly the internal devices. + * + * When ADDR is non-zero, the chip uses Multi-chip Addressing Mode, allowing + * multiple devices to share the SMI interface. In this mode it responds to only + * 2 registers, used to indirectly access the internal SMI devices. + */ + +static int mv88e6xxx_smi_read(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 *val) +{ + if (!chip->smi_ops) + return -EOPNOTSUPP; + + return chip->smi_ops->read(chip, addr, reg, val); +} + +static int mv88e6xxx_smi_write(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 val) +{ + if (!chip->smi_ops) + return -EOPNOTSUPP; + + return chip->smi_ops->write(chip, addr, reg, val); +} + +static int mv88e6xxx_smi_single_chip_read(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 *val) +{ + int ret; + + ret = mdiobus_read_nested(chip->bus, addr, reg); + if (ret < 0) + return ret; + + *val = ret & 0xffff; + + return 0; +} + +static int mv88e6xxx_smi_single_chip_write(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 val) +{ + int ret; + + ret = mdiobus_write_nested(chip->bus, addr, reg, val); + if (ret < 0) + return ret; + + return 0; +} + +static const struct mv88e6xxx_ops mv88e6xxx_smi_single_chip_ops = { + .read = mv88e6xxx_smi_single_chip_read, + .write = mv88e6xxx_smi_single_chip_write, +}; + +static int mv88e6xxx_smi_multi_chip_wait(struct mv88e6xxx_chip *chip) +{ + int ret; + int i; + + for (i = 0; i < 16; i++) { + ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_CMD); + if (ret < 0) + return ret; + + if ((ret & SMI_CMD_BUSY) == 0) + return 0; + } + + return -ETIMEDOUT; +} + +static int mv88e6xxx_smi_multi_chip_read(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 *val) +{ + int ret; + + /* Wait for the bus to become free. */ + ret = mv88e6xxx_smi_multi_chip_wait(chip); + if (ret < 0) + return ret; + + /* Transmit the read command. */ + ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD, + SMI_CMD_OP_22_READ | (addr << 5) | reg); + if (ret < 0) + return ret; + + /* Wait for the read command to complete. */ + ret = mv88e6xxx_smi_multi_chip_wait(chip); + if (ret < 0) + return ret; + + /* Read the data. 
*/ + ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_DATA); + if (ret < 0) + return ret; + + *val = ret & 0xffff; + + return 0; +} + +static int mv88e6xxx_smi_multi_chip_write(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 val) +{ + int ret; + + /* Wait for the bus to become free. */ + ret = mv88e6xxx_smi_multi_chip_wait(chip); + if (ret < 0) + return ret; + + /* Transmit the data to write. */ + ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_DATA, val); + if (ret < 0) + return ret; + + /* Transmit the write command. */ + ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD, + SMI_CMD_OP_22_WRITE | (addr << 5) | reg); + if (ret < 0) + return ret; + + /* Wait for the write command to complete. */ + ret = mv88e6xxx_smi_multi_chip_wait(chip); + if (ret < 0) + return ret; + + return 0; +} + +static const struct mv88e6xxx_ops mv88e6xxx_smi_multi_chip_ops = { + .read = mv88e6xxx_smi_multi_chip_read, + .write = mv88e6xxx_smi_multi_chip_write, +}; + +static int mv88e6xxx_read(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 *val) +{ + int err; + + assert_reg_lock(chip); + + err = mv88e6xxx_smi_read(chip, addr, reg, val); + if (err) + return err; + + dev_dbg(chip->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n", + addr, reg, *val); + + return 0; +} + +static int mv88e6xxx_write(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 val) +{ + int err; + + assert_reg_lock(chip); + + err = mv88e6xxx_smi_write(chip, addr, reg, val); + if (err) + return err; + + dev_dbg(chip->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n", + addr, reg, val); + + return 0; +} + +/* Indirect write to single pointer-data register with an Update bit */ +static int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, + u16 update) +{ + u16 val; + int i, err; + + /* Wait until the previous operation is completed */ + for (i = 0; i < 16; ++i) { + err = mv88e6xxx_read(chip, addr, reg, &val); + if (err) + return err; + + if (!(val & BIT(15))) + break; + } + + if (i == 16) + return -ETIMEDOUT; + + /* Set the Update bit to trigger a write operation */ + val = BIT(15) | update; + + return mv88e6xxx_write(chip, addr, reg, val); +} + +static int _mv88e6xxx_reg_read(struct mv88e6xxx_chip *chip, int addr, int reg) +{ + u16 val; + int err; + + err = mv88e6xxx_read(chip, addr, reg, &val); + if (err) + return err; + + return val; +} + +static int _mv88e6xxx_reg_write(struct mv88e6xxx_chip *chip, int addr, + int reg, u16 val) +{ + return mv88e6xxx_write(chip, addr, reg, val); +} + +static int mv88e6xxx_mdio_read_direct(struct mv88e6xxx_chip *chip, + int addr, int regnum) +{ + if (addr >= 0) + return _mv88e6xxx_reg_read(chip, addr, regnum); + return 0xffff; +} + +static int mv88e6xxx_mdio_write_direct(struct mv88e6xxx_chip *chip, + int addr, int regnum, u16 val) +{ + if (addr >= 0) + return _mv88e6xxx_reg_write(chip, addr, regnum, val); + return 0; +} + +static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip) +{ + int ret; + unsigned long timeout; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL, + ret & ~GLOBAL_CONTROL_PPU_ENABLE); + if (ret) + return ret; + + timeout = jiffies + 1 * HZ; + while (time_before(jiffies, timeout)) { + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS); + if (ret < 0) + return ret; + + usleep_range(1000, 2000); + if ((ret & GLOBAL_STATUS_PPU_MASK) != + GLOBAL_STATUS_PPU_POLLING) + return 0; + } + + return -ETIMEDOUT; +} + +static int 
mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip) +{ + int ret, err; + unsigned long timeout; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL); + if (ret < 0) + return ret; + + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL, + ret | GLOBAL_CONTROL_PPU_ENABLE); + if (err) + return err; + + timeout = jiffies + 1 * HZ; + while (time_before(jiffies, timeout)) { + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS); + if (ret < 0) + return ret; + + usleep_range(1000, 2000); + if ((ret & GLOBAL_STATUS_PPU_MASK) == + GLOBAL_STATUS_PPU_POLLING) + return 0; + } + + return -ETIMEDOUT; +} + +static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly) +{ + struct mv88e6xxx_chip *chip; + + chip = container_of(ugly, struct mv88e6xxx_chip, ppu_work); + + mutex_lock(&chip->reg_lock); + + if (mutex_trylock(&chip->ppu_mutex)) { + if (mv88e6xxx_ppu_enable(chip) == 0) + chip->ppu_disabled = 0; + mutex_unlock(&chip->ppu_mutex); + } + + mutex_unlock(&chip->reg_lock); +} + +static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps) +{ + struct mv88e6xxx_chip *chip = (void *)_ps; + + schedule_work(&chip->ppu_work); +} + +static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_chip *chip) +{ + int ret; + + mutex_lock(&chip->ppu_mutex); + + /* If the PHY polling unit is enabled, disable it so that + * we can access the PHY registers. If it was already + * disabled, cancel the timer that is going to re-enable + * it. + */ + if (!chip->ppu_disabled) { + ret = mv88e6xxx_ppu_disable(chip); + if (ret < 0) { + mutex_unlock(&chip->ppu_mutex); + return ret; + } + chip->ppu_disabled = 1; + } else { + del_timer(&chip->ppu_timer); + ret = 0; + } + + return ret; +} + +static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_chip *chip) +{ + /* Schedule a timer to re-enable the PHY polling unit. 
*/ + mod_timer(&chip->ppu_timer, jiffies + msecs_to_jiffies(10)); + mutex_unlock(&chip->ppu_mutex); +} + +static void mv88e6xxx_ppu_state_init(struct mv88e6xxx_chip *chip) +{ + mutex_init(&chip->ppu_mutex); + INIT_WORK(&chip->ppu_work, mv88e6xxx_ppu_reenable_work); + init_timer(&chip->ppu_timer); + chip->ppu_timer.data = (unsigned long)chip; + chip->ppu_timer.function = mv88e6xxx_ppu_reenable_timer; +} + +static int mv88e6xxx_mdio_read_ppu(struct mv88e6xxx_chip *chip, int addr, + int regnum) +{ + int ret; + + ret = mv88e6xxx_ppu_access_get(chip); + if (ret >= 0) { + ret = _mv88e6xxx_reg_read(chip, addr, regnum); + mv88e6xxx_ppu_access_put(chip); + } + + return ret; +} + +static int mv88e6xxx_mdio_write_ppu(struct mv88e6xxx_chip *chip, int addr, + int regnum, u16 val) +{ + int ret; + + ret = mv88e6xxx_ppu_access_get(chip); + if (ret >= 0) { + ret = _mv88e6xxx_reg_write(chip, addr, regnum, val); + mv88e6xxx_ppu_access_put(chip); + } + + return ret; +} + +static bool mv88e6xxx_6065_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6065; +} + +static bool mv88e6xxx_6095_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6095; +} + +static bool mv88e6xxx_6097_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6097; +} + +static bool mv88e6xxx_6165_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6165; +} + +static bool mv88e6xxx_6185_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6185; +} + +static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6320; +} + +static bool mv88e6xxx_6351_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6351; +} + +static bool mv88e6xxx_6352_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6352; +} + +static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip) +{ + return chip->info->num_databases; +} + +static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_chip *chip) +{ + /* Does the device have dedicated FID registers for ATU and VTU ops? */ + if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) || + mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) + return true; + + return false; +} + +/* We expect the switch to perform auto negotiation if there is a real + * phy. However, in the case of a fixed link phy, we force the port + * settings from the fixed link settings. 
+ */ +static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + u32 reg; + int ret; + + if (!phy_is_pseudo_fixed_link(phydev)) + return; + + mutex_lock(&chip->reg_lock); + + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL); + if (ret < 0) + goto out; + + reg = ret & ~(PORT_PCS_CTRL_LINK_UP | + PORT_PCS_CTRL_FORCE_LINK | + PORT_PCS_CTRL_DUPLEX_FULL | + PORT_PCS_CTRL_FORCE_DUPLEX | + PORT_PCS_CTRL_UNFORCED); + + reg |= PORT_PCS_CTRL_FORCE_LINK; + if (phydev->link) + reg |= PORT_PCS_CTRL_LINK_UP; + + if (mv88e6xxx_6065_family(chip) && phydev->speed > SPEED_100) + goto out; + + switch (phydev->speed) { + case SPEED_1000: + reg |= PORT_PCS_CTRL_1000; + break; + case SPEED_100: + reg |= PORT_PCS_CTRL_100; + break; + case SPEED_10: + reg |= PORT_PCS_CTRL_10; + break; + default: + pr_info("Unknown speed"); + goto out; + } + + reg |= PORT_PCS_CTRL_FORCE_DUPLEX; + if (phydev->duplex == DUPLEX_FULL) + reg |= PORT_PCS_CTRL_DUPLEX_FULL; + + if ((mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip)) && + (port >= chip->info->num_ports - 2)) { + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) + reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK; + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK; + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) + reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK | + PORT_PCS_CTRL_RGMII_DELAY_TXCLK); + } + _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_PCS_CTRL, reg); + +out: + mutex_unlock(&chip->reg_lock); +} + +static int _mv88e6xxx_stats_wait(struct mv88e6xxx_chip *chip) +{ + int ret; + int i; + + for (i = 0; i < 10; i++) { + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_OP); + if ((ret & GLOBAL_STATS_OP_BUSY) == 0) + return 0; + } + + return -ETIMEDOUT; +} + +static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port) +{ + int ret; + + if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip)) + port = (port + 1) << 5; + + /* Snapshot the hardware statistics counters for this port. */ + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP, + GLOBAL_STATS_OP_CAPTURE_PORT | + GLOBAL_STATS_OP_HIST_RX_TX | port); + if (ret < 0) + return ret; + + /* Wait for the snapshotting to complete. 
*/ + ret = _mv88e6xxx_stats_wait(chip); + if (ret < 0) + return ret; + + return 0; +} + +static void _mv88e6xxx_stats_read(struct mv88e6xxx_chip *chip, + int stat, u32 *val) +{ + u32 _val; + int ret; + + *val = 0; + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP, + GLOBAL_STATS_OP_READ_CAPTURED | + GLOBAL_STATS_OP_HIST_RX_TX | stat); + if (ret < 0) + return; + + ret = _mv88e6xxx_stats_wait(chip); + if (ret < 0) + return; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_32); + if (ret < 0) + return; + + _val = ret << 16; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_01); + if (ret < 0) + return; + + *val = _val | ret; +} + +static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = { + { "in_good_octets", 8, 0x00, BANK0, }, + { "in_bad_octets", 4, 0x02, BANK0, }, + { "in_unicast", 4, 0x04, BANK0, }, + { "in_broadcasts", 4, 0x06, BANK0, }, + { "in_multicasts", 4, 0x07, BANK0, }, + { "in_pause", 4, 0x16, BANK0, }, + { "in_undersize", 4, 0x18, BANK0, }, + { "in_fragments", 4, 0x19, BANK0, }, + { "in_oversize", 4, 0x1a, BANK0, }, + { "in_jabber", 4, 0x1b, BANK0, }, + { "in_rx_error", 4, 0x1c, BANK0, }, + { "in_fcs_error", 4, 0x1d, BANK0, }, + { "out_octets", 8, 0x0e, BANK0, }, + { "out_unicast", 4, 0x10, BANK0, }, + { "out_broadcasts", 4, 0x13, BANK0, }, + { "out_multicasts", 4, 0x12, BANK0, }, + { "out_pause", 4, 0x15, BANK0, }, + { "excessive", 4, 0x11, BANK0, }, + { "collisions", 4, 0x1e, BANK0, }, + { "deferred", 4, 0x05, BANK0, }, + { "single", 4, 0x14, BANK0, }, + { "multiple", 4, 0x17, BANK0, }, + { "out_fcs_error", 4, 0x03, BANK0, }, + { "late", 4, 0x1f, BANK0, }, + { "hist_64bytes", 4, 0x08, BANK0, }, + { "hist_65_127bytes", 4, 0x09, BANK0, }, + { "hist_128_255bytes", 4, 0x0a, BANK0, }, + { "hist_256_511bytes", 4, 0x0b, BANK0, }, + { "hist_512_1023bytes", 4, 0x0c, BANK0, }, + { "hist_1024_max_bytes", 4, 0x0d, BANK0, }, + { "sw_in_discards", 4, 0x10, PORT, }, + { "sw_in_filtered", 2, 0x12, PORT, }, + { "sw_out_filtered", 2, 0x13, PORT, }, + { "in_discards", 4, 0x00 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_filtered", 4, 0x01 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_accepted", 4, 0x02 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_bad_accepted", 4, 0x03 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_good_avb_class_a", 4, 0x04 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_good_avb_class_b", 4, 0x05 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_bad_avb_class_a", 4, 0x06 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_bad_avb_class_b", 4, 0x07 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "tcam_counter_0", 4, 0x08 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "tcam_counter_1", 4, 0x09 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "tcam_counter_2", 4, 0x0a | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "tcam_counter_3", 4, 0x0b | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_da_unknown", 4, 0x0e | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_management", 4, 0x0f | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_0", 4, 0x10 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_1", 4, 0x11 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_2", 4, 0x12 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_3", 4, 0x13 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_4", 4, 0x14 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_5", 4, 0x15 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_6", 4, 0x16 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_7", 4, 0x17 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_cut_through", 4, 0x18 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_octets_a", 
4, 0x1a | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_octets_b", 4, 0x1b | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, }, +}; + +static bool mv88e6xxx_has_stat(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_hw_stat *stat) +{ + switch (stat->type) { + case BANK0: + return true; + case BANK1: + return mv88e6xxx_6320_family(chip); + case PORT: + return mv88e6xxx_6095_family(chip) || + mv88e6xxx_6185_family(chip) || + mv88e6xxx_6097_family(chip) || + mv88e6xxx_6165_family(chip) || + mv88e6xxx_6351_family(chip) || + mv88e6xxx_6352_family(chip); + } + return false; +} + +static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_hw_stat *s, + int port) +{ + u32 low; + u32 high = 0; + int ret; + u64 value; + + switch (s->type) { + case PORT: + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), s->reg); + if (ret < 0) + return UINT64_MAX; + + low = ret; + if (s->sizeof_stat == 4) { + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), + s->reg + 1); + if (ret < 0) + return UINT64_MAX; + high = ret; + } + break; + case BANK0: + case BANK1: + _mv88e6xxx_stats_read(chip, s->reg, &low); + if (s->sizeof_stat == 8) + _mv88e6xxx_stats_read(chip, s->reg + 1, &high); + } + value = (((u64)high) << 16) | low; + return value; +} + +static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, + uint8_t *data) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + struct mv88e6xxx_hw_stat *stat; + int i, j; + + for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { + stat = &mv88e6xxx_hw_stats[i]; + if (mv88e6xxx_has_stat(chip, stat)) { + memcpy(data + j * ETH_GSTRING_LEN, stat->string, + ETH_GSTRING_LEN); + j++; + } + } +} + +static int mv88e6xxx_get_sset_count(struct dsa_switch *ds) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + struct mv88e6xxx_hw_stat *stat; + int i, j; + + for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { + stat = &mv88e6xxx_hw_stats[i]; + if (mv88e6xxx_has_stat(chip, stat)) + j++; + } + return j; +} + +static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + struct mv88e6xxx_hw_stat *stat; + int ret; + int i, j; + + mutex_lock(&chip->reg_lock); + + ret = _mv88e6xxx_stats_snapshot(chip, port); + if (ret < 0) { + mutex_unlock(&chip->reg_lock); + return; + } + for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { + stat = &mv88e6xxx_hw_stats[i]; + if (mv88e6xxx_has_stat(chip, stat)) { + data[j] = _mv88e6xxx_get_ethtool_stat(chip, stat, port); + j++; + } + } + + mutex_unlock(&chip->reg_lock); +} + +static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port) +{ + return 32 * sizeof(u16); +} + +static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, + struct ethtool_regs *regs, void *_p) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + u16 *p = _p; + int i; + + regs->version = 0; + + memset(p, 0xff, 32 * sizeof(u16)); + + mutex_lock(&chip->reg_lock); + + for (i = 0; i < 32; i++) { + int ret; + + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), i); + if (ret >= 0) + p[i] = ret; + } + + mutex_unlock(&chip->reg_lock); +} + +static int _mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int reg, int offset, + u16 mask) +{ + unsigned long timeout = jiffies + HZ / 10; + + while (time_before(jiffies, timeout)) { + int ret; + + ret = _mv88e6xxx_reg_read(chip, reg, offset); + if (ret < 0) + return ret; + if (!(ret & mask)) + return 0; + + usleep_range(1000, 2000); + } + return -ETIMEDOUT; +} + 
+static int mv88e6xxx_mdio_wait(struct mv88e6xxx_chip *chip) +{ + return _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_SMI_OP, + GLOBAL2_SMI_OP_BUSY); +} + +static int _mv88e6xxx_atu_wait(struct mv88e6xxx_chip *chip) +{ + return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_ATU_OP, + GLOBAL_ATU_OP_BUSY); +} + +static int mv88e6xxx_mdio_read_indirect(struct mv88e6xxx_chip *chip, + int addr, int regnum) +{ + int ret; + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP, + GLOBAL2_SMI_OP_22_READ | (addr << 5) | + regnum); + if (ret < 0) + return ret; + + ret = mv88e6xxx_mdio_wait(chip); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA); + + return ret; +} + +static int mv88e6xxx_mdio_write_indirect(struct mv88e6xxx_chip *chip, + int addr, int regnum, u16 val) +{ + int ret; + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA, val); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP, + GLOBAL2_SMI_OP_22_WRITE | (addr << 5) | + regnum); + + return mv88e6xxx_mdio_wait(chip); +} + +static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int reg; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE)) + return -EOPNOTSUPP; + + mutex_lock(&chip->reg_lock); + + reg = mv88e6xxx_mdio_read_indirect(chip, port, 16); + if (reg < 0) + goto out; + + e->eee_enabled = !!(reg & 0x0200); + e->tx_lpi_enabled = !!(reg & 0x0100); + + reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS); + if (reg < 0) + goto out; + + e->eee_active = !!(reg & PORT_STATUS_EEE); + reg = 0; + +out: + mutex_unlock(&chip->reg_lock); + return reg; +} + +static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, + struct phy_device *phydev, struct ethtool_eee *e) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int reg; + int ret; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE)) + return -EOPNOTSUPP; + + mutex_lock(&chip->reg_lock); + + ret = mv88e6xxx_mdio_read_indirect(chip, port, 16); + if (ret < 0) + goto out; + + reg = ret & ~0x0300; + if (e->eee_enabled) + reg |= 0x0200; + if (e->tx_lpi_enabled) + reg |= 0x0100; + + ret = mv88e6xxx_mdio_write_indirect(chip, port, 16, reg); +out: + mutex_unlock(&chip->reg_lock); + + return ret; +} + +static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_chip *chip, u16 fid, u16 cmd) +{ + int ret; + + if (mv88e6xxx_has_fid_reg(chip)) { + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_FID, + fid); + if (ret < 0) + return ret; + } else if (mv88e6xxx_num_databases(chip) == 256) { + /* ATU DBNum[7:4] are located in ATU Control 15:12 */ + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, + (ret & 0xfff) | + ((fid << 8) & 0xf000)); + if (ret < 0) + return ret; + + /* ATU DBNum[3:0] are located in ATU Operation 3:0 */ + cmd |= fid & 0xf; + } + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_OP, cmd); + if (ret < 0) + return ret; + + return _mv88e6xxx_atu_wait(chip); +} + +static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_atu_entry *entry) +{ + u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK; + + if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) { + unsigned int mask, shift; + + if (entry->trunk) { + data |= GLOBAL_ATU_DATA_TRUNK; + mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK; + shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT; + } else { + mask = 
GLOBAL_ATU_DATA_PORT_VECTOR_MASK; + shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT; + } + + data |= (entry->portv_trunkid << shift) & mask; + } + + return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_DATA, data); +} + +static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_atu_entry *entry, + bool static_too) +{ + int op; + int err; + + err = _mv88e6xxx_atu_wait(chip); + if (err) + return err; + + err = _mv88e6xxx_atu_data_write(chip, entry); + if (err) + return err; + + if (entry->fid) { + op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB : + GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB; + } else { + op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL : + GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC; + } + + return _mv88e6xxx_atu_cmd(chip, entry->fid, op); +} + +static int _mv88e6xxx_atu_flush(struct mv88e6xxx_chip *chip, + u16 fid, bool static_too) +{ + struct mv88e6xxx_atu_entry entry = { + .fid = fid, + .state = 0, /* EntryState bits must be 0 */ + }; + + return _mv88e6xxx_atu_flush_move(chip, &entry, static_too); +} + +static int _mv88e6xxx_atu_move(struct mv88e6xxx_chip *chip, u16 fid, + int from_port, int to_port, bool static_too) +{ + struct mv88e6xxx_atu_entry entry = { + .trunk = false, + .fid = fid, + }; + + /* EntryState bits must be 0xF */ + entry.state = GLOBAL_ATU_DATA_STATE_MASK; + + /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */ + entry.portv_trunkid = (to_port & 0x0f) << 4; + entry.portv_trunkid |= from_port & 0x0f; + + return _mv88e6xxx_atu_flush_move(chip, &entry, static_too); +} + +static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, + int port, bool static_too) +{ + /* Destination port 0xF means remove the entries */ + return _mv88e6xxx_atu_move(chip, fid, port, 0x0f, static_too); +} + +static const char * const mv88e6xxx_port_state_names[] = { + [PORT_CONTROL_STATE_DISABLED] = "Disabled", + [PORT_CONTROL_STATE_BLOCKING] = "Blocking/Listening", + [PORT_CONTROL_STATE_LEARNING] = "Learning", + [PORT_CONTROL_STATE_FORWARDING] = "Forwarding", +}; + +static int _mv88e6xxx_port_state(struct mv88e6xxx_chip *chip, int port, + u8 state) +{ + struct dsa_switch *ds = chip->ds; + int reg, ret = 0; + u8 oldstate; + + reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL); + if (reg < 0) + return reg; + + oldstate = reg & PORT_CONTROL_STATE_MASK; + + if (oldstate != state) { + /* Flush forwarding database if we're moving a port + * from Learning or Forwarding state to Disabled or + * Blocking or Listening state. 
+ */ + if ((oldstate == PORT_CONTROL_STATE_LEARNING || + oldstate == PORT_CONTROL_STATE_FORWARDING) && + (state == PORT_CONTROL_STATE_DISABLED || + state == PORT_CONTROL_STATE_BLOCKING)) { + ret = _mv88e6xxx_atu_remove(chip, 0, port, false); + if (ret) + return ret; + } + + reg = (reg & ~PORT_CONTROL_STATE_MASK) | state; + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL, + reg); + if (ret) + return ret; + + netdev_dbg(ds->ports[port].netdev, "PortState %s (was %s)\n", + mv88e6xxx_port_state_names[state], + mv88e6xxx_port_state_names[oldstate]); + } + + return ret; +} + +static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port) +{ + struct net_device *bridge = chip->ports[port].bridge_dev; + const u16 mask = (1 << chip->info->num_ports) - 1; + struct dsa_switch *ds = chip->ds; + u16 output_ports = 0; + int reg; + int i; + + /* allow CPU port or DSA link(s) to send frames to every port */ + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { + output_ports = mask; + } else { + for (i = 0; i < chip->info->num_ports; ++i) { + /* allow sending frames to every group member */ + if (bridge && chip->ports[i].bridge_dev == bridge) + output_ports |= BIT(i); + + /* allow sending frames to CPU port and DSA link(s) */ + if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)) + output_ports |= BIT(i); + } + } + + /* prevent frames from going back out of the port they came in on */ + output_ports &= ~BIT(port); + + reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN); + if (reg < 0) + return reg; + + reg &= ~mask; + reg |= output_ports & mask; + + return _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_BASE_VLAN, reg); +} + +static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, + u8 state) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int stp_state; + int err; + + switch (state) { + case BR_STATE_DISABLED: + stp_state = PORT_CONTROL_STATE_DISABLED; + break; + case BR_STATE_BLOCKING: + case BR_STATE_LISTENING: + stp_state = PORT_CONTROL_STATE_BLOCKING; + break; + case BR_STATE_LEARNING: + stp_state = PORT_CONTROL_STATE_LEARNING; + break; + case BR_STATE_FORWARDING: + default: + stp_state = PORT_CONTROL_STATE_FORWARDING; + break; + } + + mutex_lock(&chip->reg_lock); + err = _mv88e6xxx_port_state(chip, port, stp_state); + mutex_unlock(&chip->reg_lock); + + if (err) + netdev_err(ds->ports[port].netdev, + "failed to update state to %s\n", + mv88e6xxx_port_state_names[stp_state]); +} + +static int _mv88e6xxx_port_pvid(struct mv88e6xxx_chip *chip, int port, + u16 *new, u16 *old) +{ + struct dsa_switch *ds = chip->ds; + u16 pvid; + int ret; + + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_DEFAULT_VLAN); + if (ret < 0) + return ret; + + pvid = ret & PORT_DEFAULT_VLAN_MASK; + + if (new) { + ret &= ~PORT_DEFAULT_VLAN_MASK; + ret |= *new & PORT_DEFAULT_VLAN_MASK; + + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_DEFAULT_VLAN, ret); + if (ret < 0) + return ret; + + netdev_dbg(ds->ports[port].netdev, + "DefaultVID %d (was %d)\n", *new, pvid); + } + + if (old) + *old = pvid; + + return 0; +} + +static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_chip *chip, + int port, u16 *pvid) +{ + return _mv88e6xxx_port_pvid(chip, port, NULL, pvid); +} + +static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_chip *chip, + int port, u16 pvid) +{ + return _mv88e6xxx_port_pvid(chip, port, &pvid, NULL); +} + +static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_chip *chip) +{ + return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_VTU_OP, + 
GLOBAL_VTU_OP_BUSY); +} + +static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_chip *chip, u16 op) +{ + int ret; + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_OP, op); + if (ret < 0) + return ret; + + return _mv88e6xxx_vtu_wait(chip); +} + +static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_chip *chip) +{ + int ret; + + ret = _mv88e6xxx_vtu_wait(chip); + if (ret < 0) + return ret; + + return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_FLUSH_ALL); +} + +static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_stu_entry *entry, + unsigned int nibble_offset) +{ + u16 regs[3]; + int i; + int ret; + + for (i = 0; i < 3; ++i) { + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, + GLOBAL_VTU_DATA_0_3 + i); + if (ret < 0) + return ret; + + regs[i] = ret; + } + + for (i = 0; i < chip->info->num_ports; ++i) { + unsigned int shift = (i % 4) * 4 + nibble_offset; + u16 reg = regs[i / 4]; + + entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK; + } + + return 0; +} + +static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + return _mv88e6xxx_vtu_stu_data_read(chip, entry, 0); +} + +static int mv88e6xxx_stu_data_read(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + return _mv88e6xxx_vtu_stu_data_read(chip, entry, 2); +} + +static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_stu_entry *entry, + unsigned int nibble_offset) +{ + u16 regs[3] = { 0 }; + int i; + int ret; + + for (i = 0; i < chip->info->num_ports; ++i) { + unsigned int shift = (i % 4) * 4 + nibble_offset; + u8 data = entry->data[i]; + + regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift; + } + + for (i = 0; i < 3; ++i) { + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, + GLOBAL_VTU_DATA_0_3 + i, regs[i]); + if (ret < 0) + return ret; + } + + return 0; +} + +static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + return _mv88e6xxx_vtu_stu_data_write(chip, entry, 0); +} + +static int mv88e6xxx_stu_data_write(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + return _mv88e6xxx_vtu_stu_data_write(chip, entry, 2); +} + +static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_chip *chip, u16 vid) +{ + return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, + vid & GLOBAL_VTU_VID_MASK); +} + +static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + struct mv88e6xxx_vtu_stu_entry next = { 0 }; + int ret; + + ret = _mv88e6xxx_vtu_wait(chip); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_VTU_GET_NEXT); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID); + if (ret < 0) + return ret; + + next.vid = ret & GLOBAL_VTU_VID_MASK; + next.valid = !!(ret & GLOBAL_VTU_VID_VALID); + + if (next.valid) { + ret = mv88e6xxx_vtu_data_read(chip, &next); + if (ret < 0) + return ret; + + if (mv88e6xxx_has_fid_reg(chip)) { + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, + GLOBAL_VTU_FID); + if (ret < 0) + return ret; + + next.fid = ret & GLOBAL_VTU_FID_MASK; + } else if (mv88e6xxx_num_databases(chip) == 256) { + /* VTU DBNum[7:4] are located in VTU Operation 11:8, and + * VTU DBNum[3:0] are located in VTU Operation 3:0 + */ + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, + GLOBAL_VTU_OP); + if (ret < 0) + return ret; + + next.fid = (ret & 0xf00) >> 4; + next.fid |= ret & 0xf; + } + + if (mv88e6xxx_has(chip, 
MV88E6XXX_FLAG_STU)) { + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, + GLOBAL_VTU_SID); + if (ret < 0) + return ret; + + next.sid = ret & GLOBAL_VTU_SID_MASK; + } + } + + *entry = next; + return 0; +} + +static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_vlan *vlan, + int (*cb)(struct switchdev_obj *obj)) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry next; + u16 pvid; + int err; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) + return -EOPNOTSUPP; + + mutex_lock(&chip->reg_lock); + + err = _mv88e6xxx_port_pvid_get(chip, port, &pvid); + if (err) + goto unlock; + + err = _mv88e6xxx_vtu_vid_write(chip, GLOBAL_VTU_VID_MASK); + if (err) + goto unlock; + + do { + err = _mv88e6xxx_vtu_getnext(chip, &next); + if (err) + break; + + if (!next.valid) + break; + + if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) + continue; + + /* reinit and dump this VLAN obj */ + vlan->vid_begin = next.vid; + vlan->vid_end = next.vid; + vlan->flags = 0; + + if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED) + vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; + + if (next.vid == pvid) + vlan->flags |= BRIDGE_VLAN_INFO_PVID; + + err = cb(&vlan->obj); + if (err) + break; + } while (next.vid < GLOBAL_VTU_VID_MASK); + +unlock: + mutex_unlock(&chip->reg_lock); + + return err; +} + +static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE; + u16 reg = 0; + int ret; + + ret = _mv88e6xxx_vtu_wait(chip); + if (ret < 0) + return ret; + + if (!entry->valid) + goto loadpurge; + + /* Write port member tags */ + ret = mv88e6xxx_vtu_data_write(chip, entry); + if (ret < 0) + return ret; + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) { + reg = entry->sid & GLOBAL_VTU_SID_MASK; + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID, + reg); + if (ret < 0) + return ret; + } + + if (mv88e6xxx_has_fid_reg(chip)) { + reg = entry->fid & GLOBAL_VTU_FID_MASK; + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_FID, + reg); + if (ret < 0) + return ret; + } else if (mv88e6xxx_num_databases(chip) == 256) { + /* VTU DBNum[7:4] are located in VTU Operation 11:8, and + * VTU DBNum[3:0] are located in VTU Operation 3:0 + */ + op |= (entry->fid & 0xf0) << 8; + op |= entry->fid & 0xf; + } + + reg = GLOBAL_VTU_VID_VALID; +loadpurge: + reg |= entry->vid & GLOBAL_VTU_VID_MASK; + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg); + if (ret < 0) + return ret; + + return _mv88e6xxx_vtu_cmd(chip, op); +} + +static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_chip *chip, u8 sid, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + struct mv88e6xxx_vtu_stu_entry next = { 0 }; + int ret; + + ret = _mv88e6xxx_vtu_wait(chip); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID, + sid & GLOBAL_VTU_SID_MASK); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_GET_NEXT); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_SID); + if (ret < 0) + return ret; + + next.sid = ret & GLOBAL_VTU_SID_MASK; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID); + if (ret < 0) + return ret; + + next.valid = !!(ret & GLOBAL_VTU_VID_VALID); + + if (next.valid) { + ret = mv88e6xxx_stu_data_read(chip, &next); + if (ret < 0) + return ret; + } + + *entry = next; + return 0; +} + +static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip, + struct 
mv88e6xxx_vtu_stu_entry *entry) +{ + u16 reg = 0; + int ret; + + ret = _mv88e6xxx_vtu_wait(chip); + if (ret < 0) + return ret; + + if (!entry->valid) + goto loadpurge; + + /* Write port states */ + ret = mv88e6xxx_stu_data_write(chip, entry); + if (ret < 0) + return ret; + + reg = GLOBAL_VTU_VID_VALID; +loadpurge: + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg); + if (ret < 0) + return ret; + + reg = entry->sid & GLOBAL_VTU_SID_MASK; + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID, reg); + if (ret < 0) + return ret; + + return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE); +} + +static int _mv88e6xxx_port_fid(struct mv88e6xxx_chip *chip, int port, + u16 *new, u16 *old) +{ + struct dsa_switch *ds = chip->ds; + u16 upper_mask; + u16 fid; + int ret; + + if (mv88e6xxx_num_databases(chip) == 4096) + upper_mask = 0xff; + else if (mv88e6xxx_num_databases(chip) == 256) + upper_mask = 0xf; + else + return -EOPNOTSUPP; + + /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */ + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN); + if (ret < 0) + return ret; + + fid = (ret & PORT_BASE_VLAN_FID_3_0_MASK) >> 12; + + if (new) { + ret &= ~PORT_BASE_VLAN_FID_3_0_MASK; + ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK; + + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_BASE_VLAN, + ret); + if (ret < 0) + return ret; + } + + /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */ + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_1); + if (ret < 0) + return ret; + + fid |= (ret & upper_mask) << 4; + + if (new) { + ret &= ~upper_mask; + ret |= (*new >> 4) & upper_mask; + + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1, + ret); + if (ret < 0) + return ret; + + netdev_dbg(ds->ports[port].netdev, + "FID %d (was %d)\n", *new, fid); + } + + if (old) + *old = fid; + + return 0; +} + +static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_chip *chip, + int port, u16 *fid) +{ + return _mv88e6xxx_port_fid(chip, port, NULL, fid); +} + +static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_chip *chip, + int port, u16 fid) +{ + return _mv88e6xxx_port_fid(chip, port, &fid, NULL); +} + +static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid) +{ + DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID); + struct mv88e6xxx_vtu_stu_entry vlan; + int i, err; + + bitmap_zero(fid_bitmap, MV88E6XXX_N_FID); + + /* Set every FID bit used by the (un)bridged ports */ + for (i = 0; i < chip->info->num_ports; ++i) { + err = _mv88e6xxx_port_fid_get(chip, i, fid); + if (err) + return err; + + set_bit(*fid, fid_bitmap); + } + + /* Set every FID bit used by the VLAN entries */ + err = _mv88e6xxx_vtu_vid_write(chip, GLOBAL_VTU_VID_MASK); + if (err) + return err; + + do { + err = _mv88e6xxx_vtu_getnext(chip, &vlan); + if (err) + return err; + + if (!vlan.valid) + break; + + set_bit(vlan.fid, fid_bitmap); + } while (vlan.vid < GLOBAL_VTU_VID_MASK); + + /* The reset value 0x000 is used to indicate that multiple address + * databases are not needed. Return the next positive available. 
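+ * Starting the search at bit 1 keeps FID 0 out of the allocator, so it
+ * stays reserved as the shared default database that ports are given at
+ * setup time.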
+ */ + *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1); + if (unlikely(*fid >= mv88e6xxx_num_databases(chip))) + return -ENOSPC; + + /* Clear the database */ + return _mv88e6xxx_atu_flush(chip, *fid, true); +} + +static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + struct dsa_switch *ds = chip->ds; + struct mv88e6xxx_vtu_stu_entry vlan = { + .valid = true, + .vid = vid, + }; + int i, err; + + err = _mv88e6xxx_fid_new(chip, &vlan.fid); + if (err) + return err; + + /* exclude all ports except the CPU and DSA ports */ + for (i = 0; i < chip->info->num_ports; ++i) + vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i) + ? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED + : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; + + if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) || + mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) { + struct mv88e6xxx_vtu_stu_entry vstp; + + /* Adding a VTU entry requires a valid STU entry. As VSTP is not + * implemented, only one STU entry is needed to cover all VTU + * entries. Thus, validate the SID 0. + */ + vlan.sid = 0; + err = _mv88e6xxx_stu_getnext(chip, GLOBAL_VTU_SID_MASK, &vstp); + if (err) + return err; + + if (vstp.sid != vlan.sid || !vstp.valid) { + memset(&vstp, 0, sizeof(vstp)); + vstp.valid = true; + vstp.sid = vlan.sid; + + err = _mv88e6xxx_stu_loadpurge(chip, &vstp); + if (err) + return err; + } + } + + *entry = vlan; + return 0; +} + +static int _mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid, + struct mv88e6xxx_vtu_stu_entry *entry, bool creat) +{ + int err; + + if (!vid) + return -EINVAL; + + err = _mv88e6xxx_vtu_vid_write(chip, vid - 1); + if (err) + return err; + + err = _mv88e6xxx_vtu_getnext(chip, entry); + if (err) + return err; + + if (entry->vid != vid || !entry->valid) { + if (!creat) + return -EOPNOTSUPP; + /* -ENOENT would've been more appropriate, but switchdev expects + * -EOPNOTSUPP to inform bridge about an eventual software VLAN. 
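+ * (GetNext was primed with vid - 1 above, so getting any other VID back
+ * means the requested VLAN is not programmed in the VTU.)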
+ */ + + err = _mv88e6xxx_vtu_new(chip, vid, entry); + } + + return err; +} + +static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, + u16 vid_begin, u16 vid_end) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry vlan; + int i, err; + + if (!vid_begin) + return -EOPNOTSUPP; + + mutex_lock(&chip->reg_lock); + + err = _mv88e6xxx_vtu_vid_write(chip, vid_begin - 1); + if (err) + goto unlock; + + do { + err = _mv88e6xxx_vtu_getnext(chip, &vlan); + if (err) + goto unlock; + + if (!vlan.valid) + break; + + if (vlan.vid > vid_end) + break; + + for (i = 0; i < chip->info->num_ports; ++i) { + if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i)) + continue; + + if (vlan.data[i] == + GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) + continue; + + if (chip->ports[i].bridge_dev == + chip->ports[port].bridge_dev) + break; /* same bridge, check next VLAN */ + + netdev_warn(ds->ports[port].netdev, + "hardware VLAN %d already used by %s\n", + vlan.vid, + netdev_name(chip->ports[i].bridge_dev)); + err = -EOPNOTSUPP; + goto unlock; + } + } while (vlan.vid < vid_end); + +unlock: + mutex_unlock(&chip->reg_lock); + + return err; +} + +static const char * const mv88e6xxx_port_8021q_mode_names[] = { + [PORT_CONTROL_2_8021Q_DISABLED] = "Disabled", + [PORT_CONTROL_2_8021Q_FALLBACK] = "Fallback", + [PORT_CONTROL_2_8021Q_CHECK] = "Check", + [PORT_CONTROL_2_8021Q_SECURE] = "Secure", +}; + +static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE : + PORT_CONTROL_2_8021Q_DISABLED; + int ret; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) + return -EOPNOTSUPP; + + mutex_lock(&chip->reg_lock); + + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_2); + if (ret < 0) + goto unlock; + + old = ret & PORT_CONTROL_2_8021Q_MASK; + + if (new != old) { + ret &= ~PORT_CONTROL_2_8021Q_MASK; + ret |= new & PORT_CONTROL_2_8021Q_MASK; + + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_2, + ret); + if (ret < 0) + goto unlock; + + netdev_dbg(ds->ports[port].netdev, "802.1Q Mode %s (was %s)\n", + mv88e6xxx_port_8021q_mode_names[new], + mv88e6xxx_port_8021q_mode_names[old]); + } + + ret = 0; +unlock: + mutex_unlock(&chip->reg_lock); + + return ret; +} + +static int +mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int err; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) + return -EOPNOTSUPP; + + /* If the requested port doesn't belong to the same bridge as the VLAN + * members, do not support it (yet) and fallback to software VLAN. + */ + err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin, + vlan->vid_end); + if (err) + return err; + + /* We don't need any dynamic resource from the kernel (yet), + * so skip the prepare phase. + */ + return 0; +} + +static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port, + u16 vid, bool untagged) +{ + struct mv88e6xxx_vtu_stu_entry vlan; + int err; + + err = _mv88e6xxx_vtu_get(chip, vid, &vlan, true); + if (err) + return err; + + vlan.data[port] = untagged ? 
+ GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED : + GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED; + + return _mv88e6xxx_vtu_loadpurge(chip, &vlan); +} + +static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + u16 vid; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) + return; + + mutex_lock(&chip->reg_lock); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) + if (_mv88e6xxx_port_vlan_add(chip, port, vid, untagged)) + netdev_err(ds->ports[port].netdev, + "failed to add VLAN %d%c\n", + vid, untagged ? 'u' : 't'); + + if (pvid && _mv88e6xxx_port_pvid_set(chip, port, vlan->vid_end)) + netdev_err(ds->ports[port].netdev, "failed to set PVID %d\n", + vlan->vid_end); + + mutex_unlock(&chip->reg_lock); +} + +static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip, + int port, u16 vid) +{ + struct dsa_switch *ds = chip->ds; + struct mv88e6xxx_vtu_stu_entry vlan; + int i, err; + + err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false); + if (err) + return err; + + /* Tell switchdev if this VLAN is handled in software */ + if (vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) + return -EOPNOTSUPP; + + vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; + + /* keep the VLAN unless all ports are excluded */ + vlan.valid = false; + for (i = 0; i < chip->info->num_ports; ++i) { + if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)) + continue; + + if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) { + vlan.valid = true; + break; + } + } + + err = _mv88e6xxx_vtu_loadpurge(chip, &vlan); + if (err) + return err; + + return _mv88e6xxx_atu_remove(chip, vlan.fid, port, false); +} + +static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + u16 pvid, vid; + int err = 0; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) + return -EOPNOTSUPP; + + mutex_lock(&chip->reg_lock); + + err = _mv88e6xxx_port_pvid_get(chip, port, &pvid); + if (err) + goto unlock; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + err = _mv88e6xxx_port_vlan_del(chip, port, vid); + if (err) + goto unlock; + + if (vid == pvid) { + err = _mv88e6xxx_port_pvid_set(chip, port, 0); + if (err) + goto unlock; + } + } + +unlock: + mutex_unlock(&chip->reg_lock); + + return err; +} + +static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_chip *chip, + const unsigned char *addr) +{ + int i, ret; + + for (i = 0; i < 3; i++) { + ret = _mv88e6xxx_reg_write( + chip, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i, + (addr[i * 2] << 8) | addr[i * 2 + 1]); + if (ret < 0) + return ret; + } + + return 0; +} + +static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_chip *chip, + unsigned char *addr) +{ + int i, ret; + + for (i = 0; i < 3; i++) { + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, + GLOBAL_ATU_MAC_01 + i); + if (ret < 0) + return ret; + addr[i * 2] = ret >> 8; + addr[i * 2 + 1] = ret & 0xff; + } + + return 0; +} + +static int _mv88e6xxx_atu_load(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_atu_entry *entry) +{ + int ret; + + ret = _mv88e6xxx_atu_wait(chip); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_atu_mac_write(chip, entry->mac); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_atu_data_write(chip, entry); + if (ret < 0) + return ret; + + return _mv88e6xxx_atu_cmd(chip, 
entry->fid, GLOBAL_ATU_OP_LOAD_DB); +} + +static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_chip *chip, int port, + const unsigned char *addr, u16 vid, + u8 state) +{ + struct mv88e6xxx_atu_entry entry = { 0 }; + struct mv88e6xxx_vtu_stu_entry vlan; + int err; + + /* Null VLAN ID corresponds to the port private database */ + if (vid == 0) + err = _mv88e6xxx_port_fid_get(chip, port, &vlan.fid); + else + err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false); + if (err) + return err; + + entry.fid = vlan.fid; + entry.state = state; + ether_addr_copy(entry.mac, addr); + if (state != GLOBAL_ATU_DATA_STATE_UNUSED) { + entry.trunk = false; + entry.portv_trunkid = BIT(port); + } + + return _mv88e6xxx_atu_load(chip, &entry); +} + +static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + /* We don't need any dynamic resource from the kernel (yet), + * so skip the prepare phase. + */ + return 0; +} + +static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + int state = is_multicast_ether_addr(fdb->addr) ? + GLOBAL_ATU_DATA_STATE_MC_STATIC : + GLOBAL_ATU_DATA_STATE_UC_STATIC; + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + + mutex_lock(&chip->reg_lock); + if (_mv88e6xxx_port_fdb_load(chip, port, fdb->addr, fdb->vid, state)) + netdev_err(ds->ports[port].netdev, + "failed to load MAC address\n"); + mutex_unlock(&chip->reg_lock); +} + +static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int ret; + + mutex_lock(&chip->reg_lock); + ret = _mv88e6xxx_port_fdb_load(chip, port, fdb->addr, fdb->vid, + GLOBAL_ATU_DATA_STATE_UNUSED); + mutex_unlock(&chip->reg_lock); + + return ret; +} + +static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid, + struct mv88e6xxx_atu_entry *entry) +{ + struct mv88e6xxx_atu_entry next = { 0 }; + int ret; + + next.fid = fid; + + ret = _mv88e6xxx_atu_wait(chip); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_atu_cmd(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_atu_mac_read(chip, next.mac); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_DATA); + if (ret < 0) + return ret; + + next.state = ret & GLOBAL_ATU_DATA_STATE_MASK; + if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) { + unsigned int mask, shift; + + if (ret & GLOBAL_ATU_DATA_TRUNK) { + next.trunk = true; + mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK; + shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT; + } else { + next.trunk = false; + mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK; + shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT; + } + + next.portv_trunkid = (ret & mask) >> shift; + } + + *entry = next; + return 0; +} + +static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_chip *chip, + u16 fid, u16 vid, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)) +{ + struct mv88e6xxx_atu_entry addr = { + .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + }; + int err; + + err = _mv88e6xxx_atu_mac_write(chip, addr.mac); + if (err) + return err; + + do { + err = _mv88e6xxx_atu_getnext(chip, fid, &addr); + if (err) + break; + + if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED) + break; + + if (!addr.trunk && addr.portv_trunkid & BIT(port)) { + bool is_static = addr.state == + (is_multicast_ether_addr(addr.mac) ? 
+ GLOBAL_ATU_DATA_STATE_MC_STATIC : + GLOBAL_ATU_DATA_STATE_UC_STATIC); + + fdb->vid = vid; + ether_addr_copy(fdb->addr, addr.mac); + fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE; + + err = cb(&fdb->obj); + if (err) + break; + } + } while (!is_broadcast_ether_addr(addr.mac)); + + return err; +} + +static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry vlan = { + .vid = GLOBAL_VTU_VID_MASK, /* all ones */ + }; + u16 fid; + int err; + + mutex_lock(&chip->reg_lock); + + /* Dump port's default Filtering Information Database (VLAN ID 0) */ + err = _mv88e6xxx_port_fid_get(chip, port, &fid); + if (err) + goto unlock; + + err = _mv88e6xxx_port_fdb_dump_one(chip, fid, 0, port, fdb, cb); + if (err) + goto unlock; + + /* Dump VLANs' Filtering Information Databases */ + err = _mv88e6xxx_vtu_vid_write(chip, vlan.vid); + if (err) + goto unlock; + + do { + err = _mv88e6xxx_vtu_getnext(chip, &vlan); + if (err) + break; + + if (!vlan.valid) + break; + + err = _mv88e6xxx_port_fdb_dump_one(chip, vlan.fid, vlan.vid, + port, fdb, cb); + if (err) + break; + } while (vlan.vid < GLOBAL_VTU_VID_MASK); + +unlock: + mutex_unlock(&chip->reg_lock); + + return err; +} + +static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, + struct net_device *bridge) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int i, err = 0; + + mutex_lock(&chip->reg_lock); + + /* Assign the bridge and remap each port's VLANTable */ + chip->ports[port].bridge_dev = bridge; + + for (i = 0; i < chip->info->num_ports; ++i) { + if (chip->ports[i].bridge_dev == bridge) { + err = _mv88e6xxx_port_based_vlan_map(chip, i); + if (err) + break; + } + } + + mutex_unlock(&chip->reg_lock); + + return err; +} + +static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + struct net_device *bridge = chip->ports[port].bridge_dev; + int i; + + mutex_lock(&chip->reg_lock); + + /* Unassign the bridge and remap each port's VLANTable */ + chip->ports[port].bridge_dev = NULL; + + for (i = 0; i < chip->info->num_ports; ++i) + if (i == port || chip->ports[i].bridge_dev == bridge) + if (_mv88e6xxx_port_based_vlan_map(chip, i)) + netdev_warn(ds->ports[i].netdev, + "failed to remap\n"); + + mutex_unlock(&chip->reg_lock); +} + +static int _mv88e6xxx_mdio_page_write(struct mv88e6xxx_chip *chip, + int port, int page, int reg, int val) +{ + int ret; + + ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page); + if (ret < 0) + goto restore_page_0; + + ret = mv88e6xxx_mdio_write_indirect(chip, port, reg, val); +restore_page_0: + mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0); + + return ret; +} + +static int _mv88e6xxx_mdio_page_read(struct mv88e6xxx_chip *chip, + int port, int page, int reg) +{ + int ret; + + ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page); + if (ret < 0) + goto restore_page_0; + + ret = mv88e6xxx_mdio_read_indirect(chip, port, reg); +restore_page_0: + mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0); + + return ret; +} + +static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip) +{ + bool ppu_active = mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE); + u16 is_reset = (ppu_active ? 0x8800 : 0xc800); + struct gpio_desc *gpiod = chip->reset; + unsigned long timeout; + int ret; + int i; + + /* Set all ports to the disabled state. 
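+ * Clearing the two low PortState bits of PORT_CONTROL (the & 0xfffc
+ * below) selects the Disabled state.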
*/ + for (i = 0; i < chip->info->num_ports; i++) { + ret = _mv88e6xxx_reg_read(chip, REG_PORT(i), PORT_CONTROL); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_write(chip, REG_PORT(i), PORT_CONTROL, + ret & 0xfffc); + if (ret) + return ret; + } + + /* Wait for transmit queues to drain. */ + usleep_range(2000, 4000); + + /* If there is a gpio connected to the reset pin, toggle it */ + if (gpiod) { + gpiod_set_value_cansleep(gpiod, 1); + usleep_range(10000, 20000); + gpiod_set_value_cansleep(gpiod, 0); + usleep_range(10000, 20000); + } + + /* Reset the switch. Keep the PPU active if requested. The PPU + * needs to be active to support indirect phy register access + * through global registers 0x18 and 0x19. + */ + if (ppu_active) + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc000); + else + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc400); + if (ret) + return ret; + + /* Wait up to one second for reset to complete. */ + timeout = jiffies + 1 * HZ; + while (time_before(jiffies, timeout)) { + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, 0x00); + if (ret < 0) + return ret; + + if ((ret & is_reset) == is_reset) + break; + usleep_range(1000, 2000); + } + if (time_after(jiffies, timeout)) + ret = -ETIMEDOUT; + else + ret = 0; + + return ret; +} + +static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_chip *chip) +{ + int ret; + + ret = _mv88e6xxx_mdio_page_read(chip, REG_FIBER_SERDES, + PAGE_FIBER_SERDES, MII_BMCR); + if (ret < 0) + return ret; + + if (ret & BMCR_PDOWN) { + ret &= ~BMCR_PDOWN; + ret = _mv88e6xxx_mdio_page_write(chip, REG_FIBER_SERDES, + PAGE_FIBER_SERDES, MII_BMCR, + ret); + } + + return ret; +} + +static int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, + int reg, u16 *val) +{ + int addr = chip->info->port_base_addr + port; + + if (port >= chip->info->num_ports) + return -EINVAL; + + return mv88e6xxx_read(chip, addr, reg, val); +} + +static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) +{ + struct dsa_switch *ds = chip->ds; + int ret; + u16 reg; + + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || + mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) || + mv88e6xxx_6065_family(chip) || mv88e6xxx_6320_family(chip)) { + /* MAC Forcing register: don't force link, speed, + * duplex or flow control state to any particular + * values on physical ports, but force the CPU port + * and all DSA ports to their maximum bandwidth and + * full duplex. + */ + reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL); + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { + reg &= ~PORT_PCS_CTRL_UNFORCED; + reg |= PORT_PCS_CTRL_FORCE_LINK | + PORT_PCS_CTRL_LINK_UP | + PORT_PCS_CTRL_DUPLEX_FULL | + PORT_PCS_CTRL_FORCE_DUPLEX; + if (mv88e6xxx_6065_family(chip)) + reg |= PORT_PCS_CTRL_100; + else + reg |= PORT_PCS_CTRL_1000; + } else { + reg |= PORT_PCS_CTRL_UNFORCED; + } + + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_PCS_CTRL, reg); + if (ret) + return ret; + } + + /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock, + * disable Header mode, enable IGMP/MLD snooping, disable VLAN + * tunneling, determine priority by looking at 802.1p and IP + * priority fields (IP prio has precedence), and set STP state + * to Forwarding. + * + * If this is the CPU link, use DSA or EDSA tagging depending + * on which tagging mode was configured. + * + * If this is a link to another switch, use DSA tagging mode. 
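+ * (The DSA tag is the bare 4-byte Marvell header; EDSA additionally
+ * carries the Ethertype DSA value that PORT_ETH_TYPE is programmed
+ * with further down.)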
+ * + * If this is the upstream port for this switch, enable + * forwarding of unknown unicasts and multicasts. + */ + reg = 0; + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || + mv88e6xxx_6095_family(chip) || mv88e6xxx_6065_family(chip) || + mv88e6xxx_6185_family(chip) || mv88e6xxx_6320_family(chip)) + reg = PORT_CONTROL_IGMP_MLD_SNOOP | + PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP | + PORT_CONTROL_STATE_FORWARDING; + if (dsa_is_cpu_port(ds, port)) { + if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) + reg |= PORT_CONTROL_DSA_TAG; + if (mv88e6xxx_6352_family(chip) || + mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || + mv88e6xxx_6097_family(chip) || + mv88e6xxx_6320_family(chip)) { + reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA | + PORT_CONTROL_FORWARD_UNKNOWN | + PORT_CONTROL_FORWARD_UNKNOWN_MC; + } + + if (mv88e6xxx_6352_family(chip) || + mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || + mv88e6xxx_6097_family(chip) || + mv88e6xxx_6095_family(chip) || + mv88e6xxx_6065_family(chip) || + mv88e6xxx_6185_family(chip) || + mv88e6xxx_6320_family(chip)) { + reg |= PORT_CONTROL_EGRESS_ADD_TAG; + } + } + if (dsa_is_dsa_port(ds, port)) { + if (mv88e6xxx_6095_family(chip) || + mv88e6xxx_6185_family(chip)) + reg |= PORT_CONTROL_DSA_TAG; + if (mv88e6xxx_6352_family(chip) || + mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || + mv88e6xxx_6097_family(chip) || + mv88e6xxx_6320_family(chip)) { + reg |= PORT_CONTROL_FRAME_MODE_DSA; + } + + if (port == dsa_upstream_port(ds)) + reg |= PORT_CONTROL_FORWARD_UNKNOWN | + PORT_CONTROL_FORWARD_UNKNOWN_MC; + } + if (reg) { + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_CONTROL, reg); + if (ret) + return ret; + } + + /* If this port is connected to a SerDes, make sure the SerDes is not + * powered down. + */ + if (mv88e6xxx_6352_family(chip)) { + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS); + if (ret < 0) + return ret; + ret &= PORT_STATUS_CMODE_MASK; + if ((ret == PORT_STATUS_CMODE_100BASE_X) || + (ret == PORT_STATUS_CMODE_1000BASE_X) || + (ret == PORT_STATUS_CMODE_SGMII)) { + ret = mv88e6xxx_power_on_serdes(chip); + if (ret < 0) + return ret; + } + } + + /* Port Control 2: don't force a good FCS, set the maximum frame size to + * 10240 bytes, disable 802.1q tags checking, don't discard tagged or + * untagged frames on this port, do a destination address lookup on all + * received packets as usual, disable ARP mirroring and don't send a + * copy of all transmitted/received frames on this port to the CPU. 
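+ * (The 802.1Q mode is left Disabled here; .port_vlan_filtering switches
+ * it to Secure when the bridge enables VLAN filtering on the port.)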
+ */ + reg = 0; + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || + mv88e6xxx_6095_family(chip) || mv88e6xxx_6320_family(chip) || + mv88e6xxx_6185_family(chip)) + reg = PORT_CONTROL_2_MAP_DA; + + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6320_family(chip)) + reg |= PORT_CONTROL_2_JUMBO_10240; + + if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) { + /* Set the upstream port this port should use */ + reg |= dsa_upstream_port(ds); + /* enable forwarding of unknown multicast addresses to + * the upstream port + */ + if (port == dsa_upstream_port(ds)) + reg |= PORT_CONTROL_2_FORWARD_UNKNOWN; + } + + reg |= PORT_CONTROL_2_8021Q_DISABLED; + + if (reg) { + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_CONTROL_2, reg); + if (ret) + return ret; + } + + /* Port Association Vector: when learning source addresses + * of packets, add the address to the address database using + * a port bitmap that has only the bit for this port set and + * the other bits clear. + */ + reg = 1 << port; + /* Disable learning for CPU port */ + if (dsa_is_cpu_port(ds, port)) + reg = 0; + + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_ASSOC_VECTOR, + reg); + if (ret) + return ret; + + /* Egress rate control 2: disable egress rate control. */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_RATE_CONTROL_2, + 0x0000); + if (ret) + return ret; + + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || + mv88e6xxx_6320_family(chip)) { + /* Do not limit the period of time that this port can + * be paused for by the remote end or the period of + * time that this port can pause the remote end. + */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_PAUSE_CTRL, 0x0000); + if (ret) + return ret; + + /* Port ATU control: disable limiting the number of + * address database entries that this port is allowed + * to use. + */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_ATU_CONTROL, 0x0000); + /* Priority Override: disable DA, SA and VTU priority + * override. + */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_PRI_OVERRIDE, 0x0000); + if (ret) + return ret; + + /* Port Ethertype: use the Ethertype DSA Ethertype + * value. + */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_ETH_TYPE, ETH_P_EDSA); + if (ret) + return ret; + /* Tag Remap: use an identity 802.1p prio -> switch + * prio mapping. + */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_TAG_REGMAP_0123, 0x3210); + if (ret) + return ret; + + /* Tag Remap 2: use an identity 802.1p prio -> switch + * prio mapping. + */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_TAG_REGMAP_4567, 0x7654); + if (ret) + return ret; + } + + /* Rate Control: disable ingress rate limiting. */ + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || + mv88e6xxx_6320_family(chip)) { + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_RATE_CONTROL, 0x0001); + if (ret) + return ret; + } else if (mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip)) { + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_RATE_CONTROL, 0x0000); + if (ret) + return ret; + } + + /* Port Control 1: disable trunking, disable sending + * learning messages to this port. 
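+ * (FID bits 11:4 also live in this register, per _mv88e6xxx_port_fid();
+ * the port's FID is reprogrammed just below via _mv88e6xxx_port_fid_set.)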
+ */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1, + 0x0000); + if (ret) + return ret; + + /* Port based VLAN map: give each port the same default address + * database, and allow bidirectional communication between the + * CPU and DSA port(s), and the other ports. + */ + ret = _mv88e6xxx_port_fid_set(chip, port, 0); + if (ret) + return ret; + + ret = _mv88e6xxx_port_based_vlan_map(chip, port); + if (ret) + return ret; + + /* Default VLAN ID and priority: don't set a default VLAN + * ID, and set the default packet priority to zero. + */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_DEFAULT_VLAN, + 0x0000); + if (ret) + return ret; + + return 0; +} + +static int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr) +{ + int err; + + err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_01, + (addr[0] << 8) | addr[1]); + if (err) + return err; + + err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_23, + (addr[2] << 8) | addr[3]); + if (err) + return err; + + return mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_45, + (addr[4] << 8) | addr[5]); +} + +static int mv88e6xxx_g1_set_age_time(struct mv88e6xxx_chip *chip, + unsigned int msecs) +{ + const unsigned int coeff = chip->info->age_time_coeff; + const unsigned int min = 0x01 * coeff; + const unsigned int max = 0xff * coeff; + u8 age_time; + u16 val; + int err; + + if (msecs < min || msecs > max) + return -ERANGE; + + /* Round to nearest multiple of coeff */ + age_time = (msecs + coeff / 2) / coeff; + + err = mv88e6xxx_read(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, &val); + if (err) + return err; + + /* AgeTime is 11:4 bits */ + val &= ~0xff0; + val |= age_time << 4; + + return mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, val); +} + +static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds, + unsigned int ageing_time) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int err; + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_g1_set_age_time(chip, ageing_time); + mutex_unlock(&chip->reg_lock); + + return err; +} + +static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip) +{ + struct dsa_switch *ds = chip->ds; + u32 upstream_port = dsa_upstream_port(ds); + u16 reg; + int err; + + /* Enable the PHY Polling Unit if present, don't discard any packets, + * and mask all interrupt sources. + */ + reg = 0; + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU) || + mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE)) + reg |= GLOBAL_CONTROL_PPU_ENABLE; + + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL, reg); + if (err) + return err; + + /* Configure the upstream port, and configure it as the port to which + * ingress and egress and ARP monitor frames are to be sent. + */ + reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT | + upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | + upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT; + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, + reg); + if (err) + return err; + + /* Disable remote management, and set the switch's DSA device number. */ + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL_2, + GLOBAL_CONTROL_2_MULTIPLE_CASCADE | + (ds->index & 0x1f)); + if (err) + return err; + + /* Clear all the VTU and STU entries */ + err = _mv88e6xxx_vtu_stu_flush(chip); + if (err < 0) + return err; + + /* Set the default address aging time to 5 minutes, and + * enable address learn messages to be sent to all message + * ports. 
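+ * (With the 15000 ms age_time_coeff used by the chips in this table,
+ * 300000 ms is encoded as an AgeTime register value of 20.)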
+ */ + err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, + GLOBAL_ATU_CONTROL_LEARN2ALL); + if (err) + return err; + + err = mv88e6xxx_g1_set_age_time(chip, 300000); + if (err) + return err; + + /* Clear all ATU entries */ + err = _mv88e6xxx_atu_flush(chip, 0, true); + if (err) + return err; + + /* Configure the IP ToS mapping registers. */ + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000); + if (err) + return err; + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000); + if (err) + return err; + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555); + if (err) + return err; + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555); + if (err) + return err; + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa); + if (err) + return err; + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa); + if (err) + return err; + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff); + if (err) + return err; + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff); + if (err) + return err; + + /* Configure the IEEE 802.1p priority mapping register. */ + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41); + if (err) + return err; + + /* Clear the statistics counters for all ports */ + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP, + GLOBAL_STATS_OP_FLUSH_ALL); + if (err) + return err; + + /* Wait for the flush to complete. */ + err = _mv88e6xxx_stats_wait(chip); + if (err) + return err; + + return 0; +} + +static int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, + int target, int port) +{ + u16 val = (target << 8) | (port & 0xf); + + return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING, val); +} + +static int mv88e6xxx_g2_set_device_mapping(struct mv88e6xxx_chip *chip) +{ + int target, port; + int err; + + /* Initialize the routing port to the 32 possible target devices */ + for (target = 0; target < 32; ++target) { + port = 0xf; + + if (target < DSA_MAX_SWITCHES) { + port = chip->ds->rtable[target]; + if (port == DSA_RTABLE_NONE) + port = 0xf; + } + + err = mv88e6xxx_g2_device_mapping_write(chip, target, port); + if (err) + break; + } + + return err; +} + +static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num, + bool hask, u16 mask) +{ + const u16 port_mask = BIT(chip->info->num_ports) - 1; + u16 val = (num << 12) | (mask & port_mask); + + if (hask) + val |= GLOBAL2_TRUNK_MASK_HASK; + + return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_TRUNK_MASK, val); +} + +static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id, + u16 map) +{ + const u16 port_mask = BIT(chip->info->num_ports) - 1; + u16 val = (id << 11) | (map & port_mask); + + return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING, val); +} + +static int mv88e6xxx_g2_clear_trunk(struct mv88e6xxx_chip *chip) +{ + const u16 port_mask = BIT(chip->info->num_ports) - 1; + int i, err; + + /* Clear all eight possible Trunk Mask vectors */ + for (i = 0; i < 8; ++i) { + err = mv88e6xxx_g2_trunk_mask_write(chip, i, false, port_mask); + if (err) + return err; + } + + /* Clear all sixteen possible Trunk ID routing vectors */ + for (i = 0; i < 16; ++i) { + err = mv88e6xxx_g2_trunk_mapping_write(chip, i, 0); + if (err) + return err; + } + + return 0; +} + +static int mv88e6xxx_g2_clear_irl(struct mv88e6xxx_chip *chip) +{ + int port, err; + + /* Init all Ingress Rate Limit resources of all ports */ + 
for (port = 0; port < chip->info->num_ports; ++port) { + /* XXX newer chips (like 88E6390) have different 2-bit ops */ + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_IRL_CMD, + GLOBAL2_IRL_CMD_OP_INIT_ALL | + (port << 8)); + if (err) + break; + + /* Wait for the operation to complete */ + err = _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_IRL_CMD, + GLOBAL2_IRL_CMD_BUSY); + if (err) + break; + } + + return err; +} + +/* Indirect write to the Switch MAC/WoL/WoF register */ +static int mv88e6xxx_g2_switch_mac_write(struct mv88e6xxx_chip *chip, + unsigned int pointer, u8 data) +{ + u16 val = (pointer << 8) | data; + + return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MAC, val); +} + +static int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr) +{ + int i, err; + + for (i = 0; i < 6; i++) { + err = mv88e6xxx_g2_switch_mac_write(chip, i, addr[i]); + if (err) + break; + } + + return err; +} + +static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer, + u8 data) +{ + u16 val = (pointer << 8) | (data & 0x7); + + return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE, val); +} + +static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip) +{ + int i, err; + + /* Clear all sixteen possible Priority Override entries */ + for (i = 0; i < 16; i++) { + err = mv88e6xxx_g2_pot_write(chip, i, 0); + if (err) + break; + } + + return err; +} + +static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip) +{ + return _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, + GLOBAL2_EEPROM_CMD_BUSY | + GLOBAL2_EEPROM_CMD_RUNNING); +} + +static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd) +{ + int err; + + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, cmd); + if (err) + return err; + + return mv88e6xxx_g2_eeprom_wait(chip); +} + +static int mv88e6xxx_g2_eeprom_read16(struct mv88e6xxx_chip *chip, + u8 addr, u16 *data) +{ + u16 cmd = GLOBAL2_EEPROM_CMD_OP_READ | addr; + int err; + + err = mv88e6xxx_g2_eeprom_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g2_eeprom_cmd(chip, cmd); + if (err) + return err; + + return mv88e6xxx_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data); +} + +static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip, + u8 addr, u16 data) +{ + u16 cmd = GLOBAL2_EEPROM_CMD_OP_WRITE | addr; + int err; + + err = mv88e6xxx_g2_eeprom_wait(chip); + if (err) + return err; + + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data); + if (err) + return err; + + return mv88e6xxx_g2_eeprom_cmd(chip, cmd); +} + +static int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) +{ + u16 reg; + int err; + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X)) { + /* Consider the frames with reserved multicast destination + * addresses matching 01:80:c2:00:00:2x as MGMT. + */ + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, + 0xffff); + if (err) + return err; + } + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X)) { + /* Consider the frames with reserved multicast destination + * addresses matching 01:80:c2:00:00:0x as MGMT. + */ + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, + 0xffff); + if (err) + return err; + } + + /* Ignore removed tag data on doubly tagged packets, disable + * flow control messages, force flow control priority to the + * highest, and send all special multicast frames to the CPU + * port at the highest priority. 
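+ * (Going by the field names used below, 0x7 << 4 is the forced flow
+ * control priority and the low 0x7 added for MGMT-capable chips is the
+ * priority of frames reserved to the CPU.)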
+ */ + reg = GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI | (0x7 << 4); + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X) || + mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X)) + reg |= GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x7; + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT, reg); + if (err) + return err; + + /* Program the DSA routing table. */ + err = mv88e6xxx_g2_set_device_mapping(chip); + if (err) + return err; + + /* Clear all trunk masks and mapping. */ + err = mv88e6xxx_g2_clear_trunk(chip); + if (err) + return err; + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_IRL)) { + /* Disable ingress rate limiting by resetting all per port + * ingress rate limit resources to their initial state. + */ + err = mv88e6xxx_g2_clear_irl(chip); + if (err) + return err; + } + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_PVT)) { + /* Initialize Cross-chip Port VLAN Table to reset defaults */ + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_PVT_ADDR, + GLOBAL2_PVT_ADDR_OP_INIT_ONES); + if (err) + return err; + } + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_POT)) { + /* Clear the priority override table. */ + err = mv88e6xxx_g2_clear_pot(chip); + if (err) + return err; + } + + return 0; +} + +static int mv88e6xxx_setup(struct dsa_switch *ds) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int err; + int i; + + chip->ds = ds; + ds->slave_mii_bus = chip->mdio_bus; + + mutex_lock(&chip->reg_lock); + + err = mv88e6xxx_switch_reset(chip); + if (err) + goto unlock; + + /* Setup Switch Port Registers */ + for (i = 0; i < chip->info->num_ports; i++) { + err = mv88e6xxx_setup_port(chip, i); + if (err) + goto unlock; + } + + /* Setup Switch Global 1 Registers */ + err = mv88e6xxx_g1_setup(chip); + if (err) + goto unlock; + + /* Setup Switch Global 2 Registers */ + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_GLOBAL2)) { + err = mv88e6xxx_g2_setup(chip); + if (err) + goto unlock; + } + +unlock: + mutex_unlock(&chip->reg_lock); + + return err; +} + +static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int err; + + mutex_lock(&chip->reg_lock); + + /* Has an indirect Switch MAC/WoL/WoF register in Global 2? 
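+ * If so it is used; that path feeds the address one byte at a time
+ * through a pointer/data register, otherwise the Global 1 MAC
+ * registers are written directly.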
*/ + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_SWITCH_MAC)) + err = mv88e6xxx_g2_set_switch_mac(chip, addr); + else + err = mv88e6xxx_g1_set_switch_mac(chip, addr); + + mutex_unlock(&chip->reg_lock); + + return err; +} + +#ifdef CONFIG_NET_DSA_HWMON +static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page, + int reg) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int ret; + + mutex_lock(&chip->reg_lock); + ret = _mv88e6xxx_mdio_page_read(chip, port, page, reg); + mutex_unlock(&chip->reg_lock); + + return ret; +} + +static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page, + int reg, int val) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int ret; + + mutex_lock(&chip->reg_lock); + ret = _mv88e6xxx_mdio_page_write(chip, port, page, reg, val); + mutex_unlock(&chip->reg_lock); + + return ret; +} +#endif + +static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port) +{ + if (port >= 0 && port < chip->info->num_ports) + return port; + return -EINVAL; +} + +static int mv88e6xxx_mdio_read(struct mii_bus *bus, int port, int regnum) +{ + struct mv88e6xxx_chip *chip = bus->priv; + int addr = mv88e6xxx_port_to_mdio_addr(chip, port); + int ret; + + if (addr < 0) + return 0xffff; + + mutex_lock(&chip->reg_lock); + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) + ret = mv88e6xxx_mdio_read_ppu(chip, addr, regnum); + else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY)) + ret = mv88e6xxx_mdio_read_indirect(chip, addr, regnum); + else + ret = mv88e6xxx_mdio_read_direct(chip, addr, regnum); + + mutex_unlock(&chip->reg_lock); + return ret; +} + +static int mv88e6xxx_mdio_write(struct mii_bus *bus, int port, int regnum, + u16 val) +{ + struct mv88e6xxx_chip *chip = bus->priv; + int addr = mv88e6xxx_port_to_mdio_addr(chip, port); + int ret; + + if (addr < 0) + return 0xffff; + + mutex_lock(&chip->reg_lock); + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) + ret = mv88e6xxx_mdio_write_ppu(chip, addr, regnum, val); + else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY)) + ret = mv88e6xxx_mdio_write_indirect(chip, addr, regnum, val); + else + ret = mv88e6xxx_mdio_write_direct(chip, addr, regnum, val); + + mutex_unlock(&chip->reg_lock); + return ret; +} + +static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, + struct device_node *np) +{ + static int index; + struct mii_bus *bus; + int err; + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) + mv88e6xxx_ppu_state_init(chip); + + if (np) + chip->mdio_np = of_get_child_by_name(np, "mdio"); + + bus = devm_mdiobus_alloc(chip->dev); + if (!bus) + return -ENOMEM; + + bus->priv = (void *)chip; + if (np) { + bus->name = np->full_name; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", np->full_name); + } else { + bus->name = "mv88e6xxx SMI"; + snprintf(bus->id, MII_BUS_ID_SIZE, "mv88e6xxx-%d", index++); + } + + bus->read = mv88e6xxx_mdio_read; + bus->write = mv88e6xxx_mdio_write; + bus->parent = chip->dev; + + if (chip->mdio_np) + err = of_mdiobus_register(bus, chip->mdio_np); + else + err = mdiobus_register(bus); + if (err) { + dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err); + goto out; + } + chip->mdio_bus = bus; + + return 0; + +out: + if (chip->mdio_np) + of_node_put(chip->mdio_np); + + return err; +} + +static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_chip *chip) + +{ + struct mii_bus *bus = chip->mdio_bus; + + mdiobus_unregister(bus); + + if (chip->mdio_np) + of_node_put(chip->mdio_np); +} + +#ifdef CONFIG_NET_DSA_HWMON + +static int mv88e61xx_get_temp(struct dsa_switch *ds, 
int *temp) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int ret; + int val; + + *temp = 0; + + mutex_lock(&chip->reg_lock); + + ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x6); + if (ret < 0) + goto error; + + /* Enable temperature sensor */ + ret = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a); + if (ret < 0) + goto error; + + ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, ret | (1 << 5)); + if (ret < 0) + goto error; + + /* Wait for temperature to stabilize */ + usleep_range(10000, 12000); + + val = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a); + if (val < 0) { + ret = val; + goto error; + } + + /* Disable temperature sensor */ + ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, ret & ~(1 << 5)); + if (ret < 0) + goto error; + + *temp = ((val & 0x1f) - 5) * 5; + +error: + mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x0); + mutex_unlock(&chip->reg_lock); + return ret; +} + +static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; + int ret; + + *temp = 0; + + ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 27); + if (ret < 0) + return ret; + + *temp = (ret & 0xff) - 25; + + return 0; +} + +static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP)) + return -EOPNOTSUPP; + + if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip)) + return mv88e63xx_get_temp(ds, temp); + + return mv88e61xx_get_temp(ds, temp); +} + +static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; + int ret; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) + return -EOPNOTSUPP; + + *temp = 0; + + ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26); + if (ret < 0) + return ret; + + *temp = (((ret >> 8) & 0x1f) * 5) - 25; + + return 0; +} + +static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; + int ret; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) + return -EOPNOTSUPP; + + ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26); + if (ret < 0) + return ret; + temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); + return mv88e6xxx_mdio_page_write(ds, phy, 6, 26, + (ret & 0xe0ff) | (temp << 8)); +} + +static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int phy = mv88e6xxx_6320_family(chip) ? 
3 : 0; + int ret; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) + return -EOPNOTSUPP; + + *alarm = false; + + ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26); + if (ret < 0) + return ret; + + *alarm = !!(ret & 0x40); + + return 0; +} +#endif /* CONFIG_NET_DSA_HWMON */ + +static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + + return chip->eeprom_len; +} + +static int mv88e6xxx_get_eeprom16(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, u8 *data) +{ + unsigned int offset = eeprom->offset; + unsigned int len = eeprom->len; + u16 val; + int err; + + eeprom->len = 0; + + if (offset & 1) { + err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val); + if (err) + return err; + + *data++ = (val >> 8) & 0xff; + + offset++; + len--; + eeprom->len++; + } + + while (len >= 2) { + err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val); + if (err) + return err; + + *data++ = val & 0xff; + *data++ = (val >> 8) & 0xff; + + offset += 2; + len -= 2; + eeprom->len += 2; + } + + if (len) { + err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val); + if (err) + return err; + + *data++ = val & 0xff; + + offset++; + len--; + eeprom->len++; + } + + return 0; +} + +static int mv88e6xxx_get_eeprom(struct dsa_switch *ds, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int err; + + mutex_lock(&chip->reg_lock); + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16)) + err = mv88e6xxx_get_eeprom16(chip, eeprom, data); + else + err = -EOPNOTSUPP; + + mutex_unlock(&chip->reg_lock); + + if (err) + return err; + + eeprom->magic = 0xc3ec4951; + + return 0; +} + +static int mv88e6xxx_set_eeprom16(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, u8 *data) +{ + unsigned int offset = eeprom->offset; + unsigned int len = eeprom->len; + u16 val; + int err; + + /* Ensure the RO WriteEn bit is set */ + err = mv88e6xxx_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, &val); + if (err) + return err; + + if (!(val & GLOBAL2_EEPROM_CMD_WRITE_EN)) + return -EROFS; + + eeprom->len = 0; + + if (offset & 1) { + err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val); + if (err) + return err; + + val = (*data++ << 8) | (val & 0xff); + + err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val); + if (err) + return err; + + offset++; + len--; + eeprom->len++; + } + + while (len >= 2) { + val = *data++; + val |= *data++ << 8; + + err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val); + if (err) + return err; + + offset += 2; + len -= 2; + eeprom->len += 2; + } + + if (len) { + err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val); + if (err) + return err; + + val = (val & 0xff00) | *data++; + + err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val); + if (err) + return err; + + offset++; + len--; + eeprom->len++; + } + + return 0; +} + +static int mv88e6xxx_set_eeprom(struct dsa_switch *ds, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int err; + + if (eeprom->magic != 0xc3ec4951) + return -EINVAL; + + mutex_lock(&chip->reg_lock); + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16)) + err = mv88e6xxx_set_eeprom16(chip, eeprom, data); + else + err = -EOPNOTSUPP; + + mutex_unlock(&chip->reg_lock); + + return err; +} + +static const struct mv88e6xxx_info mv88e6xxx_table[] = { + [MV88E6085] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6085, + .family = MV88E6XXX_FAMILY_6097, + .name = "Marvell 88E6085", + .num_databases = 4096, + .num_ports = 
10, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6097, + }, + + [MV88E6095] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6095, + .family = MV88E6XXX_FAMILY_6095, + .name = "Marvell 88E6095/88E6095F", + .num_databases = 256, + .num_ports = 11, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6095, + }, + + [MV88E6123] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6123, + .family = MV88E6XXX_FAMILY_6165, + .name = "Marvell 88E6123", + .num_databases = 4096, + .num_ports = 3, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6165, + }, + + [MV88E6131] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6131, + .family = MV88E6XXX_FAMILY_6185, + .name = "Marvell 88E6131", + .num_databases = 256, + .num_ports = 8, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6185, + }, + + [MV88E6161] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6161, + .family = MV88E6XXX_FAMILY_6165, + .name = "Marvell 88E6161", + .num_databases = 4096, + .num_ports = 6, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6165, + }, + + [MV88E6165] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6165, + .family = MV88E6XXX_FAMILY_6165, + .name = "Marvell 88E6165", + .num_databases = 4096, + .num_ports = 6, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6165, + }, + + [MV88E6171] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6171, + .family = MV88E6XXX_FAMILY_6351, + .name = "Marvell 88E6171", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6351, + }, + + [MV88E6172] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6172, + .family = MV88E6XXX_FAMILY_6352, + .name = "Marvell 88E6172", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6352, + }, + + [MV88E6175] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6175, + .family = MV88E6XXX_FAMILY_6351, + .name = "Marvell 88E6175", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6351, + }, + + [MV88E6176] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6176, + .family = MV88E6XXX_FAMILY_6352, + .name = "Marvell 88E6176", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6352, + }, + + [MV88E6185] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6185, + .family = MV88E6XXX_FAMILY_6185, + .name = "Marvell 88E6185", + .num_databases = 256, + .num_ports = 10, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6185, + }, + + [MV88E6240] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6240, + .family = MV88E6XXX_FAMILY_6352, + .name = "Marvell 88E6240", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6352, + }, + + [MV88E6320] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6320, + .family = MV88E6XXX_FAMILY_6320, + .name = "Marvell 88E6320", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6320, + }, + + [MV88E6321] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6321, + .family = MV88E6XXX_FAMILY_6320, + .name = "Marvell 88E6321", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 
15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6320, + }, + + [MV88E6350] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6350, + .family = MV88E6XXX_FAMILY_6351, + .name = "Marvell 88E6350", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6351, + }, + + [MV88E6351] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6351, + .family = MV88E6XXX_FAMILY_6351, + .name = "Marvell 88E6351", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6351, + }, + + [MV88E6352] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6352, + .family = MV88E6XXX_FAMILY_6352, + .name = "Marvell 88E6352", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6352, + }, +}; + +static const struct mv88e6xxx_info *mv88e6xxx_lookup_info(unsigned int prod_num) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mv88e6xxx_table); ++i) + if (mv88e6xxx_table[i].prod_num == prod_num) + return &mv88e6xxx_table[i]; + + return NULL; +} + +static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip) +{ + const struct mv88e6xxx_info *info; + unsigned int prod_num, rev; + u16 id; + int err; + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_port_read(chip, 0, PORT_SWITCH_ID, &id); + mutex_unlock(&chip->reg_lock); + if (err) + return err; + + prod_num = (id & 0xfff0) >> 4; + rev = id & 0x000f; + + info = mv88e6xxx_lookup_info(prod_num); + if (!info) + return -ENODEV; + + /* Update the compatible info with the probed one */ + chip->info = info; + + dev_info(chip->dev, "switch 0x%x detected: %s, revision %u\n", + chip->info->prod_num, chip->info->name, rev); + + return 0; +} + +static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev) +{ + struct mv88e6xxx_chip *chip; + + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return NULL; + + chip->dev = dev; + + mutex_init(&chip->reg_lock); + + return chip; +} + +static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, int sw_addr) +{ + /* ADDR[0] pin is unavailable externally and considered zero */ + if (sw_addr & 0x1) + return -EINVAL; + + if (sw_addr == 0) + chip->smi_ops = &mv88e6xxx_smi_single_chip_ops; + else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_MULTI_CHIP)) + chip->smi_ops = &mv88e6xxx_smi_multi_chip_ops; + else + return -EINVAL; + + chip->bus = bus; + chip->sw_addr = sw_addr; + + return 0; +} + +static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, + struct device *host_dev, int sw_addr, + void **priv) +{ + struct mv88e6xxx_chip *chip; + struct mii_bus *bus; + int err; + + bus = dsa_host_dev_to_mii_bus(host_dev); + if (!bus) + return NULL; + + chip = mv88e6xxx_alloc_chip(dsa_dev); + if (!chip) + return NULL; + + /* Legacy SMI probing will only support chips similar to 88E6085 */ + chip->info = &mv88e6xxx_table[MV88E6085]; + + err = mv88e6xxx_smi_init(chip, bus, sw_addr); + if (err) + goto free; + + err = mv88e6xxx_detect(chip); + if (err) + goto free; + + err = mv88e6xxx_mdio_register(chip, NULL); + if (err) + goto free; + + *priv = chip; + + return chip->info->name; +free: + devm_kfree(dsa_dev, chip); + + return NULL; +} + +static struct dsa_switch_driver mv88e6xxx_switch_driver = { + .tag_protocol = DSA_TAG_PROTO_EDSA, + .probe = mv88e6xxx_drv_probe, + .setup = mv88e6xxx_setup, + .set_addr = mv88e6xxx_set_addr, + .adjust_link = mv88e6xxx_adjust_link, + .get_strings = mv88e6xxx_get_strings, + .get_ethtool_stats = 
mv88e6xxx_get_ethtool_stats, + .get_sset_count = mv88e6xxx_get_sset_count, + .set_eee = mv88e6xxx_set_eee, + .get_eee = mv88e6xxx_get_eee, +#ifdef CONFIG_NET_DSA_HWMON + .get_temp = mv88e6xxx_get_temp, + .get_temp_limit = mv88e6xxx_get_temp_limit, + .set_temp_limit = mv88e6xxx_set_temp_limit, + .get_temp_alarm = mv88e6xxx_get_temp_alarm, +#endif + .get_eeprom_len = mv88e6xxx_get_eeprom_len, + .get_eeprom = mv88e6xxx_get_eeprom, + .set_eeprom = mv88e6xxx_set_eeprom, + .get_regs_len = mv88e6xxx_get_regs_len, + .get_regs = mv88e6xxx_get_regs, + .set_ageing_time = mv88e6xxx_set_ageing_time, + .port_bridge_join = mv88e6xxx_port_bridge_join, + .port_bridge_leave = mv88e6xxx_port_bridge_leave, + .port_stp_state_set = mv88e6xxx_port_stp_state_set, + .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, + .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, + .port_vlan_add = mv88e6xxx_port_vlan_add, + .port_vlan_del = mv88e6xxx_port_vlan_del, + .port_vlan_dump = mv88e6xxx_port_vlan_dump, + .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, + .port_fdb_add = mv88e6xxx_port_fdb_add, + .port_fdb_del = mv88e6xxx_port_fdb_del, + .port_fdb_dump = mv88e6xxx_port_fdb_dump, +}; + +static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip, + struct device_node *np) +{ + struct device *dev = chip->dev; + struct dsa_switch *ds; + + ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL); + if (!ds) + return -ENOMEM; + + ds->dev = dev; + ds->priv = chip; + ds->drv = &mv88e6xxx_switch_driver; + + dev_set_drvdata(dev, ds); + + return dsa_register_switch(ds, np); +} + +static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip) +{ + dsa_unregister_switch(chip->ds); +} + +static int mv88e6xxx_probe(struct mdio_device *mdiodev) +{ + struct device *dev = &mdiodev->dev; + struct device_node *np = dev->of_node; + const struct mv88e6xxx_info *compat_info; + struct mv88e6xxx_chip *chip; + u32 eeprom_len; + int err; + + compat_info = of_device_get_match_data(dev); + if (!compat_info) + return -EINVAL; + + chip = mv88e6xxx_alloc_chip(dev); + if (!chip) + return -ENOMEM; + + chip->info = compat_info; + + err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr); + if (err) + return err; + + err = mv88e6xxx_detect(chip); + if (err) + return err; + + chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS); + if (IS_ERR(chip->reset)) + return PTR_ERR(chip->reset); + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16) && + !of_property_read_u32(np, "eeprom-length", &eeprom_len)) + chip->eeprom_len = eeprom_len; + + err = mv88e6xxx_mdio_register(chip, np); + if (err) + return err; + + err = mv88e6xxx_register_switch(chip, np); + if (err) { + mv88e6xxx_mdio_unregister(chip); + return err; + } + + return 0; +} + +static void mv88e6xxx_remove(struct mdio_device *mdiodev) +{ + struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + + mv88e6xxx_unregister_switch(chip); + mv88e6xxx_mdio_unregister(chip); +} + +static const struct of_device_id mv88e6xxx_of_match[] = { + { + .compatible = "marvell,mv88e6085", + .data = &mv88e6xxx_table[MV88E6085], + }, + { /* sentinel */ }, +}; + +MODULE_DEVICE_TABLE(of, mv88e6xxx_of_match); + +static struct mdio_driver mv88e6xxx_driver = { + .probe = mv88e6xxx_probe, + .remove = mv88e6xxx_remove, + .mdiodrv.driver = { + .name = "mv88e6085", + .of_match_table = mv88e6xxx_of_match, + }, +}; + +static int __init mv88e6xxx_init(void) +{ + register_switch_driver(&mv88e6xxx_switch_driver); + return mdio_driver_register(&mv88e6xxx_driver); +} 
+module_init(mv88e6xxx_init); + +static void __exit mv88e6xxx_cleanup(void) +{ + mdio_driver_unregister(&mv88e6xxx_driver); + unregister_switch_driver(&mv88e6xxx_switch_driver); +} +module_exit(mv88e6xxx_cleanup); + +MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>"); +MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h new file mode 100644 index 000000000..48d6ea77f --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -0,0 +1,678 @@ +/* + * Marvell 88e6xxx common definitions + * + * Copyright (c) 2008 Marvell Semiconductor + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __MV88E6XXX_H +#define __MV88E6XXX_H + +#include <linux/if_vlan.h> +#include <linux/gpio/consumer.h> + +#ifndef UINT64_MAX +#define UINT64_MAX (u64)(~((u64)0)) +#endif + +#define SMI_CMD 0x00 +#define SMI_CMD_BUSY BIT(15) +#define SMI_CMD_CLAUSE_22 BIT(12) +#define SMI_CMD_OP_22_WRITE ((1 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22) +#define SMI_CMD_OP_22_READ ((2 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22) +#define SMI_CMD_OP_45_WRITE_ADDR ((0 << 10) | SMI_CMD_BUSY) +#define SMI_CMD_OP_45_WRITE_DATA ((1 << 10) | SMI_CMD_BUSY) +#define SMI_CMD_OP_45_READ_DATA ((2 << 10) | SMI_CMD_BUSY) +#define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY) +#define SMI_DATA 0x01 + +/* Fiber/SERDES Registers are located at SMI address F, page 1 */ +#define REG_FIBER_SERDES 0x0f +#define PAGE_FIBER_SERDES 0x01 + +#define REG_PORT(p) (0x10 + (p)) +#define PORT_STATUS 0x00 +#define PORT_STATUS_PAUSE_EN BIT(15) +#define PORT_STATUS_MY_PAUSE BIT(14) +#define PORT_STATUS_HD_FLOW BIT(13) +#define PORT_STATUS_PHY_DETECT BIT(12) +#define PORT_STATUS_LINK BIT(11) +#define PORT_STATUS_DUPLEX BIT(10) +#define PORT_STATUS_SPEED_MASK 0x0300 +#define PORT_STATUS_SPEED_10 0x0000 +#define PORT_STATUS_SPEED_100 0x0100 +#define PORT_STATUS_SPEED_1000 0x0200 +#define PORT_STATUS_EEE BIT(6) /* 6352 */ +#define PORT_STATUS_AM_DIS BIT(6) /* 6165 */ +#define PORT_STATUS_MGMII BIT(6) /* 6185 */ +#define PORT_STATUS_TX_PAUSED BIT(5) +#define PORT_STATUS_FLOW_CTRL BIT(4) +#define PORT_STATUS_CMODE_MASK 0x0f +#define PORT_STATUS_CMODE_100BASE_X 0x8 +#define PORT_STATUS_CMODE_1000BASE_X 0x9 +#define PORT_STATUS_CMODE_SGMII 0xa +#define PORT_PCS_CTRL 0x01 +#define PORT_PCS_CTRL_RGMII_DELAY_RXCLK BIT(15) +#define PORT_PCS_CTRL_RGMII_DELAY_TXCLK BIT(14) +#define PORT_PCS_CTRL_FC BIT(7) +#define PORT_PCS_CTRL_FORCE_FC BIT(6) +#define PORT_PCS_CTRL_LINK_UP BIT(5) +#define PORT_PCS_CTRL_FORCE_LINK BIT(4) +#define PORT_PCS_CTRL_DUPLEX_FULL BIT(3) +#define PORT_PCS_CTRL_FORCE_DUPLEX BIT(2) +#define PORT_PCS_CTRL_10 0x00 +#define PORT_PCS_CTRL_100 0x01 +#define PORT_PCS_CTRL_1000 0x02 +#define PORT_PCS_CTRL_UNFORCED 0x03 +#define PORT_PAUSE_CTRL 0x02 +#define PORT_SWITCH_ID 0x03 +#define PORT_SWITCH_ID_PROD_NUM_6085 0x04a +#define PORT_SWITCH_ID_PROD_NUM_6095 0x095 +#define PORT_SWITCH_ID_PROD_NUM_6131 0x106 +#define PORT_SWITCH_ID_PROD_NUM_6320 0x115 +#define PORT_SWITCH_ID_PROD_NUM_6123 0x121 +#define PORT_SWITCH_ID_PROD_NUM_6161 0x161 +#define PORT_SWITCH_ID_PROD_NUM_6165 0x165 +#define PORT_SWITCH_ID_PROD_NUM_6171 0x171 +#define PORT_SWITCH_ID_PROD_NUM_6172 0x172 +#define 
PORT_SWITCH_ID_PROD_NUM_6175 0x175 +#define PORT_SWITCH_ID_PROD_NUM_6176 0x176 +#define PORT_SWITCH_ID_PROD_NUM_6185 0x1a7 +#define PORT_SWITCH_ID_PROD_NUM_6240 0x240 +#define PORT_SWITCH_ID_PROD_NUM_6321 0x310 +#define PORT_SWITCH_ID_PROD_NUM_6352 0x352 +#define PORT_SWITCH_ID_PROD_NUM_6350 0x371 +#define PORT_SWITCH_ID_PROD_NUM_6351 0x375 +#define PORT_CONTROL 0x04 +#define PORT_CONTROL_USE_CORE_TAG BIT(15) +#define PORT_CONTROL_DROP_ON_LOCK BIT(14) +#define PORT_CONTROL_EGRESS_UNMODIFIED (0x0 << 12) +#define PORT_CONTROL_EGRESS_UNTAGGED (0x1 << 12) +#define PORT_CONTROL_EGRESS_TAGGED (0x2 << 12) +#define PORT_CONTROL_EGRESS_ADD_TAG (0x3 << 12) +#define PORT_CONTROL_HEADER BIT(11) +#define PORT_CONTROL_IGMP_MLD_SNOOP BIT(10) +#define PORT_CONTROL_DOUBLE_TAG BIT(9) +#define PORT_CONTROL_FRAME_MODE_NORMAL (0x0 << 8) +#define PORT_CONTROL_FRAME_MODE_DSA (0x1 << 8) +#define PORT_CONTROL_FRAME_MODE_PROVIDER (0x2 << 8) +#define PORT_CONTROL_FRAME_ETHER_TYPE_DSA (0x3 << 8) +#define PORT_CONTROL_DSA_TAG BIT(8) +#define PORT_CONTROL_VLAN_TUNNEL BIT(7) +#define PORT_CONTROL_TAG_IF_BOTH BIT(6) +#define PORT_CONTROL_USE_IP BIT(5) +#define PORT_CONTROL_USE_TAG BIT(4) +#define PORT_CONTROL_FORWARD_UNKNOWN_MC BIT(3) +#define PORT_CONTROL_FORWARD_UNKNOWN BIT(2) +#define PORT_CONTROL_STATE_MASK 0x03 +#define PORT_CONTROL_STATE_DISABLED 0x00 +#define PORT_CONTROL_STATE_BLOCKING 0x01 +#define PORT_CONTROL_STATE_LEARNING 0x02 +#define PORT_CONTROL_STATE_FORWARDING 0x03 +#define PORT_CONTROL_1 0x05 +#define PORT_CONTROL_1_FID_11_4_MASK (0xff << 0) +#define PORT_BASE_VLAN 0x06 +#define PORT_BASE_VLAN_FID_3_0_MASK (0xf << 12) +#define PORT_DEFAULT_VLAN 0x07 +#define PORT_DEFAULT_VLAN_MASK 0xfff +#define PORT_CONTROL_2 0x08 +#define PORT_CONTROL_2_IGNORE_FCS BIT(15) +#define PORT_CONTROL_2_VTU_PRI_OVERRIDE BIT(14) +#define PORT_CONTROL_2_SA_PRIO_OVERRIDE BIT(13) +#define PORT_CONTROL_2_DA_PRIO_OVERRIDE BIT(12) +#define PORT_CONTROL_2_JUMBO_1522 (0x00 << 12) +#define PORT_CONTROL_2_JUMBO_2048 (0x01 << 12) +#define PORT_CONTROL_2_JUMBO_10240 (0x02 << 12) +#define PORT_CONTROL_2_8021Q_MASK (0x03 << 10) +#define PORT_CONTROL_2_8021Q_DISABLED (0x00 << 10) +#define PORT_CONTROL_2_8021Q_FALLBACK (0x01 << 10) +#define PORT_CONTROL_2_8021Q_CHECK (0x02 << 10) +#define PORT_CONTROL_2_8021Q_SECURE (0x03 << 10) +#define PORT_CONTROL_2_DISCARD_TAGGED BIT(9) +#define PORT_CONTROL_2_DISCARD_UNTAGGED BIT(8) +#define PORT_CONTROL_2_MAP_DA BIT(7) +#define PORT_CONTROL_2_DEFAULT_FORWARD BIT(6) +#define PORT_CONTROL_2_FORWARD_UNKNOWN BIT(6) +#define PORT_CONTROL_2_EGRESS_MONITOR BIT(5) +#define PORT_CONTROL_2_INGRESS_MONITOR BIT(4) +#define PORT_RATE_CONTROL 0x09 +#define PORT_RATE_CONTROL_2 0x0a +#define PORT_ASSOC_VECTOR 0x0b +#define PORT_ASSOC_VECTOR_HOLD_AT_1 BIT(15) +#define PORT_ASSOC_VECTOR_INT_AGE_OUT BIT(14) +#define PORT_ASSOC_VECTOR_LOCKED_PORT BIT(13) +#define PORT_ASSOC_VECTOR_IGNORE_WRONG BIT(12) +#define PORT_ASSOC_VECTOR_REFRESH_LOCKED BIT(11) +#define PORT_ATU_CONTROL 0x0c +#define PORT_PRI_OVERRIDE 0x0d +#define PORT_ETH_TYPE 0x0f +#define PORT_IN_DISCARD_LO 0x10 +#define PORT_IN_DISCARD_HI 0x11 +#define PORT_IN_FILTERED 0x12 +#define PORT_OUT_FILTERED 0x13 +#define PORT_TAG_REGMAP_0123 0x18 +#define PORT_TAG_REGMAP_4567 0x19 + +#define REG_GLOBAL 0x1b +#define GLOBAL_STATUS 0x00 +#define GLOBAL_STATUS_PPU_STATE BIT(15) /* 6351 and 6171 */ +/* Two bits for 6165, 6185 etc */ +#define GLOBAL_STATUS_PPU_MASK (0x3 << 14) +#define GLOBAL_STATUS_PPU_DISABLED_RST (0x0 << 14) +#define GLOBAL_STATUS_PPU_INITIALIZING (0x1 
<< 14) +#define GLOBAL_STATUS_PPU_DISABLED (0x2 << 14) +#define GLOBAL_STATUS_PPU_POLLING (0x3 << 14) +#define GLOBAL_MAC_01 0x01 +#define GLOBAL_MAC_23 0x02 +#define GLOBAL_MAC_45 0x03 +#define GLOBAL_ATU_FID 0x01 /* 6097 6165 6351 6352 */ +#define GLOBAL_VTU_FID 0x02 /* 6097 6165 6351 6352 */ +#define GLOBAL_VTU_FID_MASK 0xfff +#define GLOBAL_VTU_SID 0x03 /* 6097 6165 6351 6352 */ +#define GLOBAL_VTU_SID_MASK 0x3f +#define GLOBAL_CONTROL 0x04 +#define GLOBAL_CONTROL_SW_RESET BIT(15) +#define GLOBAL_CONTROL_PPU_ENABLE BIT(14) +#define GLOBAL_CONTROL_DISCARD_EXCESS BIT(13) /* 6352 */ +#define GLOBAL_CONTROL_SCHED_PRIO BIT(11) /* 6152 */ +#define GLOBAL_CONTROL_MAX_FRAME_1632 BIT(10) /* 6152 */ +#define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9) /* 6152 */ +#define GLOBAL_CONTROL_DEVICE_EN BIT(7) +#define GLOBAL_CONTROL_STATS_DONE_EN BIT(6) +#define GLOBAL_CONTROL_VTU_PROBLEM_EN BIT(5) +#define GLOBAL_CONTROL_VTU_DONE_EN BIT(4) +#define GLOBAL_CONTROL_ATU_PROBLEM_EN BIT(3) +#define GLOBAL_CONTROL_ATU_DONE_EN BIT(2) +#define GLOBAL_CONTROL_TCAM_EN BIT(1) +#define GLOBAL_CONTROL_EEPROM_DONE_EN BIT(0) +#define GLOBAL_VTU_OP 0x05 +#define GLOBAL_VTU_OP_BUSY BIT(15) +#define GLOBAL_VTU_OP_FLUSH_ALL ((0x01 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_OP_VTU_LOAD_PURGE ((0x03 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_OP_VTU_GET_NEXT ((0x04 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_OP_STU_LOAD_PURGE ((0x05 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_OP_STU_GET_NEXT ((0x06 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_VID 0x06 +#define GLOBAL_VTU_VID_MASK 0xfff +#define GLOBAL_VTU_VID_VALID BIT(12) +#define GLOBAL_VTU_DATA_0_3 0x07 +#define GLOBAL_VTU_DATA_4_7 0x08 +#define GLOBAL_VTU_DATA_8_11 0x09 +#define GLOBAL_VTU_STU_DATA_MASK 0x03 +#define GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED 0x00 +#define GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED 0x01 +#define GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED 0x02 +#define GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER 0x03 +#define GLOBAL_STU_DATA_PORT_STATE_DISABLED 0x00 +#define GLOBAL_STU_DATA_PORT_STATE_BLOCKING 0x01 +#define GLOBAL_STU_DATA_PORT_STATE_LEARNING 0x02 +#define GLOBAL_STU_DATA_PORT_STATE_FORWARDING 0x03 +#define GLOBAL_ATU_CONTROL 0x0a +#define GLOBAL_ATU_CONTROL_LEARN2ALL BIT(3) +#define GLOBAL_ATU_OP 0x0b +#define GLOBAL_ATU_OP_BUSY BIT(15) +#define GLOBAL_ATU_OP_NOP (0 << 12) +#define GLOBAL_ATU_OP_FLUSH_MOVE_ALL ((1 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC ((2 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_OP_LOAD_DB ((3 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_OP_GET_NEXT_DB ((4 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB ((5 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_OP_GET_CLR_VIOLATION ((7 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_DATA 0x0c +#define GLOBAL_ATU_DATA_TRUNK BIT(15) +#define GLOBAL_ATU_DATA_TRUNK_ID_MASK 0x00f0 +#define GLOBAL_ATU_DATA_TRUNK_ID_SHIFT 4 +#define GLOBAL_ATU_DATA_PORT_VECTOR_MASK 0x3ff0 +#define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT 4 +#define GLOBAL_ATU_DATA_STATE_MASK 0x0f +#define GLOBAL_ATU_DATA_STATE_UNUSED 0x00 +#define GLOBAL_ATU_DATA_STATE_UC_MGMT 0x0d +#define GLOBAL_ATU_DATA_STATE_UC_STATIC 0x0e +#define GLOBAL_ATU_DATA_STATE_UC_PRIO_OVER 0x0f +#define GLOBAL_ATU_DATA_STATE_MC_NONE_RATE 0x05 +#define GLOBAL_ATU_DATA_STATE_MC_STATIC 0x07 +#define GLOBAL_ATU_DATA_STATE_MC_MGMT 0x0e +#define GLOBAL_ATU_DATA_STATE_MC_PRIO_OVER 0x0f +#define 
GLOBAL_ATU_MAC_01 0x0d +#define GLOBAL_ATU_MAC_23 0x0e +#define GLOBAL_ATU_MAC_45 0x0f +#define GLOBAL_IP_PRI_0 0x10 +#define GLOBAL_IP_PRI_1 0x11 +#define GLOBAL_IP_PRI_2 0x12 +#define GLOBAL_IP_PRI_3 0x13 +#define GLOBAL_IP_PRI_4 0x14 +#define GLOBAL_IP_PRI_5 0x15 +#define GLOBAL_IP_PRI_6 0x16 +#define GLOBAL_IP_PRI_7 0x17 +#define GLOBAL_IEEE_PRI 0x18 +#define GLOBAL_CORE_TAG_TYPE 0x19 +#define GLOBAL_MONITOR_CONTROL 0x1a +#define GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT 12 +#define GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT 8 +#define GLOBAL_MONITOR_CONTROL_ARP_SHIFT 4 +#define GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT 0 +#define GLOBAL_MONITOR_CONTROL_ARP_DISABLED (0xf0) +#define GLOBAL_CONTROL_2 0x1c +#define GLOBAL_CONTROL_2_NO_CASCADE 0xe000 +#define GLOBAL_CONTROL_2_MULTIPLE_CASCADE 0xf000 + +#define GLOBAL_STATS_OP 0x1d +#define GLOBAL_STATS_OP_BUSY BIT(15) +#define GLOBAL_STATS_OP_NOP (0 << 12) +#define GLOBAL_STATS_OP_FLUSH_ALL ((1 << 12) | GLOBAL_STATS_OP_BUSY) +#define GLOBAL_STATS_OP_FLUSH_PORT ((2 << 12) | GLOBAL_STATS_OP_BUSY) +#define GLOBAL_STATS_OP_READ_CAPTURED ((4 << 12) | GLOBAL_STATS_OP_BUSY) +#define GLOBAL_STATS_OP_CAPTURE_PORT ((5 << 12) | GLOBAL_STATS_OP_BUSY) +#define GLOBAL_STATS_OP_HIST_RX ((1 << 10) | GLOBAL_STATS_OP_BUSY) +#define GLOBAL_STATS_OP_HIST_TX ((2 << 10) | GLOBAL_STATS_OP_BUSY) +#define GLOBAL_STATS_OP_HIST_RX_TX ((3 << 10) | GLOBAL_STATS_OP_BUSY) +#define GLOBAL_STATS_OP_BANK_1 BIT(9) +#define GLOBAL_STATS_COUNTER_32 0x1e +#define GLOBAL_STATS_COUNTER_01 0x1f + +#define REG_GLOBAL2 0x1c +#define GLOBAL2_INT_SOURCE 0x00 +#define GLOBAL2_INT_MASK 0x01 +#define GLOBAL2_MGMT_EN_2X 0x02 +#define GLOBAL2_MGMT_EN_0X 0x03 +#define GLOBAL2_FLOW_CONTROL 0x04 +#define GLOBAL2_SWITCH_MGMT 0x05 +#define GLOBAL2_SWITCH_MGMT_USE_DOUBLE_TAG_DATA BIT(15) +#define GLOBAL2_SWITCH_MGMT_PREVENT_LOOPS BIT(14) +#define GLOBAL2_SWITCH_MGMT_FLOW_CONTROL_MSG BIT(13) +#define GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI BIT(7) +#define GLOBAL2_SWITCH_MGMT_RSVD2CPU BIT(3) +#define GLOBAL2_DEVICE_MAPPING 0x06 +#define GLOBAL2_DEVICE_MAPPING_UPDATE BIT(15) +#define GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT 8 +#define GLOBAL2_DEVICE_MAPPING_PORT_MASK 0x0f +#define GLOBAL2_TRUNK_MASK 0x07 +#define GLOBAL2_TRUNK_MASK_UPDATE BIT(15) +#define GLOBAL2_TRUNK_MASK_NUM_SHIFT 12 +#define GLOBAL2_TRUNK_MASK_HASK BIT(11) +#define GLOBAL2_TRUNK_MAPPING 0x08 +#define GLOBAL2_TRUNK_MAPPING_UPDATE BIT(15) +#define GLOBAL2_TRUNK_MAPPING_ID_SHIFT 11 +#define GLOBAL2_IRL_CMD 0x09 +#define GLOBAL2_IRL_CMD_BUSY BIT(15) +#define GLOBAL2_IRL_CMD_OP_INIT_ALL ((0x001 << 12) | GLOBAL2_IRL_CMD_BUSY) +#define GLOBAL2_IRL_CMD_OP_INIT_SEL ((0x010 << 12) | GLOBAL2_IRL_CMD_BUSY) +#define GLOBAL2_IRL_CMD_OP_WRITE_SEL ((0x011 << 12) | GLOBAL2_IRL_CMD_BUSY) +#define GLOBAL2_IRL_CMD_OP_READ_SEL ((0x100 << 12) | GLOBAL2_IRL_CMD_BUSY) +#define GLOBAL2_IRL_DATA 0x0a +#define GLOBAL2_PVT_ADDR 0x0b +#define GLOBAL2_PVT_ADDR_BUSY BIT(15) +#define GLOBAL2_PVT_ADDR_OP_INIT_ONES ((0x01 << 12) | GLOBAL2_PVT_ADDR_BUSY) +#define GLOBAL2_PVT_ADDR_OP_WRITE_PVLAN ((0x03 << 12) | GLOBAL2_PVT_ADDR_BUSY) +#define GLOBAL2_PVT_ADDR_OP_READ ((0x04 << 12) | GLOBAL2_PVT_ADDR_BUSY) +#define GLOBAL2_PVT_DATA 0x0c +#define GLOBAL2_SWITCH_MAC 0x0d +#define GLOBAL2_ATU_STATS 0x0e +#define GLOBAL2_PRIO_OVERRIDE 0x0f +#define GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP BIT(7) +#define GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT 4 +#define GLOBAL2_PRIO_OVERRIDE_FORCE_ARP BIT(3) +#define GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT 0 +#define GLOBAL2_EEPROM_CMD 0x14 +#define GLOBAL2_EEPROM_CMD_BUSY 
BIT(15) +#define GLOBAL2_EEPROM_CMD_OP_WRITE ((0x3 << 12) | GLOBAL2_EEPROM_CMD_BUSY) +#define GLOBAL2_EEPROM_CMD_OP_READ ((0x4 << 12) | GLOBAL2_EEPROM_CMD_BUSY) +#define GLOBAL2_EEPROM_CMD_OP_LOAD ((0x6 << 12) | GLOBAL2_EEPROM_CMD_BUSY) +#define GLOBAL2_EEPROM_CMD_RUNNING BIT(11) +#define GLOBAL2_EEPROM_CMD_WRITE_EN BIT(10) +#define GLOBAL2_EEPROM_CMD_ADDR_MASK 0xff +#define GLOBAL2_EEPROM_DATA 0x15 +#define GLOBAL2_PTP_AVB_OP 0x16 +#define GLOBAL2_PTP_AVB_DATA 0x17 +#define GLOBAL2_SMI_OP 0x18 +#define GLOBAL2_SMI_OP_BUSY BIT(15) +#define GLOBAL2_SMI_OP_CLAUSE_22 BIT(12) +#define GLOBAL2_SMI_OP_22_WRITE ((1 << 10) | GLOBAL2_SMI_OP_BUSY | \ + GLOBAL2_SMI_OP_CLAUSE_22) +#define GLOBAL2_SMI_OP_22_READ ((2 << 10) | GLOBAL2_SMI_OP_BUSY | \ + GLOBAL2_SMI_OP_CLAUSE_22) +#define GLOBAL2_SMI_OP_45_WRITE_ADDR ((0 << 10) | GLOBAL2_SMI_OP_BUSY) +#define GLOBAL2_SMI_OP_45_WRITE_DATA ((1 << 10) | GLOBAL2_SMI_OP_BUSY) +#define GLOBAL2_SMI_OP_45_READ_DATA ((2 << 10) | GLOBAL2_SMI_OP_BUSY) +#define GLOBAL2_SMI_DATA 0x19 +#define GLOBAL2_SCRATCH_MISC 0x1a +#define GLOBAL2_SCRATCH_BUSY BIT(15) +#define GLOBAL2_SCRATCH_REGISTER_SHIFT 8 +#define GLOBAL2_SCRATCH_VALUE_MASK 0xff +#define GLOBAL2_WDOG_CONTROL 0x1b +#define GLOBAL2_QOS_WEIGHT 0x1c +#define GLOBAL2_MISC 0x1d + +#define MV88E6XXX_N_FID 4096 + +/* List of supported models */ +enum mv88e6xxx_model { + MV88E6085, + MV88E6095, + MV88E6123, + MV88E6131, + MV88E6161, + MV88E6165, + MV88E6171, + MV88E6172, + MV88E6175, + MV88E6176, + MV88E6185, + MV88E6240, + MV88E6320, + MV88E6321, + MV88E6350, + MV88E6351, + MV88E6352, +}; + +enum mv88e6xxx_family { + MV88E6XXX_FAMILY_NONE, + MV88E6XXX_FAMILY_6065, /* 6031 6035 6061 6065 */ + MV88E6XXX_FAMILY_6095, /* 6092 6095 */ + MV88E6XXX_FAMILY_6097, /* 6046 6085 6096 6097 */ + MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */ + MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */ + MV88E6XXX_FAMILY_6320, /* 6320 6321 */ + MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */ + MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */ +}; + +enum mv88e6xxx_cap { + /* Energy Efficient Ethernet. + */ + MV88E6XXX_CAP_EEE, + + /* Switch Global 2 Registers. + * The device contains a second set of global 16-bit registers. + */ + MV88E6XXX_CAP_GLOBAL2, + MV88E6XXX_CAP_G2_MGMT_EN_2X, /* (0x02) MGMT Enable Register 2x */ + MV88E6XXX_CAP_G2_MGMT_EN_0X, /* (0x03) MGMT Enable Register 0x */ + MV88E6XXX_CAP_G2_IRL_CMD, /* (0x09) Ingress Rate Command */ + MV88E6XXX_CAP_G2_IRL_DATA, /* (0x0a) Ingress Rate Data */ + MV88E6XXX_CAP_G2_PVT_ADDR, /* (0x0b) Cross Chip Port VLAN Addr */ + MV88E6XXX_CAP_G2_PVT_DATA, /* (0x0c) Cross Chip Port VLAN Data */ + MV88E6XXX_CAP_G2_SWITCH_MAC, /* (0x0d) Switch MAC/WoL/WoF */ + MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */ + MV88E6XXX_CAP_G2_EEPROM_CMD, /* (0x14) EEPROM Command */ + MV88E6XXX_CAP_G2_EEPROM_DATA, /* (0x15) EEPROM Data */ + + /* Multi-chip Addressing Mode. + * Some chips require an indirect SMI access when their SMI device + * address is not zero. See SMI_CMD and SMI_DATA. + */ + MV88E6XXX_CAP_MULTI_CHIP, + + /* PHY Polling Unit. + * See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING. + */ + MV88E6XXX_CAP_PPU, + MV88E6XXX_CAP_PPU_ACTIVE, + + /* SMI PHY Command and Data registers. + * This requires an indirect access to PHY registers through + * GLOBAL2_SMI_OP, otherwise direct access to PHY registers is done. + */ + MV88E6XXX_CAP_SMI_PHY, + + /* Per VLAN Spanning Tree Unit (STU). 
+ * The Port State database, if present, is accessed through VTU + * operations and dedicated SID registers. See GLOBAL_VTU_SID. + */ + MV88E6XXX_CAP_STU, + + /* Internal temperature sensor. + * Available from any enabled port's PHY register 26, page 6. + */ + MV88E6XXX_CAP_TEMP, + MV88E6XXX_CAP_TEMP_LIMIT, + + /* VLAN Table Unit. + * The VTU is used to program 802.1Q VLANs. See GLOBAL_VTU_OP. + */ + MV88E6XXX_CAP_VTU, +}; + +/* Bitmask of capabilities */ +#define MV88E6XXX_FLAG_EEE BIT(MV88E6XXX_CAP_EEE) +#define MV88E6XXX_FLAG_GLOBAL2 BIT(MV88E6XXX_CAP_GLOBAL2) +#define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT(MV88E6XXX_CAP_G2_MGMT_EN_2X) +#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT(MV88E6XXX_CAP_G2_MGMT_EN_0X) +#define MV88E6XXX_FLAG_G2_IRL_CMD BIT(MV88E6XXX_CAP_G2_IRL_CMD) +#define MV88E6XXX_FLAG_G2_IRL_DATA BIT(MV88E6XXX_CAP_G2_IRL_DATA) +#define MV88E6XXX_FLAG_G2_PVT_ADDR BIT(MV88E6XXX_CAP_G2_PVT_ADDR) +#define MV88E6XXX_FLAG_G2_PVT_DATA BIT(MV88E6XXX_CAP_G2_PVT_DATA) +#define MV88E6XXX_FLAG_G2_SWITCH_MAC BIT(MV88E6XXX_CAP_G2_SWITCH_MAC) +#define MV88E6XXX_FLAG_G2_POT BIT(MV88E6XXX_CAP_G2_POT) +#define MV88E6XXX_FLAG_G2_EEPROM_CMD BIT(MV88E6XXX_CAP_G2_EEPROM_CMD) +#define MV88E6XXX_FLAG_G2_EEPROM_DATA BIT(MV88E6XXX_CAP_G2_EEPROM_DATA) +#define MV88E6XXX_FLAG_MULTI_CHIP BIT(MV88E6XXX_CAP_MULTI_CHIP) +#define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU) +#define MV88E6XXX_FLAG_PPU_ACTIVE BIT(MV88E6XXX_CAP_PPU_ACTIVE) +#define MV88E6XXX_FLAG_SMI_PHY BIT(MV88E6XXX_CAP_SMI_PHY) +#define MV88E6XXX_FLAG_STU BIT(MV88E6XXX_CAP_STU) +#define MV88E6XXX_FLAG_TEMP BIT(MV88E6XXX_CAP_TEMP) +#define MV88E6XXX_FLAG_TEMP_LIMIT BIT(MV88E6XXX_CAP_TEMP_LIMIT) +#define MV88E6XXX_FLAG_VTU BIT(MV88E6XXX_CAP_VTU) + +/* EEPROM Programming via Global2 with 16-bit data */ +#define MV88E6XXX_FLAGS_EEPROM16 \ + (MV88E6XXX_FLAG_G2_EEPROM_CMD | \ + MV88E6XXX_FLAG_G2_EEPROM_DATA) + +/* Ingress Rate Limit unit */ +#define MV88E6XXX_FLAGS_IRL \ + (MV88E6XXX_FLAG_G2_IRL_CMD | \ + MV88E6XXX_FLAG_G2_IRL_DATA) + +/* Cross-chip Port VLAN Table */ +#define MV88E6XXX_FLAGS_PVT \ + (MV88E6XXX_FLAG_G2_PVT_ADDR | \ + MV88E6XXX_FLAG_G2_PVT_DATA) + +#define MV88E6XXX_FLAGS_FAMILY_6095 \ + (MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ + MV88E6XXX_FLAG_MULTI_CHIP | \ + MV88E6XXX_FLAG_PPU | \ + MV88E6XXX_FLAG_VTU) + +#define MV88E6XXX_FLAGS_FAMILY_6097 \ + (MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ + MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ + MV88E6XXX_FLAG_G2_POT | \ + MV88E6XXX_FLAG_MULTI_CHIP | \ + MV88E6XXX_FLAG_PPU | \ + MV88E6XXX_FLAG_STU | \ + MV88E6XXX_FLAG_VTU | \ + MV88E6XXX_FLAGS_IRL | \ + MV88E6XXX_FLAGS_PVT) + +#define MV88E6XXX_FLAGS_FAMILY_6165 \ + (MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ + MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ + MV88E6XXX_FLAG_G2_SWITCH_MAC | \ + MV88E6XXX_FLAG_G2_POT | \ + MV88E6XXX_FLAG_MULTI_CHIP | \ + MV88E6XXX_FLAG_STU | \ + MV88E6XXX_FLAG_TEMP | \ + MV88E6XXX_FLAG_VTU | \ + MV88E6XXX_FLAGS_IRL | \ + MV88E6XXX_FLAGS_PVT) + +#define MV88E6XXX_FLAGS_FAMILY_6185 \ + (MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ + MV88E6XXX_FLAG_MULTI_CHIP | \ + MV88E6XXX_FLAG_PPU | \ + MV88E6XXX_FLAG_VTU) + +#define MV88E6XXX_FLAGS_FAMILY_6320 \ + (MV88E6XXX_FLAG_EEE | \ + MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ + MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ + MV88E6XXX_FLAG_G2_SWITCH_MAC | \ + MV88E6XXX_FLAG_G2_POT | \ + MV88E6XXX_FLAG_MULTI_CHIP | \ + MV88E6XXX_FLAG_PPU_ACTIVE | \ + MV88E6XXX_FLAG_SMI_PHY | \ + MV88E6XXX_FLAG_TEMP | \ + 
MV88E6XXX_FLAG_TEMP_LIMIT | \ + MV88E6XXX_FLAG_VTU | \ + MV88E6XXX_FLAGS_EEPROM16 | \ + MV88E6XXX_FLAGS_IRL | \ + MV88E6XXX_FLAGS_PVT) + +#define MV88E6XXX_FLAGS_FAMILY_6351 \ + (MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ + MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ + MV88E6XXX_FLAG_G2_SWITCH_MAC | \ + MV88E6XXX_FLAG_G2_POT | \ + MV88E6XXX_FLAG_MULTI_CHIP | \ + MV88E6XXX_FLAG_PPU_ACTIVE | \ + MV88E6XXX_FLAG_SMI_PHY | \ + MV88E6XXX_FLAG_STU | \ + MV88E6XXX_FLAG_TEMP | \ + MV88E6XXX_FLAG_VTU | \ + MV88E6XXX_FLAGS_IRL | \ + MV88E6XXX_FLAGS_PVT) + +#define MV88E6XXX_FLAGS_FAMILY_6352 \ + (MV88E6XXX_FLAG_EEE | \ + MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ + MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ + MV88E6XXX_FLAG_G2_SWITCH_MAC | \ + MV88E6XXX_FLAG_G2_POT | \ + MV88E6XXX_FLAG_MULTI_CHIP | \ + MV88E6XXX_FLAG_PPU_ACTIVE | \ + MV88E6XXX_FLAG_SMI_PHY | \ + MV88E6XXX_FLAG_STU | \ + MV88E6XXX_FLAG_TEMP | \ + MV88E6XXX_FLAG_TEMP_LIMIT | \ + MV88E6XXX_FLAG_VTU | \ + MV88E6XXX_FLAGS_EEPROM16 | \ + MV88E6XXX_FLAGS_IRL | \ + MV88E6XXX_FLAGS_PVT) + +struct mv88e6xxx_info { + enum mv88e6xxx_family family; + u16 prod_num; + const char *name; + unsigned int num_databases; + unsigned int num_ports; + unsigned int port_base_addr; + unsigned int age_time_coeff; + unsigned long flags; +}; + +struct mv88e6xxx_atu_entry { + u16 fid; + u8 state; + bool trunk; + u16 portv_trunkid; + u8 mac[ETH_ALEN]; +}; + +struct mv88e6xxx_vtu_stu_entry { + /* VTU only */ + u16 vid; + u16 fid; + + /* VTU and STU */ + u8 sid; + bool valid; + u8 data[DSA_MAX_PORTS]; +}; + +struct mv88e6xxx_ops; + +struct mv88e6xxx_priv_port { + struct net_device *bridge_dev; +}; + +struct mv88e6xxx_chip { + const struct mv88e6xxx_info *info; + + /* The dsa_switch this private structure is related to */ + struct dsa_switch *ds; + + /* The device this structure is associated to */ + struct device *dev; + + /* This mutex protects the access to the switch registers */ + struct mutex reg_lock; + + /* The MII bus and the address on the bus that is used to + * communication with the switch + */ + const struct mv88e6xxx_ops *smi_ops; + struct mii_bus *bus; + int sw_addr; + + /* Handles automatic disabling and re-enabling of the PHY + * polling unit. + */ + struct mutex ppu_mutex; + int ppu_disabled; + struct work_struct ppu_work; + struct timer_list ppu_timer; + + /* This mutex serialises access to the statistics unit. + * Hold this mutex over snapshot + dump sequences. + */ + struct mutex stats_mutex; + + struct mv88e6xxx_priv_port ports[DSA_MAX_PORTS]; + + /* A switch may have a GPIO line tied to its reset pin. Parse + * this from the device tree, and use it before performing + * switch soft reset. + */ + struct gpio_desc *reset; + + /* set to size of eeprom if supported by the switch */ + int eeprom_len; + + /* Device node for the MDIO bus */ + struct device_node *mdio_np; + + /* And the MDIO bus itself */ + struct mii_bus *mdio_bus; +}; + +struct mv88e6xxx_ops { + int (*read)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val); + int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val); +}; + +enum stat_type { + BANK0, + BANK1, + PORT, +}; + +struct mv88e6xxx_hw_stat { + char string[ETH_GSTRING_LEN]; + int sizeof_stat; + int reg; + enum stat_type type; +}; + +static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip, + unsigned long flags) +{ + return (chip->info->flags & flags) == flags; +} + +#endif |
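
For illustration only, and not part of the patch above: mv88e6xxx_detect() reads PORT_SWITCH_ID from the first port register (port_base_addr 0x10), takes bits 15:4 as the product number and bits 3:0 as the revision, then looks the product number up in mv88e6xxx_table[]. A minimal, self-contained sketch of that decode, assuming a made-up readback value of 0x3521 (an 88E6352, revision 1, per the PORT_SWITCH_ID_PROD_NUM_* constants defined above):

/* Standalone sketch of the switch-ID decode performed by mv88e6xxx_detect().
 * The id value below is a hypothetical register readback, chosen so the
 * product number matches PORT_SWITCH_ID_PROD_NUM_6352 (0x352).
 */
#include <stdio.h>

int main(void)
{
	unsigned int id = 0x3521;                    /* hypothetical PORT_SWITCH_ID value */
	unsigned int prod_num = (id & 0xfff0) >> 4;  /* product number: 0x352 */
	unsigned int rev = id & 0x000f;              /* revision: 1 */

	printf("prod_num=0x%03x rev=%u\n", prod_num, rev);
	return 0;
}

The same mask-and-shift is what the probe paths rely on before selecting an mv88e6xxx_info entry, so supporting another model mostly amounts to a new PORT_SWITCH_ID_PROD_NUM_* define and a matching mv88e6xxx_table[] entry.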