Diffstat (limited to 'drivers/net')
746 files changed, 64243 insertions, 20140 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 70dac7302..9599ed6f1 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -152,7 +152,7 @@ module_param(lacp_rate, charp, 0); MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; " "0 for slow, 1 for fast"); module_param(ad_select, charp, 0); -MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; " +MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; " "0 for stable (default), 1 for bandwidth, " "2 for count"); module_param(min_links, int, 0); @@ -4148,6 +4148,8 @@ static const struct net_device_ops bond_netdev_ops = { .ndo_add_slave = bond_enslave, .ndo_del_slave = bond_release, .ndo_fix_features = bond_fix_features, + .ndo_neigh_construct = netdev_default_l2upper_neigh_construct, + .ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy, .ndo_bridge_setlink = switchdev_port_bridge_setlink, .ndo_bridge_getlink = switchdev_port_bridge_getlink, .ndo_bridge_dellink = switchdev_port_bridge_dellink, @@ -4618,26 +4620,6 @@ static int bond_check_params(struct bond_params *params) return 0; } -static struct lock_class_key bonding_netdev_xmit_lock_key; -static struct lock_class_key bonding_netdev_addr_lock_key; -static struct lock_class_key bonding_tx_busylock_key; - -static void bond_set_lockdep_class_one(struct net_device *dev, - struct netdev_queue *txq, - void *_unused) -{ - lockdep_set_class(&txq->_xmit_lock, - &bonding_netdev_xmit_lock_key); -} - -static void bond_set_lockdep_class(struct net_device *dev) -{ - lockdep_set_class(&dev->addr_list_lock, - &bonding_netdev_addr_lock_key); - netdev_for_each_tx_queue(dev, bond_set_lockdep_class_one, NULL); - dev->qdisc_tx_busylock = &bonding_tx_busylock_key; -} - /* Called from registration process */ static int bond_init(struct net_device *bond_dev) { @@ -4650,7 +4632,7 @@ static int bond_init(struct net_device *bond_dev) if (!bond->wq) return -ENOMEM; - bond_set_lockdep_class(bond_dev); + netdev_lockdep_set_classes(bond_dev); list_add_tail(&bond->bond_list, &bn->dev_list); diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig index 547098086..f81df91a9 100644 --- a/drivers/net/caif/Kconfig +++ b/drivers/net/caif/Kconfig @@ -52,5 +52,5 @@ config CAIF_VIRTIO The caif driver for CAIF over Virtio. if CAIF_VIRTIO -source "drivers/vhost/Kconfig" +source "drivers/vhost/Kconfig.vringh" endif diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index 615c65da3..ddabce759 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c @@ -1201,7 +1201,7 @@ static int cfhsi_open(struct net_device *ndev) clear_bit(CFHSI_AWAKE, &cfhsi->bits); /* Create work thread. 
*/ - cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name); + cfhsi->wq = alloc_ordered_workqueue(cfhsi->ndev->name, WQ_MEM_RECLAIM); if (!cfhsi->wq) { netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n", __func__); @@ -1267,9 +1267,6 @@ static int cfhsi_close(struct net_device *ndev) /* going to shutdown driver */ set_bit(CFHSI_SHUTDOWN, &cfhsi->bits); - /* Flush workqueue */ - flush_workqueue(cfhsi->wq); - /* Delete timers if pending */ del_timer_sync(&cfhsi->inactivity_timer); del_timer_sync(&cfhsi->rx_slowpath_timer); diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index 4721948a9..3a529fbe5 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -185,8 +185,8 @@ static ssize_t print_frame(char *buf, size_t size, char *frm, /* Fast forward. */ i = count - cut; len += snprintf((buf + len), (size - len), - "--- %u bytes skipped ---\n", - (int)(count - (cut * 2))); + "--- %zu bytes skipped ---\n", + count - (cut * 2)); } if ((!(i % 10)) && i) { diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 0d40aef92..22570ea3a 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -104,16 +104,6 @@ config CAN_JANZ_ICAN3 This driver can also be built as a module. If so, the module will be called janz-ican3.ko. -config CAN_RCAR - tristate "Renesas R-Car CAN controller" - depends on ARCH_RENESAS || ARM - ---help--- - Say Y here if you want to use CAN controller found on Renesas R-Car - SoCs. - - To compile this driver as a module, choose M here: the module will - be called rcar_can. - config CAN_SUN4I tristate "Allwinner A10 CAN controller" depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST @@ -152,6 +142,7 @@ source "drivers/net/can/cc770/Kconfig" source "drivers/net/can/ifi_canfd/Kconfig" source "drivers/net/can/m_can/Kconfig" source "drivers/net/can/mscan/Kconfig" +source "drivers/net/can/rcar/Kconfig" source "drivers/net/can/sja1000/Kconfig" source "drivers/net/can/softing/Kconfig" source "drivers/net/can/spi/Kconfig" diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index e3db0c807..26ba4b794 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -10,6 +10,7 @@ can-dev-y := dev.o can-dev-$(CONFIG_CAN_LEDS) += led.o +obj-y += rcar/ obj-y += spi/ obj-y += usb/ obj-y += softing/ @@ -24,7 +25,6 @@ obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/ obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o obj-$(CONFIG_CAN_MSCAN) += mscan/ obj-$(CONFIG_CAN_M_CAN) += m_can/ -obj-$(CONFIG_CAN_RCAR) += rcar_can.o obj-$(CONFIG_CAN_SJA1000) += sja1000/ obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index ad535a854..8d6208c0b 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -21,6 +21,7 @@ #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/if_arp.h> +#include <linux/workqueue.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/skb.h> @@ -69,6 +70,7 @@ EXPORT_SYMBOL_GPL(can_len2dlc); #ifdef CONFIG_CAN_CALC_BITTIMING #define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */ +#define CAN_CALC_SYNC_SEG 1 /* * Bit-timing calculation derived from: @@ -83,98 +85,126 @@ EXPORT_SYMBOL_GPL(can_len2dlc); * registers of the CAN controller. You can find more information * in the header file linux/can/netlink.h. 
*/ -static int can_update_spt(const struct can_bittiming_const *btc, - int sampl_pt, int tseg, int *tseg1, int *tseg2) +static int can_update_sample_point(const struct can_bittiming_const *btc, + unsigned int sample_point_nominal, unsigned int tseg, + unsigned int *tseg1_ptr, unsigned int *tseg2_ptr, + unsigned int *sample_point_error_ptr) { - *tseg2 = tseg + 1 - (sampl_pt * (tseg + 1)) / 1000; - if (*tseg2 < btc->tseg2_min) - *tseg2 = btc->tseg2_min; - if (*tseg2 > btc->tseg2_max) - *tseg2 = btc->tseg2_max; - *tseg1 = tseg - *tseg2; - if (*tseg1 > btc->tseg1_max) { - *tseg1 = btc->tseg1_max; - *tseg2 = tseg - *tseg1; + unsigned int sample_point_error, best_sample_point_error = UINT_MAX; + unsigned int sample_point, best_sample_point = 0; + unsigned int tseg1, tseg2; + int i; + + for (i = 0; i <= 1; i++) { + tseg2 = tseg + CAN_CALC_SYNC_SEG - (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) / 1000 - i; + tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max); + tseg1 = tseg - tseg2; + if (tseg1 > btc->tseg1_max) { + tseg1 = btc->tseg1_max; + tseg2 = tseg - tseg1; + } + + sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) / (tseg + CAN_CALC_SYNC_SEG); + sample_point_error = abs(sample_point_nominal - sample_point); + + if ((sample_point <= sample_point_nominal) && (sample_point_error < best_sample_point_error)) { + best_sample_point = sample_point; + best_sample_point_error = sample_point_error; + *tseg1_ptr = tseg1; + *tseg2_ptr = tseg2; + } } - return 1000 * (tseg + 1 - *tseg2) / (tseg + 1); + + if (sample_point_error_ptr) + *sample_point_error_ptr = best_sample_point_error; + + return best_sample_point; } static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, const struct can_bittiming_const *btc) { struct can_priv *priv = netdev_priv(dev); - long best_error = 1000000000, error = 0; - int best_tseg = 0, best_brp = 0, brp = 0; - int tsegall, tseg = 0, tseg1 = 0, tseg2 = 0; - int spt_error = 1000, spt = 0, sampl_pt; - long rate; + unsigned int bitrate; /* current bitrate */ + unsigned int bitrate_error; /* difference between current and nominal value */ + unsigned int best_bitrate_error = UINT_MAX; + unsigned int sample_point_error; /* difference between current and nominal value */ + unsigned int best_sample_point_error = UINT_MAX; + unsigned int sample_point_nominal; /* nominal sample point */ + unsigned int best_tseg = 0; /* current best value for tseg */ + unsigned int best_brp = 0; /* current best value for brp */ + unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0; u64 v64; /* Use CiA recommended sample points */ if (bt->sample_point) { - sampl_pt = bt->sample_point; + sample_point_nominal = bt->sample_point; } else { if (bt->bitrate > 800000) - sampl_pt = 750; + sample_point_nominal = 750; else if (bt->bitrate > 500000) - sampl_pt = 800; + sample_point_nominal = 800; else - sampl_pt = 875; + sample_point_nominal = 875; } /* tseg even = round down, odd = round up */ for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1; tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) { - tsegall = 1 + tseg / 2; + tsegall = CAN_CALC_SYNC_SEG + tseg / 2; + /* Compute all possible tseg choices (tseg=tseg1+tseg2) */ brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2; - /* chose brp step which is possible in system */ + + /* choose brp step which is possible in system */ brp = (brp / btc->brp_inc) * btc->brp_inc; if ((brp < btc->brp_min) || (brp > btc->brp_max)) continue; - rate = priv->clock.freq / (brp * tsegall); - error = bt->bitrate - rate; + + 
bitrate = priv->clock.freq / (brp * tsegall); + bitrate_error = abs(bt->bitrate - bitrate); + /* tseg brp biterror */ - if (error < 0) - error = -error; - if (error > best_error) + if (bitrate_error > best_bitrate_error) continue; - best_error = error; - if (error == 0) { - spt = can_update_spt(btc, sampl_pt, tseg / 2, - &tseg1, &tseg2); - error = sampl_pt - spt; - if (error < 0) - error = -error; - if (error > spt_error) - continue; - spt_error = error; - } + + /* reset sample point error if we have a better bitrate */ + if (bitrate_error < best_bitrate_error) + best_sample_point_error = UINT_MAX; + + can_update_sample_point(btc, sample_point_nominal, tseg / 2, &tseg1, &tseg2, &sample_point_error); + if (sample_point_error > best_sample_point_error) + continue; + + best_sample_point_error = sample_point_error; + best_bitrate_error = bitrate_error; best_tseg = tseg / 2; best_brp = brp; - if (error == 0) + + if (bitrate_error == 0 && sample_point_error == 0) break; } - if (best_error) { + if (best_bitrate_error) { /* Error in one-tenth of a percent */ - error = (best_error * 1000) / bt->bitrate; - if (error > CAN_CALC_MAX_ERROR) { + v64 = (u64)best_bitrate_error * 1000; + do_div(v64, bt->bitrate); + bitrate_error = (u32)v64; + if (bitrate_error > CAN_CALC_MAX_ERROR) { netdev_err(dev, - "bitrate error %ld.%ld%% too high\n", - error / 10, error % 10); + "bitrate error %d.%d%% too high\n", + bitrate_error / 10, bitrate_error % 10); return -EDOM; - } else { - netdev_warn(dev, "bitrate error %ld.%ld%%\n", - error / 10, error % 10); } + netdev_warn(dev, "bitrate error %d.%d%%\n", + bitrate_error / 10, bitrate_error % 10); } /* real sample point */ - bt->sample_point = can_update_spt(btc, sampl_pt, best_tseg, - &tseg1, &tseg2); + bt->sample_point = can_update_sample_point(btc, sample_point_nominal, best_tseg, + &tseg1, &tseg2, NULL); - v64 = (u64)best_brp * 1000000000UL; + v64 = (u64)best_brp * 1000 * 1000 * 1000; do_div(v64, priv->clock.freq); bt->tq = (u32)v64; bt->prop_seg = tseg1 / 2; @@ -182,9 +212,9 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, bt->phase_seg2 = tseg2; /* check for sjw user settings */ - if (!bt->sjw || !btc->sjw_max) + if (!bt->sjw || !btc->sjw_max) { bt->sjw = 1; - else { + } else { /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */ if (bt->sjw > btc->sjw_max) bt->sjw = btc->sjw_max; @@ -194,8 +224,9 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt, } bt->brp = best_brp; - /* real bit-rate */ - bt->bitrate = priv->clock.freq / (bt->brp * (tseg1 + tseg2 + 1)); + + /* real bitrate */ + bt->bitrate = priv->clock.freq / (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2)); return 0; } @@ -471,9 +502,8 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb); /* * CAN device restart for bus-off recovery */ -static void can_restart(unsigned long data) +static void can_restart(struct net_device *dev) { - struct net_device *dev = (struct net_device *)data; struct can_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct sk_buff *skb; @@ -513,6 +543,14 @@ restart: netdev_err(dev, "Error %d during restart", err); } +static void can_restart_work(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct can_priv *priv = container_of(dwork, struct can_priv, restart_work); + + can_restart(priv->dev); +} + int can_restart_now(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); @@ -526,8 +564,8 @@ int can_restart_now(struct net_device *dev) if 
(priv->state != CAN_STATE_BUS_OFF) return -EBUSY; - /* Runs as soon as possible in the timer context */ - mod_timer(&priv->restart_timer, jiffies); + cancel_delayed_work_sync(&priv->restart_work); + can_restart(dev); return 0; } @@ -548,8 +586,8 @@ void can_bus_off(struct net_device *dev) netif_carrier_off(dev); if (priv->restart_ms) - mod_timer(&priv->restart_timer, - jiffies + (priv->restart_ms * HZ) / 1000); + schedule_delayed_work(&priv->restart_work, + msecs_to_jiffies(priv->restart_ms)); } EXPORT_SYMBOL_GPL(can_bus_off); @@ -658,6 +696,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) return NULL; priv = netdev_priv(dev); + priv->dev = dev; if (echo_skb_max) { priv->echo_skb_max = echo_skb_max; @@ -667,7 +706,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max) priv->state = CAN_STATE_STOPPED; - init_timer(&priv->restart_timer); + INIT_DELAYED_WORK(&priv->restart_work, can_restart_work); return dev; } @@ -748,8 +787,6 @@ int open_candev(struct net_device *dev) if (!netif_carrier_ok(dev)) netif_carrier_on(dev); - setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev); - return 0; } EXPORT_SYMBOL_GPL(open_candev); @@ -764,7 +801,7 @@ void close_candev(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); - del_timer_sync(&priv->restart_timer); + cancel_delayed_work_sync(&priv->restart_work); can_flush_echo_skb(dev); } EXPORT_SYMBOL_GPL(close_candev); diff --git a/drivers/net/can/rcar/Kconfig b/drivers/net/can/rcar/Kconfig new file mode 100644 index 000000000..7b03a3a37 --- /dev/null +++ b/drivers/net/can/rcar/Kconfig @@ -0,0 +1,21 @@ +config CAN_RCAR + tristate "Renesas R-Car CAN controller" + depends on ARCH_RENESAS || ARM + ---help--- + Say Y here if you want to use CAN controller found on Renesas R-Car + SoCs. + + To compile this driver as a module, choose M here: the module will + be called rcar_can. + +config CAN_RCAR_CANFD + tristate "Renesas R-Car CAN FD controller" + depends on ARCH_RENESAS || ARM + ---help--- + Say Y here if you want to use CAN FD controller found on + Renesas R-Car SoCs. The driver puts the controller in CAN FD only + mode, which can interoperate with CAN2.0 nodes but does not support + dedicated CAN 2.0 mode. + + To compile this driver as a module, choose M here: the module will + be called rcar_canfd. diff --git a/drivers/net/can/rcar/Makefile b/drivers/net/can/rcar/Makefile new file mode 100644 index 000000000..08de36a4c --- /dev/null +++ b/drivers/net/can/rcar/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for the Renesas R-Car CAN & CAN FD controller drivers +# + +obj-$(CONFIG_CAN_RCAR) += rcar_can.o +obj-$(CONFIG_CAN_RCAR_CANFD) += rcar_canfd.o diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c new file mode 100644 index 000000000..788459f6b --- /dev/null +++ b/drivers/net/can/rcar/rcar_can.c @@ -0,0 +1,929 @@ +/* Renesas R-Car CAN device driver + * + * Copyright (C) 2013 Cogent Embedded, Inc. <source@cogentembedded.com> + * Copyright (C) 2013 Renesas Solutions Corp. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/errno.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> +#include <linux/can/led.h> +#include <linux/can/dev.h> +#include <linux/clk.h> +#include <linux/can/platform/rcar_can.h> +#include <linux/of.h> + +#define RCAR_CAN_DRV_NAME "rcar_can" + +/* Mailbox configuration: + * mailbox 60 - 63 - Rx FIFO mailboxes + * mailbox 56 - 59 - Tx FIFO mailboxes + * non-FIFO mailboxes are not used + */ +#define RCAR_CAN_N_MBX 64 /* Number of mailboxes in non-FIFO mode */ +#define RCAR_CAN_RX_FIFO_MBX 60 /* Mailbox - window to Rx FIFO */ +#define RCAR_CAN_TX_FIFO_MBX 56 /* Mailbox - window to Tx FIFO */ +#define RCAR_CAN_FIFO_DEPTH 4 + +/* Mailbox registers structure */ +struct rcar_can_mbox_regs { + u32 id; /* IDE and RTR bits, SID and EID */ + u8 stub; /* Not used */ + u8 dlc; /* Data Length Code - bits [0..3] */ + u8 data[8]; /* Data Bytes */ + u8 tsh; /* Time Stamp Higher Byte */ + u8 tsl; /* Time Stamp Lower Byte */ +}; + +struct rcar_can_regs { + struct rcar_can_mbox_regs mb[RCAR_CAN_N_MBX]; /* Mailbox registers */ + u32 mkr_2_9[8]; /* Mask Registers 2-9 */ + u32 fidcr[2]; /* FIFO Received ID Compare Register */ + u32 mkivlr1; /* Mask Invalid Register 1 */ + u32 mier1; /* Mailbox Interrupt Enable Register 1 */ + u32 mkr_0_1[2]; /* Mask Registers 0-1 */ + u32 mkivlr0; /* Mask Invalid Register 0*/ + u32 mier0; /* Mailbox Interrupt Enable Register 0 */ + u8 pad_440[0x3c0]; + u8 mctl[64]; /* Message Control Registers */ + u16 ctlr; /* Control Register */ + u16 str; /* Status register */ + u8 bcr[3]; /* Bit Configuration Register */ + u8 clkr; /* Clock Select Register */ + u8 rfcr; /* Receive FIFO Control Register */ + u8 rfpcr; /* Receive FIFO Pointer Control Register */ + u8 tfcr; /* Transmit FIFO Control Register */ + u8 tfpcr; /* Transmit FIFO Pointer Control Register */ + u8 eier; /* Error Interrupt Enable Register */ + u8 eifr; /* Error Interrupt Factor Judge Register */ + u8 recr; /* Receive Error Count Register */ + u8 tecr; /* Transmit Error Count Register */ + u8 ecsr; /* Error Code Store Register */ + u8 cssr; /* Channel Search Support Register */ + u8 mssr; /* Mailbox Search Status Register */ + u8 msmr; /* Mailbox Search Mode Register */ + u16 tsr; /* Time Stamp Register */ + u8 afsr; /* Acceptance Filter Support Register */ + u8 pad_857; + u8 tcr; /* Test Control Register */ + u8 pad_859[7]; + u8 ier; /* Interrupt Enable Register */ + u8 isr; /* Interrupt Status Register */ + u8 pad_862; + u8 mbsmr; /* Mailbox Search Mask Register */ +}; + +struct rcar_can_priv { + struct can_priv can; /* Must be the first member! 
*/ + struct net_device *ndev; + struct napi_struct napi; + struct rcar_can_regs __iomem *regs; + struct clk *clk; + struct clk *can_clk; + u8 tx_dlc[RCAR_CAN_FIFO_DEPTH]; + u32 tx_head; + u32 tx_tail; + u8 clock_select; + u8 ier; +}; + +static const struct can_bittiming_const rcar_can_bittiming_const = { + .name = RCAR_CAN_DRV_NAME, + .tseg1_min = 4, + .tseg1_max = 16, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + +/* Control Register bits */ +#define RCAR_CAN_CTLR_BOM (3 << 11) /* Bus-Off Recovery Mode Bits */ +#define RCAR_CAN_CTLR_BOM_ENT (1 << 11) /* Entry to halt mode */ + /* at bus-off entry */ +#define RCAR_CAN_CTLR_SLPM (1 << 10) +#define RCAR_CAN_CTLR_CANM (3 << 8) /* Operating Mode Select Bit */ +#define RCAR_CAN_CTLR_CANM_HALT (1 << 9) +#define RCAR_CAN_CTLR_CANM_RESET (1 << 8) +#define RCAR_CAN_CTLR_CANM_FORCE_RESET (3 << 8) +#define RCAR_CAN_CTLR_MLM (1 << 3) /* Message Lost Mode Select */ +#define RCAR_CAN_CTLR_IDFM (3 << 1) /* ID Format Mode Select Bits */ +#define RCAR_CAN_CTLR_IDFM_MIXED (1 << 2) /* Mixed ID mode */ +#define RCAR_CAN_CTLR_MBM (1 << 0) /* Mailbox Mode select */ + +/* Status Register bits */ +#define RCAR_CAN_STR_RSTST (1 << 8) /* Reset Status Bit */ + +/* FIFO Received ID Compare Registers 0 and 1 bits */ +#define RCAR_CAN_FIDCR_IDE (1 << 31) /* ID Extension Bit */ +#define RCAR_CAN_FIDCR_RTR (1 << 30) /* Remote Transmission Request Bit */ + +/* Receive FIFO Control Register bits */ +#define RCAR_CAN_RFCR_RFEST (1 << 7) /* Receive FIFO Empty Status Flag */ +#define RCAR_CAN_RFCR_RFE (1 << 0) /* Receive FIFO Enable */ + +/* Transmit FIFO Control Register bits */ +#define RCAR_CAN_TFCR_TFUST (7 << 1) /* Transmit FIFO Unsent Message */ + /* Number Status Bits */ +#define RCAR_CAN_TFCR_TFUST_SHIFT 1 /* Offset of Transmit FIFO Unsent */ + /* Message Number Status Bits */ +#define RCAR_CAN_TFCR_TFE (1 << 0) /* Transmit FIFO Enable */ + +#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */ + /* for Rx mailboxes 0-31 */ +#define RCAR_CAN_N_RX_MKREGS2 8 + +/* Bit Configuration Register settings */ +#define RCAR_CAN_BCR_TSEG1(x) (((x) & 0x0f) << 20) +#define RCAR_CAN_BCR_BPR(x) (((x) & 0x3ff) << 8) +#define RCAR_CAN_BCR_SJW(x) (((x) & 0x3) << 4) +#define RCAR_CAN_BCR_TSEG2(x) ((x) & 0x07) + +/* Mailbox and Mask Registers bits */ +#define RCAR_CAN_IDE (1 << 31) +#define RCAR_CAN_RTR (1 << 30) +#define RCAR_CAN_SID_SHIFT 18 + +/* Mailbox Interrupt Enable Register 1 bits */ +#define RCAR_CAN_MIER1_RXFIE (1 << 28) /* Receive FIFO Interrupt Enable */ +#define RCAR_CAN_MIER1_TXFIE (1 << 24) /* Transmit FIFO Interrupt Enable */ + +/* Interrupt Enable Register bits */ +#define RCAR_CAN_IER_ERSIE (1 << 5) /* Error (ERS) Interrupt Enable Bit */ +#define RCAR_CAN_IER_RXFIE (1 << 4) /* Reception FIFO Interrupt */ + /* Enable Bit */ +#define RCAR_CAN_IER_TXFIE (1 << 3) /* Transmission FIFO Interrupt */ + /* Enable Bit */ +/* Interrupt Status Register bits */ +#define RCAR_CAN_ISR_ERSF (1 << 5) /* Error (ERS) Interrupt Status Bit */ +#define RCAR_CAN_ISR_RXFF (1 << 4) /* Reception FIFO Interrupt */ + /* Status Bit */ +#define RCAR_CAN_ISR_TXFF (1 << 3) /* Transmission FIFO Interrupt */ + /* Status Bit */ + +/* Error Interrupt Enable Register bits */ +#define RCAR_CAN_EIER_BLIE (1 << 7) /* Bus Lock Interrupt Enable */ +#define RCAR_CAN_EIER_OLIE (1 << 6) /* Overload Frame Transmit */ + /* Interrupt Enable */ +#define RCAR_CAN_EIER_ORIE (1 << 5) /* Receive Overrun Interrupt Enable */ +#define 
RCAR_CAN_EIER_BORIE (1 << 4) /* Bus-Off Recovery Interrupt Enable */ +#define RCAR_CAN_EIER_BOEIE (1 << 3) /* Bus-Off Entry Interrupt Enable */ +#define RCAR_CAN_EIER_EPIE (1 << 2) /* Error Passive Interrupt Enable */ +#define RCAR_CAN_EIER_EWIE (1 << 1) /* Error Warning Interrupt Enable */ +#define RCAR_CAN_EIER_BEIE (1 << 0) /* Bus Error Interrupt Enable */ + +/* Error Interrupt Factor Judge Register bits */ +#define RCAR_CAN_EIFR_BLIF (1 << 7) /* Bus Lock Detect Flag */ +#define RCAR_CAN_EIFR_OLIF (1 << 6) /* Overload Frame Transmission */ + /* Detect Flag */ +#define RCAR_CAN_EIFR_ORIF (1 << 5) /* Receive Overrun Detect Flag */ +#define RCAR_CAN_EIFR_BORIF (1 << 4) /* Bus-Off Recovery Detect Flag */ +#define RCAR_CAN_EIFR_BOEIF (1 << 3) /* Bus-Off Entry Detect Flag */ +#define RCAR_CAN_EIFR_EPIF (1 << 2) /* Error Passive Detect Flag */ +#define RCAR_CAN_EIFR_EWIF (1 << 1) /* Error Warning Detect Flag */ +#define RCAR_CAN_EIFR_BEIF (1 << 0) /* Bus Error Detect Flag */ + +/* Error Code Store Register bits */ +#define RCAR_CAN_ECSR_EDPM (1 << 7) /* Error Display Mode Select Bit */ +#define RCAR_CAN_ECSR_ADEF (1 << 6) /* ACK Delimiter Error Flag */ +#define RCAR_CAN_ECSR_BE0F (1 << 5) /* Bit Error (dominant) Flag */ +#define RCAR_CAN_ECSR_BE1F (1 << 4) /* Bit Error (recessive) Flag */ +#define RCAR_CAN_ECSR_CEF (1 << 3) /* CRC Error Flag */ +#define RCAR_CAN_ECSR_AEF (1 << 2) /* ACK Error Flag */ +#define RCAR_CAN_ECSR_FEF (1 << 1) /* Form Error Flag */ +#define RCAR_CAN_ECSR_SEF (1 << 0) /* Stuff Error Flag */ + +#define RCAR_CAN_NAPI_WEIGHT 4 +#define MAX_STR_READS 0x100 + +static void tx_failure_cleanup(struct net_device *ndev) +{ + int i; + + for (i = 0; i < RCAR_CAN_FIFO_DEPTH; i++) + can_free_echo_skb(ndev, i); +} + +static void rcar_can_error(struct net_device *ndev) +{ + struct rcar_can_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + u8 eifr, txerr = 0, rxerr = 0; + + /* Propagate the error condition to the CAN stack */ + skb = alloc_can_err_skb(ndev, &cf); + + eifr = readb(&priv->regs->eifr); + if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) { + txerr = readb(&priv->regs->tecr); + rxerr = readb(&priv->regs->recr); + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + } + if (eifr & RCAR_CAN_EIFR_BEIF) { + int rx_errors = 0, tx_errors = 0; + u8 ecsr; + + netdev_dbg(priv->ndev, "Bus error interrupt:\n"); + if (skb) + cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; + + ecsr = readb(&priv->regs->ecsr); + if (ecsr & RCAR_CAN_ECSR_ADEF) { + netdev_dbg(priv->ndev, "ACK Delimiter Error\n"); + tx_errors++; + writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr); + if (skb) + cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL; + } + if (ecsr & RCAR_CAN_ECSR_BE0F) { + netdev_dbg(priv->ndev, "Bit Error (dominant)\n"); + tx_errors++; + writeb(~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr); + if (skb) + cf->data[2] |= CAN_ERR_PROT_BIT0; + } + if (ecsr & RCAR_CAN_ECSR_BE1F) { + netdev_dbg(priv->ndev, "Bit Error (recessive)\n"); + tx_errors++; + writeb(~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr); + if (skb) + cf->data[2] |= CAN_ERR_PROT_BIT1; + } + if (ecsr & RCAR_CAN_ECSR_CEF) { + netdev_dbg(priv->ndev, "CRC Error\n"); + rx_errors++; + writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr); + if (skb) + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + } + if (ecsr & RCAR_CAN_ECSR_AEF) { + netdev_dbg(priv->ndev, "ACK Error\n"); + tx_errors++; + writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr); + if (skb) { + cf->can_id 
|= CAN_ERR_ACK; + cf->data[3] = CAN_ERR_PROT_LOC_ACK; + } + } + if (ecsr & RCAR_CAN_ECSR_FEF) { + netdev_dbg(priv->ndev, "Form Error\n"); + rx_errors++; + writeb(~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr); + if (skb) + cf->data[2] |= CAN_ERR_PROT_FORM; + } + if (ecsr & RCAR_CAN_ECSR_SEF) { + netdev_dbg(priv->ndev, "Stuff Error\n"); + rx_errors++; + writeb(~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr); + if (skb) + cf->data[2] |= CAN_ERR_PROT_STUFF; + } + + priv->can.can_stats.bus_error++; + ndev->stats.rx_errors += rx_errors; + ndev->stats.tx_errors += tx_errors; + writeb(~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr); + } + if (eifr & RCAR_CAN_EIFR_EWIF) { + netdev_dbg(priv->ndev, "Error warning interrupt\n"); + priv->can.state = CAN_STATE_ERROR_WARNING; + priv->can.can_stats.error_warning++; + /* Clear interrupt condition */ + writeb(~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr); + if (skb) + cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + } + if (eifr & RCAR_CAN_EIFR_EPIF) { + netdev_dbg(priv->ndev, "Error passive interrupt\n"); + priv->can.state = CAN_STATE_ERROR_PASSIVE; + priv->can.can_stats.error_passive++; + /* Clear interrupt condition */ + writeb(~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr); + if (skb) + cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE : + CAN_ERR_CRTL_RX_PASSIVE; + } + if (eifr & RCAR_CAN_EIFR_BOEIF) { + netdev_dbg(priv->ndev, "Bus-off entry interrupt\n"); + tx_failure_cleanup(ndev); + priv->ier = RCAR_CAN_IER_ERSIE; + writeb(priv->ier, &priv->regs->ier); + priv->can.state = CAN_STATE_BUS_OFF; + /* Clear interrupt condition */ + writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr); + priv->can.can_stats.bus_off++; + can_bus_off(ndev); + if (skb) + cf->can_id |= CAN_ERR_BUSOFF; + } + if (eifr & RCAR_CAN_EIFR_ORIF) { + netdev_dbg(priv->ndev, "Receive overrun error interrupt\n"); + ndev->stats.rx_over_errors++; + ndev->stats.rx_errors++; + writeb(~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr); + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + } + } + if (eifr & RCAR_CAN_EIFR_OLIF) { + netdev_dbg(priv->ndev, + "Overload Frame Transmission error interrupt\n"); + ndev->stats.rx_over_errors++; + ndev->stats.rx_errors++; + writeb(~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr); + if (skb) { + cf->can_id |= CAN_ERR_PROT; + cf->data[2] |= CAN_ERR_PROT_OVERLOAD; + } + } + + if (skb) { + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } +} + +static void rcar_can_tx_done(struct net_device *ndev) +{ + struct rcar_can_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + u8 isr; + + while (1) { + u8 unsent = readb(&priv->regs->tfcr); + + unsent = (unsent & RCAR_CAN_TFCR_TFUST) >> + RCAR_CAN_TFCR_TFUST_SHIFT; + if (priv->tx_head - priv->tx_tail <= unsent) + break; + stats->tx_packets++; + stats->tx_bytes += priv->tx_dlc[priv->tx_tail % + RCAR_CAN_FIFO_DEPTH]; + priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0; + can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH); + priv->tx_tail++; + netif_wake_queue(ndev); + } + /* Clear interrupt */ + isr = readb(&priv->regs->isr); + writeb(isr & ~RCAR_CAN_ISR_TXFF, &priv->regs->isr); + can_led_event(ndev, CAN_LED_EVENT_TX); +} + +static irqreturn_t rcar_can_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct rcar_can_priv *priv = netdev_priv(ndev); + u8 isr; + + isr = readb(&priv->regs->isr); + if (!(isr & priv->ier)) + return IRQ_NONE; + + if (isr & RCAR_CAN_ISR_ERSF) + rcar_can_error(ndev); + + if (isr & 
RCAR_CAN_ISR_TXFF) + rcar_can_tx_done(ndev); + + if (isr & RCAR_CAN_ISR_RXFF) { + if (napi_schedule_prep(&priv->napi)) { + /* Disable Rx FIFO interrupts */ + priv->ier &= ~RCAR_CAN_IER_RXFIE; + writeb(priv->ier, &priv->regs->ier); + __napi_schedule(&priv->napi); + } + } + + return IRQ_HANDLED; +} + +static void rcar_can_set_bittiming(struct net_device *dev) +{ + struct rcar_can_priv *priv = netdev_priv(dev); + struct can_bittiming *bt = &priv->can.bittiming; + u32 bcr; + + bcr = RCAR_CAN_BCR_TSEG1(bt->phase_seg1 + bt->prop_seg - 1) | + RCAR_CAN_BCR_BPR(bt->brp - 1) | RCAR_CAN_BCR_SJW(bt->sjw - 1) | + RCAR_CAN_BCR_TSEG2(bt->phase_seg2 - 1); + /* Don't overwrite CLKR with 32-bit BCR access; CLKR has 8-bit access. + * All the registers are big-endian but they get byte-swapped on 32-bit + * read/write (but not on 8-bit, contrary to the manuals)... + */ + writel((bcr << 8) | priv->clock_select, &priv->regs->bcr); +} + +static void rcar_can_start(struct net_device *ndev) +{ + struct rcar_can_priv *priv = netdev_priv(ndev); + u16 ctlr; + int i; + + /* Set controller to known mode: + * - FIFO mailbox mode + * - accept all messages + * - overrun mode + * CAN is in sleep mode after MCU hardware or software reset. + */ + ctlr = readw(&priv->regs->ctlr); + ctlr &= ~RCAR_CAN_CTLR_SLPM; + writew(ctlr, &priv->regs->ctlr); + /* Go to reset mode */ + ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET; + writew(ctlr, &priv->regs->ctlr); + for (i = 0; i < MAX_STR_READS; i++) { + if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST) + break; + } + rcar_can_set_bittiming(ndev); + ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */ + ctlr |= RCAR_CAN_CTLR_BOM_ENT; /* Entry to halt mode automatically */ + /* at bus-off */ + ctlr |= RCAR_CAN_CTLR_MBM; /* Select FIFO mailbox mode */ + ctlr |= RCAR_CAN_CTLR_MLM; /* Overrun mode */ + writew(ctlr, &priv->regs->ctlr); + + /* Accept all SID and EID */ + writel(0, &priv->regs->mkr_2_9[6]); + writel(0, &priv->regs->mkr_2_9[7]); + /* In FIFO mailbox mode, write "0" to bits 24 to 31 */ + writel(0, &priv->regs->mkivlr1); + /* Accept all frames */ + writel(0, &priv->regs->fidcr[0]); + writel(RCAR_CAN_FIDCR_IDE | RCAR_CAN_FIDCR_RTR, &priv->regs->fidcr[1]); + /* Enable and configure FIFO mailbox interrupts */ + writel(RCAR_CAN_MIER1_RXFIE | RCAR_CAN_MIER1_TXFIE, &priv->regs->mier1); + + priv->ier = RCAR_CAN_IER_ERSIE | RCAR_CAN_IER_RXFIE | + RCAR_CAN_IER_TXFIE; + writeb(priv->ier, &priv->regs->ier); + + /* Accumulate error codes */ + writeb(RCAR_CAN_ECSR_EDPM, &priv->regs->ecsr); + /* Enable error interrupts */ + writeb(RCAR_CAN_EIER_EWIE | RCAR_CAN_EIER_EPIE | RCAR_CAN_EIER_BOEIE | + (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING ? 
+ RCAR_CAN_EIER_BEIE : 0) | RCAR_CAN_EIER_ORIE | + RCAR_CAN_EIER_OLIE, &priv->regs->eier); + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + /* Go to operation mode */ + writew(ctlr & ~RCAR_CAN_CTLR_CANM, &priv->regs->ctlr); + for (i = 0; i < MAX_STR_READS; i++) { + if (!(readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)) + break; + } + /* Enable Rx and Tx FIFO */ + writeb(RCAR_CAN_RFCR_RFE, &priv->regs->rfcr); + writeb(RCAR_CAN_TFCR_TFE, &priv->regs->tfcr); +} + +static int rcar_can_open(struct net_device *ndev) +{ + struct rcar_can_priv *priv = netdev_priv(ndev); + int err; + + err = clk_prepare_enable(priv->clk); + if (err) { + netdev_err(ndev, + "failed to enable peripheral clock, error %d\n", + err); + goto out; + } + err = clk_prepare_enable(priv->can_clk); + if (err) { + netdev_err(ndev, "failed to enable CAN clock, error %d\n", + err); + goto out_clock; + } + err = open_candev(ndev); + if (err) { + netdev_err(ndev, "open_candev() failed, error %d\n", err); + goto out_can_clock; + } + napi_enable(&priv->napi); + err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev); + if (err) { + netdev_err(ndev, "request_irq(%d) failed, error %d\n", + ndev->irq, err); + goto out_close; + } + can_led_event(ndev, CAN_LED_EVENT_OPEN); + rcar_can_start(ndev); + netif_start_queue(ndev); + return 0; +out_close: + napi_disable(&priv->napi); + close_candev(ndev); +out_can_clock: + clk_disable_unprepare(priv->can_clk); +out_clock: + clk_disable_unprepare(priv->clk); +out: + return err; +} + +static void rcar_can_stop(struct net_device *ndev) +{ + struct rcar_can_priv *priv = netdev_priv(ndev); + u16 ctlr; + int i; + + /* Go to (force) reset mode */ + ctlr = readw(&priv->regs->ctlr); + ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET; + writew(ctlr, &priv->regs->ctlr); + for (i = 0; i < MAX_STR_READS; i++) { + if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST) + break; + } + writel(0, &priv->regs->mier0); + writel(0, &priv->regs->mier1); + writeb(0, &priv->regs->ier); + writeb(0, &priv->regs->eier); + /* Go to sleep mode */ + ctlr |= RCAR_CAN_CTLR_SLPM; + writew(ctlr, &priv->regs->ctlr); + priv->can.state = CAN_STATE_STOPPED; +} + +static int rcar_can_close(struct net_device *ndev) +{ + struct rcar_can_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + rcar_can_stop(ndev); + free_irq(ndev->irq, ndev); + napi_disable(&priv->napi); + clk_disable_unprepare(priv->can_clk); + clk_disable_unprepare(priv->clk); + close_candev(ndev); + can_led_event(ndev, CAN_LED_EVENT_STOP); + return 0; +} + +static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct rcar_can_priv *priv = netdev_priv(ndev); + struct can_frame *cf = (struct can_frame *)skb->data; + u32 data, i; + + if (can_dropped_invalid_skb(ndev, skb)) + return NETDEV_TX_OK; + + if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */ + data = (cf->can_id & CAN_EFF_MASK) | RCAR_CAN_IDE; + else /* Standard frame format */ + data = (cf->can_id & CAN_SFF_MASK) << RCAR_CAN_SID_SHIFT; + + if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */ + data |= RCAR_CAN_RTR; + } else { + for (i = 0; i < cf->can_dlc; i++) + writeb(cf->data[i], + &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].data[i]); + } + + writel(data, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].id); + + writeb(cf->can_dlc, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc); + + priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->can_dlc; + can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH); + priv->tx_head++; + /* Start Tx: write 0xff to the TFPCR 
register to increment + * the CPU-side pointer for the transmit FIFO to the next + * mailbox location + */ + writeb(0xff, &priv->regs->tfpcr); + /* Stop the queue if we've filled all FIFO entries */ + if (priv->tx_head - priv->tx_tail >= RCAR_CAN_FIFO_DEPTH) + netif_stop_queue(ndev); + + return NETDEV_TX_OK; +} + +static const struct net_device_ops rcar_can_netdev_ops = { + .ndo_open = rcar_can_open, + .ndo_stop = rcar_can_close, + .ndo_start_xmit = rcar_can_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +static void rcar_can_rx_pkt(struct rcar_can_priv *priv) +{ + struct net_device_stats *stats = &priv->ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + u32 data; + u8 dlc; + + skb = alloc_can_skb(priv->ndev, &cf); + if (!skb) { + stats->rx_dropped++; + return; + } + + data = readl(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].id); + if (data & RCAR_CAN_IDE) + cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG; + else + cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK; + + dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc); + cf->can_dlc = get_can_dlc(dlc); + if (data & RCAR_CAN_RTR) { + cf->can_id |= CAN_RTR_FLAG; + } else { + for (dlc = 0; dlc < cf->can_dlc; dlc++) + cf->data[dlc] = + readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]); + } + + can_led_event(priv->ndev, CAN_LED_EVENT_RX); + + stats->rx_bytes += cf->can_dlc; + stats->rx_packets++; + netif_receive_skb(skb); +} + +static int rcar_can_rx_poll(struct napi_struct *napi, int quota) +{ + struct rcar_can_priv *priv = container_of(napi, + struct rcar_can_priv, napi); + int num_pkts; + + for (num_pkts = 0; num_pkts < quota; num_pkts++) { + u8 rfcr, isr; + + isr = readb(&priv->regs->isr); + /* Clear interrupt bit */ + if (isr & RCAR_CAN_ISR_RXFF) + writeb(isr & ~RCAR_CAN_ISR_RXFF, &priv->regs->isr); + rfcr = readb(&priv->regs->rfcr); + if (rfcr & RCAR_CAN_RFCR_RFEST) + break; + rcar_can_rx_pkt(priv); + /* Write 0xff to the RFPCR register to increment + * the CPU-side pointer for the receive FIFO + * to the next mailbox location + */ + writeb(0xff, &priv->regs->rfpcr); + } + /* All packets processed */ + if (num_pkts < quota) { + napi_complete(napi); + priv->ier |= RCAR_CAN_IER_RXFIE; + writeb(priv->ier, &priv->regs->ier); + } + return num_pkts; +} + +static int rcar_can_do_set_mode(struct net_device *ndev, enum can_mode mode) +{ + switch (mode) { + case CAN_MODE_START: + rcar_can_start(ndev); + netif_wake_queue(ndev); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int rcar_can_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + struct rcar_can_priv *priv = netdev_priv(dev); + int err; + + err = clk_prepare_enable(priv->clk); + if (err) + return err; + bec->txerr = readb(&priv->regs->tecr); + bec->rxerr = readb(&priv->regs->recr); + clk_disable_unprepare(priv->clk); + return 0; +} + +static const char * const clock_names[] = { + [CLKR_CLKP1] = "clkp1", + [CLKR_CLKP2] = "clkp2", + [CLKR_CLKEXT] = "can_clk", +}; + +static int rcar_can_probe(struct platform_device *pdev) +{ + struct rcar_can_platform_data *pdata; + struct rcar_can_priv *priv; + struct net_device *ndev; + struct resource *mem; + void __iomem *addr; + u32 clock_select = CLKR_CLKP1; + int err = -ENODEV; + int irq; + + if (pdev->dev.of_node) { + of_property_read_u32(pdev->dev.of_node, + "renesas,can-clock-select", &clock_select); + } else { + pdata = dev_get_platdata(&pdev->dev); + if (!pdata) { + dev_err(&pdev->dev, "No platform data provided!\n"); + goto fail; + } + clock_select = pdata->clock_select; 
+ } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "No IRQ resource\n"); + err = irq; + goto fail; + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + addr = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(addr)) { + err = PTR_ERR(addr); + goto fail; + } + + ndev = alloc_candev(sizeof(struct rcar_can_priv), RCAR_CAN_FIFO_DEPTH); + if (!ndev) { + dev_err(&pdev->dev, "alloc_candev() failed\n"); + err = -ENOMEM; + goto fail; + } + + priv = netdev_priv(ndev); + + priv->clk = devm_clk_get(&pdev->dev, "clkp1"); + if (IS_ERR(priv->clk)) { + err = PTR_ERR(priv->clk); + dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n", + err); + goto fail_clk; + } + + if (clock_select >= ARRAY_SIZE(clock_names)) { + err = -EINVAL; + dev_err(&pdev->dev, "invalid CAN clock selected\n"); + goto fail_clk; + } + priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]); + if (IS_ERR(priv->can_clk)) { + err = PTR_ERR(priv->can_clk); + dev_err(&pdev->dev, "cannot get CAN clock, error %d\n", err); + goto fail_clk; + } + + ndev->netdev_ops = &rcar_can_netdev_ops; + ndev->irq = irq; + ndev->flags |= IFF_ECHO; + priv->ndev = ndev; + priv->regs = addr; + priv->clock_select = clock_select; + priv->can.clock.freq = clk_get_rate(priv->can_clk); + priv->can.bittiming_const = &rcar_can_bittiming_const; + priv->can.do_set_mode = rcar_can_do_set_mode; + priv->can.do_get_berr_counter = rcar_can_get_berr_counter; + priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING; + platform_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, &pdev->dev); + + netif_napi_add(ndev, &priv->napi, rcar_can_rx_poll, + RCAR_CAN_NAPI_WEIGHT); + err = register_candev(ndev); + if (err) { + dev_err(&pdev->dev, "register_candev() failed, error %d\n", + err); + goto fail_candev; + } + + devm_can_led_init(ndev); + + dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n", + priv->regs, ndev->irq); + + return 0; +fail_candev: + netif_napi_del(&priv->napi); +fail_clk: + free_candev(ndev); +fail: + return err; +} + +static int rcar_can_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct rcar_can_priv *priv = netdev_priv(ndev); + + unregister_candev(ndev); + netif_napi_del(&priv->napi); + free_candev(ndev); + return 0; +} + +static int __maybe_unused rcar_can_suspend(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct rcar_can_priv *priv = netdev_priv(ndev); + u16 ctlr; + + if (netif_running(ndev)) { + netif_stop_queue(ndev); + netif_device_detach(ndev); + } + ctlr = readw(&priv->regs->ctlr); + ctlr |= RCAR_CAN_CTLR_CANM_HALT; + writew(ctlr, &priv->regs->ctlr); + ctlr |= RCAR_CAN_CTLR_SLPM; + writew(ctlr, &priv->regs->ctlr); + priv->can.state = CAN_STATE_SLEEPING; + + clk_disable(priv->clk); + return 0; +} + +static int __maybe_unused rcar_can_resume(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct rcar_can_priv *priv = netdev_priv(ndev); + u16 ctlr; + int err; + + err = clk_enable(priv->clk); + if (err) { + netdev_err(ndev, "clk_enable() failed, error %d\n", err); + return err; + } + + ctlr = readw(&priv->regs->ctlr); + ctlr &= ~RCAR_CAN_CTLR_SLPM; + writew(ctlr, &priv->regs->ctlr); + ctlr &= ~RCAR_CAN_CTLR_CANM; + writew(ctlr, &priv->regs->ctlr); + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + if (netif_running(ndev)) { + netif_device_attach(ndev); + netif_start_queue(ndev); + } + return 0; +} + +static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume); + 
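[The Tx path in rcar_can_start_xmit()/rcar_can_tx_done() above tracks FIFO occupancy with free-running tx_head/tx_tail counters: the echo-skb slot is the counter modulo RCAR_CAN_FIFO_DEPTH, and the net queue is stopped once tx_head - tx_tail reaches the FIFO depth. A minimal standalone sketch of that accounting follows; the tx_ring type and helper names are hypothetical illustrations, not part of the driver.

#include <stdbool.h>
#include <stdint.h>

#define FIFO_DEPTH 4	/* RCAR_CAN_FIFO_DEPTH in the driver */

struct tx_ring {
	uint32_t head;	/* incremented when a frame is queued (start_xmit) */
	uint32_t tail;	/* incremented when a frame completes (tx_done) */
};

/* Slot for the next transmit; mirrors tx_head % RCAR_CAN_FIFO_DEPTH */
unsigned int tx_slot(const struct tx_ring *r)
{
	return r->head % FIFO_DEPTH;
}

/* Queue one frame; returns true when the FIFO is now full and the
 * net queue must be stopped, as rcar_can_start_xmit() does with
 * netif_stop_queue().
 */
bool tx_queue_frame(struct tx_ring *r)
{
	r->head++;
	return r->head - r->tail >= FIFO_DEPTH;
}

/* Complete one frame; returns true when the queue has room again,
 * mirroring the netif_wake_queue() call in rcar_can_tx_done().
 */
bool tx_complete_frame(struct tx_ring *r)
{
	r->tail++;
	return r->head - r->tail < FIFO_DEPTH;
}

Because the counters are unsigned and only their difference is ever used, the arithmetic stays correct even when head wraps around; this free-running counter idiom avoids explicit index wrapping.]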
+static const struct of_device_id rcar_can_of_table[] __maybe_unused = { + { .compatible = "renesas,can-r8a7778" }, + { .compatible = "renesas,can-r8a7779" }, + { .compatible = "renesas,can-r8a7790" }, + { .compatible = "renesas,can-r8a7791" }, + { .compatible = "renesas,rcar-gen1-can" }, + { .compatible = "renesas,rcar-gen2-can" }, + { .compatible = "renesas,rcar-gen3-can" }, + { } +}; +MODULE_DEVICE_TABLE(of, rcar_can_of_table); + +static struct platform_driver rcar_can_driver = { + .driver = { + .name = RCAR_CAN_DRV_NAME, + .of_match_table = of_match_ptr(rcar_can_of_table), + .pm = &rcar_can_pm_ops, + }, + .probe = rcar_can_probe, + .remove = rcar_can_remove, +}; + +module_platform_driver(rcar_can_driver); + +MODULE_AUTHOR("Cogent Embedded, Inc."); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("CAN driver for Renesas R-Car SoC"); +MODULE_ALIAS("platform:" RCAR_CAN_DRV_NAME); diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c new file mode 100644 index 000000000..43cdd5544 --- /dev/null +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -0,0 +1,1858 @@ +/* Renesas R-Car CAN FD device driver + * + * Copyright (C) 2015 Renesas Electronics Corp. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +/* The R-Car CAN FD controller can operate in either one of the below two modes + * - CAN FD only mode + * - Classical CAN (CAN 2.0) only mode + * + * This driver puts the controller in CAN FD only mode by default. In this + * mode, the controller acts as a CAN FD node that can also interoperate with + * CAN 2.0 nodes. + * + * To switch the controller to Classical CAN (CAN 2.0) only mode, add + * "renesas,no-can-fd" optional property to the device tree node. A h/w reset is + * also required to switch modes. + * + * Note: The h/w manual register naming convention is clumsy and not acceptable + * to use as it is in the driver. However, those names are added as comments + * wherever it is modified to a readable name. 
+ */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/errno.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> +#include <linux/can/led.h> +#include <linux/can/dev.h> +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/bitmap.h> +#include <linux/bitops.h> +#include <linux/iopoll.h> + +#define RCANFD_DRV_NAME "rcar_canfd" + +/* Global register bits */ + +/* RSCFDnCFDGRMCFG */ +#define RCANFD_GRMCFG_RCMC BIT(0) + +/* RSCFDnCFDGCFG / RSCFDnGCFG */ +#define RCANFD_GCFG_EEFE BIT(6) +#define RCANFD_GCFG_CMPOC BIT(5) /* CAN FD only */ +#define RCANFD_GCFG_DCS BIT(4) +#define RCANFD_GCFG_DCE BIT(1) +#define RCANFD_GCFG_TPRI BIT(0) + +/* RSCFDnCFDGCTR / RSCFDnGCTR */ +#define RCANFD_GCTR_TSRST BIT(16) +#define RCANFD_GCTR_CFMPOFIE BIT(11) /* CAN FD only */ +#define RCANFD_GCTR_THLEIE BIT(10) +#define RCANFD_GCTR_MEIE BIT(9) +#define RCANFD_GCTR_DEIE BIT(8) +#define RCANFD_GCTR_GSLPR BIT(2) +#define RCANFD_GCTR_GMDC_MASK (0x3) +#define RCANFD_GCTR_GMDC_GOPM (0x0) +#define RCANFD_GCTR_GMDC_GRESET (0x1) +#define RCANFD_GCTR_GMDC_GTEST (0x2) + +/* RSCFDnCFDGSTS / RSCFDnGSTS */ +#define RCANFD_GSTS_GRAMINIT BIT(3) +#define RCANFD_GSTS_GSLPSTS BIT(2) +#define RCANFD_GSTS_GHLTSTS BIT(1) +#define RCANFD_GSTS_GRSTSTS BIT(0) +/* Non-operational status */ +#define RCANFD_GSTS_GNOPM (BIT(0) | BIT(1) | BIT(2) | BIT(3)) + +/* RSCFDnCFDGERFL / RSCFDnGERFL */ +#define RCANFD_GERFL_EEF1 BIT(17) +#define RCANFD_GERFL_EEF0 BIT(16) +#define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */ +#define RCANFD_GERFL_THLES BIT(2) +#define RCANFD_GERFL_MES BIT(1) +#define RCANFD_GERFL_DEF BIT(0) + +#define RCANFD_GERFL_ERR(gpriv, x) ((x) & (RCANFD_GERFL_EEF1 |\ + RCANFD_GERFL_EEF0 | RCANFD_GERFL_MES |\ + (gpriv->fdmode ?\ + RCANFD_GERFL_CMPOF : 0))) + +/* AFL Rx rules registers */ + +/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */ +#define RCANFD_GAFLCFG_SETRNC(n, x) (((x) & 0xff) << (24 - n * 8)) +#define RCANFD_GAFLCFG_GETRNC(n, x) (((x) >> (24 - n * 8)) & 0xff) + +/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */ +#define RCANFD_GAFLECTR_AFLDAE BIT(8) +#define RCANFD_GAFLECTR_AFLPN(x) ((x) & 0x1f) + +/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */ +#define RCANFD_GAFLID_GAFLLB BIT(29) + +/* RSCFDnCFDGAFLP1_j / RSCFDnGAFLP1_j */ +#define RCANFD_GAFLP1_GAFLFDP(x) (1 << (x)) + +/* Channel register bits */ + +/* RSCFDnCmCFG - Classical CAN only */ +#define RCANFD_CFG_SJW(x) (((x) & 0x3) << 24) +#define RCANFD_CFG_TSEG2(x) (((x) & 0x7) << 20) +#define RCANFD_CFG_TSEG1(x) (((x) & 0xf) << 16) +#define RCANFD_CFG_BRP(x) (((x) & 0x3ff) << 0) + +/* RSCFDnCFDCmNCFG - CAN FD only */ +#define RCANFD_NCFG_NTSEG2(x) (((x) & 0x1f) << 24) +#define RCANFD_NCFG_NTSEG1(x) (((x) & 0x7f) << 16) +#define RCANFD_NCFG_NSJW(x) (((x) & 0x1f) << 11) +#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0) + +/* RSCFDnCFDCmCTR / RSCFDnCmCTR */ +#define RCANFD_CCTR_CTME BIT(24) +#define RCANFD_CCTR_ERRD BIT(23) +#define RCANFD_CCTR_BOM_MASK (0x3 << 21) +#define RCANFD_CCTR_BOM_ISO (0x0 << 21) +#define RCANFD_CCTR_BOM_BENTRY (0x1 << 21) +#define RCANFD_CCTR_BOM_BEND (0x2 << 21) +#define RCANFD_CCTR_TDCVFIE BIT(19) +#define RCANFD_CCTR_SOCOIE BIT(18) +#define RCANFD_CCTR_EOCOIE BIT(17) +#define RCANFD_CCTR_TAIE BIT(16) +#define RCANFD_CCTR_ALIE BIT(15) +#define RCANFD_CCTR_BLIE BIT(14) +#define RCANFD_CCTR_OLIE BIT(13) +#define RCANFD_CCTR_BORIE BIT(12) +#define RCANFD_CCTR_BOEIE BIT(11) +#define RCANFD_CCTR_EPIE BIT(10) 
+#define RCANFD_CCTR_EWIE BIT(9) +#define RCANFD_CCTR_BEIE BIT(8) +#define RCANFD_CCTR_CSLPR BIT(2) +#define RCANFD_CCTR_CHMDC_MASK (0x3) +#define RCANFD_CCTR_CHDMC_COPM (0x0) +#define RCANFD_CCTR_CHDMC_CRESET (0x1) +#define RCANFD_CCTR_CHDMC_CHLT (0x2) + +/* RSCFDnCFDCmSTS / RSCFDnCmSTS */ +#define RCANFD_CSTS_COMSTS BIT(7) +#define RCANFD_CSTS_RECSTS BIT(6) +#define RCANFD_CSTS_TRMSTS BIT(5) +#define RCANFD_CSTS_BOSTS BIT(4) +#define RCANFD_CSTS_EPSTS BIT(3) +#define RCANFD_CSTS_SLPSTS BIT(2) +#define RCANFD_CSTS_HLTSTS BIT(1) +#define RCANFD_CSTS_CRSTSTS BIT(0) + +#define RCANFD_CSTS_TECCNT(x) (((x) >> 24) & 0xff) +#define RCANFD_CSTS_RECCNT(x) (((x) >> 16) & 0xff) + +/* RSCFDnCFDCmERFL / RSCFDnCmERFL */ +#define RCANFD_CERFL_ADERR BIT(14) +#define RCANFD_CERFL_B0ERR BIT(13) +#define RCANFD_CERFL_B1ERR BIT(12) +#define RCANFD_CERFL_CERR BIT(11) +#define RCANFD_CERFL_AERR BIT(10) +#define RCANFD_CERFL_FERR BIT(9) +#define RCANFD_CERFL_SERR BIT(8) +#define RCANFD_CERFL_ALF BIT(7) +#define RCANFD_CERFL_BLF BIT(6) +#define RCANFD_CERFL_OVLF BIT(5) +#define RCANFD_CERFL_BORF BIT(4) +#define RCANFD_CERFL_BOEF BIT(3) +#define RCANFD_CERFL_EPF BIT(2) +#define RCANFD_CERFL_EWF BIT(1) +#define RCANFD_CERFL_BEF BIT(0) + +#define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */ + +/* RSCFDnCFDCmDCFG */ +#define RCANFD_DCFG_DSJW(x) (((x) & 0x7) << 24) +#define RCANFD_DCFG_DTSEG2(x) (((x) & 0x7) << 20) +#define RCANFD_DCFG_DTSEG1(x) (((x) & 0xf) << 16) +#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0) + +/* RSCFDnCFDCmFDCFG */ +#define RCANFD_FDCFG_TDCE BIT(9) +#define RCANFD_FDCFG_TDCOC BIT(8) +#define RCANFD_FDCFG_TDCO(x) (((x) & 0x7f) >> 16) + +/* RSCFDnCFDRFCCx */ +#define RCANFD_RFCC_RFIM BIT(12) +#define RCANFD_RFCC_RFDC(x) (((x) & 0x7) << 8) +#define RCANFD_RFCC_RFPLS(x) (((x) & 0x7) << 4) +#define RCANFD_RFCC_RFIE BIT(1) +#define RCANFD_RFCC_RFE BIT(0) + +/* RSCFDnCFDRFSTSx */ +#define RCANFD_RFSTS_RFIF BIT(3) +#define RCANFD_RFSTS_RFMLT BIT(2) +#define RCANFD_RFSTS_RFFLL BIT(1) +#define RCANFD_RFSTS_RFEMP BIT(0) + +/* RSCFDnCFDRFIDx */ +#define RCANFD_RFID_RFIDE BIT(31) +#define RCANFD_RFID_RFRTR BIT(30) + +/* RSCFDnCFDRFPTRx */ +#define RCANFD_RFPTR_RFDLC(x) (((x) >> 28) & 0xf) +#define RCANFD_RFPTR_RFPTR(x) (((x) >> 16) & 0xfff) +#define RCANFD_RFPTR_RFTS(x) (((x) >> 0) & 0xffff) + +/* RSCFDnCFDRFFDSTSx */ +#define RCANFD_RFFDSTS_RFFDF BIT(2) +#define RCANFD_RFFDSTS_RFBRS BIT(1) +#define RCANFD_RFFDSTS_RFESI BIT(0) + +/* Common FIFO bits */ + +/* RSCFDnCFDCFCCk */ +#define RCANFD_CFCC_CFTML(x) (((x) & 0xf) << 20) +#define RCANFD_CFCC_CFM(x) (((x) & 0x3) << 16) +#define RCANFD_CFCC_CFIM BIT(12) +#define RCANFD_CFCC_CFDC(x) (((x) & 0x7) << 8) +#define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4) +#define RCANFD_CFCC_CFTXIE BIT(2) +#define RCANFD_CFCC_CFE BIT(0) + +/* RSCFDnCFDCFSTSk */ +#define RCANFD_CFSTS_CFMC(x) (((x) >> 8) & 0xff) +#define RCANFD_CFSTS_CFTXIF BIT(4) +#define RCANFD_CFSTS_CFMLT BIT(2) +#define RCANFD_CFSTS_CFFLL BIT(1) +#define RCANFD_CFSTS_CFEMP BIT(0) + +/* RSCFDnCFDCFIDk */ +#define RCANFD_CFID_CFIDE BIT(31) +#define RCANFD_CFID_CFRTR BIT(30) +#define RCANFD_CFID_CFID_MASK(x) ((x) & 0x1fffffff) + +/* RSCFDnCFDCFPTRk */ +#define RCANFD_CFPTR_CFDLC(x) (((x) & 0xf) << 28) +#define RCANFD_CFPTR_CFPTR(x) (((x) & 0xfff) << 16) +#define RCANFD_CFPTR_CFTS(x) (((x) & 0xff) << 0) + +/* RSCFDnCFDCFFDCSTSk */ +#define RCANFD_CFFDCSTS_CFFDF BIT(2) +#define RCANFD_CFFDCSTS_CFBRS BIT(1) +#define RCANFD_CFFDCSTS_CFESI BIT(0) + +/* This controller supports either Classical CAN only 
mode or CAN FD only mode. + * These modes are supported in two separate set of register maps & names. + * However, some of the register offsets are common for both modes. Those + * offsets are listed below as Common registers. + * + * The CAN FD only mode specific registers & Classical CAN only mode specific + * registers are listed separately. Their register names starts with + * RCANFD_F_xxx & RCANFD_C_xxx respectively. + */ + +/* Common registers */ + +/* RSCFDnCFDCmNCFG / RSCFDnCmCFG */ +#define RCANFD_CCFG(m) (0x0000 + (0x10 * (m))) +/* RSCFDnCFDCmCTR / RSCFDnCmCTR */ +#define RCANFD_CCTR(m) (0x0004 + (0x10 * (m))) +/* RSCFDnCFDCmSTS / RSCFDnCmSTS */ +#define RCANFD_CSTS(m) (0x0008 + (0x10 * (m))) +/* RSCFDnCFDCmERFL / RSCFDnCmERFL */ +#define RCANFD_CERFL(m) (0x000C + (0x10 * (m))) + +/* RSCFDnCFDGCFG / RSCFDnGCFG */ +#define RCANFD_GCFG (0x0084) +/* RSCFDnCFDGCTR / RSCFDnGCTR */ +#define RCANFD_GCTR (0x0088) +/* RSCFDnCFDGCTS / RSCFDnGCTS */ +#define RCANFD_GSTS (0x008c) +/* RSCFDnCFDGERFL / RSCFDnGERFL */ +#define RCANFD_GERFL (0x0090) +/* RSCFDnCFDGTSC / RSCFDnGTSC */ +#define RCANFD_GTSC (0x0094) +/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */ +#define RCANFD_GAFLECTR (0x0098) +/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */ +#define RCANFD_GAFLCFG0 (0x009c) +/* RSCFDnCFDGAFLCFG1 / RSCFDnGAFLCFG1 */ +#define RCANFD_GAFLCFG1 (0x00a0) +/* RSCFDnCFDRMNB / RSCFDnRMNB */ +#define RCANFD_RMNB (0x00a4) +/* RSCFDnCFDRMND / RSCFDnRMND */ +#define RCANFD_RMND(y) (0x00a8 + (0x04 * (y))) + +/* RSCFDnCFDRFCCx / RSCFDnRFCCx */ +#define RCANFD_RFCC(x) (0x00b8 + (0x04 * (x))) +/* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */ +#define RCANFD_RFSTS(x) (0x00d8 + (0x04 * (x))) +/* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */ +#define RCANFD_RFPCTR(x) (0x00f8 + (0x04 * (x))) + +/* Common FIFO Control registers */ + +/* RSCFDnCFDCFCCx / RSCFDnCFCCx */ +#define RCANFD_CFCC(ch, idx) (0x0118 + (0x0c * (ch)) + \ + (0x04 * (idx))) +/* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */ +#define RCANFD_CFSTS(ch, idx) (0x0178 + (0x0c * (ch)) + \ + (0x04 * (idx))) +/* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */ +#define RCANFD_CFPCTR(ch, idx) (0x01d8 + (0x0c * (ch)) + \ + (0x04 * (idx))) + +/* RSCFDnCFDFESTS / RSCFDnFESTS */ +#define RCANFD_FESTS (0x0238) +/* RSCFDnCFDFFSTS / RSCFDnFFSTS */ +#define RCANFD_FFSTS (0x023c) +/* RSCFDnCFDFMSTS / RSCFDnFMSTS */ +#define RCANFD_FMSTS (0x0240) +/* RSCFDnCFDRFISTS / RSCFDnRFISTS */ +#define RCANFD_RFISTS (0x0244) +/* RSCFDnCFDCFRISTS / RSCFDnCFRISTS */ +#define RCANFD_CFRISTS (0x0248) +/* RSCFDnCFDCFTISTS / RSCFDnCFTISTS */ +#define RCANFD_CFTISTS (0x024c) + +/* RSCFDnCFDTMCp / RSCFDnTMCp */ +#define RCANFD_TMC(p) (0x0250 + (0x01 * (p))) +/* RSCFDnCFDTMSTSp / RSCFDnTMSTSp */ +#define RCANFD_TMSTS(p) (0x02d0 + (0x01 * (p))) + +/* RSCFDnCFDTMTRSTSp / RSCFDnTMTRSTSp */ +#define RCANFD_TMTRSTS(y) (0x0350 + (0x04 * (y))) +/* RSCFDnCFDTMTARSTSp / RSCFDnTMTARSTSp */ +#define RCANFD_TMTARSTS(y) (0x0360 + (0x04 * (y))) +/* RSCFDnCFDTMTCSTSp / RSCFDnTMTCSTSp */ +#define RCANFD_TMTCSTS(y) (0x0370 + (0x04 * (y))) +/* RSCFDnCFDTMTASTSp / RSCFDnTMTASTSp */ +#define RCANFD_TMTASTS(y) (0x0380 + (0x04 * (y))) +/* RSCFDnCFDTMIECy / RSCFDnTMIECy */ +#define RCANFD_TMIEC(y) (0x0390 + (0x04 * (y))) + +/* RSCFDnCFDTXQCCm / RSCFDnTXQCCm */ +#define RCANFD_TXQCC(m) (0x03a0 + (0x04 * (m))) +/* RSCFDnCFDTXQSTSm / RSCFDnTXQSTSm */ +#define RCANFD_TXQSTS(m) (0x03c0 + (0x04 * (m))) +/* RSCFDnCFDTXQPCTRm / RSCFDnTXQPCTRm */ +#define RCANFD_TXQPCTR(m) (0x03e0 + (0x04 * (m))) + +/* RSCFDnCFDTHLCCm / RSCFDnTHLCCm */ +#define RCANFD_THLCC(m) (0x0400 + (0x04 * 
(m))) +/* RSCFDnCFDTHLSTSm / RSCFDnTHLSTSm */ +#define RCANFD_THLSTS(m) (0x0420 + (0x04 * (m))) +/* RSCFDnCFDTHLPCTRm / RSCFDnTHLPCTRm */ +#define RCANFD_THLPCTR(m) (0x0440 + (0x04 * (m))) + +/* RSCFDnCFDGTINTSTS0 / RSCFDnGTINTSTS0 */ +#define RCANFD_GTINTSTS0 (0x0460) +/* RSCFDnCFDGTINTSTS1 / RSCFDnGTINTSTS1 */ +#define RCANFD_GTINTSTS1 (0x0464) +/* RSCFDnCFDGTSTCFG / RSCFDnGTSTCFG */ +#define RCANFD_GTSTCFG (0x0468) +/* RSCFDnCFDGTSTCTR / RSCFDnGTSTCTR */ +#define RCANFD_GTSTCTR (0x046c) +/* RSCFDnCFDGLOCKK / RSCFDnGLOCKK */ +#define RCANFD_GLOCKK (0x047c) +/* RSCFDnCFDGRMCFG */ +#define RCANFD_GRMCFG (0x04fc) + +/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */ +#define RCANFD_GAFLID(offset, j) ((offset) + (0x10 * (j))) +/* RSCFDnCFDGAFLMj / RSCFDnGAFLMj */ +#define RCANFD_GAFLM(offset, j) ((offset) + 0x04 + (0x10 * (j))) +/* RSCFDnCFDGAFLP0j / RSCFDnGAFLP0j */ +#define RCANFD_GAFLP0(offset, j) ((offset) + 0x08 + (0x10 * (j))) +/* RSCFDnCFDGAFLP1j / RSCFDnGAFLP1j */ +#define RCANFD_GAFLP1(offset, j) ((offset) + 0x0c + (0x10 * (j))) + +/* Classical CAN only mode register map */ + +/* RSCFDnGAFLXXXj offset */ +#define RCANFD_C_GAFL_OFFSET (0x0500) + +/* RSCFDnRMXXXq -> RCANFD_C_RMXXX(q) */ +#define RCANFD_C_RMID(q) (0x0600 + (0x10 * (q))) +#define RCANFD_C_RMPTR(q) (0x0604 + (0x10 * (q))) +#define RCANFD_C_RMDF0(q) (0x0608 + (0x10 * (q))) +#define RCANFD_C_RMDF1(q) (0x060c + (0x10 * (q))) + +/* RSCFDnRFXXx -> RCANFD_C_RFXX(x) */ +#define RCANFD_C_RFOFFSET (0x0e00) +#define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x))) +#define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + \ + (0x10 * (x))) +#define RCANFD_C_RFDF(x, df) (RCANFD_C_RFOFFSET + 0x08 + \ + (0x10 * (x)) + (0x04 * (df))) + +/* RSCFDnCFXXk -> RCANFD_C_CFXX(ch, k) */ +#define RCANFD_C_CFOFFSET (0x0e80) +#define RCANFD_C_CFID(ch, idx) (RCANFD_C_CFOFFSET + (0x30 * (ch)) + \ + (0x10 * (idx))) +#define RCANFD_C_CFPTR(ch, idx) (RCANFD_C_CFOFFSET + 0x04 + \ + (0x30 * (ch)) + (0x10 * (idx))) +#define RCANFD_C_CFDF(ch, idx, df) (RCANFD_C_CFOFFSET + 0x08 + \ + (0x30 * (ch)) + (0x10 * (idx)) + \ + (0x04 * (df))) + +/* RSCFDnTMXXp -> RCANFD_C_TMXX(p) */ +#define RCANFD_C_TMID(p) (0x1000 + (0x10 * (p))) +#define RCANFD_C_TMPTR(p) (0x1004 + (0x10 * (p))) +#define RCANFD_C_TMDF0(p) (0x1008 + (0x10 * (p))) +#define RCANFD_C_TMDF1(p) (0x100c + (0x10 * (p))) + +/* RSCFDnTHLACCm */ +#define RCANFD_C_THLACC(m) (0x1800 + (0x04 * (m))) +/* RSCFDnRPGACCr */ +#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r))) + +/* CAN FD mode specific regsiter map */ + +/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */ +#define RCANFD_F_DCFG(m) (0x0500 + (0x20 * (m))) +#define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m))) +#define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m))) +#define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m))) +#define RCANFD_F_CFDCRC(m) (0x0510 + (0x20 * (m))) + +/* RSCFDnCFDGAFLXXXj offset */ +#define RCANFD_F_GAFL_OFFSET (0x1000) + +/* RSCFDnCFDRMXXXq -> RCANFD_F_RMXXX(q) */ +#define RCANFD_F_RMID(q) (0x2000 + (0x20 * (q))) +#define RCANFD_F_RMPTR(q) (0x2004 + (0x20 * (q))) +#define RCANFD_F_RMFDSTS(q) (0x2008 + (0x20 * (q))) +#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q))) + +/* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */ +#define RCANFD_F_RFOFFSET (0x3000) +#define RCANFD_F_RFID(x) (RCANFD_F_RFOFFSET + (0x80 * (x))) +#define RCANFD_F_RFPTR(x) (RCANFD_F_RFOFFSET + 0x04 + \ + (0x80 * (x))) +#define RCANFD_F_RFFDSTS(x) (RCANFD_F_RFOFFSET + 0x08 + \ + (0x80 * (x))) +#define RCANFD_F_RFDF(x, df) (RCANFD_F_RFOFFSET + 0x0c + \ + (0x80 * (x)) + (0x04 * (df))) + 
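[All of the register-map macros above follow one stride pattern: a fixed base address plus per-channel and per-index strides. As a quick sanity check, a standalone snippet expanding two of the definitions already listed; the values are copied from above, with RCANFD_F_RFOFFSET (0x3000) inlined for self-containment.

#include <assert.h>

/* Reproduced from the definitions above */
#define RCANFD_CFCC(ch, idx)	(0x0118 + (0x0c * (ch)) + (0x04 * (idx)))
#define RCANFD_F_RFDF(x, df)	(0x3000 + 0x0c + (0x80 * (x)) + (0x04 * (df)))

int main(void)
{
	/* Channel 1, common FIFO index 2: 0x0118 + 0x0c + 0x08 = 0x012c */
	assert(RCANFD_CFCC(1, 2) == 0x012c);
	/* Rx FIFO 1, data word 3: 0x3000 + 0x0c + 0x80 + 0x0c = 0x3098 */
	assert(RCANFD_F_RFDF(1, 3) == 0x3098);
	return 0;
}
]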
+/* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */ +#define RCANFD_F_CFOFFSET (0x3400) +#define RCANFD_F_CFID(ch, idx) (RCANFD_F_CFOFFSET + (0x180 * (ch)) + \ + (0x80 * (idx))) +#define RCANFD_F_CFPTR(ch, idx) (RCANFD_F_CFOFFSET + 0x04 + \ + (0x180 * (ch)) + (0x80 * (idx))) +#define RCANFD_F_CFFDCSTS(ch, idx) (RCANFD_F_CFOFFSET + 0x08 + \ + (0x180 * (ch)) + (0x80 * (idx))) +#define RCANFD_F_CFDF(ch, idx, df) (RCANFD_F_CFOFFSET + 0x0c + \ + (0x180 * (ch)) + (0x80 * (idx)) + \ + (0x04 * (df))) + +/* RSCFDnCFDTMXXp -> RCANFD_F_TMXX(p) */ +#define RCANFD_F_TMID(p) (0x4000 + (0x20 * (p))) +#define RCANFD_F_TMPTR(p) (0x4004 + (0x20 * (p))) +#define RCANFD_F_TMFDCTR(p) (0x4008 + (0x20 * (p))) +#define RCANFD_F_TMDF(p, b) (0x400c + (0x20 * (p)) + (0x04 * (b))) + +/* RSCFDnCFDTHLACCm */ +#define RCANFD_F_THLACC(m) (0x6000 + (0x04 * (m))) +/* RSCFDnCFDRPGACCr */ +#define RCANFD_F_RPGACC(r) (0x6400 + (0x04 * (r))) + +/* Constants */ +#define RCANFD_FIFO_DEPTH 8 /* Tx FIFO depth */ +#define RCANFD_NAPI_WEIGHT 8 /* Rx poll quota */ + +#define RCANFD_NUM_CHANNELS 2 /* Two channels max */ +#define RCANFD_CHANNELS_MASK BIT((RCANFD_NUM_CHANNELS) - 1) + +#define RCANFD_GAFL_PAGENUM(entry) ((entry) / 16) +#define RCANFD_CHANNEL_NUMRULES 1 /* only one rule per channel */ + +/* Rx FIFO is a global resource of the controller. There are 8 such FIFOs + * available. Each channel gets a dedicated Rx FIFO (i.e.) the channel + * number is added to RFFIFO index. + */ +#define RCANFD_RFFIFO_IDX 0 + +/* Tx/Rx or Common FIFO is a per channel resource. Each channel has 3 Common + * FIFOs dedicated to them. Use the first (index 0) FIFO out of the 3 for Tx. + */ +#define RCANFD_CFFIFO_IDX 0 + +/* fCAN clock select register settings */ +enum rcar_canfd_fcanclk { + RCANFD_CANFDCLK = 0, /* CANFD clock */ + RCANFD_EXTCLK, /* Externally input clock */ +}; + +struct rcar_canfd_global; + +/* Channel priv data */ +struct rcar_canfd_channel { + struct can_priv can; /* Must be the first member */ + struct net_device *ndev; + struct rcar_canfd_global *gpriv; /* Controller reference */ + void __iomem *base; /* Register base address */ + struct napi_struct napi; + u8 tx_len[RCANFD_FIFO_DEPTH]; /* For net stats */ + u32 tx_head; /* Incremented on xmit */ + u32 tx_tail; /* Incremented on xmit done */ + u32 channel; /* Channel number */ + spinlock_t tx_lock; /* To protect tx path */ +}; + +/* Global priv data */ +struct rcar_canfd_global { + struct rcar_canfd_channel *ch[RCANFD_NUM_CHANNELS]; + void __iomem *base; /* Register base address */ + struct platform_device *pdev; /* Respective platform device */ + struct clk *clkp; /* Peripheral clock */ + struct clk *can_clk; /* fCAN clock */ + enum rcar_canfd_fcanclk fcan; /* CANFD or Ext clock */ + unsigned long channels_mask; /* Enabled channels mask */ + bool fdmode; /* CAN FD or Classical CAN only mode */ +}; + +/* CAN FD mode nominal rate constants */ +static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = { + .name = RCANFD_DRV_NAME, + .tseg1_min = 2, + .tseg1_max = 128, + .tseg2_min = 2, + .tseg2_max = 32, + .sjw_max = 32, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + +/* CAN FD mode data rate constants */ +static const struct can_bittiming_const rcar_canfd_data_bittiming_const = { + .name = RCANFD_DRV_NAME, + .tseg1_min = 2, + .tseg1_max = 16, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 8, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + +/* Classical CAN mode bitrate constants */ +static const struct can_bittiming_const rcar_canfd_bittiming_const = { 
+ .name = RCANFD_DRV_NAME, + .tseg1_min = 4, + .tseg1_max = 16, + .tseg2_min = 2, + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + +/* Helper functions */ +static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg) +{ + u32 data = readl(reg); + + data &= ~mask; + data |= (val & mask); + writel(data, reg); +} + +static inline u32 rcar_canfd_read(void __iomem *base, u32 offset) +{ + return readl(base + (offset)); +} + +static inline void rcar_canfd_write(void __iomem *base, u32 offset, u32 val) +{ + writel(val, base + (offset)); +} + +static void rcar_canfd_set_bit(void __iomem *base, u32 reg, u32 val) +{ + rcar_canfd_update(val, val, base + (reg)); +} + +static void rcar_canfd_clear_bit(void __iomem *base, u32 reg, u32 val) +{ + rcar_canfd_update(val, 0, base + (reg)); +} + +static void rcar_canfd_update_bit(void __iomem *base, u32 reg, + u32 mask, u32 val) +{ + rcar_canfd_update(mask, val, base + (reg)); +} + +static void rcar_canfd_get_data(struct rcar_canfd_channel *priv, + struct canfd_frame *cf, u32 off) +{ + u32 i, lwords; + + lwords = DIV_ROUND_UP(cf->len, sizeof(u32)); + for (i = 0; i < lwords; i++) + *((u32 *)cf->data + i) = + rcar_canfd_read(priv->base, off + (i * sizeof(u32))); +} + +static void rcar_canfd_put_data(struct rcar_canfd_channel *priv, + struct canfd_frame *cf, u32 off) +{ + u32 i, lwords; + + lwords = DIV_ROUND_UP(cf->len, sizeof(u32)); + for (i = 0; i < lwords; i++) + rcar_canfd_write(priv->base, off + (i * sizeof(u32)), + *((u32 *)cf->data + i)); +} + +static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev) +{ + u32 i; + + for (i = 0; i < RCANFD_FIFO_DEPTH; i++) + can_free_echo_skb(ndev, i); +} + +static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv) +{ + u32 sts, ch; + int err; + + /* Check RAMINIT flag as CAN RAM initialization takes place + * after the MCU reset + */ + err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts, + !(sts & RCANFD_GSTS_GRAMINIT), 2, 500000); + if (err) { + dev_dbg(&gpriv->pdev->dev, "global raminit failed\n"); + return err; + } + + /* Transition to Global Reset mode */ + rcar_canfd_clear_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR); + rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR, + RCANFD_GCTR_GMDC_MASK, RCANFD_GCTR_GMDC_GRESET); + + /* Ensure Global reset mode */ + err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts, + (sts & RCANFD_GSTS_GRSTSTS), 2, 500000); + if (err) { + dev_dbg(&gpriv->pdev->dev, "global reset failed\n"); + return err; + } + + /* Reset Global error flags */ + rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0); + + /* Set the controller into appropriate mode */ + if (gpriv->fdmode) + rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG, + RCANFD_GRMCFG_RCMC); + else + rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG, + RCANFD_GRMCFG_RCMC); + + /* Transition all Channels to reset mode */ + for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { + rcar_canfd_clear_bit(gpriv->base, + RCANFD_CCTR(ch), RCANFD_CCTR_CSLPR); + + rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch), + RCANFD_CCTR_CHMDC_MASK, + RCANFD_CCTR_CHDMC_CRESET); + + /* Ensure Channel reset mode */ + err = readl_poll_timeout((gpriv->base + RCANFD_CSTS(ch)), sts, + (sts & RCANFD_CSTS_CRSTSTS), + 2, 500000); + if (err) { + dev_dbg(&gpriv->pdev->dev, + "channel %u reset failed\n", ch); + return err; + } + } + return 0; +} + +static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv) +{ + u32 cfg, ch; + + /* Global configuration settings 
*/ + + /* ECC Error flag Enable */ + cfg = RCANFD_GCFG_EEFE; + + if (gpriv->fdmode) + /* Truncate payload to configured message size RFPLS */ + cfg |= RCANFD_GCFG_CMPOC; + + /* Set External Clock if selected */ + if (gpriv->fcan != RCANFD_CANFDCLK) + cfg |= RCANFD_GCFG_DCS; + + rcar_canfd_set_bit(gpriv->base, RCANFD_GCFG, cfg); + + /* Channel configuration settings */ + for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { + rcar_canfd_set_bit(gpriv->base, RCANFD_CCTR(ch), + RCANFD_CCTR_ERRD); + rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch), + RCANFD_CCTR_BOM_MASK, + RCANFD_CCTR_BOM_BENTRY); + } +} + +static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv, + u32 ch) +{ + u32 cfg; + int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES; + u32 ridx = ch + RCANFD_RFFIFO_IDX; + + if (ch == 0) { + start = 0; /* Channel 0 always starts from 0th rule */ + } else { + /* Get number of Channel 0 rules and adjust */ + cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG0); + start = RCANFD_GAFLCFG_GETRNC(0, cfg); + } + + /* Enable write access to entry */ + page = RCANFD_GAFL_PAGENUM(start); + rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR, + (RCANFD_GAFLECTR_AFLPN(page) | + RCANFD_GAFLECTR_AFLDAE)); + + /* Write number of rules for channel */ + rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG0, + RCANFD_GAFLCFG_SETRNC(ch, num_rules)); + if (gpriv->fdmode) + offset = RCANFD_F_GAFL_OFFSET; + else + offset = RCANFD_C_GAFL_OFFSET; + + /* Accept all IDs */ + rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0); + /* IDE or RTR is not considered for matching */ + rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0); + /* Any data length accepted */ + rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0); + /* Place the msg in corresponding Rx FIFO entry */ + rcar_canfd_write(gpriv->base, RCANFD_GAFLP1(offset, start), + RCANFD_GAFLP1_GAFLFDP(ridx)); + + /* Disable write access to page */ + rcar_canfd_clear_bit(gpriv->base, + RCANFD_GAFLECTR, RCANFD_GAFLECTR_AFLDAE); +} + +static void rcar_canfd_configure_rx(struct rcar_canfd_global *gpriv, u32 ch) +{ + /* Rx FIFO is used for reception */ + u32 cfg; + u16 rfdc, rfpls; + + /* Select Rx FIFO based on channel */ + u32 ridx = ch + RCANFD_RFFIFO_IDX; + + rfdc = 2; /* b010 - 8 messages Rx FIFO depth */ + if (gpriv->fdmode) + rfpls = 7; /* b111 - Max 64 bytes payload */ + else + rfpls = 0; /* b000 - Max 8 bytes payload */ + + cfg = (RCANFD_RFCC_RFIM | RCANFD_RFCC_RFDC(rfdc) | + RCANFD_RFCC_RFPLS(rfpls) | RCANFD_RFCC_RFIE); + rcar_canfd_write(gpriv->base, RCANFD_RFCC(ridx), cfg); +} + +static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch) +{ + /* Tx/Rx(Common) FIFO configured in Tx mode is + * used for transmission + * + * Each channel has 3 Common FIFO dedicated to them. 
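+ * The per-channel Common FIFO register sets above (RCANFD_CFCC/CFSTS/
+ * CFPCTR) are indexed as (ch, idx) with idx in the range 0..2 accordingly.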
+ * Use the 1st (index 0) out of 3 + */ + u32 cfg; + u16 cftml, cfm, cfdc, cfpls; + + cftml = 0; /* 0th buffer */ + cfm = 1; /* b01 - Transmit mode */ + cfdc = 2; /* b010 - 8 messages Tx FIFO depth */ + if (gpriv->fdmode) + cfpls = 7; /* b111 - Max 64 bytes payload */ + else + cfpls = 0; /* b000 - Max 8 bytes payload */ + + cfg = (RCANFD_CFCC_CFTML(cftml) | RCANFD_CFCC_CFM(cfm) | + RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(cfdc) | + RCANFD_CFCC_CFPLS(cfpls) | RCANFD_CFCC_CFTXIE); + rcar_canfd_write(gpriv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), cfg); + + if (gpriv->fdmode) + /* Clear FD mode specific control/status register */ + rcar_canfd_write(gpriv->base, + RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), 0); +} + +static void rcar_canfd_enable_global_interrupts(struct rcar_canfd_global *gpriv) +{ + u32 ctr; + + /* Clear any stray error interrupt flags */ + rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0); + + /* Global interrupts setup */ + ctr = RCANFD_GCTR_MEIE; + if (gpriv->fdmode) + ctr |= RCANFD_GCTR_CFMPOFIE; + + rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, ctr); +} + +static void rcar_canfd_disable_global_interrupts(struct rcar_canfd_global + *gpriv) +{ + /* Disable all interrupts */ + rcar_canfd_write(gpriv->base, RCANFD_GCTR, 0); + + /* Clear any stray error interrupt flags */ + rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0); +} + +static void rcar_canfd_enable_channel_interrupts(struct rcar_canfd_channel + *priv) +{ + u32 ctr, ch = priv->channel; + + /* Clear any stray error flags */ + rcar_canfd_write(priv->base, RCANFD_CERFL(ch), 0); + + /* Channel interrupts setup */ + ctr = (RCANFD_CCTR_TAIE | + RCANFD_CCTR_ALIE | RCANFD_CCTR_BLIE | + RCANFD_CCTR_OLIE | RCANFD_CCTR_BORIE | + RCANFD_CCTR_BOEIE | RCANFD_CCTR_EPIE | + RCANFD_CCTR_EWIE | RCANFD_CCTR_BEIE); + rcar_canfd_set_bit(priv->base, RCANFD_CCTR(ch), ctr); +} + +static void rcar_canfd_disable_channel_interrupts(struct rcar_canfd_channel + *priv) +{ + u32 ctr, ch = priv->channel; + + ctr = (RCANFD_CCTR_TAIE | + RCANFD_CCTR_ALIE | RCANFD_CCTR_BLIE | + RCANFD_CCTR_OLIE | RCANFD_CCTR_BORIE | + RCANFD_CCTR_BOEIE | RCANFD_CCTR_EPIE | + RCANFD_CCTR_EWIE | RCANFD_CCTR_BEIE); + rcar_canfd_clear_bit(priv->base, RCANFD_CCTR(ch), ctr); + + /* Clear any stray error flags */ + rcar_canfd_write(priv->base, RCANFD_CERFL(ch), 0); +} + +static void rcar_canfd_global_error(struct net_device *ndev) +{ + struct rcar_canfd_channel *priv = netdev_priv(ndev); + struct rcar_canfd_global *gpriv = priv->gpriv; + struct net_device_stats *stats = &ndev->stats; + u32 ch = priv->channel; + u32 gerfl, sts; + u32 ridx = ch + RCANFD_RFFIFO_IDX; + + gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL); + if ((gerfl & RCANFD_GERFL_EEF0) && (ch == 0)) { + netdev_dbg(ndev, "Ch0: ECC Error flag\n"); + stats->tx_dropped++; + } + if ((gerfl & RCANFD_GERFL_EEF1) && (ch == 1)) { + netdev_dbg(ndev, "Ch1: ECC Error flag\n"); + stats->tx_dropped++; + } + if (gerfl & RCANFD_GERFL_MES) { + sts = rcar_canfd_read(priv->base, + RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX)); + if (sts & RCANFD_CFSTS_CFMLT) { + netdev_dbg(ndev, "Tx Message Lost flag\n"); + stats->tx_dropped++; + rcar_canfd_write(priv->base, + RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX), + sts & ~RCANFD_CFSTS_CFMLT); + } + + sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx)); + if (sts & RCANFD_RFSTS_RFMLT) { + netdev_dbg(ndev, "Rx Message Lost flag\n"); + stats->rx_dropped++; + rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx), + sts & ~RCANFD_RFSTS_RFMLT); + } + } + if (gpriv->fdmode && gerfl & RCANFD_GERFL_CMPOF) { + /* Message Lost 
flag will be set for respective channel + * when this condition happens with counters and flags + * already updated. + */ + netdev_dbg(ndev, "global payload overflow interrupt\n"); + } + + /* Clear all global error interrupts. Only affected channels bits + * get cleared + */ + rcar_canfd_write(priv->base, RCANFD_GERFL, 0); +} + +static void rcar_canfd_error(struct net_device *ndev, u32 cerfl, + u16 txerr, u16 rxerr) +{ + struct rcar_canfd_channel *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + u32 ch = priv->channel; + + netdev_dbg(ndev, "ch erfl %x txerr %u rxerr %u\n", cerfl, txerr, rxerr); + + /* Propagate the error condition to the CAN stack */ + skb = alloc_can_err_skb(ndev, &cf); + if (!skb) { + stats->rx_dropped++; + return; + } + + /* Channel error interrupts */ + if (cerfl & RCANFD_CERFL_BEF) { + netdev_dbg(ndev, "Bus error\n"); + cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; + cf->data[2] = CAN_ERR_PROT_UNSPEC; + priv->can.can_stats.bus_error++; + } + if (cerfl & RCANFD_CERFL_ADERR) { + netdev_dbg(ndev, "ACK Delimiter Error\n"); + stats->tx_errors++; + cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL; + } + if (cerfl & RCANFD_CERFL_B0ERR) { + netdev_dbg(ndev, "Bit Error (dominant)\n"); + stats->tx_errors++; + cf->data[2] |= CAN_ERR_PROT_BIT0; + } + if (cerfl & RCANFD_CERFL_B1ERR) { + netdev_dbg(ndev, "Bit Error (recessive)\n"); + stats->tx_errors++; + cf->data[2] |= CAN_ERR_PROT_BIT1; + } + if (cerfl & RCANFD_CERFL_CERR) { + netdev_dbg(ndev, "CRC Error\n"); + stats->rx_errors++; + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + } + if (cerfl & RCANFD_CERFL_AERR) { + netdev_dbg(ndev, "ACK Error\n"); + stats->tx_errors++; + cf->can_id |= CAN_ERR_ACK; + cf->data[3] |= CAN_ERR_PROT_LOC_ACK; + } + if (cerfl & RCANFD_CERFL_FERR) { + netdev_dbg(ndev, "Form Error\n"); + stats->rx_errors++; + cf->data[2] |= CAN_ERR_PROT_FORM; + } + if (cerfl & RCANFD_CERFL_SERR) { + netdev_dbg(ndev, "Stuff Error\n"); + stats->rx_errors++; + cf->data[2] |= CAN_ERR_PROT_STUFF; + } + if (cerfl & RCANFD_CERFL_ALF) { + netdev_dbg(ndev, "Arbitration lost Error\n"); + priv->can.can_stats.arbitration_lost++; + cf->can_id |= CAN_ERR_LOSTARB; + cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC; + } + if (cerfl & RCANFD_CERFL_BLF) { + netdev_dbg(ndev, "Bus Lock Error\n"); + stats->rx_errors++; + cf->can_id |= CAN_ERR_BUSERROR; + } + if (cerfl & RCANFD_CERFL_EWF) { + netdev_dbg(ndev, "Error warning interrupt\n"); + priv->can.state = CAN_STATE_ERROR_WARNING; + priv->can.can_stats.error_warning++; + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + if (cerfl & RCANFD_CERFL_EPF) { + netdev_dbg(ndev, "Error passive interrupt\n"); + priv->can.state = CAN_STATE_ERROR_PASSIVE; + priv->can.can_stats.error_passive++; + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = txerr > rxerr ? 
CAN_ERR_CRTL_TX_PASSIVE : + CAN_ERR_CRTL_RX_PASSIVE; + cf->data[6] = txerr; + cf->data[7] = rxerr; + } + if (cerfl & RCANFD_CERFL_BOEF) { + netdev_dbg(ndev, "Bus-off entry interrupt\n"); + rcar_canfd_tx_failure_cleanup(ndev); + priv->can.state = CAN_STATE_BUS_OFF; + priv->can.can_stats.bus_off++; + can_bus_off(ndev); + cf->can_id |= CAN_ERR_BUSOFF; + } + if (cerfl & RCANFD_CERFL_OVLF) { + netdev_dbg(ndev, + "Overload Frame Transmission error interrupt\n"); + stats->tx_errors++; + cf->can_id |= CAN_ERR_PROT; + cf->data[2] |= CAN_ERR_PROT_OVERLOAD; + } + + /* Clear channel error interrupts that are handled */ + rcar_canfd_write(priv->base, RCANFD_CERFL(ch), + RCANFD_CERFL_ERR(~cerfl)); + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); +} + +static void rcar_canfd_tx_done(struct net_device *ndev) +{ + struct rcar_canfd_channel *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + u32 sts; + unsigned long flags; + u32 ch = priv->channel; + + do { + u8 unsent, sent; + + sent = priv->tx_tail % RCANFD_FIFO_DEPTH; + stats->tx_packets++; + stats->tx_bytes += priv->tx_len[sent]; + priv->tx_len[sent] = 0; + can_get_echo_skb(ndev, sent); + + spin_lock_irqsave(&priv->tx_lock, flags); + priv->tx_tail++; + sts = rcar_canfd_read(priv->base, + RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX)); + unsent = RCANFD_CFSTS_CFMC(sts); + + /* Wake producer only when there is room */ + if (unsent != RCANFD_FIFO_DEPTH) + netif_wake_queue(ndev); + + if (priv->tx_head - priv->tx_tail <= unsent) { + spin_unlock_irqrestore(&priv->tx_lock, flags); + break; + } + spin_unlock_irqrestore(&priv->tx_lock, flags); + + } while (1); + + /* Clear interrupt */ + rcar_canfd_write(priv->base, RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX), + sts & ~RCANFD_CFSTS_CFTXIF); + can_led_event(ndev, CAN_LED_EVENT_TX); +} + +static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id) +{ + struct rcar_canfd_global *gpriv = dev_id; + struct net_device *ndev; + struct rcar_canfd_channel *priv; + u32 sts, gerfl; + u32 ch, ridx; + + /* Global error interrupts still indicate a condition specific + * to a channel. RxFIFO interrupt is a global interrupt. 
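+	 * Both are therefore demultiplexed here by walking the enabled
+	 * channels and checking the per-channel GERFL/RFSTS status bits.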
+ */ + for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { + priv = gpriv->ch[ch]; + ndev = priv->ndev; + ridx = ch + RCANFD_RFFIFO_IDX; + + /* Global error interrupts */ + gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL); + if (unlikely(RCANFD_GERFL_ERR(gpriv, gerfl))) + rcar_canfd_global_error(ndev); + + /* Handle Rx interrupts */ + sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx)); + if (likely(sts & RCANFD_RFSTS_RFIF)) { + if (napi_schedule_prep(&priv->napi)) { + /* Disable Rx FIFO interrupts */ + rcar_canfd_clear_bit(priv->base, + RCANFD_RFCC(ridx), + RCANFD_RFCC_RFIE); + __napi_schedule(&priv->napi); + } + } + } + return IRQ_HANDLED; +} + +static void rcar_canfd_state_change(struct net_device *ndev, + u16 txerr, u16 rxerr) +{ + struct rcar_canfd_channel *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + enum can_state rx_state, tx_state, state = priv->can.state; + struct can_frame *cf; + struct sk_buff *skb; + + /* Handle transition from error to normal states */ + if (txerr < 96 && rxerr < 96) + state = CAN_STATE_ERROR_ACTIVE; + else if (txerr < 128 && rxerr < 128) + state = CAN_STATE_ERROR_WARNING; + + if (state != priv->can.state) { + netdev_dbg(ndev, "state: new %d, old %d: txerr %u, rxerr %u\n", + state, priv->can.state, txerr, rxerr); + skb = alloc_can_err_skb(ndev, &cf); + if (!skb) { + stats->rx_dropped++; + return; + } + tx_state = txerr >= rxerr ? state : 0; + rx_state = txerr <= rxerr ? state : 0; + + can_change_state(ndev, cf, tx_state, rx_state); + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_rx(skb); + } +} + +static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id) +{ + struct rcar_canfd_global *gpriv = dev_id; + struct net_device *ndev; + struct rcar_canfd_channel *priv; + u32 sts, ch, cerfl; + u16 txerr, rxerr; + + /* Common FIFO is a per channel resource */ + for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { + priv = gpriv->ch[ch]; + ndev = priv->ndev; + + /* Channel error interrupts */ + cerfl = rcar_canfd_read(priv->base, RCANFD_CERFL(ch)); + sts = rcar_canfd_read(priv->base, RCANFD_CSTS(ch)); + txerr = RCANFD_CSTS_TECCNT(sts); + rxerr = RCANFD_CSTS_RECCNT(sts); + if (unlikely(RCANFD_CERFL_ERR(cerfl))) + rcar_canfd_error(ndev, cerfl, txerr, rxerr); + + /* Handle state change to lower states */ + if (unlikely((priv->can.state != CAN_STATE_ERROR_ACTIVE) && + (priv->can.state != CAN_STATE_BUS_OFF))) + rcar_canfd_state_change(ndev, txerr, rxerr); + + /* Handle Tx interrupts */ + sts = rcar_canfd_read(priv->base, + RCANFD_CFSTS(ch, RCANFD_CFFIFO_IDX)); + if (likely(sts & RCANFD_CFSTS_CFTXIF)) + rcar_canfd_tx_done(ndev); + } + return IRQ_HANDLED; +} + +static void rcar_canfd_set_bittiming(struct net_device *dev) +{ + struct rcar_canfd_channel *priv = netdev_priv(dev); + const struct can_bittiming *bt = &priv->can.bittiming; + const struct can_bittiming *dbt = &priv->can.data_bittiming; + u16 brp, sjw, tseg1, tseg2; + u32 cfg; + u32 ch = priv->channel; + + /* Nominal bit timing settings */ + brp = bt->brp - 1; + sjw = bt->sjw - 1; + tseg1 = bt->prop_seg + bt->phase_seg1 - 1; + tseg2 = bt->phase_seg2 - 1; + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + /* CAN FD only mode */ + cfg = (RCANFD_NCFG_NTSEG1(tseg1) | RCANFD_NCFG_NBRP(brp) | + RCANFD_NCFG_NSJW(sjw) | RCANFD_NCFG_NTSEG2(tseg2)); + + rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg); + netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n", + brp, sjw, tseg1, tseg2); + + /* Data bit timing settings */ + 
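+		/* (as with the nominal settings above, the hardware
+		 * fields take the configured value minus one)
+		 */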
brp = dbt->brp - 1; + sjw = dbt->sjw - 1; + tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1; + tseg2 = dbt->phase_seg2 - 1; + + cfg = (RCANFD_DCFG_DTSEG1(tseg1) | RCANFD_DCFG_DBRP(brp) | + RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(tseg2)); + + rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg); + netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n", + brp, sjw, tseg1, tseg2); + } else { + /* Classical CAN only mode */ + cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) | + RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2)); + + rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg); + netdev_dbg(priv->ndev, + "rate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n", + brp, sjw, tseg1, tseg2); + } +} + +static int rcar_canfd_start(struct net_device *ndev) +{ + struct rcar_canfd_channel *priv = netdev_priv(ndev); + int err = -EOPNOTSUPP; + u32 sts, ch = priv->channel; + u32 ridx = ch + RCANFD_RFFIFO_IDX; + + rcar_canfd_set_bittiming(ndev); + + rcar_canfd_enable_channel_interrupts(priv); + + /* Set channel to Operational mode */ + rcar_canfd_update_bit(priv->base, RCANFD_CCTR(ch), + RCANFD_CCTR_CHMDC_MASK, RCANFD_CCTR_CHDMC_COPM); + + /* Verify channel mode change */ + err = readl_poll_timeout((priv->base + RCANFD_CSTS(ch)), sts, + (sts & RCANFD_CSTS_COMSTS), 2, 500000); + if (err) { + netdev_err(ndev, "channel %u communication state failed\n", ch); + goto fail_mode_change; + } + + /* Enable Common & Rx FIFO */ + rcar_canfd_set_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), + RCANFD_CFCC_CFE); + rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE); + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + return 0; + +fail_mode_change: + rcar_canfd_disable_channel_interrupts(priv); + return err; +} + +static int rcar_canfd_open(struct net_device *ndev) +{ + struct rcar_canfd_channel *priv = netdev_priv(ndev); + struct rcar_canfd_global *gpriv = priv->gpriv; + int err; + + /* Peripheral clock is already enabled in probe */ + err = clk_prepare_enable(gpriv->can_clk); + if (err) { + netdev_err(ndev, "failed to enable CAN clock, error %d\n", err); + goto out_clock; + } + + err = open_candev(ndev); + if (err) { + netdev_err(ndev, "open_candev() failed, error %d\n", err); + goto out_can_clock; + } + + napi_enable(&priv->napi); + err = rcar_canfd_start(ndev); + if (err) + goto out_close; + netif_start_queue(ndev); + can_led_event(ndev, CAN_LED_EVENT_OPEN); + return 0; +out_close: + napi_disable(&priv->napi); + close_candev(ndev); +out_can_clock: + clk_disable_unprepare(gpriv->can_clk); +out_clock: + return err; +} + +static void rcar_canfd_stop(struct net_device *ndev) +{ + struct rcar_canfd_channel *priv = netdev_priv(ndev); + int err; + u32 sts, ch = priv->channel; + u32 ridx = ch + RCANFD_RFFIFO_IDX; + + /* Transition to channel reset mode */ + rcar_canfd_update_bit(priv->base, RCANFD_CCTR(ch), + RCANFD_CCTR_CHMDC_MASK, RCANFD_CCTR_CHDMC_CRESET); + + /* Check Channel reset mode */ + err = readl_poll_timeout((priv->base + RCANFD_CSTS(ch)), sts, + (sts & RCANFD_CSTS_CRSTSTS), 2, 500000); + if (err) + netdev_err(ndev, "channel %u reset failed\n", ch); + + rcar_canfd_disable_channel_interrupts(priv); + + /* Disable Common & Rx FIFO */ + rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(ch, RCANFD_CFFIFO_IDX), + RCANFD_CFCC_CFE); + rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(ridx), RCANFD_RFCC_RFE); + + /* Set the state as STOPPED */ + priv->can.state = CAN_STATE_STOPPED; +} + +static int rcar_canfd_close(struct net_device *ndev) +{ + struct rcar_canfd_channel *priv = netdev_priv(ndev); + struct 
rcar_canfd_global *gpriv = priv->gpriv; + + netif_stop_queue(ndev); + rcar_canfd_stop(ndev); + napi_disable(&priv->napi); + clk_disable_unprepare(gpriv->can_clk); + close_candev(ndev); + can_led_event(ndev, CAN_LED_EVENT_STOP); + return 0; +} + +static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct rcar_canfd_channel *priv = netdev_priv(ndev); + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u32 sts = 0, id, dlc; + unsigned long flags; + u32 ch = priv->channel; + + if (can_dropped_invalid_skb(ndev, skb)) + return NETDEV_TX_OK; + + if (cf->can_id & CAN_EFF_FLAG) { + id = cf->can_id & CAN_EFF_MASK; + id |= RCANFD_CFID_CFIDE; + } else { + id = cf->can_id & CAN_SFF_MASK; + } + + if (cf->can_id & CAN_RTR_FLAG) + id |= RCANFD_CFID_CFRTR; + + dlc = RCANFD_CFPTR_CFDLC(can_len2dlc(cf->len)); + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + rcar_canfd_write(priv->base, + RCANFD_F_CFID(ch, RCANFD_CFFIFO_IDX), id); + rcar_canfd_write(priv->base, + RCANFD_F_CFPTR(ch, RCANFD_CFFIFO_IDX), dlc); + + if (can_is_canfd_skb(skb)) { + /* CAN FD frame format */ + sts |= RCANFD_CFFDCSTS_CFFDF; + if (cf->flags & CANFD_BRS) + sts |= RCANFD_CFFDCSTS_CFBRS; + + if (priv->can.state == CAN_STATE_ERROR_PASSIVE) + sts |= RCANFD_CFFDCSTS_CFESI; + } + + rcar_canfd_write(priv->base, + RCANFD_F_CFFDCSTS(ch, RCANFD_CFFIFO_IDX), sts); + + rcar_canfd_put_data(priv, cf, + RCANFD_F_CFDF(ch, RCANFD_CFFIFO_IDX, 0)); + } else { + rcar_canfd_write(priv->base, + RCANFD_C_CFID(ch, RCANFD_CFFIFO_IDX), id); + rcar_canfd_write(priv->base, + RCANFD_C_CFPTR(ch, RCANFD_CFFIFO_IDX), dlc); + rcar_canfd_put_data(priv, cf, + RCANFD_C_CFDF(ch, RCANFD_CFFIFO_IDX, 0)); + } + + priv->tx_len[priv->tx_head % RCANFD_FIFO_DEPTH] = cf->len; + can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH); + + spin_lock_irqsave(&priv->tx_lock, flags); + priv->tx_head++; + + /* Stop the queue if we've filled all FIFO entries */ + if (priv->tx_head - priv->tx_tail >= RCANFD_FIFO_DEPTH) + netif_stop_queue(ndev); + + /* Start Tx: Write 0xff to CFPC to increment the CPU-side + * pointer for the Common FIFO + */ + rcar_canfd_write(priv->base, + RCANFD_CFPCTR(ch, RCANFD_CFFIFO_IDX), 0xff); + + spin_unlock_irqrestore(&priv->tx_lock, flags); + return NETDEV_TX_OK; +} + +static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv) +{ + struct net_device_stats *stats = &priv->ndev->stats; + struct canfd_frame *cf; + struct sk_buff *skb; + u32 sts = 0, id, dlc; + u32 ch = priv->channel; + u32 ridx = ch + RCANFD_RFFIFO_IDX; + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + id = rcar_canfd_read(priv->base, RCANFD_F_RFID(ridx)); + dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(ridx)); + + sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(ridx)); + if (sts & RCANFD_RFFDSTS_RFFDF) + skb = alloc_canfd_skb(priv->ndev, &cf); + else + skb = alloc_can_skb(priv->ndev, + (struct can_frame **)&cf); + } else { + id = rcar_canfd_read(priv->base, RCANFD_C_RFID(ridx)); + dlc = rcar_canfd_read(priv->base, RCANFD_C_RFPTR(ridx)); + skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf); + } + + if (!skb) { + stats->rx_dropped++; + return; + } + + if (id & RCANFD_RFID_RFIDE) + cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG; + else + cf->can_id = id & CAN_SFF_MASK; + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { + if (sts & RCANFD_RFFDSTS_RFFDF) + cf->len = can_dlc2len(RCANFD_RFPTR_RFDLC(dlc)); + else + cf->len = get_can_dlc(RCANFD_RFPTR_RFDLC(dlc)); + + if (sts & RCANFD_RFFDSTS_RFESI) { + cf->flags |= CANFD_ESI; + 
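+			/* ESI is set when the transmitting node was error
+			 * passive; this mirrors the CFESI handling in the
+			 * xmit path above
+			 */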
netdev_dbg(priv->ndev, "ESI Error\n"); + } + + if (!(sts & RCANFD_RFFDSTS_RFFDF) && (id & RCANFD_RFID_RFRTR)) { + cf->can_id |= CAN_RTR_FLAG; + } else { + if (sts & RCANFD_RFFDSTS_RFBRS) + cf->flags |= CANFD_BRS; + + rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(ridx, 0)); + } + } else { + cf->len = get_can_dlc(RCANFD_RFPTR_RFDLC(dlc)); + if (id & RCANFD_RFID_RFRTR) + cf->can_id |= CAN_RTR_FLAG; + else + rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0)); + } + + /* Write 0xff to RFPC to increment the CPU-side + * pointer of the Rx FIFO + */ + rcar_canfd_write(priv->base, RCANFD_RFPCTR(ridx), 0xff); + + can_led_event(priv->ndev, CAN_LED_EVENT_RX); + + stats->rx_bytes += cf->len; + stats->rx_packets++; + netif_receive_skb(skb); +} + +static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota) +{ + struct rcar_canfd_channel *priv = + container_of(napi, struct rcar_canfd_channel, napi); + int num_pkts; + u32 sts; + u32 ch = priv->channel; + u32 ridx = ch + RCANFD_RFFIFO_IDX; + + for (num_pkts = 0; num_pkts < quota; num_pkts++) { + sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(ridx)); + /* Check FIFO empty condition */ + if (sts & RCANFD_RFSTS_RFEMP) + break; + + rcar_canfd_rx_pkt(priv); + + /* Clear interrupt bit */ + if (sts & RCANFD_RFSTS_RFIF) + rcar_canfd_write(priv->base, RCANFD_RFSTS(ridx), + sts & ~RCANFD_RFSTS_RFIF); + } + + /* All packets processed */ + if (num_pkts < quota) { + napi_complete(napi); + /* Enable Rx FIFO interrupts */ + rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx), + RCANFD_RFCC_RFIE); + } + return num_pkts; +} + +static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode) +{ + int err; + + switch (mode) { + case CAN_MODE_START: + err = rcar_canfd_start(ndev); + if (err) + return err; + netif_wake_queue(ndev); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int rcar_canfd_get_berr_counter(const struct net_device *dev, + struct can_berr_counter *bec) +{ + struct rcar_canfd_channel *priv = netdev_priv(dev); + u32 val, ch = priv->channel; + + /* Peripheral clock is already enabled in probe */ + val = rcar_canfd_read(priv->base, RCANFD_CSTS(ch)); + bec->txerr = RCANFD_CSTS_TECCNT(val); + bec->rxerr = RCANFD_CSTS_RECCNT(val); + return 0; +} + +static const struct net_device_ops rcar_canfd_netdev_ops = { + .ndo_open = rcar_canfd_open, + .ndo_stop = rcar_canfd_close, + .ndo_start_xmit = rcar_canfd_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, + u32 fcan_freq) +{ + struct platform_device *pdev = gpriv->pdev; + struct rcar_canfd_channel *priv; + struct net_device *ndev; + int err = -ENODEV; + + ndev = alloc_candev(sizeof(*priv), RCANFD_FIFO_DEPTH); + if (!ndev) { + dev_err(&pdev->dev, "alloc_candev() failed\n"); + err = -ENOMEM; + goto fail; + } + priv = netdev_priv(ndev); + + ndev->netdev_ops = &rcar_canfd_netdev_ops; + ndev->flags |= IFF_ECHO; + priv->ndev = ndev; + priv->base = gpriv->base; + priv->channel = ch; + priv->can.clock.freq = fcan_freq; + dev_info(&pdev->dev, "can_clk rate is %u\n", priv->can.clock.freq); + + if (gpriv->fdmode) { + priv->can.bittiming_const = &rcar_canfd_nom_bittiming_const; + priv->can.data_bittiming_const = + &rcar_canfd_data_bittiming_const; + + /* Controller starts in CAN FD only mode */ + can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD); + priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING; + } else { + /* Controller starts in Classical CAN only mode */ + priv->can.bittiming_const = 
&rcar_canfd_bittiming_const; + priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING; + } + + priv->can.do_set_mode = rcar_canfd_do_set_mode; + priv->can.do_get_berr_counter = rcar_canfd_get_berr_counter; + priv->gpriv = gpriv; + SET_NETDEV_DEV(ndev, &pdev->dev); + + netif_napi_add(ndev, &priv->napi, rcar_canfd_rx_poll, + RCANFD_NAPI_WEIGHT); + err = register_candev(ndev); + if (err) { + dev_err(&pdev->dev, + "register_candev() failed, error %d\n", err); + goto fail_candev; + } + spin_lock_init(&priv->tx_lock); + devm_can_led_init(ndev); + gpriv->ch[priv->channel] = priv; + dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel); + return 0; + +fail_candev: + netif_napi_del(&priv->napi); + free_candev(ndev); +fail: + return err; +} + +static void rcar_canfd_channel_remove(struct rcar_canfd_global *gpriv, u32 ch) +{ + struct rcar_canfd_channel *priv = gpriv->ch[ch]; + + if (priv) { + unregister_candev(priv->ndev); + netif_napi_del(&priv->napi); + free_candev(priv->ndev); + } +} + +static int rcar_canfd_probe(struct platform_device *pdev) +{ + struct resource *mem; + void __iomem *addr; + u32 sts, ch, fcan_freq; + struct rcar_canfd_global *gpriv; + struct device_node *of_child; + unsigned long channels_mask = 0; + int err, ch_irq, g_irq; + bool fdmode = true; /* CAN FD only mode - default */ + + if (of_property_read_bool(pdev->dev.of_node, "renesas,no-can-fd")) + fdmode = false; /* Classical CAN only mode */ + + of_child = of_get_child_by_name(pdev->dev.of_node, "channel0"); + if (of_child && of_device_is_available(of_child)) + channels_mask |= BIT(0); /* Channel 0 */ + + of_child = of_get_child_by_name(pdev->dev.of_node, "channel1"); + if (of_child && of_device_is_available(of_child)) + channels_mask |= BIT(1); /* Channel 1 */ + + ch_irq = platform_get_irq(pdev, 0); + if (ch_irq < 0) { + dev_err(&pdev->dev, "no Channel IRQ resource\n"); + err = ch_irq; + goto fail_dev; + } + + g_irq = platform_get_irq(pdev, 1); + if (g_irq < 0) { + dev_err(&pdev->dev, "no Global IRQ resource\n"); + err = g_irq; + goto fail_dev; + } + + /* Global controller context */ + gpriv = devm_kzalloc(&pdev->dev, sizeof(*gpriv), GFP_KERNEL); + if (!gpriv) { + err = -ENOMEM; + goto fail_dev; + } + gpriv->pdev = pdev; + gpriv->channels_mask = channels_mask; + gpriv->fdmode = fdmode; + + /* Peripheral clock */ + gpriv->clkp = devm_clk_get(&pdev->dev, "fck"); + if (IS_ERR(gpriv->clkp)) { + err = PTR_ERR(gpriv->clkp); + dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n", + err); + goto fail_dev; + } + + /* fCAN clock: Pick External clock. 
If not available fallback to + * CANFD clock + */ + gpriv->can_clk = devm_clk_get(&pdev->dev, "can_clk"); + if (IS_ERR(gpriv->can_clk) || (clk_get_rate(gpriv->can_clk) == 0)) { + gpriv->can_clk = devm_clk_get(&pdev->dev, "canfd"); + if (IS_ERR(gpriv->can_clk)) { + err = PTR_ERR(gpriv->can_clk); + dev_err(&pdev->dev, + "cannot get canfd clock, error %d\n", err); + goto fail_dev; + } + gpriv->fcan = RCANFD_CANFDCLK; + + } else { + gpriv->fcan = RCANFD_EXTCLK; + } + fcan_freq = clk_get_rate(gpriv->can_clk); + + if (gpriv->fcan == RCANFD_CANFDCLK) + /* CANFD clock is further divided by (1/2) within the IP */ + fcan_freq /= 2; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + addr = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(addr)) { + err = PTR_ERR(addr); + goto fail_dev; + } + gpriv->base = addr; + + /* Request IRQ that's common for both channels */ + err = devm_request_irq(&pdev->dev, ch_irq, + rcar_canfd_channel_interrupt, 0, + "canfd.chn", gpriv); + if (err) { + dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n", + ch_irq, err); + goto fail_dev; + } + err = devm_request_irq(&pdev->dev, g_irq, + rcar_canfd_global_interrupt, 0, + "canfd.gbl", gpriv); + if (err) { + dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n", + g_irq, err); + goto fail_dev; + } + + /* Enable peripheral clock for register access */ + err = clk_prepare_enable(gpriv->clkp); + if (err) { + dev_err(&pdev->dev, + "failed to enable peripheral clock, error %d\n", err); + goto fail_dev; + } + + err = rcar_canfd_reset_controller(gpriv); + if (err) { + dev_err(&pdev->dev, "reset controller failed\n"); + goto fail_clk; + } + + /* Controller in Global reset & Channel reset mode */ + rcar_canfd_configure_controller(gpriv); + + /* Configure per channel attributes */ + for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { + /* Configure Channel's Rx fifo */ + rcar_canfd_configure_rx(gpriv, ch); + + /* Configure Channel's Tx (Common) fifo */ + rcar_canfd_configure_tx(gpriv, ch); + + /* Configure receive rules */ + rcar_canfd_configure_afl_rules(gpriv, ch); + } + + /* Configure common interrupts */ + rcar_canfd_enable_global_interrupts(gpriv); + + /* Start Global operation mode */ + rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GMDC_MASK, + RCANFD_GCTR_GMDC_GOPM); + + /* Verify mode change */ + err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts, + !(sts & RCANFD_GSTS_GNOPM), 2, 500000); + if (err) { + dev_err(&pdev->dev, "global operational mode failed\n"); + goto fail_mode; + } + + for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { + err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq); + if (err) + goto fail_channel; + } + + platform_set_drvdata(pdev, gpriv); + dev_info(&pdev->dev, "global operational state (clk %d, fdmode %d)\n", + gpriv->fcan, gpriv->fdmode); + return 0; + +fail_channel: + for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) + rcar_canfd_channel_remove(gpriv, ch); +fail_mode: + rcar_canfd_disable_global_interrupts(gpriv); +fail_clk: + clk_disable_unprepare(gpriv->clkp); +fail_dev: + return err; +} + +static int rcar_canfd_remove(struct platform_device *pdev) +{ + struct rcar_canfd_global *gpriv = platform_get_drvdata(pdev); + u32 ch; + + rcar_canfd_reset_controller(gpriv); + rcar_canfd_disable_global_interrupts(gpriv); + + for_each_set_bit(ch, &gpriv->channels_mask, RCANFD_NUM_CHANNELS) { + rcar_canfd_disable_channel_interrupts(gpriv->ch[ch]); + rcar_canfd_channel_remove(gpriv, ch); + } + + /* Enter global sleep 
mode */ + rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR); + clk_disable_unprepare(gpriv->clkp); + return 0; +} + +static int __maybe_unused rcar_canfd_suspend(struct device *dev) +{ + return 0; +} + +static int __maybe_unused rcar_canfd_resume(struct device *dev) +{ + return 0; +} + +static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend, + rcar_canfd_resume); + +static const struct of_device_id rcar_canfd_of_table[] = { + { .compatible = "renesas,rcar-gen3-canfd" }, + { } +}; + +MODULE_DEVICE_TABLE(of, rcar_canfd_of_table); + +static struct platform_driver rcar_canfd_driver = { + .driver = { + .name = RCANFD_DRV_NAME, + .of_match_table = of_match_ptr(rcar_canfd_of_table), + .pm = &rcar_canfd_pm_ops, + }, + .probe = rcar_canfd_probe, + .remove = rcar_canfd_remove, +}; + +module_platform_driver(rcar_canfd_driver); + +MODULE_AUTHOR("Ramesh Shanmugasundaram <ramesh.shanmugasundaram@bp.renesas.com>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("CAN FD driver for Renesas R-Car SoC"); +MODULE_ALIAS("platform:" RCANFD_DRV_NAME); diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c index 76513dd78..79572457a 100644 --- a/drivers/net/can/sja1000/tscan1.c +++ b/drivers/net/can/sja1000/tscan1.c @@ -203,14 +203,4 @@ static struct isa_driver tscan1_isa_driver = { }, }; -static int __init tscan1_init(void) -{ - return isa_register_driver(&tscan1_isa_driver, TSCAN1_MAXDEV); -} -module_init(tscan1_init); - -static void __exit tscan1_exit(void) -{ - isa_unregister_driver(&tscan1_isa_driver); -} -module_exit(tscan1_exit); +module_isa_driver(tscan1_isa_driver, TSCAN1_MAXDEV); diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index 9a3f15cb7..eb7173713 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -354,7 +354,7 @@ static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev) { struct slcan *sl = netdev_priv(dev); - if (skb->len != sizeof(struct can_frame)) + if (skb->len != CAN_MTU) goto out; spin_lock(&sl->lock); @@ -442,7 +442,7 @@ static void slc_setup(struct net_device *dev) dev->addr_len = 0; dev->tx_queue_len = 10; - dev->mtu = sizeof(struct can_frame); + dev->mtu = CAN_MTU; dev->type = ARPHRD_CAN; /* New-style flags. */ diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index cf36d26ef..f3f05fea8 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -1145,8 +1145,11 @@ static int mcp251x_can_probe(struct spi_device *spi) /* Here is OK to not lock the MCP, no one knows about it yet */ ret = mcp251x_hw_probe(spi); - if (ret) + if (ret) { + if (ret == -ENODEV) + dev_err(&spi->dev, "Cannot initialize MCP%x. 
Wrong wiring?\n", priv->model); goto error_probe; + } mcp251x_hw_sleep(spi); @@ -1156,6 +1159,7 @@ static int mcp251x_can_probe(struct spi_device *spi) devm_can_led_init(net); + netdev_info(net, "MCP%x successfully initialized.\n", priv->model); return 0; error_probe: @@ -1168,6 +1172,7 @@ out_clk: out_free: free_candev(net); + dev_err(&spi->dev, "Probe failed, err=%d\n", -ret); return ret; } diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index acb0c8490..6f0cbc387 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -44,7 +44,9 @@ enum gs_usb_breq { GS_USB_BREQ_MODE, GS_USB_BREQ_BERR, GS_USB_BREQ_BT_CONST, - GS_USB_BREQ_DEVICE_CONFIG + GS_USB_BREQ_DEVICE_CONFIG, + GS_USB_BREQ_TIMESTAMP, + GS_USB_BREQ_IDENTIFY, }; enum gs_can_mode { @@ -63,6 +65,11 @@ enum gs_can_state { GS_CAN_STATE_SLEEPING }; +enum gs_can_identify_mode { + GS_CAN_IDENTIFY_OFF = 0, + GS_CAN_IDENTIFY_ON +}; + /* data types passed between host and device */ struct gs_host_config { u32 byte_order; @@ -82,10 +89,10 @@ struct gs_device_config { } __packed; #define GS_CAN_MODE_NORMAL 0 -#define GS_CAN_MODE_LISTEN_ONLY (1<<0) -#define GS_CAN_MODE_LOOP_BACK (1<<1) -#define GS_CAN_MODE_TRIPLE_SAMPLE (1<<2) -#define GS_CAN_MODE_ONE_SHOT (1<<3) +#define GS_CAN_MODE_LISTEN_ONLY BIT(0) +#define GS_CAN_MODE_LOOP_BACK BIT(1) +#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2) +#define GS_CAN_MODE_ONE_SHOT BIT(3) struct gs_device_mode { u32 mode; @@ -106,10 +113,16 @@ struct gs_device_bittiming { u32 brp; } __packed; -#define GS_CAN_FEATURE_LISTEN_ONLY (1<<0) -#define GS_CAN_FEATURE_LOOP_BACK (1<<1) -#define GS_CAN_FEATURE_TRIPLE_SAMPLE (1<<2) -#define GS_CAN_FEATURE_ONE_SHOT (1<<3) +struct gs_identify_mode { + u32 mode; +} __packed; + +#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0) +#define GS_CAN_FEATURE_LOOP_BACK BIT(1) +#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2) +#define GS_CAN_FEATURE_ONE_SHOT BIT(3) +#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4) +#define GS_CAN_FEATURE_IDENTIFY BIT(5) struct gs_device_bt_const { u32 feature; @@ -214,7 +227,8 @@ static void gs_free_tx_context(struct gs_tx_context *txc) /* Get a tx context by id. 
*/
-static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, unsigned int id)
+static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
+ unsigned int id)
 {
 unsigned long flags;
@@ -457,7 +471,8 @@ static void gs_usb_xmit_callback(struct urb *urb)
 netif_wake_queue(netdev);
 }
-static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
 {
 struct gs_can *dev = netdev_priv(netdev);
 struct net_device_stats *stats = &dev->netdev->stats;
@@ -663,7 +678,8 @@ static int gs_can_open(struct net_device *netdev)
 rc = usb_control_msg(interface_to_usbdev(dev->iface),
 usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
 GS_USB_BREQ_MODE,
- USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
 dev->channel,
 0,
 dm,
@@ -726,7 +742,59 @@ static const struct net_device_ops gs_usb_netdev_ops = {
 .ndo_change_mtu = can_change_mtu,
 };
-static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf)
+static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_identify_mode imode;
+ int rc;
+
+ if (do_identify)
+ imode.mode = GS_CAN_IDENTIFY_ON;
+ else
+ imode.mode = GS_CAN_IDENTIFY_OFF;
+
+ rc = usb_control_msg(interface_to_usbdev(dev->iface),
+ usb_sndctrlpipe(interface_to_usbdev(dev->iface),
+ 0),
+ GS_USB_BREQ_IDENTIFY,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_INTERFACE,
+ dev->channel,
+ 0,
+ &imode,
+ sizeof(imode),
+ 100);
+
+ return (rc > 0) ? 0 : rc;
+}
+
+/* blink LEDs for finding this interface */
+static int gs_usb_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ int rc = 0;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_ON);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ rc = gs_usb_set_identify(dev, GS_CAN_IDENTIFY_OFF);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static const struct ethtool_ops gs_usb_ethtool_ops = {
+ .set_phys_id = gs_usb_set_phys_id,
+};
+
+static struct gs_can *gs_make_candev(unsigned int channel,
+ struct usb_interface *intf,
+ struct gs_device_config *dconf)
 {
 struct gs_can *dev;
 struct net_device *netdev;
@@ -814,10 +882,14 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface
 if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
 dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
- kfree(bt_const);
-
 SET_NETDEV_DEV(netdev, &intf->dev);
+ if (dconf->sw_version > 1)
+ if (bt_const->feature & GS_CAN_FEATURE_IDENTIFY)
+ netdev->ethtool_ops = &gs_usb_ethtool_ops;
+
+ kfree(bt_const);
+
 rc = register_candev(dev->netdev);
 if (rc) {
 free_candev(dev->netdev);
@@ -835,19 +907,16 @@ static void gs_destroy_candev(struct gs_can *dev)
 free_candev(dev->netdev);
 }
-static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+static int gs_usb_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
 {
 struct gs_usb *dev;
 int rc = -ENOMEM;
 unsigned int icount, i;
- struct gs_host_config *hconf;
- struct gs_device_config *dconf;
-
- hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
- if (!hconf)
- return -ENOMEM;
-
- hconf->byte_order = 0x0000beef;
+ struct gs_host_config hconf = {
+ .byte_order = 0x0000beef,
+ };
+ struct gs_device_config dconf;
 /* send host config */
 rc = usb_control_msg(interface_to_usbdev(intf),
@@ -856,22 +925,16 @@ static int gs_usb_probe(struct
usb_interface *intf, const struct usb_device_id * USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 1, intf->altsetting[0].desc.bInterfaceNumber, - hconf, - sizeof(*hconf), + &hconf, + sizeof(hconf), 1000); - kfree(hconf); - if (rc < 0) { dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc); return rc; } - dconf = kmalloc(sizeof(*dconf), GFP_KERNEL); - if (!dconf) - return -ENOMEM; - /* read device config */ rc = usb_control_msg(interface_to_usbdev(intf), usb_rcvctrlpipe(interface_to_usbdev(intf), 0), @@ -879,22 +942,16 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id * USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 1, intf->altsetting[0].desc.bInterfaceNumber, - dconf, - sizeof(*dconf), + &dconf, + sizeof(dconf), 1000); if (rc < 0) { dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n", rc); - - kfree(dconf); - return rc; } - icount = dconf->icount+1; - - kfree(dconf); - + icount = dconf.icount + 1; dev_info(&intf->dev, "Configuring for %d interfaces\n", icount); if (icount > GS_MAX_INTF) { @@ -915,7 +972,7 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id * dev->udev = interface_to_usbdev(intf); for (i = 0; i < icount; i++) { - dev->canch[i] = gs_make_candev(i, intf); + dev->canch[i] = gs_make_candev(i, intf, &dconf); if (IS_ERR_OR_NULL(dev->canch[i])) { /* save error code to return later */ rc = PTR_ERR(dev->canch[i]); diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 200663c43..8f4544394 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -9,14 +9,6 @@ config NET_DSA_MV88E6060 This enables support for the Marvell 88E6060 ethernet switch chip. -config NET_DSA_MV88E6XXX - tristate "Marvell 88E6xxx Ethernet switch chip support" - depends on NET_DSA - select NET_DSA_TAG_EDSA - ---help--- - This enables support for most of the Marvell 88E6xxx models of - Ethernet switch chips, except 88E6060. - config NET_DSA_BCM_SF2 tristate "Broadcom Starfighter 2 Ethernet switch support" depends on HAS_IOMEM && NET_DSA @@ -28,4 +20,8 @@ config NET_DSA_BCM_SF2 This enables support for the Broadcom Starfighter 2 Ethernet switch chips. +source "drivers/net/dsa/b53/Kconfig" + +source "drivers/net/dsa/mv88e6xxx/Kconfig" + endmenu diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 76b751dd9..ca1e71b85 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -1,3 +1,5 @@ obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o -obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o + +obj-y += b53/ +obj-y += mv88e6xxx/ diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig new file mode 100644 index 000000000..27f32a50d --- /dev/null +++ b/drivers/net/dsa/b53/Kconfig @@ -0,0 +1,33 @@ +menuconfig B53 + tristate "Broadcom BCM53xx managed switch support" + depends on NET_DSA + help + This driver adds support for Broadcom managed switch chips. It supports + BCM5325E, BCM5365, BCM539x, BCM53115 and BCM53125 as well as BCM63XX + integrated switches. + +config B53_SPI_DRIVER + tristate "B53 SPI connected switch driver" + depends on B53 && SPI + help + Select to enable support for registering switches configured through SPI. + +config B53_MDIO_DRIVER + tristate "B53 MDIO connected switch driver" + depends on B53 + help + Select to enable support for registering switches configured through MDIO. 
+ +config B53_MMAP_DRIVER + tristate "B53 MMAP connected switch driver" + depends on B53 && HAS_IOMEM + help + Select to enable support for memory-mapped switches like the BCM63XX + integrated switches. + +config B53_SRAB_DRIVER + tristate "B53 SRAB connected switch driver" + depends on B53 && HAS_IOMEM + help + Select to enable support for memory-mapped Switch Register Access + Bridge Registers (SRAB) like it is found on the BCM53010 diff --git a/drivers/net/dsa/b53/Makefile b/drivers/net/dsa/b53/Makefile new file mode 100644 index 000000000..7e6f9a8bf --- /dev/null +++ b/drivers/net/dsa/b53/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_B53) += b53_common.o + +obj-$(CONFIG_B53_SPI_DRIVER) += b53_spi.o +obj-$(CONFIG_B53_MDIO_DRIVER) += b53_mdio.o +obj-$(CONFIG_B53_MMAP_DRIVER) += b53_mmap.o +obj-$(CONFIG_B53_SRAB_DRIVER) += b53_srab.o diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c new file mode 100644 index 000000000..bda37d336 --- /dev/null +++ b/drivers/net/dsa/b53/b53_common.c @@ -0,0 +1,1799 @@ +/* + * B53 switch driver main logic + * + * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org> + * Copyright (C) 2016 Florian Fainelli <f.fainelli@gmail.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/delay.h> +#include <linux/export.h> +#include <linux/gpio.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_data/b53.h> +#include <linux/phy.h> +#include <linux/etherdevice.h> +#include <linux/if_bridge.h> +#include <net/dsa.h> +#include <net/switchdev.h> + +#include "b53_regs.h" +#include "b53_priv.h" + +struct b53_mib_desc { + u8 size; + u8 offset; + const char *name; +}; + +/* BCM5365 MIB counters */ +static const struct b53_mib_desc b53_mibs_65[] = { + { 8, 0x00, "TxOctets" }, + { 4, 0x08, "TxDropPkts" }, + { 4, 0x10, "TxBroadcastPkts" }, + { 4, 0x14, "TxMulticastPkts" }, + { 4, 0x18, "TxUnicastPkts" }, + { 4, 0x1c, "TxCollisions" }, + { 4, 0x20, "TxSingleCollision" }, + { 4, 0x24, "TxMultipleCollision" }, + { 4, 0x28, "TxDeferredTransmit" }, + { 4, 0x2c, "TxLateCollision" }, + { 4, 0x30, "TxExcessiveCollision" }, + { 4, 0x38, "TxPausePkts" }, + { 8, 0x44, "RxOctets" }, + { 4, 0x4c, "RxUndersizePkts" }, + { 4, 0x50, "RxPausePkts" }, + { 4, 0x54, "Pkts64Octets" }, + { 4, 0x58, "Pkts65to127Octets" }, + { 4, 0x5c, "Pkts128to255Octets" }, + { 4, 0x60, "Pkts256to511Octets" }, + { 4, 0x64, "Pkts512to1023Octets" }, + { 4, 0x68, "Pkts1024to1522Octets" }, + { 4, 0x6c, "RxOversizePkts" }, + { 4, 0x70, "RxJabbers" }, + { 4, 0x74, "RxAlignmentErrors" }, + { 4, 0x78, "RxFCSErrors" }, + { 8, 0x7c, "RxGoodOctets" }, + { 4, 0x84, "RxDropPkts" }, + { 4, 0x88, "RxUnicastPkts" }, + { 4, 0x8c, "RxMulticastPkts" }, + { 4, 0x90, "RxBroadcastPkts" }, + { 4, 0x94, "RxSAChanges" }, + { 4, 0x98, "RxFragments" }, +}; + +#define B53_MIBS_65_SIZE ARRAY_SIZE(b53_mibs_65) + +/* BCM63xx MIB counters */ +static const struct b53_mib_desc b53_mibs_63xx[] = { + { 8, 0x00, "TxOctets" }, + { 4, 0x08, "TxDropPkts" }, + { 4, 0x0c, "TxQoSPkts" }, + { 4, 0x10, "TxBroadcastPkts" }, + { 4, 0x14, "TxMulticastPkts" }, + { 4, 0x18, "TxUnicastPkts" }, + { 4, 0x1c, "TxCollisions" }, + { 4, 0x20, "TxSingleCollision" }, + { 4, 0x24, "TxMultipleCollision" }, + { 4, 0x28, "TxDeferredTransmit" }, + { 4, 0x2c, "TxLateCollision" }, + { 4, 0x30, "TxExcessiveCollision" }, + { 4, 0x38, "TxPausePkts" }, + { 8, 0x3c, "TxQoSOctets" }, + { 8, 0x44, "RxOctets" }, + { 4, 0x4c, "RxUndersizePkts" }, + { 4, 0x50, "RxPausePkts" }, + { 4, 0x54, "Pkts64Octets" }, + { 4, 0x58, "Pkts65to127Octets" }, + { 4, 0x5c, "Pkts128to255Octets" }, + { 4, 0x60, "Pkts256to511Octets" }, + { 4, 0x64, "Pkts512to1023Octets" }, + { 4, 0x68, "Pkts1024to1522Octets" }, + { 4, 0x6c, "RxOversizePkts" }, + { 4, 0x70, "RxJabbers" }, + { 4, 0x74, "RxAlignmentErrors" }, + { 4, 0x78, "RxFCSErrors" }, + { 8, 0x7c, "RxGoodOctets" }, + { 4, 0x84, "RxDropPkts" }, + { 4, 0x88, "RxUnicastPkts" }, + { 4, 0x8c, "RxMulticastPkts" }, + { 4, 0x90, "RxBroadcastPkts" }, + { 4, 0x94, "RxSAChanges" }, + { 4, 0x98, "RxFragments" }, + { 4, 0xa0, "RxSymbolErrors" }, + { 4, 0xa4, "RxQoSPkts" }, + { 8, 0xa8, "RxQoSOctets" }, + { 4, 0xb0, "Pkts1523to2047Octets" }, + { 4, 0xb4, "Pkts2048to4095Octets" }, + { 4, 0xb8, "Pkts4096to8191Octets" }, + { 4, 0xbc, "Pkts8192to9728Octets" }, + { 4, 0xc0, "RxDiscarded" }, +}; + +#define B53_MIBS_63XX_SIZE ARRAY_SIZE(b53_mibs_63xx) + +/* MIB counters */ +static const struct b53_mib_desc b53_mibs[] = { + { 8, 0x00, "TxOctets" }, + { 4, 0x08, "TxDropPkts" }, + { 4, 0x10, "TxBroadcastPkts" }, + { 4, 0x14, "TxMulticastPkts" }, + { 4, 0x18, "TxUnicastPkts" }, + { 4, 0x1c, "TxCollisions" }, + { 4, 0x20, "TxSingleCollision" }, + { 4, 0x24, "TxMultipleCollision" }, + { 4, 
0x28, "TxDeferredTransmit" }, + { 4, 0x2c, "TxLateCollision" }, + { 4, 0x30, "TxExcessiveCollision" }, + { 4, 0x38, "TxPausePkts" }, + { 8, 0x50, "RxOctets" }, + { 4, 0x58, "RxUndersizePkts" }, + { 4, 0x5c, "RxPausePkts" }, + { 4, 0x60, "Pkts64Octets" }, + { 4, 0x64, "Pkts65to127Octets" }, + { 4, 0x68, "Pkts128to255Octets" }, + { 4, 0x6c, "Pkts256to511Octets" }, + { 4, 0x70, "Pkts512to1023Octets" }, + { 4, 0x74, "Pkts1024to1522Octets" }, + { 4, 0x78, "RxOversizePkts" }, + { 4, 0x7c, "RxJabbers" }, + { 4, 0x80, "RxAlignmentErrors" }, + { 4, 0x84, "RxFCSErrors" }, + { 8, 0x88, "RxGoodOctets" }, + { 4, 0x90, "RxDropPkts" }, + { 4, 0x94, "RxUnicastPkts" }, + { 4, 0x98, "RxMulticastPkts" }, + { 4, 0x9c, "RxBroadcastPkts" }, + { 4, 0xa0, "RxSAChanges" }, + { 4, 0xa4, "RxFragments" }, + { 4, 0xa8, "RxJumboPkts" }, + { 4, 0xac, "RxSymbolErrors" }, + { 4, 0xc0, "RxDiscarded" }, +}; + +#define B53_MIBS_SIZE ARRAY_SIZE(b53_mibs) + +static int b53_do_vlan_op(struct b53_device *dev, u8 op) +{ + unsigned int i; + + b53_write8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], VTA_START_CMD | op); + + for (i = 0; i < 10; i++) { + u8 vta; + + b53_read8(dev, B53_ARLIO_PAGE, dev->vta_regs[0], &vta); + if (!(vta & VTA_START_CMD)) + return 0; + + usleep_range(100, 200); + } + + return -EIO; +} + +static void b53_set_vlan_entry(struct b53_device *dev, u16 vid, + struct b53_vlan *vlan) +{ + if (is5325(dev)) { + u32 entry = 0; + + if (vlan->members) { + entry = ((vlan->untag & VA_UNTAG_MASK_25) << + VA_UNTAG_S_25) | vlan->members; + if (dev->core_rev >= 3) + entry |= VA_VALID_25_R4 | vid << VA_VID_HIGH_S; + else + entry |= VA_VALID_25; + } + + b53_write32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, entry); + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | + VTA_RW_STATE_WR | VTA_RW_OP_EN); + } else if (is5365(dev)) { + u16 entry = 0; + + if (vlan->members) + entry = ((vlan->untag & VA_UNTAG_MASK_65) << + VA_UNTAG_S_65) | vlan->members | VA_VALID_65; + + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, entry); + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | + VTA_RW_STATE_WR | VTA_RW_OP_EN); + } else { + b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); + b53_write32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], + (vlan->untag << VTE_UNTAG_S) | vlan->members); + + b53_do_vlan_op(dev, VTA_CMD_WRITE); + } + + dev_dbg(dev->ds->dev, "VID: %d, members: 0x%04x, untag: 0x%04x\n", + vid, vlan->members, vlan->untag); +} + +static void b53_get_vlan_entry(struct b53_device *dev, u16 vid, + struct b53_vlan *vlan) +{ + if (is5325(dev)) { + u32 entry = 0; + + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, vid | + VTA_RW_STATE_RD | VTA_RW_OP_EN); + b53_read32(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_25, &entry); + + if (dev->core_rev >= 3) + vlan->valid = !!(entry & VA_VALID_25_R4); + else + vlan->valid = !!(entry & VA_VALID_25); + vlan->members = entry & VA_MEMBER_MASK; + vlan->untag = (entry >> VA_UNTAG_S_25) & VA_UNTAG_MASK_25; + + } else if (is5365(dev)) { + u16 entry = 0; + + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_65, vid | + VTA_RW_STATE_WR | VTA_RW_OP_EN); + b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_WRITE_65, &entry); + + vlan->valid = !!(entry & VA_VALID_65); + vlan->members = entry & VA_MEMBER_MASK; + vlan->untag = (entry >> VA_UNTAG_S_65) & VA_UNTAG_MASK_65; + } else { + u32 entry = 0; + + b53_write16(dev, B53_ARLIO_PAGE, dev->vta_regs[1], vid); + b53_do_vlan_op(dev, VTA_CMD_READ); + b53_read32(dev, B53_ARLIO_PAGE, dev->vta_regs[2], &entry); + vlan->members = entry & VTE_MEMBERS; + 
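+		/* the untagged map is packed above the member map in the
+		 * same entry word, mirroring b53_set_vlan_entry() above
+		 */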
vlan->untag = (entry >> VTE_UNTAG_S) & VTE_MEMBERS; + vlan->valid = true; + } +} + +static void b53_set_forwarding(struct b53_device *dev, int enable) +{ + u8 mgmt; + + b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); + + if (enable) + mgmt |= SM_SW_FWD_EN; + else + mgmt &= ~SM_SW_FWD_EN; + + b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); +} + +static void b53_enable_vlan(struct b53_device *dev, bool enable) +{ + u8 mgmt, vc0, vc1, vc4 = 0, vc5; + + b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, &vc0); + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, &vc1); + + if (is5325(dev) || is5365(dev)) { + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, &vc5); + } else if (is63xx(dev)) { + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, &vc4); + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, &vc5); + } else { + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, &vc4); + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5); + } + + mgmt &= ~SM_SW_FWD_MODE; + + if (enable) { + vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; + vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; + vc4 &= ~VC4_ING_VID_CHECK_MASK; + vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; + vc5 |= VC5_DROP_VTABLE_MISS; + + if (is5325(dev)) + vc0 &= ~VC0_RESERVED_1; + + if (is5325(dev) || is5365(dev)) + vc1 |= VC1_RX_MCST_TAG_EN; + + } else { + vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID); + vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN); + vc4 &= ~VC4_ING_VID_CHECK_MASK; + vc5 &= ~VC5_DROP_VTABLE_MISS; + + if (is5325(dev) || is5365(dev)) + vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; + else + vc4 |= VC4_ING_VID_VIO_TO_IMP << VC4_ING_VID_CHECK_S; + + if (is5325(dev) || is5365(dev)) + vc1 &= ~VC1_RX_MCST_TAG_EN; + } + + if (!is5325(dev) && !is5365(dev)) + vc5 &= ~VC5_VID_FFF_EN; + + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL0, vc0); + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL1, vc1); + + if (is5325(dev) || is5365(dev)) { + /* enable the high 8 bit vid check on 5325 */ + if (is5325(dev) && enable) + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, + VC3_HIGH_8BIT_EN); + else + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); + + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, vc4); + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_25, vc5); + } else if (is63xx(dev)) { + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3_63XX, 0); + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_63XX, vc4); + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5_63XX, vc5); + } else { + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_CTRL3, 0); + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4, vc4); + b53_write8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, vc5); + } + + b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); +} + +static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) +{ + u32 port_mask = 0; + u16 max_size = JMS_MIN_SIZE; + + if (is5325(dev) || is5365(dev)) + return -EINVAL; + + if (enable) { + port_mask = dev->enabled_ports; + max_size = JMS_MAX_SIZE; + if (allow_10_100) + port_mask |= JPM_10_100_JUMBO_EN; + } + + b53_write32(dev, B53_JUMBO_PAGE, dev->jumbo_pm_reg, port_mask); + return b53_write16(dev, B53_JUMBO_PAGE, dev->jumbo_size_reg, max_size); +} + +static int b53_flush_arl(struct b53_device *dev, u8 mask) +{ + unsigned int i; + + b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, + FAST_AGE_DONE | FAST_AGE_DYNAMIC | mask); + + for (i = 0; i < 10; i++) { + u8 
fast_age_ctrl; + + b53_read8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, + &fast_age_ctrl); + + if (!(fast_age_ctrl & FAST_AGE_DONE)) + goto out; + + msleep(1); + } + + return -ETIMEDOUT; +out: + /* Only age dynamic entries (default behavior) */ + b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_CTRL, FAST_AGE_DYNAMIC); + return 0; +} + +static int b53_fast_age_port(struct b53_device *dev, int port) +{ + b53_write8(dev, B53_CTRL_PAGE, B53_FAST_AGE_PORT_CTRL, port); + + return b53_flush_arl(dev, FAST_AGE_PORT); +} + +static int b53_fast_age_vlan(struct b53_device *dev, u16 vid) +{ + b53_write16(dev, B53_CTRL_PAGE, B53_FAST_AGE_VID_CTRL, vid); + + return b53_flush_arl(dev, FAST_AGE_VLAN); +} + +static void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) +{ + struct b53_device *dev = ds_to_priv(ds); + unsigned int i; + u16 pvlan; + + /* Enable the IMP port to be in the same VLAN as the other ports + * on a per-port basis such that we only have Port i and IMP in + * the same VLAN. + */ + b53_for_each_port(dev, i) { + b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &pvlan); + pvlan |= BIT(cpu_port); + b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), pvlan); + } +} + +static int b53_enable_port(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct b53_device *dev = ds_to_priv(ds); + unsigned int cpu_port = dev->cpu_port; + u16 pvlan; + + /* Clear the Rx and Tx disable bits and set to no spanning tree */ + b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0); + + /* Set this port, and only this one to be in the default VLAN, + * if member of a bridge, restore its membership prior to + * bringing down this port. + */ + b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); + pvlan &= ~0x1ff; + pvlan |= BIT(port); + pvlan |= dev->ports[port].vlan_ctl_mask; + b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); + + b53_imp_vlan_setup(ds, cpu_port); + + return 0; +} + +static void b53_disable_port(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct b53_device *dev = ds_to_priv(ds); + u8 reg; + + /* Disable Tx/Rx for the port */ + b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); + reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE; + b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); +} + +static void b53_enable_cpu_port(struct b53_device *dev) +{ + unsigned int cpu_port = dev->cpu_port; + u8 port_ctrl; + + /* BCM5325 CPU port is at 8 */ + if ((is5325(dev) || is5365(dev)) && cpu_port == B53_CPU_PORT_25) + cpu_port = B53_CPU_PORT; + + port_ctrl = PORT_CTRL_RX_BCST_EN | + PORT_CTRL_RX_MCST_EN | + PORT_CTRL_RX_UCST_EN; + b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(cpu_port), port_ctrl); +} + +static void b53_enable_mib(struct b53_device *dev) +{ + u8 gc; + + b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); + gc &= ~(GC_RESET_MIB | GC_MIB_AC_EN); + b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); +} + +static int b53_configure_vlan(struct b53_device *dev) +{ + struct b53_vlan vl = { 0 }; + int i; + + /* clear all vlan entries */ + if (is5325(dev) || is5365(dev)) { + for (i = 1; i < dev->num_vlans; i++) + b53_set_vlan_entry(dev, i, &vl); + } else { + b53_do_vlan_op(dev, VTA_CMD_CLEAR); + } + + b53_enable_vlan(dev, false); + + b53_for_each_port(dev, i) + b53_write16(dev, B53_VLAN_PAGE, + B53_VLAN_PORT_DEF_TAG(i), 1); + + if (!is5325(dev) && !is5365(dev)) + b53_set_jumbo(dev, dev->enable_jumbo, false); + + return 0; +} + +static void b53_switch_reset_gpio(struct b53_device *dev) +{ + int gpio = 
dev->reset_gpio; + + if (gpio < 0) + return; + + /* Reset sequence: RESET low(50ms)->high(20ms) + */ + gpio_set_value(gpio, 0); + mdelay(50); + + gpio_set_value(gpio, 1); + mdelay(20); + + dev->current_page = 0xff; +} + +static int b53_switch_reset(struct b53_device *dev) +{ + u8 mgmt; + + b53_switch_reset_gpio(dev); + + if (is539x(dev)) { + b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x83); + b53_write8(dev, B53_CTRL_PAGE, B53_SOFTRESET, 0x00); + } + + b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); + + if (!(mgmt & SM_SW_FWD_EN)) { + mgmt &= ~SM_SW_FWD_MODE; + mgmt |= SM_SW_FWD_EN; + + b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); + b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt); + + if (!(mgmt & SM_SW_FWD_EN)) { + dev_err(dev->dev, "Failed to enable switch!\n"); + return -EINVAL; + } + } + + b53_enable_mib(dev); + + return b53_flush_arl(dev, FAST_AGE_STATIC); +} + +static int b53_phy_read16(struct dsa_switch *ds, int addr, int reg) +{ + struct b53_device *priv = ds_to_priv(ds); + u16 value = 0; + int ret; + + if (priv->ops->phy_read16) + ret = priv->ops->phy_read16(priv, addr, reg, &value); + else + ret = b53_read16(priv, B53_PORT_MII_PAGE(addr), + reg * 2, &value); + + return ret ? ret : value; +} + +static int b53_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val) +{ + struct b53_device *priv = ds_to_priv(ds); + + if (priv->ops->phy_write16) + return priv->ops->phy_write16(priv, addr, reg, val); + + return b53_write16(priv, B53_PORT_MII_PAGE(addr), reg * 2, val); +} + +static int b53_reset_switch(struct b53_device *priv) +{ + /* reset vlans */ + priv->enable_jumbo = false; + + memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans); + memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports); + + return b53_switch_reset(priv); +} + +static int b53_apply_config(struct b53_device *priv) +{ + /* disable switching */ + b53_set_forwarding(priv, 0); + + b53_configure_vlan(priv); + + /* enable switching */ + b53_set_forwarding(priv, 1); + + return 0; +} + +static void b53_reset_mib(struct b53_device *priv) +{ + u8 gc; + + b53_read8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); + + b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc | GC_RESET_MIB); + msleep(1); + b53_write8(priv, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc & ~GC_RESET_MIB); + msleep(1); +} + +static const struct b53_mib_desc *b53_get_mib(struct b53_device *dev) +{ + if (is5365(dev)) + return b53_mibs_65; + else if (is63xx(dev)) + return b53_mibs_63xx; + else + return b53_mibs; +} + +static unsigned int b53_get_mib_size(struct b53_device *dev) +{ + if (is5365(dev)) + return B53_MIBS_65_SIZE; + else if (is63xx(dev)) + return B53_MIBS_63XX_SIZE; + else + return B53_MIBS_SIZE; +} + +static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data) +{ + struct b53_device *dev = ds_to_priv(ds); + const struct b53_mib_desc *mibs = b53_get_mib(dev); + unsigned int mib_size = b53_get_mib_size(dev); + unsigned int i; + + for (i = 0; i < mib_size; i++) + memcpy(data + i * ETH_GSTRING_LEN, + mibs[i].name, ETH_GSTRING_LEN); +} + +static void b53_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + struct b53_device *dev = ds_to_priv(ds); + const struct b53_mib_desc *mibs = b53_get_mib(dev); + unsigned int mib_size = b53_get_mib_size(dev); + const struct b53_mib_desc *s; + unsigned int i; + u64 val = 0; + + if (is5365(dev) && port == 5) + port = 8; + + mutex_lock(&dev->stats_mutex); + + for (i = 0; i < mib_size; i++) { + s = &mibs[i]; + + if (s->size == 8) { + 
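+			/* 8-byte MIB counters (the Tx/RxOctets pairs in the
+			 * tables above) are fetched with b53_read64(); all
+			 * other counters are plain 32-bit reads widened to
+			 * u64 below.
+			 */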
b53_read64(dev, B53_MIB_PAGE(port), s->offset, &val); + } else { + u32 val32; + + b53_read32(dev, B53_MIB_PAGE(port), s->offset, + &val32); + val = val32; + } + data[i] = (u64)val; + } + + mutex_unlock(&dev->stats_mutex); +} + +static int b53_get_sset_count(struct dsa_switch *ds) +{ + struct b53_device *dev = ds_to_priv(ds); + + return b53_get_mib_size(dev); +} + +static int b53_set_addr(struct dsa_switch *ds, u8 *addr) +{ + return 0; +} + +static int b53_setup(struct dsa_switch *ds) +{ + struct b53_device *dev = ds_to_priv(ds); + unsigned int port; + int ret; + + ret = b53_reset_switch(dev); + if (ret) { + dev_err(ds->dev, "failed to reset switch\n"); + return ret; + } + + b53_reset_mib(dev); + + ret = b53_apply_config(dev); + if (ret) + dev_err(ds->dev, "failed to apply configuration\n"); + + for (port = 0; port < dev->num_ports; port++) { + if (BIT(port) & ds->enabled_port_mask) + b53_enable_port(ds, port, NULL); + else if (dsa_is_cpu_port(ds, port)) + b53_enable_cpu_port(dev); + else + b53_disable_port(ds, port, NULL); + } + + return ret; +} + +static void b53_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct b53_device *dev = ds_to_priv(ds); + u8 rgmii_ctrl = 0, reg = 0, off; + + if (!phy_is_pseudo_fixed_link(phydev)) + return; + + /* Override the port settings */ + if (port == dev->cpu_port) { + off = B53_PORT_OVERRIDE_CTRL; + reg = PORT_OVERRIDE_EN; + } else { + off = B53_GMII_PORT_OVERRIDE_CTRL(port); + reg = GMII_PO_EN; + } + + /* Set the link UP */ + if (phydev->link) + reg |= PORT_OVERRIDE_LINK; + + if (phydev->duplex == DUPLEX_FULL) + reg |= PORT_OVERRIDE_FULL_DUPLEX; + + switch (phydev->speed) { + case 2000: + reg |= PORT_OVERRIDE_SPEED_2000M; + /* fallthrough */ + case SPEED_1000: + reg |= PORT_OVERRIDE_SPEED_1000M; + break; + case SPEED_100: + reg |= PORT_OVERRIDE_SPEED_100M; + break; + case SPEED_10: + reg |= PORT_OVERRIDE_SPEED_10M; + break; + default: + dev_err(ds->dev, "unknown speed: %d\n", phydev->speed); + return; + } + + /* Enable flow control on BCM5301x's CPU port */ + if (is5301x(dev) && port == dev->cpu_port) + reg |= PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW; + + if (phydev->pause) { + if (phydev->asym_pause) + reg |= PORT_OVERRIDE_TX_FLOW; + reg |= PORT_OVERRIDE_RX_FLOW; + } + + b53_write8(dev, B53_CTRL_PAGE, off, reg); + + if (is531x5(dev) && phy_interface_is_rgmii(phydev)) { + if (port == 8) + off = B53_RGMII_CTRL_IMP; + else + off = B53_RGMII_CTRL_P(port); + + /* Configure the port RGMII clock delay by DLL disabled and + * tx_clk aligned timing (restoring to reset defaults) + */ + b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl); + rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC | + RGMII_CTRL_TIMING_SEL); + + /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make + * sure that we enable the port TX clock internal delay to + * account for this internal delay that is inserted, otherwise + * the switch won't be able to receive correctly. 
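+	 *
+	 * For PHY_INTERFACE_MODE_RGMII_ID and RGMII_RXID the PHY inserts
+	 * the delays itself, so neither DLL bit is set on the switch for
+	 * those modes.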
+ * + * PHY_INTERFACE_MODE_RGMII means that we are not introducing + * any delay neither on transmission nor reception, so the + * BCM53125 must also be configured accordingly to account for + * the lack of delay and introduce + * + * The BCM53125 switch has its RX clock and TX clock control + * swapped, hence the reason why we modify the TX clock path in + * the "RGMII" case + */ + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + rgmii_ctrl |= RGMII_CTRL_DLL_TXC; + if (phydev->interface == PHY_INTERFACE_MODE_RGMII) + rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC; + rgmii_ctrl |= RGMII_CTRL_TIMING_SEL; + b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl); + + dev_info(ds->dev, "Configured port %d for %s\n", port, + phy_modes(phydev->interface)); + } + + /* configure MII port if necessary */ + if (is5325(dev)) { + b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, + ®); + + /* reverse mii needs to be enabled */ + if (!(reg & PORT_OVERRIDE_RV_MII_25)) { + b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, + reg | PORT_OVERRIDE_RV_MII_25); + b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL, + ®); + + if (!(reg & PORT_OVERRIDE_RV_MII_25)) { + dev_err(ds->dev, + "Failed to enable reverse MII mode\n"); + return; + } + } + } else if (is5301x(dev)) { + if (port != dev->cpu_port) { + u8 po_reg = B53_GMII_PORT_OVERRIDE_CTRL(dev->cpu_port); + u8 gmii_po; + + b53_read8(dev, B53_CTRL_PAGE, po_reg, &gmii_po); + gmii_po |= GMII_PO_LINK | + GMII_PO_RX_FLOW | + GMII_PO_TX_FLOW | + GMII_PO_EN | + GMII_PO_SPEED_2000M; + b53_write8(dev, B53_CTRL_PAGE, po_reg, gmii_po); + } + } +} + +static int b53_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering) +{ + return 0; +} + +static int b53_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct b53_device *dev = ds_to_priv(ds); + + if ((is5325(dev) || is5365(dev)) && vlan->vid_begin == 0) + return -EOPNOTSUPP; + + if (vlan->vid_end > dev->num_vlans) + return -ERANGE; + + b53_enable_vlan(dev, true); + + return 0; +} + +static void b53_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct b53_device *dev = ds_to_priv(ds); + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + unsigned int cpu_port = dev->cpu_port; + struct b53_vlan *vl; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + vl = &dev->vlans[vid]; + + b53_get_vlan_entry(dev, vid, vl); + + vl->members |= BIT(port) | BIT(cpu_port); + if (untagged) + vl->untag |= BIT(port) | BIT(cpu_port); + else + vl->untag &= ~(BIT(port) | BIT(cpu_port)); + + b53_set_vlan_entry(dev, vid, vl); + b53_fast_age_vlan(dev, vid); + } + + if (pvid) { + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), + vlan->vid_end); + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), + vlan->vid_end); + b53_fast_age_vlan(dev, vid); + } +} + +static int b53_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct b53_device *dev = ds_to_priv(ds); + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + unsigned int cpu_port = dev->cpu_port; + struct b53_vlan *vl; + u16 vid; + u16 pvid; + + b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + vl = &dev->vlans[vid]; + + b53_get_vlan_entry(dev, vid, vl); + + 
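+		/* Drop this port from the member and untag maps that were
+		 * just read back, then write the entry out again.
+		 */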
vl->members &= ~BIT(port); + if ((vl->members & BIT(cpu_port)) == BIT(cpu_port)) + vl->members = 0; + + if (pvid == vid) { + if (is5325(dev) || is5365(dev)) + pvid = 1; + else + pvid = 0; + } + + if (untagged) { + vl->untag &= ~(BIT(port)); + if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port)) + vl->untag = 0; + } + + b53_set_vlan_entry(dev, vid, vl); + b53_fast_age_vlan(dev, vid); + } + + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid); + b53_fast_age_vlan(dev, pvid); + + return 0; +} + +static int b53_vlan_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_vlan *vlan, + int (*cb)(struct switchdev_obj *obj)) +{ + struct b53_device *dev = ds_to_priv(ds); + u16 vid, vid_start = 0, pvid; + struct b53_vlan *vl; + int err = 0; + + if (is5325(dev) || is5365(dev)) + vid_start = 1; + + b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); + + /* Use our software cache for dumps, since we do not have any HW + * operation returning only the used/valid VLANs + */ + for (vid = vid_start; vid < dev->num_vlans; vid++) { + vl = &dev->vlans[vid]; + + if (!vl->valid) + continue; + + if (!(vl->members & BIT(port))) + continue; + + vlan->vid_begin = vlan->vid_end = vid; + vlan->flags = 0; + + if (vl->untag & BIT(port)) + vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; + if (pvid == vid) + vlan->flags |= BRIDGE_VLAN_INFO_PVID; + + err = cb(&vlan->obj); + if (err) + break; + } + + return err; +} + +/* Address Resolution Logic routines */ +static int b53_arl_op_wait(struct b53_device *dev) +{ + unsigned int timeout = 10; + u8 reg; + + do { + b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); + if (!(reg & ARLTBL_START_DONE)) + return 0; + + usleep_range(1000, 2000); + } while (timeout--); + + dev_warn(dev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg); + + return -ETIMEDOUT; +} + +static int b53_arl_rw_op(struct b53_device *dev, unsigned int op) +{ + u8 reg; + + if (op > ARLTBL_RW) + return -EINVAL; + + b53_read8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, ®); + reg |= ARLTBL_START_DONE; + if (op) + reg |= ARLTBL_RW; + else + reg &= ~ARLTBL_RW; + b53_write8(dev, B53_ARLIO_PAGE, B53_ARLTBL_RW_CTRL, reg); + + return b53_arl_op_wait(dev); +} + +static int b53_arl_read(struct b53_device *dev, u64 mac, + u16 vid, struct b53_arl_entry *ent, u8 *idx, + bool is_valid) +{ + unsigned int i; + int ret; + + ret = b53_arl_op_wait(dev); + if (ret) + return ret; + + /* Read the bins */ + for (i = 0; i < dev->num_arl_entries; i++) { + u64 mac_vid; + u32 fwd_entry; + + b53_read64(dev, B53_ARLIO_PAGE, + B53_ARLTBL_MAC_VID_ENTRY(i), &mac_vid); + b53_read32(dev, B53_ARLIO_PAGE, + B53_ARLTBL_DATA_ENTRY(i), &fwd_entry); + b53_arl_to_entry(ent, mac_vid, fwd_entry); + + if (!(fwd_entry & ARLTBL_VALID)) + continue; + if ((mac_vid & ARLTBL_MAC_MASK) != mac) + continue; + *idx = i; + } + + return -ENOENT; +} + +static int b53_arl_op(struct b53_device *dev, int op, int port, + const unsigned char *addr, u16 vid, bool is_valid) +{ + struct b53_arl_entry ent; + u32 fwd_entry; + u64 mac, mac_vid = 0; + u8 idx = 0; + int ret; + + /* Convert the array into a 64-bit MAC */ + mac = b53_mac_to_u64(addr); + + /* Perform a read for the given MAC and VID */ + b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac); + b53_write16(dev, B53_ARLIO_PAGE, B53_VLAN_ID_IDX, vid); + + /* Issue a read operation for this MAC */ + ret = b53_arl_rw_op(dev, 1); + if (ret) + return ret; + + ret = b53_arl_read(dev, mac, vid, &ent, &idx, 
is_valid); + /* If this is a read, just finish now */ + if (op) + return ret; + + /* We could not find a matching MAC, so reset to a new entry */ + if (ret) { + fwd_entry = 0; + idx = 1; + } + + memset(&ent, 0, sizeof(ent)); + ent.port = port; + ent.is_valid = is_valid; + ent.vid = vid; + ent.is_static = true; + memcpy(ent.mac, addr, ETH_ALEN); + b53_arl_from_entry(&mac_vid, &fwd_entry, &ent); + + b53_write64(dev, B53_ARLIO_PAGE, + B53_ARLTBL_MAC_VID_ENTRY(idx), mac_vid); + b53_write32(dev, B53_ARLIO_PAGE, + B53_ARLTBL_DATA_ENTRY(idx), fwd_entry); + + return b53_arl_rw_op(dev, 0); +} + +static int b53_fdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + struct b53_device *priv = ds_to_priv(ds); + + /* 5325 and 5365 require some more massaging, but could + * be supported eventually + */ + if (is5325(priv) || is5365(priv)) + return -EOPNOTSUPP; + + return 0; +} + +static void b53_fdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + struct b53_device *priv = ds_to_priv(ds); + + if (b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, true)) + pr_err("%s: failed to add MAC address\n", __func__); +} + +static int b53_fdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb) +{ + struct b53_device *priv = ds_to_priv(ds); + + return b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, false); +} + +static int b53_arl_search_wait(struct b53_device *dev) +{ + unsigned int timeout = 1000; + u8 reg; + + do { + b53_read8(dev, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, ®); + if (!(reg & ARL_SRCH_STDN)) + return 0; + + if (reg & ARL_SRCH_VLID) + return 0; + + usleep_range(1000, 2000); + } while (timeout--); + + return -ETIMEDOUT; +} + +static void b53_arl_search_rd(struct b53_device *dev, u8 idx, + struct b53_arl_entry *ent) +{ + u64 mac_vid; + u32 fwd_entry; + + b53_read64(dev, B53_ARLIO_PAGE, + B53_ARL_SRCH_RSTL_MACVID(idx), &mac_vid); + b53_read32(dev, B53_ARLIO_PAGE, + B53_ARL_SRCH_RSTL(idx), &fwd_entry); + b53_arl_to_entry(ent, mac_vid, fwd_entry); +} + +static int b53_fdb_copy(struct net_device *dev, int port, + const struct b53_arl_entry *ent, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)) +{ + if (!ent->is_valid) + return 0; + + if (port != ent->port) + return 0; + + ether_addr_copy(fdb->addr, ent->mac); + fdb->vid = ent->vid; + fdb->ndm_state = ent->is_static ? 
NUD_NOARP : NUD_REACHABLE; + + return cb(&fdb->obj); +} + +static int b53_fdb_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)) +{ + struct b53_device *priv = ds_to_priv(ds); + struct net_device *dev = ds->ports[port].netdev; + struct b53_arl_entry results[2]; + unsigned int count = 0; + int ret; + u8 reg; + + /* Start search operation */ + reg = ARL_SRCH_STDN; + b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg); + + do { + ret = b53_arl_search_wait(priv); + if (ret) + return ret; + + b53_arl_search_rd(priv, 0, &results[0]); + ret = b53_fdb_copy(dev, port, &results[0], fdb, cb); + if (ret) + return ret; + + if (priv->num_arl_entries > 2) { + b53_arl_search_rd(priv, 1, &results[1]); + ret = b53_fdb_copy(dev, port, &results[1], fdb, cb); + if (ret) + return ret; + + if (!results[0].is_valid && !results[1].is_valid) + break; + } + + } while (count++ < 1024); + + return 0; +} + +static int b53_br_join(struct dsa_switch *ds, int port, + struct net_device *bridge) +{ + struct b53_device *dev = ds_to_priv(ds); + u16 pvlan, reg; + unsigned int i; + + dev->ports[port].bridge_dev = bridge; + b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); + + b53_for_each_port(dev, i) { + if (dev->ports[i].bridge_dev != bridge) + continue; + + /* Add this local port to the remote port VLAN control + * membership and update the remote port bitmask + */ + b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); + reg |= BIT(port); + b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); + dev->ports[i].vlan_ctl_mask = reg; + + pvlan |= BIT(i); + } + + /* Configure the local port VLAN control membership to include + * remote ports and update the local port bitmask + */ + b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); + dev->ports[port].vlan_ctl_mask = pvlan; + + return 0; +} + +static void b53_br_leave(struct dsa_switch *ds, int port) +{ + struct b53_device *dev = ds_to_priv(ds); + struct net_device *bridge = dev->ports[port].bridge_dev; + struct b53_vlan *vl = &dev->vlans[0]; + unsigned int i; + u16 pvlan, reg, pvid; + + b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan); + + b53_for_each_port(dev, i) { + /* Don't touch the remaining ports */ + if (dev->ports[i].bridge_dev != bridge) + continue; + + b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), ®); + reg &= ~BIT(port); + b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), reg); + dev->ports[port].vlan_ctl_mask = reg; + + /* Prevent self removal to preserve isolation */ + if (port != i) + pvlan &= ~BIT(i); + } + + b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); + dev->ports[port].vlan_ctl_mask = pvlan; + dev->ports[port].bridge_dev = NULL; + + if (is5325(dev) || is5365(dev)) + pvid = 1; + else + pvid = 0; + + b53_get_vlan_entry(dev, pvid, vl); + vl->members |= BIT(port) | BIT(dev->cpu_port); + vl->untag |= BIT(port) | BIT(dev->cpu_port); + b53_set_vlan_entry(dev, pvid, vl); +} + +static void b53_br_set_stp_state(struct dsa_switch *ds, int port, + u8 state) +{ + struct b53_device *dev = ds_to_priv(ds); + u8 hw_state, cur_hw_state; + u8 reg; + + b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); + cur_hw_state = reg & PORT_CTRL_STP_STATE_MASK; + + switch (state) { + case BR_STATE_DISABLED: + hw_state = PORT_CTRL_DIS_STATE; + break; + case BR_STATE_LISTENING: + hw_state = PORT_CTRL_LISTEN_STATE; + break; + case BR_STATE_LEARNING: + hw_state = PORT_CTRL_LEARN_STATE; + break; + case 
BR_STATE_FORWARDING: + hw_state = PORT_CTRL_FWD_STATE; + break; + case BR_STATE_BLOCKING: + hw_state = PORT_CTRL_BLOCK_STATE; + break; + default: + dev_err(ds->dev, "invalid STP state: %d\n", state); + return; + } + + /* Fast-age ARL entries if we are moving a port from Learning or + * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening + * state (hw_state) + */ + if (cur_hw_state != hw_state) { + if (cur_hw_state >= PORT_CTRL_LEARN_STATE && + hw_state <= PORT_CTRL_LISTEN_STATE) { + if (b53_fast_age_port(dev, port)) { + dev_err(ds->dev, "fast ageing failed\n"); + return; + } + } + } + + b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), ®); + reg &= ~PORT_CTRL_STP_STATE_MASK; + reg |= hw_state; + b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); +} + +static struct dsa_switch_driver b53_switch_ops = { + .tag_protocol = DSA_TAG_PROTO_NONE, + .setup = b53_setup, + .set_addr = b53_set_addr, + .get_strings = b53_get_strings, + .get_ethtool_stats = b53_get_ethtool_stats, + .get_sset_count = b53_get_sset_count, + .phy_read = b53_phy_read16, + .phy_write = b53_phy_write16, + .adjust_link = b53_adjust_link, + .port_enable = b53_enable_port, + .port_disable = b53_disable_port, + .port_bridge_join = b53_br_join, + .port_bridge_leave = b53_br_leave, + .port_stp_state_set = b53_br_set_stp_state, + .port_vlan_filtering = b53_vlan_filtering, + .port_vlan_prepare = b53_vlan_prepare, + .port_vlan_add = b53_vlan_add, + .port_vlan_del = b53_vlan_del, + .port_vlan_dump = b53_vlan_dump, + .port_fdb_prepare = b53_fdb_prepare, + .port_fdb_dump = b53_fdb_dump, + .port_fdb_add = b53_fdb_add, + .port_fdb_del = b53_fdb_del, +}; + +struct b53_chip_data { + u32 chip_id; + const char *dev_name; + u16 vlans; + u16 enabled_ports; + u8 cpu_port; + u8 vta_regs[3]; + u8 arl_entries; + u8 duplex_reg; + u8 jumbo_pm_reg; + u8 jumbo_size_reg; +}; + +#define B53_VTA_REGS \ + { B53_VT_ACCESS, B53_VT_INDEX, B53_VT_ENTRY } +#define B53_VTA_REGS_9798 \ + { B53_VT_ACCESS_9798, B53_VT_INDEX_9798, B53_VT_ENTRY_9798 } +#define B53_VTA_REGS_63XX \ + { B53_VT_ACCESS_63XX, B53_VT_INDEX_63XX, B53_VT_ENTRY_63XX } + +static const struct b53_chip_data b53_switch_chips[] = { + { + .chip_id = BCM5325_DEVICE_ID, + .dev_name = "BCM5325", + .vlans = 16, + .enabled_ports = 0x1f, + .arl_entries = 2, + .cpu_port = B53_CPU_PORT_25, + .duplex_reg = B53_DUPLEX_STAT_FE, + }, + { + .chip_id = BCM5365_DEVICE_ID, + .dev_name = "BCM5365", + .vlans = 256, + .enabled_ports = 0x1f, + .arl_entries = 2, + .cpu_port = B53_CPU_PORT_25, + .duplex_reg = B53_DUPLEX_STAT_FE, + }, + { + .chip_id = BCM5395_DEVICE_ID, + .dev_name = "BCM5395", + .vlans = 4096, + .enabled_ports = 0x1f, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM5397_DEVICE_ID, + .dev_name = "BCM5397", + .vlans = 4096, + .enabled_ports = 0x1f, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS_9798, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM5398_DEVICE_ID, + .dev_name = "BCM5398", + .vlans = 4096, + .enabled_ports = 0x7f, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS_9798, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM53115_DEVICE_ID, + .dev_name = "BCM53115", + .vlans = 
4096, + .enabled_ports = 0x1f, + .arl_entries = 4, + .vta_regs = B53_VTA_REGS, + .cpu_port = B53_CPU_PORT, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM53125_DEVICE_ID, + .dev_name = "BCM53125", + .vlans = 4096, + .enabled_ports = 0xff, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM53128_DEVICE_ID, + .dev_name = "BCM53128", + .vlans = 4096, + .enabled_ports = 0x1ff, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM63XX_DEVICE_ID, + .dev_name = "BCM63xx", + .vlans = 4096, + .enabled_ports = 0, /* pdata must provide them */ + .arl_entries = 4, + .cpu_port = B53_CPU_PORT, + .vta_regs = B53_VTA_REGS_63XX, + .duplex_reg = B53_DUPLEX_STAT_63XX, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE_63XX, + }, + { + .chip_id = BCM53010_DEVICE_ID, + .dev_name = "BCM53010", + .vlans = 4096, + .enabled_ports = 0x1f, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM53011_DEVICE_ID, + .dev_name = "BCM53011", + .vlans = 4096, + .enabled_ports = 0x1bf, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM53012_DEVICE_ID, + .dev_name = "BCM53012", + .vlans = 4096, + .enabled_ports = 0x1bf, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM53018_DEVICE_ID, + .dev_name = "BCM53018", + .vlans = 4096, + .enabled_ports = 0x1f, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM53019_DEVICE_ID, + .dev_name = "BCM53019", + .vlans = 4096, + .enabled_ports = 0x1f, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */ + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, + { + .chip_id = BCM58XX_DEVICE_ID, + .dev_name = "BCM585xx/586xx/88312", + .vlans = 4096, + .enabled_ports = 0x1ff, + .arl_entries = 4, + .cpu_port = B53_CPU_PORT_25, + .vta_regs = B53_VTA_REGS, + .duplex_reg = B53_DUPLEX_STAT_GE, + .jumbo_pm_reg = B53_JUMBO_PORT_MASK, + .jumbo_size_reg = B53_JUMBO_MAX_SIZE, + }, +}; + +static int b53_switch_init(struct b53_device *dev) +{ + struct dsa_switch *ds = dev->ds; + unsigned int i; + int ret; + + for (i = 0; i < ARRAY_SIZE(b53_switch_chips); i++) { + const struct b53_chip_data *chip = &b53_switch_chips[i]; + + if (chip->chip_id == dev->chip_id) { + if (!dev->enabled_ports) + dev->enabled_ports = chip->enabled_ports; + dev->name = chip->dev_name; + dev->duplex_reg = chip->duplex_reg; + dev->vta_regs[0] = 
chip->vta_regs[0]; + dev->vta_regs[1] = chip->vta_regs[1]; + dev->vta_regs[2] = chip->vta_regs[2]; + dev->jumbo_pm_reg = chip->jumbo_pm_reg; + ds->drv = &b53_switch_ops; + dev->cpu_port = chip->cpu_port; + dev->num_vlans = chip->vlans; + dev->num_arl_entries = chip->arl_entries; + break; + } + } + + /* check which BCM5325x version we have */ + if (is5325(dev)) { + u8 vc4; + + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL4_25, &vc4); + + /* check reserved bits */ + switch (vc4 & 3) { + case 1: + /* BCM5325E */ + break; + case 3: + /* BCM5325F - do not use port 4 */ + dev->enabled_ports &= ~BIT(4); + break; + default: +/* On the BCM47XX SoCs this is the supported internal switch.*/ +#ifndef CONFIG_BCM47XX + /* BCM5325M */ + return -EINVAL; +#else + break; +#endif + } + } else if (dev->chip_id == BCM53115_DEVICE_ID) { + u64 strap_value; + + b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value); + /* use second IMP port if GMII is enabled */ + if (strap_value & SV_GMII_CTRL_115) + dev->cpu_port = 5; + } + + /* cpu port is always last */ + dev->num_ports = dev->cpu_port + 1; + dev->enabled_ports |= BIT(dev->cpu_port); + + dev->ports = devm_kzalloc(dev->dev, + sizeof(struct b53_port) * dev->num_ports, + GFP_KERNEL); + if (!dev->ports) + return -ENOMEM; + + dev->vlans = devm_kzalloc(dev->dev, + sizeof(struct b53_vlan) * dev->num_vlans, + GFP_KERNEL); + if (!dev->vlans) + return -ENOMEM; + + dev->reset_gpio = b53_switch_get_reset_gpio(dev); + if (dev->reset_gpio >= 0) { + ret = devm_gpio_request_one(dev->dev, dev->reset_gpio, + GPIOF_OUT_INIT_HIGH, "robo_reset"); + if (ret) + return ret; + } + + return 0; +} + +struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops, + void *priv) +{ + struct dsa_switch *ds; + struct b53_device *dev; + + ds = devm_kzalloc(base, sizeof(*ds) + sizeof(*dev), GFP_KERNEL); + if (!ds) + return NULL; + + dev = (struct b53_device *)(ds + 1); + + ds->priv = dev; + ds->dev = base; + dev->dev = base; + + dev->ds = ds; + dev->priv = priv; + dev->ops = ops; + mutex_init(&dev->reg_mutex); + mutex_init(&dev->stats_mutex); + + return dev; +} +EXPORT_SYMBOL(b53_switch_alloc); + +int b53_switch_detect(struct b53_device *dev) +{ + u32 id32; + u16 tmp; + u8 id8; + int ret; + + ret = b53_read8(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id8); + if (ret) + return ret; + + switch (id8) { + case 0: + /* BCM5325 and BCM5365 do not have this register so reads + * return 0. But the read operation did succeed, so assume this + * is one of them. + * + * Next check if we can write to the 5325's VTA register; for + * 5365 it is read only. 
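+		 * (If the test pattern written below reads back, the VTA
+		 *  register is writable and the chip is a 5325; otherwise
+		 *  it is a 5365.)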
+ */ + b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, 0xf); + b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_TABLE_ACCESS_25, &tmp); + + if (tmp == 0xf) + dev->chip_id = BCM5325_DEVICE_ID; + else + dev->chip_id = BCM5365_DEVICE_ID; + break; + case BCM5395_DEVICE_ID: + case BCM5397_DEVICE_ID: + case BCM5398_DEVICE_ID: + dev->chip_id = id8; + break; + default: + ret = b53_read32(dev, B53_MGMT_PAGE, B53_DEVICE_ID, &id32); + if (ret) + return ret; + + switch (id32) { + case BCM53115_DEVICE_ID: + case BCM53125_DEVICE_ID: + case BCM53128_DEVICE_ID: + case BCM53010_DEVICE_ID: + case BCM53011_DEVICE_ID: + case BCM53012_DEVICE_ID: + case BCM53018_DEVICE_ID: + case BCM53019_DEVICE_ID: + dev->chip_id = id32; + break; + default: + pr_err("unsupported switch detected (BCM53%02x/BCM%x)\n", + id8, id32); + return -ENODEV; + } + } + + if (dev->chip_id == BCM5325_DEVICE_ID) + return b53_read8(dev, B53_STAT_PAGE, B53_REV_ID_25, + &dev->core_rev); + else + return b53_read8(dev, B53_MGMT_PAGE, B53_REV_ID, + &dev->core_rev); +} +EXPORT_SYMBOL(b53_switch_detect); + +int b53_switch_register(struct b53_device *dev) +{ + int ret; + + if (dev->pdata) { + dev->chip_id = dev->pdata->chip_id; + dev->enabled_ports = dev->pdata->enabled_ports; + } + + if (!dev->chip_id && b53_switch_detect(dev)) + return -EINVAL; + + ret = b53_switch_init(dev); + if (ret) + return ret; + + pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev); + + return dsa_register_switch(dev->ds, dev->ds->dev->of_node); +} +EXPORT_SYMBOL(b53_switch_register); + +MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>"); +MODULE_DESCRIPTION("B53 switch library"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c new file mode 100644 index 000000000..aa87c3fff --- /dev/null +++ b/drivers/net/dsa/b53/b53_mdio.c @@ -0,0 +1,392 @@ +/* + * B53 register access through MII registers + * + * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/phy.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/brcmphy.h> +#include <linux/rtnetlink.h> +#include <net/dsa.h> + +#include "b53_priv.h" + +/* MII registers */ +#define REG_MII_PAGE 0x10 /* MII Page register */ +#define REG_MII_ADDR 0x11 /* MII Address register */ +#define REG_MII_DATA0 0x18 /* MII Data register 0 */ +#define REG_MII_DATA1 0x19 /* MII Data register 1 */ +#define REG_MII_DATA2 0x1a /* MII Data register 2 */ +#define REG_MII_DATA3 0x1b /* MII Data register 3 */ + +#define REG_MII_PAGE_ENABLE BIT(0) +#define REG_MII_ADDR_WRITE BIT(0) +#define REG_MII_ADDR_READ BIT(1) + +static int b53_mdio_op(struct b53_device *dev, u8 page, u8 reg, u16 op) +{ + int i; + u16 v; + int ret; + struct mii_bus *bus = dev->priv; + + if (dev->current_page != page) { + /* set page number */ + v = (page << 8) | REG_MII_PAGE_ENABLE; + ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_PAGE, v); + if (ret) + return ret; + dev->current_page = page; + } + + /* set register address */ + v = (reg << 8) | op; + ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_ADDR, v); + if (ret) + return ret; + + /* check if operation completed */ + for (i = 0; i < 5; ++i) { + v = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_ADDR); + if (!(v & (REG_MII_ADDR_WRITE | REG_MII_ADDR_READ))) + break; + usleep_range(10, 100); + } + + if (WARN_ON(i == 5)) + return -EIO; + + return 0; +} + +static int b53_mdio_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val) +{ + struct mii_bus *bus = dev->priv; + int ret; + + ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ); + if (ret) + return ret; + + *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA0) & 0xff; + + return 0; +} + +static int b53_mdio_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val) +{ + struct mii_bus *bus = dev->priv; + int ret; + + ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ); + if (ret) + return ret; + + *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_DATA0); + + return 0; +} + +static int b53_mdio_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val) +{ + struct mii_bus *bus = dev->priv; + int ret; + + ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ); + if (ret) + return ret; + + *val = mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, REG_MII_DATA0); + *val |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA1) << 16; + + return 0; +} + +static int b53_mdio_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + struct mii_bus *bus = dev->priv; + u64 temp = 0; + int i; + int ret; + + ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ); + if (ret) + return ret; + + for (i = 2; i >= 0; i--) { + temp <<= 16; + temp |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA0 + i); + } + + *val = temp; + + return 0; +} + +static int b53_mdio_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + struct mii_bus *bus = dev->priv; + u64 temp = 0; + int i; + int ret; + + ret = b53_mdio_op(dev, page, reg, REG_MII_ADDR_READ); + if (ret) + return ret; + + for (i = 3; i >= 0; i--) { + temp <<= 16; + temp |= mdiobus_read_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA0 + i); + } + + *val = temp; + + return 0; +} + +static int b53_mdio_write8(struct b53_device *dev, u8 page, u8 reg, u8 value) +{ + struct mii_bus *bus = dev->priv; + int ret; + + ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA0, value); + if (ret) + return ret; + + return b53_mdio_op(dev, 
page, reg, REG_MII_ADDR_WRITE); +} + +static int b53_mdio_write16(struct b53_device *dev, u8 page, u8 reg, + u16 value) +{ + struct mii_bus *bus = dev->priv; + int ret; + + ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA0, value); + if (ret) + return ret; + + return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE); +} + +static int b53_mdio_write32(struct b53_device *dev, u8 page, u8 reg, + u32 value) +{ + struct mii_bus *bus = dev->priv; + unsigned int i; + u32 temp = value; + + for (i = 0; i < 2; i++) { + int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA0 + i, + temp & 0xffff); + if (ret) + return ret; + temp >>= 16; + } + + return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE); +} + +static int b53_mdio_write48(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + struct mii_bus *bus = dev->priv; + unsigned int i; + u64 temp = value; + + for (i = 0; i < 3; i++) { + int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA0 + i, + temp & 0xffff); + if (ret) + return ret; + temp >>= 16; + } + + return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE); +} + +static int b53_mdio_write64(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + struct mii_bus *bus = dev->priv; + unsigned int i; + u64 temp = value; + + for (i = 0; i < 4; i++) { + int ret = mdiobus_write_nested(bus, BRCM_PSEUDO_PHY_ADDR, + REG_MII_DATA0 + i, + temp & 0xffff); + if (ret) + return ret; + temp >>= 16; + } + + return b53_mdio_op(dev, page, reg, REG_MII_ADDR_WRITE); +} + +static int b53_mdio_phy_read16(struct b53_device *dev, int addr, int reg, + u16 *value) +{ + struct mii_bus *bus = dev->priv; + + *value = mdiobus_read_nested(bus, addr, reg); + + return 0; +} + +static int b53_mdio_phy_write16(struct b53_device *dev, int addr, int reg, + u16 value) +{ + struct mii_bus *bus = dev->bus; + + return mdiobus_write_nested(bus, addr, reg, value); +} + +static struct b53_io_ops b53_mdio_ops = { + .read8 = b53_mdio_read8, + .read16 = b53_mdio_read16, + .read32 = b53_mdio_read32, + .read48 = b53_mdio_read48, + .read64 = b53_mdio_read64, + .write8 = b53_mdio_write8, + .write16 = b53_mdio_write16, + .write32 = b53_mdio_write32, + .write48 = b53_mdio_write48, + .write64 = b53_mdio_write64, + .phy_read16 = b53_mdio_phy_read16, + .phy_write16 = b53_mdio_phy_write16, +}; + +#define B53_BRCM_OUI_1 0x0143bc00 +#define B53_BRCM_OUI_2 0x03625c00 +#define B53_BRCM_OUI_3 0x00406000 + +static int b53_mdio_probe(struct mdio_device *mdiodev) +{ + struct b53_device *dev; + u32 phy_id; + int ret; + + /* allow the generic PHY driver to take over the non-management MDIO + * addresses + */ + if (mdiodev->addr != BRCM_PSEUDO_PHY_ADDR && mdiodev->addr != 0) { + dev_err(&mdiodev->dev, "leaving address %d to PHY\n", + mdiodev->addr); + return -ENODEV; + } + + /* read the first port's id */ + phy_id = mdiobus_read(mdiodev->bus, 0, 2) << 16; + phy_id |= mdiobus_read(mdiodev->bus, 0, 3); + + /* BCM5325, BCM539x (OUI_1) + * BCM53125, BCM53128 (OUI_2) + * BCM5365 (OUI_3) + */ + if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 && + (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 && + (phy_id & 0xfffffc00) != B53_BRCM_OUI_3) { + dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id); + return -ENODEV; + } + + /* First probe will come from SWITCH_MDIO controller on the 7445D0 + * switch, which will conflict with the 7445 integrated switch + * pseudo-phy (we end-up programming both). 
In that case, we return + * -EPROBE_DEFER for the first time we get here, and wait until we come + * back with the slave MDIO bus which has the correct indirection + * layer setup + */ + if (of_machine_is_compatible("brcm,bcm7445d0") && + strcmp(mdiodev->bus->name, "sf2 slave mii")) + return -EPROBE_DEFER; + + dev = b53_switch_alloc(&mdiodev->dev, &b53_mdio_ops, mdiodev->bus); + if (!dev) + return -ENOMEM; + + /* we don't use page 0xff, so force a page set */ + dev->current_page = 0xff; + dev->bus = mdiodev->bus; + + dev_set_drvdata(&mdiodev->dev, dev); + + ret = b53_switch_register(dev); + if (ret) { + dev_err(&mdiodev->dev, "failed to register switch: %i\n", ret); + return ret; + } + + return ret; +} + +static void b53_mdio_remove(struct mdio_device *mdiodev) +{ + struct b53_device *dev = dev_get_drvdata(&mdiodev->dev); + struct dsa_switch *ds = dev->ds; + + dsa_unregister_switch(ds); +} + +static const struct of_device_id b53_of_match[] = { + { .compatible = "brcm,bcm5325" }, + { .compatible = "brcm,bcm53115" }, + { .compatible = "brcm,bcm53125" }, + { .compatible = "brcm,bcm53128" }, + { .compatible = "brcm,bcm5365" }, + { .compatible = "brcm,bcm5395" }, + { .compatible = "brcm,bcm5397" }, + { .compatible = "brcm,bcm5398" }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, b53_of_match); + +static struct mdio_driver b53_mdio_driver = { + .probe = b53_mdio_probe, + .remove = b53_mdio_remove, + .mdiodrv.driver = { + .name = "bcm53xx", + .of_match_table = b53_of_match, + }, +}; + +static int __init b53_mdio_driver_register(void) +{ + return mdio_driver_register(&b53_mdio_driver); +} +module_init(b53_mdio_driver_register); + +static void __exit b53_mdio_driver_unregister(void) +{ + mdio_driver_unregister(&b53_mdio_driver); +} +module_exit(b53_mdio_driver_unregister); + +MODULE_DESCRIPTION("B53 MDIO access driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c new file mode 100644 index 000000000..77ffc4312 --- /dev/null +++ b/drivers/net/dsa/b53/b53_mmap.c @@ -0,0 +1,273 @@ +/* + * B53 register access through memory mapped registers + * + * Copyright (C) 2012-2013 Jonas Gorski <jogo@openwrt.org> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include <linux/kernel.h> +#include <linux/kconfig.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/platform_data/b53.h> + +#include "b53_priv.h" + +struct b53_mmap_priv { + void __iomem *regs; +}; + +static int b53_mmap_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val) +{ + u8 __iomem *regs = dev->priv; + + *val = readb(regs + (page << 8) + reg); + + return 0; +} + +static int b53_mmap_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val) +{ + u8 __iomem *regs = dev->priv; + + if (WARN_ON(reg % 2)) + return -EINVAL; + + if (dev->pdata && dev->pdata->big_endian) + *val = ioread16be(regs + (page << 8) + reg); + else + *val = readw(regs + (page << 8) + reg); + + return 0; +} + +static int b53_mmap_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val) +{ + u8 __iomem *regs = dev->priv; + + if (WARN_ON(reg % 4)) + return -EINVAL; + + if (dev->pdata && dev->pdata->big_endian) + *val = ioread32be(regs + (page << 8) + reg); + else + *val = readl(regs + (page << 8) + reg); + + return 0; +} + +static int b53_mmap_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + u8 __iomem *regs = dev->priv; + + if (WARN_ON(reg % 2)) + return -EINVAL; + + if (reg % 4) { + u16 lo; + u32 hi; + + if (dev->pdata && dev->pdata->big_endian) { + lo = ioread16be(regs + (page << 8) + reg); + hi = ioread32be(regs + (page << 8) + reg + 2); + } else { + lo = readw(regs + (page << 8) + reg); + hi = readl(regs + (page << 8) + reg + 2); + } + + *val = ((u64)hi << 16) | lo; + } else { + u32 lo; + u16 hi; + + if (dev->pdata && dev->pdata->big_endian) { + lo = ioread32be(regs + (page << 8) + reg); + hi = ioread16be(regs + (page << 8) + reg + 4); + } else { + lo = readl(regs + (page << 8) + reg); + hi = readw(regs + (page << 8) + reg + 4); + } + + *val = ((u64)hi << 32) | lo; + } + + return 0; +} + +static int b53_mmap_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + u8 __iomem *regs = dev->priv; + u32 hi, lo; + + if (WARN_ON(reg % 4)) + return -EINVAL; + + if (dev->pdata && dev->pdata->big_endian) { + lo = ioread32be(regs + (page << 8) + reg); + hi = ioread32be(regs + (page << 8) + reg + 4); + } else { + lo = readl(regs + (page << 8) + reg); + hi = readl(regs + (page << 8) + reg + 4); + } + + *val = ((u64)hi << 32) | lo; + + return 0; +} + +static int b53_mmap_write8(struct b53_device *dev, u8 page, u8 reg, u8 value) +{ + u8 __iomem *regs = dev->priv; + + writeb(value, regs + (page << 8) + reg); + + return 0; +} + +static int b53_mmap_write16(struct b53_device *dev, u8 page, u8 reg, + u16 value) +{ + u8 __iomem *regs = dev->priv; + + if (WARN_ON(reg % 2)) + return -EINVAL; + + if (dev->pdata && dev->pdata->big_endian) + iowrite16be(value, regs + (page << 8) + reg); + else + writew(value, regs + (page << 8) + reg); + + return 0; +} + +static int b53_mmap_write32(struct b53_device *dev, u8 page, u8 reg, + u32 value) +{ + u8 __iomem *regs = dev->priv; + + if (WARN_ON(reg % 4)) + return -EINVAL; + + if (dev->pdata && dev->pdata->big_endian) + iowrite32be(value, regs + (page << 8) + reg); + else + writel(value, regs + (page << 8) + reg); + + return 0; +} + +static int b53_mmap_write48(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + if (WARN_ON(reg % 2)) + return -EINVAL; + + if (reg % 4) { + u32 hi = (u32)(value >> 16); + u16 lo = (u16)value; + + b53_mmap_write16(dev, page, reg, lo); + b53_mmap_write32(dev, page, reg + 2, hi); + } else { + u16 hi = (u16)(value >> 32); + u32 lo = (u32)value; + + b53_mmap_write32(dev, page, 
reg, lo); + b53_mmap_write16(dev, page, reg + 4, hi); + } + + return 0; +} + +static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + u32 hi, lo; + + hi = upper_32_bits(value); + lo = lower_32_bits(value); + + if (WARN_ON(reg % 4)) + return -EINVAL; + + b53_mmap_write32(dev, page, reg, lo); + b53_mmap_write32(dev, page, reg + 4, hi); + + return 0; +} + +static struct b53_io_ops b53_mmap_ops = { + .read8 = b53_mmap_read8, + .read16 = b53_mmap_read16, + .read32 = b53_mmap_read32, + .read48 = b53_mmap_read48, + .read64 = b53_mmap_read64, + .write8 = b53_mmap_write8, + .write16 = b53_mmap_write16, + .write32 = b53_mmap_write32, + .write48 = b53_mmap_write48, + .write64 = b53_mmap_write64, +}; + +static int b53_mmap_probe(struct platform_device *pdev) +{ + struct b53_platform_data *pdata = pdev->dev.platform_data; + struct b53_device *dev; + + if (!pdata) + return -EINVAL; + + dev = b53_switch_alloc(&pdev->dev, &b53_mmap_ops, pdata->regs); + if (!dev) + return -ENOMEM; + + dev->pdata = pdata; + + platform_set_drvdata(pdev, dev); + + return b53_switch_register(dev); +} + +static int b53_mmap_remove(struct platform_device *pdev) +{ + struct b53_device *dev = platform_get_drvdata(pdev); + + if (dev) + b53_switch_remove(dev); + + return 0; +} + +static const struct of_device_id b53_mmap_of_table[] = { + { .compatible = "brcm,bcm3384-switch" }, + { .compatible = "brcm,bcm6328-switch" }, + { .compatible = "brcm,bcm6368-switch" }, + { .compatible = "brcm,bcm63xx-switch" }, + { /* sentinel */ }, +}; + +static struct platform_driver b53_mmap_driver = { + .probe = b53_mmap_probe, + .remove = b53_mmap_remove, + .driver = { + .name = "b53-switch", + .of_match_table = b53_mmap_of_table, + }, +}; + +module_platform_driver(b53_mmap_driver); +MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>"); +MODULE_DESCRIPTION("B53 MMAP access driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h new file mode 100644 index 000000000..835a744f2 --- /dev/null +++ b/drivers/net/dsa/b53/b53_priv.h @@ -0,0 +1,388 @@ +/* + * B53 common definitions + * + * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
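b53_priv.h, which follows, declares the b53_io_ops vtable through which the core reaches the switch registers; b53_mdio.c and b53_mmap.c above are the two in-tree backends. A hedged skeleton of what a third backend could look like (every my_bus_* name here is invented for illustration):

static int my_bus_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
{
	/* hypothetical transport: fetch one byte for (page, reg) */
	return -EOPNOTSUPP;
}

static int my_bus_write8(struct b53_device *dev, u8 page, u8 reg, u8 val)
{
	/* hypothetical transport: store one byte at (page, reg) */
	return -EOPNOTSUPP;
}

/* A real backend fills in every access width, as b53_mdio_ops does. */
static struct b53_io_ops my_bus_ops = {
	.read8	= my_bus_read8,
	.write8	= my_bus_write8,
};

static int my_bus_probe(struct device *base, void *bus_priv)
{
	struct b53_device *dev;

	dev = b53_switch_alloc(base, &my_bus_ops, bus_priv);
	if (!dev)
		return -ENOMEM;

	return b53_switch_register(dev);
}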
+ */ + +#ifndef __B53_PRIV_H +#define __B53_PRIV_H + +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/phy.h> +#include <net/dsa.h> + +#include "b53_regs.h" + +struct b53_device; +struct net_device; + +struct b53_io_ops { + int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value); + int (*read16)(struct b53_device *dev, u8 page, u8 reg, u16 *value); + int (*read32)(struct b53_device *dev, u8 page, u8 reg, u32 *value); + int (*read48)(struct b53_device *dev, u8 page, u8 reg, u64 *value); + int (*read64)(struct b53_device *dev, u8 page, u8 reg, u64 *value); + int (*write8)(struct b53_device *dev, u8 page, u8 reg, u8 value); + int (*write16)(struct b53_device *dev, u8 page, u8 reg, u16 value); + int (*write32)(struct b53_device *dev, u8 page, u8 reg, u32 value); + int (*write48)(struct b53_device *dev, u8 page, u8 reg, u64 value); + int (*write64)(struct b53_device *dev, u8 page, u8 reg, u64 value); + int (*phy_read16)(struct b53_device *dev, int addr, int reg, u16 *value); + int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value); +}; + +enum { + BCM5325_DEVICE_ID = 0x25, + BCM5365_DEVICE_ID = 0x65, + BCM5395_DEVICE_ID = 0x95, + BCM5397_DEVICE_ID = 0x97, + BCM5398_DEVICE_ID = 0x98, + BCM53115_DEVICE_ID = 0x53115, + BCM53125_DEVICE_ID = 0x53125, + BCM53128_DEVICE_ID = 0x53128, + BCM63XX_DEVICE_ID = 0x6300, + BCM53010_DEVICE_ID = 0x53010, + BCM53011_DEVICE_ID = 0x53011, + BCM53012_DEVICE_ID = 0x53012, + BCM53018_DEVICE_ID = 0x53018, + BCM53019_DEVICE_ID = 0x53019, + BCM58XX_DEVICE_ID = 0x5800, +}; + +#define B53_N_PORTS 9 +#define B53_N_PORTS_25 6 + +struct b53_port { + u16 vlan_ctl_mask; + struct net_device *bridge_dev; +}; + +struct b53_vlan { + u16 members; + u16 untag; + bool valid; +}; + +struct b53_device { + struct dsa_switch *ds; + struct b53_platform_data *pdata; + const char *name; + + struct mutex reg_mutex; + struct mutex stats_mutex; + const struct b53_io_ops *ops; + + /* chip specific data */ + u32 chip_id; + u8 core_rev; + u8 vta_regs[3]; + u8 duplex_reg; + u8 jumbo_pm_reg; + u8 jumbo_size_reg; + int reset_gpio; + u8 num_arl_entries; + + /* used ports mask */ + u16 enabled_ports; + unsigned int cpu_port; + + /* connect specific data */ + u8 current_page; + struct device *dev; + + /* Master MDIO bus we got probed from */ + struct mii_bus *bus; + + void *priv; + + /* run time configuration */ + bool enable_jumbo; + + unsigned int num_vlans; + struct b53_vlan *vlans; + unsigned int num_ports; + struct b53_port *ports; +}; + +#define b53_for_each_port(dev, i) \ + for (i = 0; i < B53_N_PORTS; i++) \ + if (dev->enabled_ports & BIT(i)) + + +static inline int is5325(struct b53_device *dev) +{ + return dev->chip_id == BCM5325_DEVICE_ID; +} + +static inline int is5365(struct b53_device *dev) +{ +#ifdef CONFIG_BCM47XX + return dev->chip_id == BCM5365_DEVICE_ID; +#else + return 0; +#endif +} + +static inline int is5397_98(struct b53_device *dev) +{ + return dev->chip_id == BCM5397_DEVICE_ID || + dev->chip_id == BCM5398_DEVICE_ID; +} + +static inline int is539x(struct b53_device *dev) +{ + return dev->chip_id == BCM5395_DEVICE_ID || + dev->chip_id == BCM5397_DEVICE_ID || + dev->chip_id == BCM5398_DEVICE_ID; +} + +static inline int is531x5(struct b53_device *dev) +{ + return dev->chip_id == BCM53115_DEVICE_ID || + dev->chip_id == BCM53125_DEVICE_ID || + dev->chip_id == BCM53128_DEVICE_ID; +} + +static inline int is63xx(struct b53_device *dev) +{ +#ifdef CONFIG_BCM63XX + return dev->chip_id == BCM63XX_DEVICE_ID; +#else + return 0; +#endif +} + 
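Note that is5365() and is63xx() above compile to a constant 0 unless the matching platform option is set, so chip-specific branches vanish entirely on other configurations. A small dispatch in the same style as b53_enable_vlan(), for illustration only (example_vc4_reg() is not in the driver; the register names are from b53_regs.h):

static inline u8 example_vc4_reg(struct b53_device *dev)
{
	/* is5365() folds to 0 off CONFIG_BCM47XX and is63xx() off
	 * CONFIG_BCM63XX, so those arms drop out at compile time there.
	 */
	if (is5325(dev) || is5365(dev))
		return B53_VLAN_CTRL4_25;
	if (is63xx(dev))
		return B53_VLAN_CTRL4_63XX;
	return B53_VLAN_CTRL4;
}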
+static inline int is5301x(struct b53_device *dev) +{ + return dev->chip_id == BCM53010_DEVICE_ID || + dev->chip_id == BCM53011_DEVICE_ID || + dev->chip_id == BCM53012_DEVICE_ID || + dev->chip_id == BCM53018_DEVICE_ID || + dev->chip_id == BCM53019_DEVICE_ID; +} + +#define B53_CPU_PORT_25 5 +#define B53_CPU_PORT 8 + +static inline int is_cpu_port(struct b53_device *dev, int port) +{ + return dev->cpu_port == port; +} + +struct b53_device *b53_switch_alloc(struct device *base, struct b53_io_ops *ops, + void *priv); + +int b53_switch_detect(struct b53_device *dev); + +int b53_switch_register(struct b53_device *dev); + +static inline void b53_switch_remove(struct b53_device *dev) +{ + dsa_unregister_switch(dev->ds); +} + +static inline int b53_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read8(dev, page, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read16(dev, page, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read32(dev, page, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read48(dev, page, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read64(dev, page, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_write8(struct b53_device *dev, u8 page, u8 reg, u8 value) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write8(dev, page, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_write16(struct b53_device *dev, u8 page, u8 reg, + u16 value) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write16(dev, page, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_write32(struct b53_device *dev, u8 page, u8 reg, + u32 value) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write32(dev, page, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_write48(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write48(dev, page, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int b53_write64(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write64(dev, page, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +struct b53_arl_entry { + u8 port; + u8 mac[ETH_ALEN]; + u16 vid; + u8 is_valid:1; + u8 is_age:1; + u8 is_static:1; +}; + +static inline void b53_mac_from_u64(u64 src, u8 *dst) +{ + unsigned int i; + + for (i = 0; i < ETH_ALEN; i++) + dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff; +} + +static inline u64 b53_mac_to_u64(const u8 *src) +{ + unsigned int i; + u64 dst = 0; + + for (i = 0; i < ETH_ALEN; i++) + dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i); + + return dst; +} + +static inline void
b53_arl_to_entry(struct b53_arl_entry *ent, + u64 mac_vid, u32 fwd_entry) +{ + memset(ent, 0, sizeof(*ent)); + ent->port = fwd_entry & ARLTBL_DATA_PORT_ID_MASK; + ent->is_valid = !!(fwd_entry & ARLTBL_VALID); + ent->is_age = !!(fwd_entry & ARLTBL_AGE); + ent->is_static = !!(fwd_entry & ARLTBL_STATIC); + b53_mac_from_u64(mac_vid, ent->mac); + ent->vid = mac_vid >> ARLTBL_VID_S; +} + +static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry, + const struct b53_arl_entry *ent) +{ + *mac_vid = b53_mac_to_u64(ent->mac); + *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S; + *fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK; + if (ent->is_valid) + *fwd_entry |= ARLTBL_VALID; + if (ent->is_static) + *fwd_entry |= ARLTBL_STATIC; + if (ent->is_age) + *fwd_entry |= ARLTBL_AGE; +} + +#ifdef CONFIG_BCM47XX + +#include <linux/version.h> +#include <linux/bcm47xx_nvram.h> +#include <bcm47xx_board.h> +static inline int b53_switch_get_reset_gpio(struct b53_device *dev) +{ + enum bcm47xx_board board = bcm47xx_board_get(); + + switch (board) { + case BCM47XX_BOARD_LINKSYS_WRT300NV11: + case BCM47XX_BOARD_LINKSYS_WRT310NV1: + return 8; + default: + return bcm47xx_nvram_gpio_pin("robo_reset"); + } +} +#else +static inline int b53_switch_get_reset_gpio(struct b53_device *dev) +{ + return -ENOENT; +} +#endif +#endif diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h new file mode 100644 index 000000000..a0b453ea3 --- /dev/null +++ b/drivers/net/dsa/b53/b53_regs.h @@ -0,0 +1,434 @@ +/* + * B53 register definitions + * + * Copyright (C) 2004 Broadcom Corporation + * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __B53_REGS_H +#define __B53_REGS_H + +/* Management Port (SMP) Page offsets */ +#define B53_CTRL_PAGE 0x00 /* Control */ +#define B53_STAT_PAGE 0x01 /* Status */ +#define B53_MGMT_PAGE 0x02 /* Management Mode */ +#define B53_MIB_AC_PAGE 0x03 /* MIB Autocast */ +#define B53_ARLCTRL_PAGE 0x04 /* ARL Control */ +#define B53_ARLIO_PAGE 0x05 /* ARL Access */ +#define B53_FRAMEBUF_PAGE 0x06 /* Management frame access */ +#define B53_MEM_ACCESS_PAGE 0x08 /* Memory access */ + +/* PHY Registers */ +#define B53_PORT_MII_PAGE(i) (0x10 + (i)) /* Port i MII Registers */ +#define B53_IM_PORT_PAGE 0x18 /* Inverse MII Port (to EMAC) */ +#define B53_ALL_PORT_PAGE 0x19 /* All ports MII (broadcast) */ + +/* MIB registers */ +#define B53_MIB_PAGE(i) (0x20 + (i)) + +/* Quality of Service (QoS) Registers */ +#define B53_QOS_PAGE 0x30 + +/* Port VLAN Page */ +#define B53_PVLAN_PAGE 0x31 + +/* VLAN Registers */ +#define B53_VLAN_PAGE 0x34 + +/* Jumbo Frame Registers */ +#define B53_JUMBO_PAGE 0x40 + +/* CFP Configuration Registers Page */ +#define B53_CFP_PAGE 0xa1 + +/************************************************************************* + * Control Page registers + *************************************************************************/ + +/* Port Control Register (8 bit) */ +#define B53_PORT_CTRL(i) (0x00 + (i)) +#define PORT_CTRL_RX_DISABLE BIT(0) +#define PORT_CTRL_TX_DISABLE BIT(1) +#define PORT_CTRL_RX_BCST_EN BIT(2) /* Broadcast RX (P8 only) */ +#define PORT_CTRL_RX_MCST_EN BIT(3) /* Multicast RX (P8 only) */ +#define PORT_CTRL_RX_UCST_EN BIT(4) /* Unicast RX (P8 only) */ +#define PORT_CTRL_STP_STATE_S 5 +#define PORT_CTRL_NO_STP (0 << PORT_CTRL_STP_STATE_S) +#define PORT_CTRL_DIS_STATE (1 << PORT_CTRL_STP_STATE_S) +#define PORT_CTRL_BLOCK_STATE (2 << PORT_CTRL_STP_STATE_S) +#define PORT_CTRL_LISTEN_STATE (3 << PORT_CTRL_STP_STATE_S) +#define PORT_CTRL_LEARN_STATE (4 << PORT_CTRL_STP_STATE_S) +#define PORT_CTRL_FWD_STATE (5 << PORT_CTRL_STP_STATE_S) +#define PORT_CTRL_STP_STATE_MASK (0x7 << PORT_CTRL_STP_STATE_S) + +/* SMP Control Register (8 bit) */ +#define B53_SMP_CTRL 0x0a + +/* Switch Mode Control Register (8 bit) */ +#define B53_SWITCH_MODE 0x0b +#define SM_SW_FWD_MODE BIT(0) /* 1 = Managed Mode */ +#define SM_SW_FWD_EN BIT(1) /* Forwarding Enable */ + +/* IMP Port state override register (8 bit) */ +#define B53_PORT_OVERRIDE_CTRL 0x0e +#define PORT_OVERRIDE_LINK BIT(0) +#define PORT_OVERRIDE_FULL_DUPLEX BIT(1) /* 0 = Half Duplex */ +#define PORT_OVERRIDE_SPEED_S 2 +#define PORT_OVERRIDE_SPEED_10M (0 << PORT_OVERRIDE_SPEED_S) +#define PORT_OVERRIDE_SPEED_100M (1 << PORT_OVERRIDE_SPEED_S) +#define PORT_OVERRIDE_SPEED_1000M (2 << PORT_OVERRIDE_SPEED_S) +#define PORT_OVERRIDE_RV_MII_25 BIT(4) /* BCM5325 only */ +#define PORT_OVERRIDE_RX_FLOW BIT(4) +#define PORT_OVERRIDE_TX_FLOW BIT(5) +#define PORT_OVERRIDE_SPEED_2000M BIT(6) /* BCM5301X only, requires setting 1000M */ +#define PORT_OVERRIDE_EN BIT(7) /* Use the register contents */ + +/* Power-down mode control */ +#define B53_PD_MODE_CTRL_25 0x0f + +/* IP Multicast control (8 bit) */ +#define B53_IP_MULTICAST_CTRL 0x21 +#define B53_IPMC_FWD_EN BIT(1) +#define B53_UC_FWD_EN BIT(6) +#define B53_MC_FWD_EN BIT(7) + +/* (16 bit) */ +#define B53_UC_FLOOD_MASK 0x32 +#define B53_MC_FLOOD_MASK 0x34 +#define B53_IPMC_FLOOD_MASK 0x36 + +/* + * Override Ports 0-7 State on devices with xMII interfaces (8 bit) + * + * For port 8 still use B53_PORT_OVERRIDE_CTRL + * Please note that not all ports are available on every hardware, e.g. 
BCM5301X + * doesn't support overriding port 6, and BCM63xx also has some limitations. + */ +#define B53_GMII_PORT_OVERRIDE_CTRL(i) (0x58 + (i)) +#define GMII_PO_LINK BIT(0) +#define GMII_PO_FULL_DUPLEX BIT(1) /* 0 = Half Duplex */ +#define GMII_PO_SPEED_S 2 +#define GMII_PO_SPEED_10M (0 << GMII_PO_SPEED_S) +#define GMII_PO_SPEED_100M (1 << GMII_PO_SPEED_S) +#define GMII_PO_SPEED_1000M (2 << GMII_PO_SPEED_S) +#define GMII_PO_RX_FLOW BIT(4) +#define GMII_PO_TX_FLOW BIT(5) +#define GMII_PO_EN BIT(6) /* Use the register contents */ +#define GMII_PO_SPEED_2000M BIT(7) /* BCM5301X only, requires setting 1000M */ + +#define B53_RGMII_CTRL_IMP 0x60 +#define RGMII_CTRL_ENABLE_GMII BIT(7) +#define RGMII_CTRL_TIMING_SEL BIT(2) +#define RGMII_CTRL_DLL_RXC BIT(1) +#define RGMII_CTRL_DLL_TXC BIT(0) + +#define B53_RGMII_CTRL_P(i) (B53_RGMII_CTRL_IMP + (i)) + +/* Software reset register (8 bit) */ +#define B53_SOFTRESET 0x79 +#define SW_RST BIT(7) +#define EN_SW_RST BIT(4) + +/* Fast Aging Control register (8 bit) */ +#define B53_FAST_AGE_CTRL 0x88 +#define FAST_AGE_STATIC BIT(0) +#define FAST_AGE_DYNAMIC BIT(1) +#define FAST_AGE_PORT BIT(2) +#define FAST_AGE_VLAN BIT(3) +#define FAST_AGE_STP BIT(4) +#define FAST_AGE_MC BIT(5) +#define FAST_AGE_DONE BIT(7) + +/* Fast Aging Port Control register (8 bit) */ +#define B53_FAST_AGE_PORT_CTRL 0x89 + +/* Fast Aging VID Control register (16 bit) */ +#define B53_FAST_AGE_VID_CTRL 0x8a + +/************************************************************************* + * Status Page registers + *************************************************************************/ + +/* Link Status Summary Register (16 bit) */ +#define B53_LINK_STAT 0x00 + +/* Link Status Change Register (16 bit) */ +#define B53_LINK_STAT_CHANGE 0x02 + +/* Port Speed Summary Register (16 bit for FE, 32 bit for GE) */ +#define B53_SPEED_STAT 0x04 +#define SPEED_PORT_FE(reg, port) (((reg) >> (port)) & 1) +#define SPEED_PORT_GE(reg, port) (((reg) >> 2 * (port)) & 3) +#define SPEED_STAT_10M 0 +#define SPEED_STAT_100M 1 +#define SPEED_STAT_1000M 2 + +/* Duplex Status Summary (16 bit) */ +#define B53_DUPLEX_STAT_FE 0x06 +#define B53_DUPLEX_STAT_GE 0x08 +#define B53_DUPLEX_STAT_63XX 0x0c + +/* Revision ID register for BCM5325 */ +#define B53_REV_ID_25 0x50 + +/* Strap Value (48 bit) */ +#define B53_STRAP_VALUE 0x70 +#define SV_GMII_CTRL_115 BIT(27) + +/************************************************************************* + * Management Mode Page Registers + *************************************************************************/ + +/* Global Management Config Register (8 bit) */ +#define B53_GLOBAL_CONFIG 0x00 +#define GC_RESET_MIB 0x01 +#define GC_RX_BPDU_EN 0x02 +#define GC_MIB_AC_HDR_EN 0x10 +#define GC_MIB_AC_EN 0x20 +#define GC_FRM_MGMT_PORT_M 0xC0 +#define GC_FRM_MGMT_PORT_04 0x00 +#define GC_FRM_MGMT_PORT_MII 0x80 + +/* Broadcom Header control register (8 bit) */ +#define B53_BRCM_HDR 0x03 +#define BRCM_HDR_P8_EN BIT(0) /* Enable tagging on port 8 */ +#define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */ + +/* Device ID register (8 or 32 bit) */ +#define B53_DEVICE_ID 0x30 + +/* Revision ID register (8 bit) */ +#define B53_REV_ID 0x40 + +/************************************************************************* + * ARL Access Page Registers + *************************************************************************/ + +/* VLAN Table Access Register (8 bit) */ +#define B53_VT_ACCESS 0x80 +#define B53_VT_ACCESS_9798 0x60 /* for BCM5397/BCM5398 */ +#define B53_VT_ACCESS_63XX 0x60 /* for
BCM6328/62/68 */ +#define VTA_CMD_WRITE 0 +#define VTA_CMD_READ 1 +#define VTA_CMD_CLEAR 2 +#define VTA_START_CMD BIT(7) + +/* VLAN Table Index Register (16 bit) */ +#define B53_VT_INDEX 0x81 +#define B53_VT_INDEX_9798 0x61 +#define B53_VT_INDEX_63XX 0x62 + +/* VLAN Table Entry Register (32 bit) */ +#define B53_VT_ENTRY 0x83 +#define B53_VT_ENTRY_9798 0x63 +#define B53_VT_ENTRY_63XX 0x64 +#define VTE_MEMBERS 0x1ff +#define VTE_UNTAG_S 9 +#define VTE_UNTAG (0x1ff << 9) + +/************************************************************************* + * ARL I/O Registers + *************************************************************************/ + +/* ARL Table Read/Write Register (8 bit) */ +#define B53_ARLTBL_RW_CTRL 0x00 +#define ARLTBL_RW BIT(0) +#define ARLTBL_START_DONE BIT(7) + +/* MAC Address Index Register (48 bit) */ +#define B53_MAC_ADDR_IDX 0x02 + +/* VLAN ID Index Register (16 bit) */ +#define B53_VLAN_ID_IDX 0x08 + +/* ARL Table MAC/VID Entry N Registers (64 bit) + * + * BCM5325 and BCM5365 share most definitions below + */ +#define B53_ARLTBL_MAC_VID_ENTRY(n) (0x10 * (n)) +#define ARLTBL_MAC_MASK 0xffffffffffffULL +#define ARLTBL_VID_S 48 +#define ARLTBL_VID_MASK_25 0xff +#define ARLTBL_VID_MASK 0xfff +#define ARLTBL_DATA_PORT_ID_S_25 48 +#define ARLTBL_DATA_PORT_ID_MASK_25 0xf +#define ARLTBL_AGE_25 BIT(61) +#define ARLTBL_STATIC_25 BIT(62) +#define ARLTBL_VALID_25 BIT(63) + +/* ARL Table Data Entry N Registers (32 bit) */ +#define B53_ARLTBL_DATA_ENTRY(n) ((0x10 * (n)) + 0x08) +#define ARLTBL_DATA_PORT_ID_MASK 0x1ff +#define ARLTBL_TC(tc) ((3 & tc) << 11) +#define ARLTBL_AGE BIT(14) +#define ARLTBL_STATIC BIT(15) +#define ARLTBL_VALID BIT(16) + +/* ARL Search Control Register (8 bit) */ +#define B53_ARL_SRCH_CTL 0x50 +#define B53_ARL_SRCH_CTL_25 0x20 +#define ARL_SRCH_VLID BIT(0) +#define ARL_SRCH_STDN BIT(7) + +/* ARL Search Address Register (16 bit) */ +#define B53_ARL_SRCH_ADDR 0x51 +#define B53_ARL_SRCH_ADDR_25 0x22 +#define B53_ARL_SRCH_ADDR_65 0x24 +#define ARL_ADDR_MASK GENMASK(14, 0) + +/* ARL Search MAC/VID Result (64 bit) */ +#define B53_ARL_SRCH_RSTL_0_MACVID 0x60 + +/* Single register search result on 5325 */ +#define B53_ARL_SRCH_RSTL_0_MACVID_25 0x24 +/* Single register search result on 5365 */ +#define B53_ARL_SRCH_RSTL_0_MACVID_65 0x30 + +/* ARL Search Data Result (32 bit) */ +#define B53_ARL_SRCH_RSTL_0 0x68 + +#define B53_ARL_SRCH_RSTL_MACVID(x) (B53_ARL_SRCH_RSTL_0_MACVID + ((x) * 0x10)) +#define B53_ARL_SRCH_RSTL(x) (B53_ARL_SRCH_RSTL_0 + ((x) * 0x10)) + +/************************************************************************* + * Port VLAN Registers + *************************************************************************/ + +/* Port VLAN mask (16 bit) IMP port is always 8, also on 5325 & co */ +#define B53_PVLAN_PORT_MASK(i) ((i) * 2) + +/************************************************************************* + * 802.1Q Page Registers + *************************************************************************/ + +/* Global QoS Control (8 bit) */ +#define B53_QOS_GLOBAL_CTL 0x00 + +/* Enable 802.1Q for individual Ports (16 bit) */ +#define B53_802_1P_EN 0x04 + +/************************************************************************* + * VLAN Page Registers + *************************************************************************/ + +/* VLAN Control 0 (8 bit) */ +#define B53_VLAN_CTRL0 0x00 +#define VC0_8021PF_CTRL_MASK 0x3 +#define VC0_8021PF_CTRL_NONE 0x0 +#define VC0_8021PF_CTRL_CHANGE_PRI 0x1 +#define VC0_8021PF_CTRL_CHANGE_VID 0x2 +#define 
VC0_8021PF_CTRL_CHANGE_BOTH 0x3 +#define VC0_8021QF_CTRL_MASK 0xc +#define VC0_8021QF_CTRL_CHANGE_PRI 0x1 +#define VC0_8021QF_CTRL_CHANGE_VID 0x2 +#define VC0_8021QF_CTRL_CHANGE_BOTH 0x3 +#define VC0_RESERVED_1 BIT(1) +#define VC0_DROP_VID_MISS BIT(4) +#define VC0_VID_HASH_VID BIT(5) +#define VC0_VID_CHK_EN BIT(6) /* Use VID,DA or VID,SA */ +#define VC0_VLAN_EN BIT(7) /* 802.1Q VLAN Enabled */ + +/* VLAN Control 1 (8 bit) */ +#define B53_VLAN_CTRL1 0x01 +#define VC1_RX_MCST_TAG_EN BIT(1) +#define VC1_RX_MCST_FWD_EN BIT(2) +#define VC1_RX_MCST_UNTAG_EN BIT(3) + +/* VLAN Control 2 (8 bit) */ +#define B53_VLAN_CTRL2 0x02 + +/* VLAN Control 3 (8 bit when BCM5325, 16 bit else) */ +#define B53_VLAN_CTRL3 0x03 +#define B53_VLAN_CTRL3_63XX 0x04 +#define VC3_MAXSIZE_1532 BIT(6) /* 5325 only */ +#define VC3_HIGH_8BIT_EN BIT(7) /* 5325 only */ + +/* VLAN Control 4 (8 bit) */ +#define B53_VLAN_CTRL4 0x05 +#define B53_VLAN_CTRL4_25 0x04 +#define B53_VLAN_CTRL4_63XX 0x06 +#define VC4_ING_VID_CHECK_S 6 +#define VC4_ING_VID_CHECK_MASK (0x3 << VC4_ING_VID_CHECK_S) +#define VC4_ING_VID_VIO_FWD 0 /* forward, but do not learn */ +#define VC4_ING_VID_VIO_DROP 1 /* drop VID violations */ +#define VC4_NO_ING_VID_CHK 2 /* do not check */ +#define VC4_ING_VID_VIO_TO_IMP 3 /* redirect to MII port */ + +/* VLAN Control 5 (8 bit) */ +#define B53_VLAN_CTRL5 0x06 +#define B53_VLAN_CTRL5_25 0x05 +#define B53_VLAN_CTRL5_63XX 0x07 +#define VC5_VID_FFF_EN BIT(2) +#define VC5_DROP_VTABLE_MISS BIT(3) + +/* VLAN Control 6 (8 bit) */ +#define B53_VLAN_CTRL6 0x07 +#define B53_VLAN_CTRL6_63XX 0x08 + +/* VLAN Table Access Register (16 bit) */ +#define B53_VLAN_TABLE_ACCESS_25 0x06 /* BCM5325E/5350 */ +#define B53_VLAN_TABLE_ACCESS_65 0x08 /* BCM5365 */ +#define VTA_VID_LOW_MASK_25 0xf +#define VTA_VID_LOW_MASK_65 0xff +#define VTA_VID_HIGH_S_25 4 +#define VTA_VID_HIGH_S_65 8 +#define VTA_VID_HIGH_MASK_25 (0xff << VTA_VID_HIGH_S_25) +#define VTA_VID_HIGH_MASK_65 (0xf << VTA_VID_HIGH_S_65) +#define VTA_RW_STATE BIT(12) +#define VTA_RW_STATE_RD 0 +#define VTA_RW_STATE_WR BIT(12) +#define VTA_RW_OP_EN BIT(13) + +/* VLAN Read/Write Registers (16/32 bit) */ +#define B53_VLAN_WRITE_25 0x08 +#define B53_VLAN_WRITE_65 0x0a +#define B53_VLAN_READ 0x0c +#define VA_MEMBER_MASK 0x3f +#define VA_UNTAG_S_25 6 +#define VA_UNTAG_MASK_25 0x3f +#define VA_UNTAG_S_65 7 +#define VA_UNTAG_MASK_65 0x1f +#define VA_VID_HIGH_S 12 +#define VA_VID_HIGH_MASK (0xffff << VA_VID_HIGH_S) +#define VA_VALID_25 BIT(20) +#define VA_VALID_25_R4 BIT(24) +#define VA_VALID_65 BIT(14) + +/* VLAN Port Default Tag (16 bit) */ +#define B53_VLAN_PORT_DEF_TAG(i) (0x10 + 2 * (i)) + +/************************************************************************* + * Jumbo Frame Page Registers + *************************************************************************/ + +/* Jumbo Enable Port Mask (bit i == port i enabled) (32 bit) */ +#define B53_JUMBO_PORT_MASK 0x01 +#define B53_JUMBO_PORT_MASK_63XX 0x04 +#define JPM_10_100_JUMBO_EN BIT(24) /* GigE always enabled */ + +/* Good Frame Max Size without 802.1Q TAG (16 bit) */ +#define B53_JUMBO_MAX_SIZE 0x05 +#define B53_JUMBO_MAX_SIZE_63XX 0x08 +#define JMS_MIN_SIZE 1518 +#define JMS_MAX_SIZE 9724 + +/************************************************************************* + * CFP Configuration Page Registers + *************************************************************************/ + +/* CFP Control Register with ports map (8 bit) */ +#define B53_CFP_CTRL 0x00 + +#endif /* !__B53_REGS_H */ diff --git
a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c new file mode 100644 index 000000000..2bda0b5f1 --- /dev/null +++ b/drivers/net/dsa/b53/b53_spi.c @@ -0,0 +1,331 @@ +/* + * B53 register access through SPI + * + * Copyright (C) 2011-2013 Jonas Gorski <jogo@openwrt.org> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <asm/unaligned.h> + +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/spi/spi.h> +#include <linux/platform_data/b53.h> + +#include "b53_priv.h" + +#define B53_SPI_DATA 0xf0 + +#define B53_SPI_STATUS 0xfe +#define B53_SPI_CMD_SPIF BIT(7) +#define B53_SPI_CMD_RACK BIT(5) + +#define B53_SPI_CMD_READ 0x00 +#define B53_SPI_CMD_WRITE 0x01 +#define B53_SPI_CMD_NORMAL 0x60 +#define B53_SPI_CMD_FAST 0x10 + +#define B53_SPI_PAGE_SELECT 0xff + +static inline int b53_spi_read_reg(struct spi_device *spi, u8 reg, u8 *val, + unsigned int len) +{ + u8 txbuf[2]; + + txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_READ; + txbuf[1] = reg; + + return spi_write_then_read(spi, txbuf, 2, val, len); +} + +static inline int b53_spi_clear_status(struct spi_device *spi) +{ + unsigned int i; + u8 rxbuf; + int ret; + + for (i = 0; i < 10; i++) { + ret = b53_spi_read_reg(spi, B53_SPI_STATUS, &rxbuf, 1); + if (ret) + return ret; + + if (!(rxbuf & B53_SPI_CMD_SPIF)) + break; + + mdelay(1); + } + + if (i == 10) + return -EIO; + + return 0; +} + +static inline int b53_spi_set_page(struct spi_device *spi, u8 page) +{ + u8 txbuf[3]; + + txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE; + txbuf[1] = B53_SPI_PAGE_SELECT; + txbuf[2] = page; + + return spi_write(spi, txbuf, sizeof(txbuf)); +} + +static inline int b53_prepare_reg_access(struct spi_device *spi, u8 page) +{ + int ret = b53_spi_clear_status(spi); + + if (ret) + return ret; + + return b53_spi_set_page(spi, page); +} + +static int b53_spi_prepare_reg_read(struct spi_device *spi, u8 reg) +{ + u8 rxbuf; + int retry_count; + int ret; + + ret = b53_spi_read_reg(spi, reg, &rxbuf, 1); + if (ret) + return ret; + + for (retry_count = 0; retry_count < 10; retry_count++) { + ret = b53_spi_read_reg(spi, B53_SPI_STATUS, &rxbuf, 1); + if (ret) + return ret; + + if (rxbuf & B53_SPI_CMD_RACK) + break; + + mdelay(1); + } + + if (retry_count == 10) + return -EIO; + + return 0; +} + +static int b53_spi_read(struct b53_device *dev, u8 page, u8 reg, u8 *data, + unsigned int len) +{ + struct spi_device *spi = dev->priv; + int ret; + + ret = b53_prepare_reg_access(spi, page); + if (ret) + return ret; + + ret = b53_spi_prepare_reg_read(spi, reg); + if (ret) + return ret; + + return b53_spi_read_reg(spi, B53_SPI_DATA, data, len); +} + +static int b53_spi_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val) +{ + return b53_spi_read(dev, page, reg, val, 1); +} + +static int b53_spi_read16(struct b53_device *dev, u8 page, 
u8 reg, u16 *val) +{ + int ret = b53_spi_read(dev, page, reg, (u8 *)val, 2); + + if (!ret) + *val = le16_to_cpu(*val); + + return ret; +} + +static int b53_spi_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val) +{ + int ret = b53_spi_read(dev, page, reg, (u8 *)val, 4); + + if (!ret) + *val = le32_to_cpu(*val); + + return ret; +} + +static int b53_spi_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + int ret; + + *val = 0; + ret = b53_spi_read(dev, page, reg, (u8 *)val, 6); + if (!ret) + *val = le64_to_cpu(*val); + + return ret; +} + +static int b53_spi_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + int ret = b53_spi_read(dev, page, reg, (u8 *)val, 8); + + if (!ret) + *val = le64_to_cpu(*val); + + return ret; +} + +static int b53_spi_write8(struct b53_device *dev, u8 page, u8 reg, u8 value) +{ + struct spi_device *spi = dev->priv; + int ret; + u8 txbuf[3]; + + ret = b53_prepare_reg_access(spi, page); + if (ret) + return ret; + + txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE; + txbuf[1] = reg; + txbuf[2] = value; + + return spi_write(spi, txbuf, sizeof(txbuf)); +} + +static int b53_spi_write16(struct b53_device *dev, u8 page, u8 reg, u16 value) +{ + struct spi_device *spi = dev->priv; + int ret; + u8 txbuf[4]; + + ret = b53_prepare_reg_access(spi, page); + if (ret) + return ret; + + txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE; + txbuf[1] = reg; + put_unaligned_le16(value, &txbuf[2]); + + return spi_write(spi, txbuf, sizeof(txbuf)); +} + +static int b53_spi_write32(struct b53_device *dev, u8 page, u8 reg, u32 value) +{ + struct spi_device *spi = dev->priv; + int ret; + u8 txbuf[6]; + + ret = b53_prepare_reg_access(spi, page); + if (ret) + return ret; + + txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE; + txbuf[1] = reg; + put_unaligned_le32(value, &txbuf[2]); + + return spi_write(spi, txbuf, sizeof(txbuf)); +} + +static int b53_spi_write48(struct b53_device *dev, u8 page, u8 reg, u64 value) +{ + struct spi_device *spi = dev->priv; + int ret; + u8 txbuf[10]; + + ret = b53_prepare_reg_access(spi, page); + if (ret) + return ret; + + txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE; + txbuf[1] = reg; + put_unaligned_le64(value, &txbuf[2]); + + return spi_write(spi, txbuf, sizeof(txbuf) - 2); +} + +static int b53_spi_write64(struct b53_device *dev, u8 page, u8 reg, u64 value) +{ + struct spi_device *spi = dev->priv; + int ret; + u8 txbuf[10]; + + ret = b53_prepare_reg_access(spi, page); + if (ret) + return ret; + + txbuf[0] = B53_SPI_CMD_NORMAL | B53_SPI_CMD_WRITE; + txbuf[1] = reg; + put_unaligned_le64(value, &txbuf[2]); + + return spi_write(spi, txbuf, sizeof(txbuf)); +} + +static struct b53_io_ops b53_spi_ops = { + .read8 = b53_spi_read8, + .read16 = b53_spi_read16, + .read32 = b53_spi_read32, + .read48 = b53_spi_read48, + .read64 = b53_spi_read64, + .write8 = b53_spi_write8, + .write16 = b53_spi_write16, + .write32 = b53_spi_write32, + .write48 = b53_spi_write48, + .write64 = b53_spi_write64, +}; + +static int b53_spi_probe(struct spi_device *spi) +{ + struct b53_device *dev; + int ret; + + dev = b53_switch_alloc(&spi->dev, &b53_spi_ops, spi); + if (!dev) + return -ENOMEM; + + if (spi->dev.platform_data) + dev->pdata = spi->dev.platform_data; + + ret = b53_switch_register(dev); + if (ret) + return ret; + + spi_set_drvdata(spi, dev); + + return 0; +} + +static int b53_spi_remove(struct spi_device *spi) +{ + struct b53_device *dev = spi_get_drvdata(spi); + + if (dev) + b53_switch_remove(dev); + + return 0; +} + +static struct spi_driver 
b53_spi_driver = { + .driver = { + .name = "b53-switch", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + .probe = b53_spi_probe, + .remove = b53_spi_remove, +}; + +module_spi_driver(b53_spi_driver); + +MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>"); +MODULE_DESCRIPTION("B53 SPI access driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c new file mode 100644 index 000000000..3e2d4a5fc --- /dev/null +++ b/drivers/net/dsa/b53/b53_srab.c @@ -0,0 +1,442 @@ +/* + * B53 register access through Switch Register Access Bridge Registers + * + * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/platform_data/b53.h> +#include <linux/of.h> + +#include "b53_priv.h" + +/* command and status register of the SRAB */ +#define B53_SRAB_CMDSTAT 0x2c +#define B53_SRAB_CMDSTAT_RST BIT(2) +#define B53_SRAB_CMDSTAT_WRITE BIT(1) +#define B53_SRAB_CMDSTAT_GORDYN BIT(0) +#define B53_SRAB_CMDSTAT_PAGE 24 +#define B53_SRAB_CMDSTAT_REG 16 + +/* high order word of write data to switch register */ +#define B53_SRAB_WD_H 0x30 + +/* low order word of write data to switch register */ +#define B53_SRAB_WD_L 0x34 + +/* high order word of read data from switch register */ +#define B53_SRAB_RD_H 0x38 + +/* low order word of read data from switch register */ +#define B53_SRAB_RD_L 0x3c + +/* additional control and status register of the SRAB */ +#define B53_SRAB_CTRLS 0x40 +#define B53_SRAB_CTRLS_RCAREQ BIT(3) +#define B53_SRAB_CTRLS_RCAGNT BIT(4) +#define B53_SRAB_CTRLS_SW_INIT_DONE BIT(6) + +/* this register captures interrupt pulses from the switch */ +#define B53_SRAB_INTR 0x44 +#define B53_SRAB_INTR_P(x) BIT(x) +#define B53_SRAB_SWITCH_PHY BIT(8) +#define B53_SRAB_1588_SYNC BIT(9) +#define B53_SRAB_IMP1_SLEEP_TIMER BIT(10) +#define B53_SRAB_P7_SLEEP_TIMER BIT(11) +#define B53_SRAB_IMP0_SLEEP_TIMER BIT(12) + +struct b53_srab_priv { + void __iomem *regs; +}; + +static int b53_srab_request_grant(struct b53_device *dev) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + u32 ctrls; + int i; + + ctrls = readl(regs + B53_SRAB_CTRLS); + ctrls |= B53_SRAB_CTRLS_RCAREQ; + writel(ctrls, regs + B53_SRAB_CTRLS); + + for (i = 0; i < 20; i++) { + ctrls = readl(regs + B53_SRAB_CTRLS); + if (ctrls & B53_SRAB_CTRLS_RCAGNT) + break; + usleep_range(10, 100); + } + if (WARN_ON(i == 20)) + return -EIO; + + return 0; +} + +static void b53_srab_release_grant(struct b53_device *dev) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + u32 ctrls; + + ctrls = readl(regs + B53_SRAB_CTRLS); + ctrls &= ~B53_SRAB_CTRLS_RCAREQ; + writel(ctrls, regs + B53_SRAB_CTRLS); +} +
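[Editor's note -- not part of the patch] Every SRAB access follows the same sequence: take the bus with the RCAREQ/RCAGNT handshake above, program B53_SRAB_CMDSTAT with page, register and direction plus the GORDYN "go" bit, poll until the hardware clears GORDYN, then collect the data from B53_SRAB_RD_L/RD_H (for writes, WD_L/WD_H are pre-loaded instead), and finally release the grant. As a worked example of the command word that b53_srab_op() below builds, take a hypothetical read of B53_DEVICE_ID (0x30) on the management page (0x02):

    /* page -> bits 31:24, reg -> bits 23:16, GORDYN -> bit 0 */
    u32 cmdstat = (0x02 << B53_SRAB_CMDSTAT_PAGE) |  /* 0x02000000 */
                  (0x30 << B53_SRAB_CMDSTAT_REG)  |  /* 0x00300000 */
                  B53_SRAB_CMDSTAT_GORDYN;           /* 0x00000001 */
    /* cmdstat == 0x02300001; a write would additionally OR in
     * B53_SRAB_CMDSTAT_WRITE (bit 1) after loading WD_L/WD_H.
     */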
+static int b53_srab_op(struct b53_device *dev, u8 page, u8 reg, u32 op) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int i; + u32 cmdstat; + + /* set register address */ + cmdstat = (page << B53_SRAB_CMDSTAT_PAGE) | + (reg << B53_SRAB_CMDSTAT_REG) | + B53_SRAB_CMDSTAT_GORDYN | + op; + writel(cmdstat, regs + B53_SRAB_CMDSTAT); + + /* check if operation completed */ + for (i = 0; i < 5; ++i) { + cmdstat = readl(regs + B53_SRAB_CMDSTAT); + if (!(cmdstat & B53_SRAB_CMDSTAT_GORDYN)) + break; + usleep_range(10, 100); + } + + if (WARN_ON(i == 5)) + return -EIO; + + return 0; +} + +static int b53_srab_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + ret = b53_srab_op(dev, page, reg, 0); + if (ret) + goto err; + + *val = readl(regs + B53_SRAB_RD_L) & 0xff; + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int b53_srab_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + ret = b53_srab_op(dev, page, reg, 0); + if (ret) + goto err; + + *val = readl(regs + B53_SRAB_RD_L) & 0xffff; + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int b53_srab_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + ret = b53_srab_op(dev, page, reg, 0); + if (ret) + goto err; + + *val = readl(regs + B53_SRAB_RD_L); + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int b53_srab_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + ret = b53_srab_op(dev, page, reg, 0); + if (ret) + goto err; + + *val = readl(regs + B53_SRAB_RD_L); + *val += ((u64)readl(regs + B53_SRAB_RD_H) & 0xffff) << 32; + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int b53_srab_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + ret = b53_srab_op(dev, page, reg, 0); + if (ret) + goto err; + + *val = readl(regs + B53_SRAB_RD_L); + *val += (u64)readl(regs + B53_SRAB_RD_H) << 32; + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int b53_srab_write8(struct b53_device *dev, u8 page, u8 reg, u8 value) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + writel(value, regs + B53_SRAB_WD_L); + + ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE); + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int b53_srab_write16(struct b53_device *dev, u8 page, u8 reg, + u16 value) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + writel(value, regs + B53_SRAB_WD_L); + + ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE); + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int 
b53_srab_write32(struct b53_device *dev, u8 page, u8 reg, + u32 value) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + writel(value, regs + B53_SRAB_WD_L); + + ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE); + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int b53_srab_write48(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + writel((u32)value, regs + B53_SRAB_WD_L); + writel((u16)(value >> 32), regs + B53_SRAB_WD_H); + + ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE); + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static int b53_srab_write64(struct b53_device *dev, u8 page, u8 reg, + u64 value) +{ + struct b53_srab_priv *priv = dev->priv; + u8 __iomem *regs = priv->regs; + int ret = 0; + + ret = b53_srab_request_grant(dev); + if (ret) + goto err; + + writel((u32)value, regs + B53_SRAB_WD_L); + writel((u32)(value >> 32), regs + B53_SRAB_WD_H); + + ret = b53_srab_op(dev, page, reg, B53_SRAB_CMDSTAT_WRITE); + +err: + b53_srab_release_grant(dev); + + return ret; +} + +static struct b53_io_ops b53_srab_ops = { + .read8 = b53_srab_read8, + .read16 = b53_srab_read16, + .read32 = b53_srab_read32, + .read48 = b53_srab_read48, + .read64 = b53_srab_read64, + .write8 = b53_srab_write8, + .write16 = b53_srab_write16, + .write32 = b53_srab_write32, + .write48 = b53_srab_write48, + .write64 = b53_srab_write64, +}; + +static const struct of_device_id b53_srab_of_match[] = { + { .compatible = "brcm,bcm53010-srab" }, + { .compatible = "brcm,bcm53011-srab" }, + { .compatible = "brcm,bcm53012-srab" }, + { .compatible = "brcm,bcm53018-srab" }, + { .compatible = "brcm,bcm53019-srab" }, + { .compatible = "brcm,bcm5301x-srab" }, + { .compatible = "brcm,bcm58522-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { .compatible = "brcm,bcm58525-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { .compatible = "brcm,bcm58535-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { .compatible = "brcm,bcm58622-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { .compatible = "brcm,bcm58623-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { .compatible = "brcm,bcm58625-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { .compatible = "brcm,bcm88312-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { .compatible = "brcm,nsp-srab", .data = (void *)BCM58XX_DEVICE_ID }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, b53_srab_of_match); + +static int b53_srab_probe(struct platform_device *pdev) +{ + struct b53_platform_data *pdata = pdev->dev.platform_data; + struct device_node *dn = pdev->dev.of_node; + const struct of_device_id *of_id = NULL; + struct b53_srab_priv *priv; + struct b53_device *dev; + struct resource *r; + + if (dn) + of_id = of_match_node(b53_srab_of_match, dn); + + if (of_id) { + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + pdata->chip_id = (u32)(unsigned long)of_id->data; + } + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->regs = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(priv->regs)) + return -ENOMEM; + + dev = b53_switch_alloc(&pdev->dev, &b53_srab_ops, priv); + if (!dev) + return -ENOMEM; + + if (pdata) + dev->pdata = pdata; + + 
platform_set_drvdata(pdev, dev); + + return b53_switch_register(dev); +} + +static int b53_srab_remove(struct platform_device *pdev) +{ + struct b53_device *dev = platform_get_drvdata(pdev); + + if (dev) + b53_switch_remove(dev); + + return 0; +} + +static struct platform_driver b53_srab_driver = { + .probe = b53_srab_probe, + .remove = b53_srab_remove, + .driver = { + .name = "b53-srab-switch", + .of_match_table = b53_srab_of_match, + }, +}; + +module_platform_driver(b53_srab_driver); +MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>"); +MODULE_DESCRIPTION("B53 Switch Register Access Bridge Registers (SRAB) access driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 10ddd5a5d..b2b838724 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -22,6 +22,7 @@ #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/of_net.h> +#include <linux/of_mdio.h> #include <net/dsa.h> #include <linux/ethtool.h> #include <linux/if_bridge.h> @@ -460,19 +461,13 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port, return 0; } -/* Fast-ageing of ARL entries for a given port, equivalent to an ARL - * flush for that port. - */ -static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) +static int bcm_sf2_fast_age_op(struct bcm_sf2_priv *priv) { - struct bcm_sf2_priv *priv = ds_to_priv(ds); unsigned int timeout = 1000; u32 reg; - core_writel(priv, port, CORE_FAST_AGE_PORT); - reg = core_readl(priv, CORE_FAST_AGE_CTRL); - reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE; + reg |= EN_AGE_PORT | EN_AGE_VLAN | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE; core_writel(priv, reg, CORE_FAST_AGE_CTRL); do { @@ -491,13 +486,98 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) return 0; } +/* Fast-ageing of ARL entries for a given port, equivalent to an ARL + * flush for that port. 
+ */ +static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + + core_writel(priv, port, CORE_FAST_AGE_PORT); + + return bcm_sf2_fast_age_op(priv); +} + +static int bcm_sf2_sw_fast_age_vlan(struct bcm_sf2_priv *priv, u16 vid) +{ + core_writel(priv, vid, CORE_FAST_AGE_VID); + + return bcm_sf2_fast_age_op(priv); +} + +static int bcm_sf2_vlan_op_wait(struct bcm_sf2_priv *priv) +{ + unsigned int timeout = 10; + u32 reg; + + do { + reg = core_readl(priv, CORE_ARLA_VTBL_RWCTRL); + if (!(reg & ARLA_VTBL_STDN)) + return 0; + + usleep_range(1000, 2000); + } while (timeout--); + + return -ETIMEDOUT; +} + +static int bcm_sf2_vlan_op(struct bcm_sf2_priv *priv, u8 op) +{ + core_writel(priv, ARLA_VTBL_STDN | op, CORE_ARLA_VTBL_RWCTRL); + + return bcm_sf2_vlan_op_wait(priv); +} + +static void bcm_sf2_set_vlan_entry(struct bcm_sf2_priv *priv, u16 vid, + struct bcm_sf2_vlan *vlan) +{ + int ret; + + core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR); + core_writel(priv, vlan->untag << UNTAG_MAP_SHIFT | vlan->members, + CORE_ARLA_VTBL_ENTRY); + + ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_WRITE); + if (ret) + pr_err("failed to write VLAN entry\n"); +} + +static int bcm_sf2_get_vlan_entry(struct bcm_sf2_priv *priv, u16 vid, + struct bcm_sf2_vlan *vlan) +{ + u32 entry; + int ret; + + core_writel(priv, vid & VTBL_ADDR_INDEX_MASK, CORE_ARLA_VTBL_ADDR); + + ret = bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_READ); + if (ret) + return ret; + + entry = core_readl(priv, CORE_ARLA_VTBL_ENTRY); + vlan->members = entry & FWD_MAP_MASK; + vlan->untag = (entry >> UNTAG_MAP_SHIFT) & UNTAG_MAP_MASK; + + return 0; +} + static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port, struct net_device *bridge) { struct bcm_sf2_priv *priv = ds_to_priv(ds); + s8 cpu_port = ds->dst->cpu_port; unsigned int i; u32 reg, p_ctl; + /* Make this port leave the all VLANs join since we will have proper + * VLAN entries from now on + */ + reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN); + reg &= ~BIT(port); + if ((reg & BIT(cpu_port)) == BIT(cpu_port)) + reg &= ~BIT(cpu_port); + core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN); + priv->port_sts[port].bridge_dev = bridge; p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); @@ -529,6 +609,7 @@ static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port) { struct bcm_sf2_priv *priv = ds_to_priv(ds); struct net_device *bridge = priv->port_sts[port].bridge_dev; + s8 cpu_port = ds->dst->cpu_port; unsigned int i; u32 reg, p_ctl; @@ -552,6 +633,13 @@ static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port) core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port)); priv->port_sts[port].vlan_ctl_mask = p_ctl; priv->port_sts[port].bridge_dev = NULL; + + /* Make this port join all VLANs without VLAN entries */ + reg = core_readl(priv, CORE_JOIN_ALL_VLAN_EN); + reg |= BIT(port); + if (!(reg & BIT(cpu_port))) + reg |= BIT(cpu_port); + core_writel(priv, reg, CORE_JOIN_ALL_VLAN_EN); } static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, @@ -804,7 +892,7 @@ static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port, int (*cb)(struct switchdev_obj *obj)) { struct bcm_sf2_priv *priv = ds_to_priv(ds); - struct net_device *dev = ds->ports[port]; + struct net_device *dev = ds->ports[port].netdev; struct bcm_sf2_arl_entry results[2]; unsigned int count = 0; int ret; @@ -836,6 +924,66 @@ static int bcm_sf2_sw_fdb_dump(struct dsa_switch *ds, int port, return 0; } +static int bcm_sf2_sw_indir_rw(struct bcm_sf2_priv 
*priv, int op, int addr, + int regnum, u16 val) +{ + int ret = 0; + u32 reg; + + reg = reg_readl(priv, REG_SWITCH_CNTRL); + reg |= MDIO_MASTER_SEL; + reg_writel(priv, reg, REG_SWITCH_CNTRL); + + /* Page << 8 | offset */ + reg = 0x70; + reg <<= 2; + core_writel(priv, addr, reg); + + /* Page << 8 | offset */ + reg = 0x80 << 8 | regnum << 1; + reg <<= 2; + + if (op) + ret = core_readl(priv, reg); + else + core_writel(priv, val, reg); + + reg = reg_readl(priv, REG_SWITCH_CNTRL); + reg &= ~MDIO_MASTER_SEL; + reg_writel(priv, reg, REG_SWITCH_CNTRL); + + return ret & 0xffff; +} + +static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum) +{ + struct bcm_sf2_priv *priv = bus->priv; + + /* Intercept reads from Broadcom pseudo-PHY address, else, send + * them to our master MDIO bus controller + */ + if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr)) + return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0); + else + return mdiobus_read(priv->master_mii_bus, addr, regnum); +} + +static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum, + u16 val) +{ + struct bcm_sf2_priv *priv = bus->priv; + + /* Intercept writes to the Broadcom pseudo-PHY address, else, + * send them to our master MDIO bus controller + */ + if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr)) + bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val); + else + mdiobus_write(priv->master_mii_bus, addr, regnum, val); + + return 0; +} + static irqreturn_t bcm_sf2_switch_0_isr(int irq, void *dev_id) { struct bcm_sf2_priv *priv = dev_id; @@ -932,133 +1080,70 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv, } } -static int bcm_sf2_sw_setup(struct dsa_switch *ds) +static int bcm_sf2_mdio_register(struct dsa_switch *ds) { - const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; struct bcm_sf2_priv *priv = ds_to_priv(ds); struct device_node *dn; - void __iomem **base; - unsigned int port; - unsigned int i; - u32 reg, rev; - int ret; - - spin_lock_init(&priv->indir_lock); - mutex_init(&priv->stats_mutex); - - /* All the interesting properties are at the parent device_node - * level - */ - dn = ds->cd->of_node->parent; - bcm_sf2_identify_ports(priv, ds->cd->of_node); - - priv->irq0 = irq_of_parse_and_map(dn, 0); - priv->irq1 = irq_of_parse_and_map(dn, 1); - - base = &priv->core; - for (i = 0; i < BCM_SF2_REGS_NUM; i++) { - *base = of_iomap(dn, i); - if (*base == NULL) { - pr_err("unable to find register: %s\n", reg_names[i]); - ret = -ENOMEM; - goto out_unmap; - } - base++; - } - - ret = bcm_sf2_sw_rst(priv); - if (ret) { - pr_err("unable to software reset switch: %d\n", ret); - goto out_unmap; - } - - /* Disable all interrupts and request them */ - bcm_sf2_intr_disable(priv); - - ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0, - "switch_0", priv); - if (ret < 0) { - pr_err("failed to request switch_0 IRQ\n"); - goto out_unmap; - } - - ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0, - "switch_1", priv); - if (ret < 0) { - pr_err("failed to request switch_1 IRQ\n"); - goto out_free_irq0; - } - - /* Reset the MIB counters */ - reg = core_readl(priv, CORE_GMNCFGCFG); - reg |= RST_MIB_CNT; - core_writel(priv, reg, CORE_GMNCFGCFG); - reg &= ~RST_MIB_CNT; - core_writel(priv, reg, CORE_GMNCFGCFG); - - /* Get the maximum number of ports for this switch */ - priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1; - if (priv->hw_params.num_ports > DSA_MAX_PORTS) - priv->hw_params.num_ports = DSA_MAX_PORTS; - - /* Assume a single GPHY setup if we 
can't read that property */ - if (of_property_read_u32(dn, "brcm,num-gphy", - &priv->hw_params.num_gphy)) - priv->hw_params.num_gphy = 1; - - /* Enable all valid ports and disable those unused */ - for (port = 0; port < priv->hw_params.num_ports; port++) { - /* IMP port receives special treatment */ - if ((1 << port) & ds->enabled_port_mask) - bcm_sf2_port_setup(ds, port, NULL); - else if (dsa_is_cpu_port(ds, port)) - bcm_sf2_imp_setup(ds, port); - else - bcm_sf2_port_disable(ds, port, NULL); - } - - /* Include the pseudo-PHY address and the broadcast PHY address to - * divert reads towards our workaround. This is only required for - * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such - * that we can use the regular SWITCH_MDIO master controller instead. + static int index; + int err; + + /* Find our integrated MDIO bus node */ + dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio"); + priv->master_mii_bus = of_mdio_find_bus(dn); + if (!priv->master_mii_bus) + return -EPROBE_DEFER; + + get_device(&priv->master_mii_bus->dev); + priv->master_mii_dn = dn; + + priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev); + if (!priv->slave_mii_bus) + return -ENOMEM; + + priv->slave_mii_bus->priv = priv; + priv->slave_mii_bus->name = "sf2 slave mii"; + priv->slave_mii_bus->read = bcm_sf2_sw_mdio_read; + priv->slave_mii_bus->write = bcm_sf2_sw_mdio_write; + snprintf(priv->slave_mii_bus->id, MII_BUS_ID_SIZE, "sf2-%d", + index++); + priv->slave_mii_bus->dev.of_node = dn; + + /* Include the pseudo-PHY address to divert reads towards our + * workaround. This is only required for 7445D0, since 7445E0 + * disconnects the internal switch pseudo-PHY such that we can use the + * regular SWITCH_MDIO master controller instead. * - * By default, DSA initializes ds->phys_mii_mask to - * ds->enabled_port_mask to have a 1:1 mapping between Port address - * and PHY address in order to utilize the slave_mii_bus instance to - * read from Port PHYs. This is not what we want here, so we - * initialize phys_mii_mask 0 to always utilize the "master" MDIO - * bus backed by the "mdio-unimac" driver. + * Here we flag the pseudo PHY as needing special treatment and would + * otherwise make all other PHY read/writes go to the master MDIO bus + * controller that comes with this switch backed by the "mdio-unimac" + * driver. 
*/ if (of_machine_is_compatible("brcm,bcm7445d0")) - ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); + priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR); else - ds->phys_mii_mask = 0; + priv->indir_phy_mask = 0; - rev = reg_readl(priv, REG_SWITCH_REVISION); - priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & - SWITCH_TOP_REV_MASK; - priv->hw_params.core_rev = (rev & SF2_REV_MASK); + ds->phys_mii_mask = priv->indir_phy_mask; + ds->slave_mii_bus = priv->slave_mii_bus; + priv->slave_mii_bus->parent = ds->dev->parent; + priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask; - rev = reg_readl(priv, REG_PHY_REVISION); - priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK; + if (dn) + err = of_mdiobus_register(priv->slave_mii_bus, dn); + else + err = mdiobus_register(priv->slave_mii_bus); - pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n", - priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff, - priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff, - priv->core, priv->irq0, priv->irq1); + if (err) + of_node_put(dn); - return 0; + return err; +} -out_free_irq0: - free_irq(priv->irq0, priv); -out_unmap: - base = &priv->core; - for (i = 0; i < BCM_SF2_REGS_NUM; i++) { - if (*base) - iounmap(*base); - base++; - } - return ret; +static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv) +{ + mdiobus_unregister(priv->slave_mii_bus); + if (priv->master_mii_dn) + of_node_put(priv->master_mii_dn); } static int bcm_sf2_sw_set_addr(struct dsa_switch *ds, u8 *addr) @@ -1078,68 +1163,6 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) return priv->hw_params.gphy_rev; } -static int bcm_sf2_sw_indir_rw(struct dsa_switch *ds, int op, int addr, - int regnum, u16 val) -{ - struct bcm_sf2_priv *priv = ds_to_priv(ds); - int ret = 0; - u32 reg; - - reg = reg_readl(priv, REG_SWITCH_CNTRL); - reg |= MDIO_MASTER_SEL; - reg_writel(priv, reg, REG_SWITCH_CNTRL); - - /* Page << 8 | offset */ - reg = 0x70; - reg <<= 2; - core_writel(priv, addr, reg); - - /* Page << 8 | offset */ - reg = 0x80 << 8 | regnum << 1; - reg <<= 2; - - if (op) - ret = core_readl(priv, reg); - else - core_writel(priv, val, reg); - - reg = reg_readl(priv, REG_SWITCH_CNTRL); - reg &= ~MDIO_MASTER_SEL; - reg_writel(priv, reg, REG_SWITCH_CNTRL); - - return ret & 0xffff; -} - -static int bcm_sf2_sw_phy_read(struct dsa_switch *ds, int addr, int regnum) -{ - /* Intercept reads from the MDIO broadcast address or Broadcom - * pseudo-PHY address - */ - switch (addr) { - case 0: - case BRCM_PSEUDO_PHY_ADDR: - return bcm_sf2_sw_indir_rw(ds, 1, addr, regnum, 0); - default: - return 0xffff; - } -} - -static int bcm_sf2_sw_phy_write(struct dsa_switch *ds, int addr, int regnum, - u16 val) -{ - /* Intercept writes to the MDIO broadcast address or Broadcom - * pseudo-PHY address - */ - switch (addr) { - case 0: - case BRCM_PSEUDO_PHY_ADDR: - bcm_sf2_sw_indir_rw(ds, 0, addr, regnum, val); - break; - } - - return 0; -} - static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phydev) { @@ -1248,7 +1271,7 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, * state machine and make it go in PHY_FORCING state instead. 
*/ if (!status->link) - netif_carrier_off(ds->ports[port]); + netif_carrier_off(ds->ports[port].netdev); status->duplex = 1; } else { status->link = 1; @@ -1370,14 +1393,310 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, return p->ethtool_ops->set_wol(p, wol); } +static void bcm_sf2_enable_vlan(struct bcm_sf2_priv *priv, bool enable) +{ + u32 mgmt, vc0, vc1, vc4, vc5; + + mgmt = core_readl(priv, CORE_SWMODE); + vc0 = core_readl(priv, CORE_VLAN_CTRL0); + vc1 = core_readl(priv, CORE_VLAN_CTRL1); + vc4 = core_readl(priv, CORE_VLAN_CTRL4); + vc5 = core_readl(priv, CORE_VLAN_CTRL5); + + mgmt &= ~SW_FWDG_MODE; + + if (enable) { + vc0 |= VLAN_EN | VLAN_LEARN_MODE_IVL; + vc1 |= EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP; + vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT); + vc4 |= INGR_VID_CHK_DROP; + vc5 |= DROP_VTABLE_MISS | EN_VID_FFF_FWD; + } else { + vc0 &= ~(VLAN_EN | VLAN_LEARN_MODE_IVL); + vc1 &= ~(EN_RSV_MCAST_UNTAG | EN_RSV_MCAST_FWDMAP); + vc4 &= ~(INGR_VID_CHK_MASK << INGR_VID_CHK_SHIFT); + vc5 &= ~(DROP_VTABLE_MISS | EN_VID_FFF_FWD); + vc4 |= INGR_VID_CHK_VID_VIOL_IMP; + } + + core_writel(priv, vc0, CORE_VLAN_CTRL0); + core_writel(priv, vc1, CORE_VLAN_CTRL1); + core_writel(priv, 0, CORE_VLAN_CTRL3); + core_writel(priv, vc4, CORE_VLAN_CTRL4); + core_writel(priv, vc5, CORE_VLAN_CTRL5); + core_writel(priv, mgmt, CORE_SWMODE); +} + +static void bcm_sf2_sw_configure_vlan(struct dsa_switch *ds) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + unsigned int port; + + /* Clear all VLANs */ + bcm_sf2_vlan_op(priv, ARLA_VTBL_CMD_CLEAR); + + for (port = 0; port < priv->hw_params.num_ports; port++) { + if (!((1 << port) & ds->enabled_port_mask)) + continue; + + core_writel(priv, 1, CORE_DEFAULT_1Q_TAG_P(port)); + } +} + +static int bcm_sf2_sw_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering) +{ + return 0; +} + +static int bcm_sf2_sw_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + + bcm_sf2_enable_vlan(priv, true); + + return 0; +} + +static void bcm_sf2_sw_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + s8 cpu_port = ds->dst->cpu_port; + struct bcm_sf2_vlan *vl; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + vl = &priv->vlans[vid]; + + bcm_sf2_get_vlan_entry(priv, vid, vl); + + vl->members |= BIT(port) | BIT(cpu_port); + if (untagged) + vl->untag |= BIT(port) | BIT(cpu_port); + else + vl->untag &= ~(BIT(port) | BIT(cpu_port)); + + bcm_sf2_set_vlan_entry(priv, vid, vl); + bcm_sf2_sw_fast_age_vlan(priv, vid); + } + + if (pvid) { + core_writel(priv, vlan->vid_end, CORE_DEFAULT_1Q_TAG_P(port)); + core_writel(priv, vlan->vid_end, + CORE_DEFAULT_1Q_TAG_P(cpu_port)); + bcm_sf2_sw_fast_age_vlan(priv, vid); + } +} + +static int bcm_sf2_sw_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + s8 cpu_port = ds->dst->cpu_port; + struct bcm_sf2_vlan *vl; + u16 vid, pvid; + int ret; + + pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port)); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + vl = &priv->vlans[vid]; + + ret = 
bcm_sf2_get_vlan_entry(priv, vid, vl); + if (ret) + return ret; + + vl->members &= ~BIT(port); + if ((vl->members & BIT(cpu_port)) == BIT(cpu_port)) + vl->members = 0; + if (pvid == vid) + pvid = 0; + if (untagged) { + vl->untag &= ~BIT(port); + if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port)) + vl->untag = 0; + } + + bcm_sf2_set_vlan_entry(priv, vid, vl); + bcm_sf2_sw_fast_age_vlan(priv, vid); + } + + core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(port)); + core_writel(priv, pvid, CORE_DEFAULT_1Q_TAG_P(cpu_port)); + bcm_sf2_sw_fast_age_vlan(priv, vid); + + return 0; +} + +static int bcm_sf2_sw_vlan_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_vlan *vlan, + int (*cb)(struct switchdev_obj *obj)) +{ + struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct bcm_sf2_port_status *p = &priv->port_sts[port]; + struct bcm_sf2_vlan *vl; + u16 vid, pvid; + int err = 0; + + pvid = core_readl(priv, CORE_DEFAULT_1Q_TAG_P(port)); + + for (vid = 0; vid < VLAN_N_VID; vid++) { + vl = &priv->vlans[vid]; + + if (!(vl->members & BIT(port))) + continue; + + vlan->vid_begin = vlan->vid_end = vid; + vlan->flags = 0; + + if (vl->untag & BIT(port)) + vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; + if (p->pvid == vid) + vlan->flags |= BRIDGE_VLAN_INFO_PVID; + + err = cb(&vlan->obj); + if (err) + break; + } + + return err; +} + +static int bcm_sf2_sw_setup(struct dsa_switch *ds) +{ + const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; + struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct device_node *dn; + void __iomem **base; + unsigned int port; + unsigned int i; + u32 reg, rev; + int ret; + + spin_lock_init(&priv->indir_lock); + mutex_init(&priv->stats_mutex); + + /* All the interesting properties are at the parent device_node + * level + */ + dn = ds->cd->of_node->parent; + bcm_sf2_identify_ports(priv, ds->cd->of_node); + + priv->irq0 = irq_of_parse_and_map(dn, 0); + priv->irq1 = irq_of_parse_and_map(dn, 1); + + base = &priv->core; + for (i = 0; i < BCM_SF2_REGS_NUM; i++) { + *base = of_iomap(dn, i); + if (*base == NULL) { + pr_err("unable to find register: %s\n", reg_names[i]); + ret = -ENOMEM; + goto out_unmap; + } + base++; + } + + ret = bcm_sf2_sw_rst(priv); + if (ret) { + pr_err("unable to software reset switch: %d\n", ret); + goto out_unmap; + } + + ret = bcm_sf2_mdio_register(ds); + if (ret) { + pr_err("failed to register MDIO bus\n"); + goto out_unmap; + } + + /* Disable all interrupts and request them */ + bcm_sf2_intr_disable(priv); + + ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0, + "switch_0", priv); + if (ret < 0) { + pr_err("failed to request switch_0 IRQ\n"); + goto out_mdio; + } + + ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0, + "switch_1", priv); + if (ret < 0) { + pr_err("failed to request switch_1 IRQ\n"); + goto out_free_irq0; + } + + /* Reset the MIB counters */ + reg = core_readl(priv, CORE_GMNCFGCFG); + reg |= RST_MIB_CNT; + core_writel(priv, reg, CORE_GMNCFGCFG); + reg &= ~RST_MIB_CNT; + core_writel(priv, reg, CORE_GMNCFGCFG); + + /* Get the maximum number of ports for this switch */ + priv->hw_params.num_ports = core_readl(priv, CORE_IMP0_PRT_ID) + 1; + if (priv->hw_params.num_ports > DSA_MAX_PORTS) + priv->hw_params.num_ports = DSA_MAX_PORTS; + + /* Assume a single GPHY setup if we can't read that property */ + if (of_property_read_u32(dn, "brcm,num-gphy", + &priv->hw_params.num_gphy)) + priv->hw_params.num_gphy = 1; + + /* Enable all valid ports and disable those unused */ + for (port = 0; port < priv->hw_params.num_ports; port++) { + /* IMP port
receives special treatment */ + if ((1 << port) & ds->enabled_port_mask) + bcm_sf2_port_setup(ds, port, NULL); + else if (dsa_is_cpu_port(ds, port)) + bcm_sf2_imp_setup(ds, port); + else + bcm_sf2_port_disable(ds, port, NULL); + } + + bcm_sf2_sw_configure_vlan(ds); + + rev = reg_readl(priv, REG_SWITCH_REVISION); + priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & + SWITCH_TOP_REV_MASK; + priv->hw_params.core_rev = (rev & SF2_REV_MASK); + + rev = reg_readl(priv, REG_PHY_REVISION); + priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK; + + pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n", + priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff, + priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff, + priv->core, priv->irq0, priv->irq1); + + return 0; + +out_free_irq0: + free_irq(priv->irq0, priv); +out_mdio: + bcm_sf2_mdio_unregister(priv); +out_unmap: + base = &priv->core; + for (i = 0; i < BCM_SF2_REGS_NUM; i++) { + if (*base) + iounmap(*base); + base++; + } + return ret; +} + static struct dsa_switch_driver bcm_sf2_switch_driver = { .tag_protocol = DSA_TAG_PROTO_BRCM, .probe = bcm_sf2_sw_drv_probe, .setup = bcm_sf2_sw_setup, .set_addr = bcm_sf2_sw_set_addr, .get_phy_flags = bcm_sf2_sw_get_phy_flags, - .phy_read = bcm_sf2_sw_phy_read, - .phy_write = bcm_sf2_sw_phy_write, .get_strings = bcm_sf2_sw_get_strings, .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats, .get_sset_count = bcm_sf2_sw_get_sset_count, @@ -1398,6 +1717,11 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = { .port_fdb_add = bcm_sf2_sw_fdb_add, .port_fdb_del = bcm_sf2_sw_fdb_del, .port_fdb_dump = bcm_sf2_sw_fdb_dump, + .port_vlan_filtering = bcm_sf2_sw_vlan_filtering, + .port_vlan_prepare = bcm_sf2_sw_vlan_prepare, + .port_vlan_add = bcm_sf2_sw_vlan_add, + .port_vlan_del = bcm_sf2_sw_vlan_del, + .port_vlan_dump = bcm_sf2_sw_vlan_dump, }; static int __init bcm_sf2_init(void) diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 71b1e5298..dd446e466 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -21,6 +21,7 @@ #include <linux/ethtool.h> #include <linux/types.h> #include <linux/bitops.h> +#include <linux/if_vlan.h> #include <net/dsa.h> @@ -50,6 +51,7 @@ struct bcm_sf2_port_status { struct ethtool_eee eee; u32 vlan_ctl_mask; + u16 pvid; struct net_device *bridge_dev; }; @@ -63,6 +65,11 @@ struct bcm_sf2_arl_entry { u8 is_static:1; }; +struct bcm_sf2_vlan { + u16 members; + u16 untag; +}; + static inline void bcm_sf2_mac_from_u64(u64 src, u8 *dst) { unsigned int i; @@ -142,6 +149,15 @@ struct bcm_sf2_priv { /* Bitmask of ports having an integrated PHY */ unsigned int int_phy_mask; + + /* Master and slave MDIO bus controller */ + unsigned int indir_phy_mask; + struct device_node *master_mii_dn; + struct mii_bus *slave_mii_bus; + struct mii_bus *master_mii_bus; + + /* Cache of programmed VLANs */ + struct bcm_sf2_vlan vlans[VLAN_N_VID]; }; struct bcm_sf2_hw_stats { diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h index 97780d43b..9f2a9cb42 100644 --- a/drivers/net/dsa/bcm_sf2_regs.h +++ b/drivers/net/dsa/bcm_sf2_regs.h @@ -274,6 +274,23 @@ #define CORE_ARLA_SRCH_RSLT_MACVID(x) (CORE_ARLA_SRCH_RSLT_0_MACVID + ((x) * 0x40)) #define CORE_ARLA_SRCH_RSLT(x) (CORE_ARLA_SRCH_RSLT_0 + ((x) * 0x40)) +#define CORE_ARLA_VTBL_RWCTRL 0x1600 +#define ARLA_VTBL_CMD_WRITE 0 +#define ARLA_VTBL_CMD_READ 1 +#define ARLA_VTBL_CMD_CLEAR 2 +#define ARLA_VTBL_STDN (1 << 7) + +#define CORE_ARLA_VTBL_ADDR 0x1604 +#define 
VTBL_ADDR_INDEX_MASK 0xfff + +#define CORE_ARLA_VTBL_ENTRY 0x160c +#define FWD_MAP_MASK 0x1ff +#define UNTAG_MAP_MASK 0x1ff +#define UNTAG_MAP_SHIFT 9 +#define MSTP_INDEX_MASK 0x7 +#define MSTP_INDEX_SHIFT 18 +#define FWD_MODE (1 << 21) + #define CORE_MEM_PSM_VDD_CTRL 0x2380 #define P_TXQ_PSM_VDD_SHIFT 2 #define P_TXQ_PSM_VDD_MASK 0x3 @@ -287,6 +304,59 @@ #define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8)) #define PORT_VLAN_CTRL_MASK 0x1ff +#define CORE_VLAN_CTRL0 0xd000 +#define CHANGE_1P_VID_INNER (1 << 0) +#define CHANGE_1P_VID_OUTER (1 << 1) +#define CHANGE_1Q_VID (1 << 3) +#define VLAN_LEARN_MODE_SVL (0 << 5) +#define VLAN_LEARN_MODE_IVL (3 << 5) +#define VLAN_EN (1 << 7) + +#define CORE_VLAN_CTRL1 0xd004 +#define EN_RSV_MCAST_FWDMAP (1 << 2) +#define EN_RSV_MCAST_UNTAG (1 << 3) +#define EN_IPMC_BYPASS_FWDMAP (1 << 5) +#define EN_IPMC_BYPASS_UNTAG (1 << 6) + +#define CORE_VLAN_CTRL2 0xd008 +#define EN_MIIM_BYPASS_V_FWDMAP (1 << 2) +#define EN_GMRP_GVRP_V_FWDMAP (1 << 5) +#define EN_GMRP_GVRP_UNTAG_MAP (1 << 6) + +#define CORE_VLAN_CTRL3 0xd00c +#define EN_DROP_NON1Q_MASK 0x1ff + +#define CORE_VLAN_CTRL4 0xd014 +#define RESV_MCAST_FLOOD (1 << 1) +#define EN_DOUBLE_TAG_MASK 0x3 +#define EN_DOUBLE_TAG_SHIFT 2 +#define EN_MGE_REV_GMRP (1 << 4) +#define EN_MGE_REV_GVRP (1 << 5) +#define INGR_VID_CHK_SHIFT 6 +#define INGR_VID_CHK_MASK 0x3 +#define INGR_VID_CHK_FWD (0 << INGR_VID_CHK_SHIFT) +#define INGR_VID_CHK_DROP (1 << INGR_VID_CHK_SHIFT) +#define INGR_VID_CHK_NO_CHK (2 << INGR_VID_CHK_SHIFT) +#define INGR_VID_CHK_VID_VIOL_IMP (3 << INGR_VID_CHK_SHIFT) + +#define CORE_VLAN_CTRL5 0xd018 +#define EN_CPU_RX_BYP_INNER_CRCCHCK (1 << 0) +#define EN_VID_FFF_FWD (1 << 2) +#define DROP_VTABLE_MISS (1 << 3) +#define EGRESS_DIR_FRM_BYP_TRUNK_EN (1 << 4) +#define PRESV_NON1Q (1 << 6) + +#define CORE_VLAN_CTRL6 0xd01c +#define STRICT_SFD_DETECT (1 << 0) +#define DIS_ARL_BUST_LMIT (1 << 4) + +#define CORE_DEFAULT_1Q_TAG_P(x) (0xd040 + ((x) * 8)) +#define CFI_SHIFT 12 +#define PRI_SHIFT 13 +#define PRI_MASK 0x7 + +#define CORE_JOIN_ALL_VLAN_EN 0xd140 + #define CORE_EEE_EN_CTRL 0x24800 #define CORE_EEE_LPI_INDICATE 0x24810 diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig new file mode 100644 index 000000000..490bc06f9 --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/Kconfig @@ -0,0 +1,7 @@ +config NET_DSA_MV88E6XXX + tristate "Marvell 88E6xxx Ethernet switch fabric support" + depends on NET_DSA + select NET_DSA_TAG_EDSA + help + This driver adds support for most of the Marvell 88E6xxx models of + Ethernet switch chips, except 88E6060. diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile new file mode 100644 index 000000000..6e29a75ee --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_NET_DSA_MV88E6XXX) += chip.o diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c new file mode 100644 index 000000000..710679067 --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -0,0 +1,4093 @@ +/* + * Marvell 88e6xxx Ethernet switch single-chip support + * + * Copyright (c) 2008 Marvell Semiconductor + * + * Copyright (c) 2015 CMC Electronics, Inc. 
+ * Added support for VLAN Table Unit operations + * + * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/if_bridge.h> +#include <linux/jiffies.h> +#include <linux/list.h> +#include <linux/mdio.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/netdevice.h> +#include <linux/gpio/consumer.h> +#include <linux/phy.h> +#include <net/dsa.h> +#include <net/switchdev.h> +#include "mv88e6xxx.h" + +static void assert_reg_lock(struct mv88e6xxx_chip *chip) +{ + if (unlikely(!mutex_is_locked(&chip->reg_lock))) { + dev_err(chip->dev, "Switch registers lock not held!\n"); + dump_stack(); + } +} + +/* The switch ADDR[4:1] configuration pins define the chip SMI device address + * (ADDR[0] is always zero, thus only even SMI addresses can be strapped). + * + * When ADDR is all zero, the chip uses Single-chip Addressing Mode, assuming it + * is the only device connected to the SMI master. In this mode it responds to + * all 32 possible SMI addresses, and thus maps directly the internal devices. + * + * When ADDR is non-zero, the chip uses Multi-chip Addressing Mode, allowing + * multiple devices to share the SMI interface. In this mode it responds to only + * 2 registers, used to indirectly access the internal SMI devices. + */ + +static int mv88e6xxx_smi_read(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 *val) +{ + if (!chip->smi_ops) + return -EOPNOTSUPP; + + return chip->smi_ops->read(chip, addr, reg, val); +} + +static int mv88e6xxx_smi_write(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 val) +{ + if (!chip->smi_ops) + return -EOPNOTSUPP; + + return chip->smi_ops->write(chip, addr, reg, val); +} + +static int mv88e6xxx_smi_single_chip_read(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 *val) +{ + int ret; + + ret = mdiobus_read_nested(chip->bus, addr, reg); + if (ret < 0) + return ret; + + *val = ret & 0xffff; + + return 0; +} + +static int mv88e6xxx_smi_single_chip_write(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 val) +{ + int ret; + + ret = mdiobus_write_nested(chip->bus, addr, reg, val); + if (ret < 0) + return ret; + + return 0; +} + +static const struct mv88e6xxx_ops mv88e6xxx_smi_single_chip_ops = { + .read = mv88e6xxx_smi_single_chip_read, + .write = mv88e6xxx_smi_single_chip_write, +}; + +static int mv88e6xxx_smi_multi_chip_wait(struct mv88e6xxx_chip *chip) +{ + int ret; + int i; + + for (i = 0; i < 16; i++) { + ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_CMD); + if (ret < 0) + return ret; + + if ((ret & SMI_CMD_BUSY) == 0) + return 0; + } + + return -ETIMEDOUT; +} + +static int mv88e6xxx_smi_multi_chip_read(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 *val) +{ + int ret; + + /* Wait for the bus to become free. */ + ret = mv88e6xxx_smi_multi_chip_wait(chip); + if (ret < 0) + return ret; + + /* Transmit the read command. */ + ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD, + SMI_CMD_OP_22_READ | (addr << 5) | reg); + if (ret < 0) + return ret; + + /* Wait for the read command to complete. */ + ret = mv88e6xxx_smi_multi_chip_wait(chip); + if (ret < 0) + return ret; + + /* Read the data. 
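+	 * The 16-bit result of the indirect access sits in the SMI_DATA
+	 * register once the BUSY bit in SMI_CMD has cleared.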
*/ + ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_DATA); + if (ret < 0) + return ret; + + *val = ret & 0xffff; + + return 0; +} + +static int mv88e6xxx_smi_multi_chip_write(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 val) +{ + int ret; + + /* Wait for the bus to become free. */ + ret = mv88e6xxx_smi_multi_chip_wait(chip); + if (ret < 0) + return ret; + + /* Transmit the data to write. */ + ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_DATA, val); + if (ret < 0) + return ret; + + /* Transmit the write command. */ + ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD, + SMI_CMD_OP_22_WRITE | (addr << 5) | reg); + if (ret < 0) + return ret; + + /* Wait for the write command to complete. */ + ret = mv88e6xxx_smi_multi_chip_wait(chip); + if (ret < 0) + return ret; + + return 0; +} + +static const struct mv88e6xxx_ops mv88e6xxx_smi_multi_chip_ops = { + .read = mv88e6xxx_smi_multi_chip_read, + .write = mv88e6xxx_smi_multi_chip_write, +}; + +static int mv88e6xxx_read(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 *val) +{ + int err; + + assert_reg_lock(chip); + + err = mv88e6xxx_smi_read(chip, addr, reg, val); + if (err) + return err; + + dev_dbg(chip->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n", + addr, reg, *val); + + return 0; +} + +static int mv88e6xxx_write(struct mv88e6xxx_chip *chip, + int addr, int reg, u16 val) +{ + int err; + + assert_reg_lock(chip); + + err = mv88e6xxx_smi_write(chip, addr, reg, val); + if (err) + return err; + + dev_dbg(chip->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n", + addr, reg, val); + + return 0; +} + +/* Indirect write to single pointer-data register with an Update bit */ +static int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, + u16 update) +{ + u16 val; + int i, err; + + /* Wait until the previous operation is completed */ + for (i = 0; i < 16; ++i) { + err = mv88e6xxx_read(chip, addr, reg, &val); + if (err) + return err; + + if (!(val & BIT(15))) + break; + } + + if (i == 16) + return -ETIMEDOUT; + + /* Set the Update bit to trigger a write operation */ + val = BIT(15) | update; + + return mv88e6xxx_write(chip, addr, reg, val); +} + +static int _mv88e6xxx_reg_read(struct mv88e6xxx_chip *chip, int addr, int reg) +{ + u16 val; + int err; + + err = mv88e6xxx_read(chip, addr, reg, &val); + if (err) + return err; + + return val; +} + +static int _mv88e6xxx_reg_write(struct mv88e6xxx_chip *chip, int addr, + int reg, u16 val) +{ + return mv88e6xxx_write(chip, addr, reg, val); +} + +static int mv88e6xxx_mdio_read_direct(struct mv88e6xxx_chip *chip, + int addr, int regnum) +{ + if (addr >= 0) + return _mv88e6xxx_reg_read(chip, addr, regnum); + return 0xffff; +} + +static int mv88e6xxx_mdio_write_direct(struct mv88e6xxx_chip *chip, + int addr, int regnum, u16 val) +{ + if (addr >= 0) + return _mv88e6xxx_reg_write(chip, addr, regnum, val); + return 0; +} + +static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip) +{ + int ret; + unsigned long timeout; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL, + ret & ~GLOBAL_CONTROL_PPU_ENABLE); + if (ret) + return ret; + + timeout = jiffies + 1 * HZ; + while (time_before(jiffies, timeout)) { + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS); + if (ret < 0) + return ret; + + usleep_range(1000, 2000); + if ((ret & GLOBAL_STATUS_PPU_MASK) != + GLOBAL_STATUS_PPU_POLLING) + return 0; + } + + return -ETIMEDOUT; +} + +static int 
mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip) +{ + int ret, err; + unsigned long timeout; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL); + if (ret < 0) + return ret; + + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL, + ret | GLOBAL_CONTROL_PPU_ENABLE); + if (err) + return err; + + timeout = jiffies + 1 * HZ; + while (time_before(jiffies, timeout)) { + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS); + if (ret < 0) + return ret; + + usleep_range(1000, 2000); + if ((ret & GLOBAL_STATUS_PPU_MASK) == + GLOBAL_STATUS_PPU_POLLING) + return 0; + } + + return -ETIMEDOUT; +} + +static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly) +{ + struct mv88e6xxx_chip *chip; + + chip = container_of(ugly, struct mv88e6xxx_chip, ppu_work); + + mutex_lock(&chip->reg_lock); + + if (mutex_trylock(&chip->ppu_mutex)) { + if (mv88e6xxx_ppu_enable(chip) == 0) + chip->ppu_disabled = 0; + mutex_unlock(&chip->ppu_mutex); + } + + mutex_unlock(&chip->reg_lock); +} + +static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps) +{ + struct mv88e6xxx_chip *chip = (void *)_ps; + + schedule_work(&chip->ppu_work); +} + +static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_chip *chip) +{ + int ret; + + mutex_lock(&chip->ppu_mutex); + + /* If the PHY polling unit is enabled, disable it so that + * we can access the PHY registers. If it was already + * disabled, cancel the timer that is going to re-enable + * it. + */ + if (!chip->ppu_disabled) { + ret = mv88e6xxx_ppu_disable(chip); + if (ret < 0) { + mutex_unlock(&chip->ppu_mutex); + return ret; + } + chip->ppu_disabled = 1; + } else { + del_timer(&chip->ppu_timer); + ret = 0; + } + + return ret; +} + +static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_chip *chip) +{ + /* Schedule a timer to re-enable the PHY polling unit. 
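+	 * The 10ms delay lets bursts of PHY accesses share one
+	 * disable/re-enable cycle instead of toggling the PPU on
+	 * every register access.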
*/ + mod_timer(&chip->ppu_timer, jiffies + msecs_to_jiffies(10)); + mutex_unlock(&chip->ppu_mutex); +} + +static void mv88e6xxx_ppu_state_init(struct mv88e6xxx_chip *chip) +{ + mutex_init(&chip->ppu_mutex); + INIT_WORK(&chip->ppu_work, mv88e6xxx_ppu_reenable_work); + init_timer(&chip->ppu_timer); + chip->ppu_timer.data = (unsigned long)chip; + chip->ppu_timer.function = mv88e6xxx_ppu_reenable_timer; +} + +static int mv88e6xxx_mdio_read_ppu(struct mv88e6xxx_chip *chip, int addr, + int regnum) +{ + int ret; + + ret = mv88e6xxx_ppu_access_get(chip); + if (ret >= 0) { + ret = _mv88e6xxx_reg_read(chip, addr, regnum); + mv88e6xxx_ppu_access_put(chip); + } + + return ret; +} + +static int mv88e6xxx_mdio_write_ppu(struct mv88e6xxx_chip *chip, int addr, + int regnum, u16 val) +{ + int ret; + + ret = mv88e6xxx_ppu_access_get(chip); + if (ret >= 0) { + ret = _mv88e6xxx_reg_write(chip, addr, regnum, val); + mv88e6xxx_ppu_access_put(chip); + } + + return ret; +} + +static bool mv88e6xxx_6065_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6065; +} + +static bool mv88e6xxx_6095_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6095; +} + +static bool mv88e6xxx_6097_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6097; +} + +static bool mv88e6xxx_6165_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6165; +} + +static bool mv88e6xxx_6185_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6185; +} + +static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6320; +} + +static bool mv88e6xxx_6351_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6351; +} + +static bool mv88e6xxx_6352_family(struct mv88e6xxx_chip *chip) +{ + return chip->info->family == MV88E6XXX_FAMILY_6352; +} + +static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_chip *chip) +{ + return chip->info->num_databases; +} + +static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_chip *chip) +{ + /* Does the device have dedicated FID registers for ATU and VTU ops? */ + if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) || + mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) + return true; + + return false; +} + +/* We expect the switch to perform auto negotiation if there is a real + * phy. However, in the case of a fixed link phy, we force the port + * settings from the fixed link settings. 
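+ * Ports with a real PHY return early below and are left to
+ * autonegotiate; only pseudo fixed-link ports are forced.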
+ */ +static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + u32 reg; + int ret; + + if (!phy_is_pseudo_fixed_link(phydev)) + return; + + mutex_lock(&chip->reg_lock); + + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL); + if (ret < 0) + goto out; + + reg = ret & ~(PORT_PCS_CTRL_LINK_UP | + PORT_PCS_CTRL_FORCE_LINK | + PORT_PCS_CTRL_DUPLEX_FULL | + PORT_PCS_CTRL_FORCE_DUPLEX | + PORT_PCS_CTRL_UNFORCED); + + reg |= PORT_PCS_CTRL_FORCE_LINK; + if (phydev->link) + reg |= PORT_PCS_CTRL_LINK_UP; + + if (mv88e6xxx_6065_family(chip) && phydev->speed > SPEED_100) + goto out; + + switch (phydev->speed) { + case SPEED_1000: + reg |= PORT_PCS_CTRL_1000; + break; + case SPEED_100: + reg |= PORT_PCS_CTRL_100; + break; + case SPEED_10: + reg |= PORT_PCS_CTRL_10; + break; + default: + pr_info("Unknown speed"); + goto out; + } + + reg |= PORT_PCS_CTRL_FORCE_DUPLEX; + if (phydev->duplex == DUPLEX_FULL) + reg |= PORT_PCS_CTRL_DUPLEX_FULL; + + if ((mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip)) && + (port >= chip->info->num_ports - 2)) { + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) + reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK; + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK; + if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) + reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK | + PORT_PCS_CTRL_RGMII_DELAY_TXCLK); + } + _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_PCS_CTRL, reg); + +out: + mutex_unlock(&chip->reg_lock); +} + +static int _mv88e6xxx_stats_wait(struct mv88e6xxx_chip *chip) +{ + int ret; + int i; + + for (i = 0; i < 10; i++) { + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_OP); + if ((ret & GLOBAL_STATS_OP_BUSY) == 0) + return 0; + } + + return -ETIMEDOUT; +} + +static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_chip *chip, int port) +{ + int ret; + + if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip)) + port = (port + 1) << 5; + + /* Snapshot the hardware statistics counters for this port. */ + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP, + GLOBAL_STATS_OP_CAPTURE_PORT | + GLOBAL_STATS_OP_HIST_RX_TX | port); + if (ret < 0) + return ret; + + /* Wait for the snapshotting to complete. 
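+	 * Once GLOBAL_STATS_OP_BUSY clears, the captured counters can be
+	 * read back one at a time with GLOBAL_STATS_OP_READ_CAPTURED.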
*/ + ret = _mv88e6xxx_stats_wait(chip); + if (ret < 0) + return ret; + + return 0; +} + +static void _mv88e6xxx_stats_read(struct mv88e6xxx_chip *chip, + int stat, u32 *val) +{ + u32 _val; + int ret; + + *val = 0; + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP, + GLOBAL_STATS_OP_READ_CAPTURED | + GLOBAL_STATS_OP_HIST_RX_TX | stat); + if (ret < 0) + return; + + ret = _mv88e6xxx_stats_wait(chip); + if (ret < 0) + return; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_32); + if (ret < 0) + return; + + _val = ret << 16; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATS_COUNTER_01); + if (ret < 0) + return; + + *val = _val | ret; +} + +static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = { + { "in_good_octets", 8, 0x00, BANK0, }, + { "in_bad_octets", 4, 0x02, BANK0, }, + { "in_unicast", 4, 0x04, BANK0, }, + { "in_broadcasts", 4, 0x06, BANK0, }, + { "in_multicasts", 4, 0x07, BANK0, }, + { "in_pause", 4, 0x16, BANK0, }, + { "in_undersize", 4, 0x18, BANK0, }, + { "in_fragments", 4, 0x19, BANK0, }, + { "in_oversize", 4, 0x1a, BANK0, }, + { "in_jabber", 4, 0x1b, BANK0, }, + { "in_rx_error", 4, 0x1c, BANK0, }, + { "in_fcs_error", 4, 0x1d, BANK0, }, + { "out_octets", 8, 0x0e, BANK0, }, + { "out_unicast", 4, 0x10, BANK0, }, + { "out_broadcasts", 4, 0x13, BANK0, }, + { "out_multicasts", 4, 0x12, BANK0, }, + { "out_pause", 4, 0x15, BANK0, }, + { "excessive", 4, 0x11, BANK0, }, + { "collisions", 4, 0x1e, BANK0, }, + { "deferred", 4, 0x05, BANK0, }, + { "single", 4, 0x14, BANK0, }, + { "multiple", 4, 0x17, BANK0, }, + { "out_fcs_error", 4, 0x03, BANK0, }, + { "late", 4, 0x1f, BANK0, }, + { "hist_64bytes", 4, 0x08, BANK0, }, + { "hist_65_127bytes", 4, 0x09, BANK0, }, + { "hist_128_255bytes", 4, 0x0a, BANK0, }, + { "hist_256_511bytes", 4, 0x0b, BANK0, }, + { "hist_512_1023bytes", 4, 0x0c, BANK0, }, + { "hist_1024_max_bytes", 4, 0x0d, BANK0, }, + { "sw_in_discards", 4, 0x10, PORT, }, + { "sw_in_filtered", 2, 0x12, PORT, }, + { "sw_out_filtered", 2, 0x13, PORT, }, + { "in_discards", 4, 0x00 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_filtered", 4, 0x01 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_accepted", 4, 0x02 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_bad_accepted", 4, 0x03 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_good_avb_class_a", 4, 0x04 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_good_avb_class_b", 4, 0x05 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_bad_avb_class_a", 4, 0x06 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_bad_avb_class_b", 4, 0x07 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "tcam_counter_0", 4, 0x08 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "tcam_counter_1", 4, 0x09 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "tcam_counter_2", 4, 0x0a | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "tcam_counter_3", 4, 0x0b | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_da_unknown", 4, 0x0e | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "in_management", 4, 0x0f | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_0", 4, 0x10 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_1", 4, 0x11 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_2", 4, 0x12 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_3", 4, 0x13 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_4", 4, 0x14 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_5", 4, 0x15 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_6", 4, 0x16 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_queue_7", 4, 0x17 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_cut_through", 4, 0x18 | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_octets_a", 
4, 0x1a | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_octets_b", 4, 0x1b | GLOBAL_STATS_OP_BANK_1, BANK1, }, + { "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, }, +}; + +static bool mv88e6xxx_has_stat(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_hw_stat *stat) +{ + switch (stat->type) { + case BANK0: + return true; + case BANK1: + return mv88e6xxx_6320_family(chip); + case PORT: + return mv88e6xxx_6095_family(chip) || + mv88e6xxx_6185_family(chip) || + mv88e6xxx_6097_family(chip) || + mv88e6xxx_6165_family(chip) || + mv88e6xxx_6351_family(chip) || + mv88e6xxx_6352_family(chip); + } + return false; +} + +static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_hw_stat *s, + int port) +{ + u32 low; + u32 high = 0; + int ret; + u64 value; + + switch (s->type) { + case PORT: + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), s->reg); + if (ret < 0) + return UINT64_MAX; + + low = ret; + if (s->sizeof_stat == 4) { + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), + s->reg + 1); + if (ret < 0) + return UINT64_MAX; + high = ret; + } + break; + case BANK0: + case BANK1: + _mv88e6xxx_stats_read(chip, s->reg, &low); + if (s->sizeof_stat == 8) + _mv88e6xxx_stats_read(chip, s->reg + 1, &high); + } + value = (((u64)high) << 16) | low; + return value; +} + +static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, + uint8_t *data) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + struct mv88e6xxx_hw_stat *stat; + int i, j; + + for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { + stat = &mv88e6xxx_hw_stats[i]; + if (mv88e6xxx_has_stat(chip, stat)) { + memcpy(data + j * ETH_GSTRING_LEN, stat->string, + ETH_GSTRING_LEN); + j++; + } + } +} + +static int mv88e6xxx_get_sset_count(struct dsa_switch *ds) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + struct mv88e6xxx_hw_stat *stat; + int i, j; + + for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { + stat = &mv88e6xxx_hw_stats[i]; + if (mv88e6xxx_has_stat(chip, stat)) + j++; + } + return j; +} + +static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + struct mv88e6xxx_hw_stat *stat; + int ret; + int i, j; + + mutex_lock(&chip->reg_lock); + + ret = _mv88e6xxx_stats_snapshot(chip, port); + if (ret < 0) { + mutex_unlock(&chip->reg_lock); + return; + } + for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) { + stat = &mv88e6xxx_hw_stats[i]; + if (mv88e6xxx_has_stat(chip, stat)) { + data[j] = _mv88e6xxx_get_ethtool_stat(chip, stat, port); + j++; + } + } + + mutex_unlock(&chip->reg_lock); +} + +static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port) +{ + return 32 * sizeof(u16); +} + +static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, + struct ethtool_regs *regs, void *_p) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + u16 *p = _p; + int i; + + regs->version = 0; + + memset(p, 0xff, 32 * sizeof(u16)); + + mutex_lock(&chip->reg_lock); + + for (i = 0; i < 32; i++) { + int ret; + + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), i); + if (ret >= 0) + p[i] = ret; + } + + mutex_unlock(&chip->reg_lock); +} + +static int _mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int reg, int offset, + u16 mask) +{ + unsigned long timeout = jiffies + HZ / 10; + + while (time_before(jiffies, timeout)) { + int ret; + + ret = _mv88e6xxx_reg_read(chip, reg, offset); + if (ret < 0) + return ret; + if (!(ret & mask)) + return 0; + + usleep_range(1000, 2000); + } + return -ETIMEDOUT; +} + 
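+/* The *_wait() helpers below are thin wrappers around _mv88e6xxx_wait(),
+ * each polling the busy bit of one global register with the same 100ms
+ * timeout, e.g. for the indirect SMI unit:
+ *
+ *	err = _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ *			      GLOBAL2_SMI_OP_BUSY);
+ */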
+static int mv88e6xxx_mdio_wait(struct mv88e6xxx_chip *chip) +{ + return _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_SMI_OP, + GLOBAL2_SMI_OP_BUSY); +} + +static int _mv88e6xxx_atu_wait(struct mv88e6xxx_chip *chip) +{ + return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_ATU_OP, + GLOBAL_ATU_OP_BUSY); +} + +static int mv88e6xxx_mdio_read_indirect(struct mv88e6xxx_chip *chip, + int addr, int regnum) +{ + int ret; + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP, + GLOBAL2_SMI_OP_22_READ | (addr << 5) | + regnum); + if (ret < 0) + return ret; + + ret = mv88e6xxx_mdio_wait(chip); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA); + + return ret; +} + +static int mv88e6xxx_mdio_write_indirect(struct mv88e6xxx_chip *chip, + int addr, int regnum, u16 val) +{ + int ret; + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_DATA, val); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL2, GLOBAL2_SMI_OP, + GLOBAL2_SMI_OP_22_WRITE | (addr << 5) | + regnum); + + return mv88e6xxx_mdio_wait(chip); +} + +static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int reg; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE)) + return -EOPNOTSUPP; + + mutex_lock(&chip->reg_lock); + + reg = mv88e6xxx_mdio_read_indirect(chip, port, 16); + if (reg < 0) + goto out; + + e->eee_enabled = !!(reg & 0x0200); + e->tx_lpi_enabled = !!(reg & 0x0100); + + reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS); + if (reg < 0) + goto out; + + e->eee_active = !!(reg & PORT_STATUS_EEE); + reg = 0; + +out: + mutex_unlock(&chip->reg_lock); + return reg; +} + +static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, + struct phy_device *phydev, struct ethtool_eee *e) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int reg; + int ret; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE)) + return -EOPNOTSUPP; + + mutex_lock(&chip->reg_lock); + + ret = mv88e6xxx_mdio_read_indirect(chip, port, 16); + if (ret < 0) + goto out; + + reg = ret & ~0x0300; + if (e->eee_enabled) + reg |= 0x0200; + if (e->tx_lpi_enabled) + reg |= 0x0100; + + ret = mv88e6xxx_mdio_write_indirect(chip, port, 16, reg); +out: + mutex_unlock(&chip->reg_lock); + + return ret; +} + +static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_chip *chip, u16 fid, u16 cmd) +{ + int ret; + + if (mv88e6xxx_has_fid_reg(chip)) { + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_FID, + fid); + if (ret < 0) + return ret; + } else if (mv88e6xxx_num_databases(chip) == 256) { + /* ATU DBNum[7:4] are located in ATU Control 15:12 */ + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, + (ret & 0xfff) | + ((fid << 8) & 0xf000)); + if (ret < 0) + return ret; + + /* ATU DBNum[3:0] are located in ATU Operation 3:0 */ + cmd |= fid & 0xf; + } + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_OP, cmd); + if (ret < 0) + return ret; + + return _mv88e6xxx_atu_wait(chip); +} + +static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_atu_entry *entry) +{ + u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK; + + if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) { + unsigned int mask, shift; + + if (entry->trunk) { + data |= GLOBAL_ATU_DATA_TRUNK; + mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK; + shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT; + } else { + mask = 
GLOBAL_ATU_DATA_PORT_VECTOR_MASK; + shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT; + } + + data |= (entry->portv_trunkid << shift) & mask; + } + + return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_ATU_DATA, data); +} + +static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_atu_entry *entry, + bool static_too) +{ + int op; + int err; + + err = _mv88e6xxx_atu_wait(chip); + if (err) + return err; + + err = _mv88e6xxx_atu_data_write(chip, entry); + if (err) + return err; + + if (entry->fid) { + op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB : + GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB; + } else { + op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL : + GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC; + } + + return _mv88e6xxx_atu_cmd(chip, entry->fid, op); +} + +static int _mv88e6xxx_atu_flush(struct mv88e6xxx_chip *chip, + u16 fid, bool static_too) +{ + struct mv88e6xxx_atu_entry entry = { + .fid = fid, + .state = 0, /* EntryState bits must be 0 */ + }; + + return _mv88e6xxx_atu_flush_move(chip, &entry, static_too); +} + +static int _mv88e6xxx_atu_move(struct mv88e6xxx_chip *chip, u16 fid, + int from_port, int to_port, bool static_too) +{ + struct mv88e6xxx_atu_entry entry = { + .trunk = false, + .fid = fid, + }; + + /* EntryState bits must be 0xF */ + entry.state = GLOBAL_ATU_DATA_STATE_MASK; + + /* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */ + entry.portv_trunkid = (to_port & 0x0f) << 4; + entry.portv_trunkid |= from_port & 0x0f; + + return _mv88e6xxx_atu_flush_move(chip, &entry, static_too); +} + +static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid, + int port, bool static_too) +{ + /* Destination port 0xF means remove the entries */ + return _mv88e6xxx_atu_move(chip, fid, port, 0x0f, static_too); +} + +static const char * const mv88e6xxx_port_state_names[] = { + [PORT_CONTROL_STATE_DISABLED] = "Disabled", + [PORT_CONTROL_STATE_BLOCKING] = "Blocking/Listening", + [PORT_CONTROL_STATE_LEARNING] = "Learning", + [PORT_CONTROL_STATE_FORWARDING] = "Forwarding", +}; + +static int _mv88e6xxx_port_state(struct mv88e6xxx_chip *chip, int port, + u8 state) +{ + struct dsa_switch *ds = chip->ds; + int reg, ret = 0; + u8 oldstate; + + reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL); + if (reg < 0) + return reg; + + oldstate = reg & PORT_CONTROL_STATE_MASK; + + if (oldstate != state) { + /* Flush forwarding database if we're moving a port + * from Learning or Forwarding state to Disabled or + * Blocking or Listening state. 
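+		 * Otherwise stale dynamic entries could keep steering
+		 * traffic at a port that is no longer forwarding.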
+ */ + if ((oldstate == PORT_CONTROL_STATE_LEARNING || + oldstate == PORT_CONTROL_STATE_FORWARDING) && + (state == PORT_CONTROL_STATE_DISABLED || + state == PORT_CONTROL_STATE_BLOCKING)) { + ret = _mv88e6xxx_atu_remove(chip, 0, port, false); + if (ret) + return ret; + } + + reg = (reg & ~PORT_CONTROL_STATE_MASK) | state; + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL, + reg); + if (ret) + return ret; + + netdev_dbg(ds->ports[port].netdev, "PortState %s (was %s)\n", + mv88e6xxx_port_state_names[state], + mv88e6xxx_port_state_names[oldstate]); + } + + return ret; +} + +static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port) +{ + struct net_device *bridge = chip->ports[port].bridge_dev; + const u16 mask = (1 << chip->info->num_ports) - 1; + struct dsa_switch *ds = chip->ds; + u16 output_ports = 0; + int reg; + int i; + + /* allow CPU port or DSA link(s) to send frames to every port */ + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { + output_ports = mask; + } else { + for (i = 0; i < chip->info->num_ports; ++i) { + /* allow sending frames to every group member */ + if (bridge && chip->ports[i].bridge_dev == bridge) + output_ports |= BIT(i); + + /* allow sending frames to CPU port and DSA link(s) */ + if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)) + output_ports |= BIT(i); + } + } + + /* prevent frames from going back out of the port they came in on */ + output_ports &= ~BIT(port); + + reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN); + if (reg < 0) + return reg; + + reg &= ~mask; + reg |= output_ports & mask; + + return _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_BASE_VLAN, reg); +} + +static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, + u8 state) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int stp_state; + int err; + + switch (state) { + case BR_STATE_DISABLED: + stp_state = PORT_CONTROL_STATE_DISABLED; + break; + case BR_STATE_BLOCKING: + case BR_STATE_LISTENING: + stp_state = PORT_CONTROL_STATE_BLOCKING; + break; + case BR_STATE_LEARNING: + stp_state = PORT_CONTROL_STATE_LEARNING; + break; + case BR_STATE_FORWARDING: + default: + stp_state = PORT_CONTROL_STATE_FORWARDING; + break; + } + + mutex_lock(&chip->reg_lock); + err = _mv88e6xxx_port_state(chip, port, stp_state); + mutex_unlock(&chip->reg_lock); + + if (err) + netdev_err(ds->ports[port].netdev, + "failed to update state to %s\n", + mv88e6xxx_port_state_names[stp_state]); +} + +static int _mv88e6xxx_port_pvid(struct mv88e6xxx_chip *chip, int port, + u16 *new, u16 *old) +{ + struct dsa_switch *ds = chip->ds; + u16 pvid; + int ret; + + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_DEFAULT_VLAN); + if (ret < 0) + return ret; + + pvid = ret & PORT_DEFAULT_VLAN_MASK; + + if (new) { + ret &= ~PORT_DEFAULT_VLAN_MASK; + ret |= *new & PORT_DEFAULT_VLAN_MASK; + + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_DEFAULT_VLAN, ret); + if (ret < 0) + return ret; + + netdev_dbg(ds->ports[port].netdev, + "DefaultVID %d (was %d)\n", *new, pvid); + } + + if (old) + *old = pvid; + + return 0; +} + +static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_chip *chip, + int port, u16 *pvid) +{ + return _mv88e6xxx_port_pvid(chip, port, NULL, pvid); +} + +static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_chip *chip, + int port, u16 pvid) +{ + return _mv88e6xxx_port_pvid(chip, port, &pvid, NULL); +} + +static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_chip *chip) +{ + return _mv88e6xxx_wait(chip, REG_GLOBAL, GLOBAL_VTU_OP, + 
GLOBAL_VTU_OP_BUSY); +} + +static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_chip *chip, u16 op) +{ + int ret; + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_OP, op); + if (ret < 0) + return ret; + + return _mv88e6xxx_vtu_wait(chip); +} + +static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_chip *chip) +{ + int ret; + + ret = _mv88e6xxx_vtu_wait(chip); + if (ret < 0) + return ret; + + return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_FLUSH_ALL); +} + +static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_stu_entry *entry, + unsigned int nibble_offset) +{ + u16 regs[3]; + int i; + int ret; + + for (i = 0; i < 3; ++i) { + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, + GLOBAL_VTU_DATA_0_3 + i); + if (ret < 0) + return ret; + + regs[i] = ret; + } + + for (i = 0; i < chip->info->num_ports; ++i) { + unsigned int shift = (i % 4) * 4 + nibble_offset; + u16 reg = regs[i / 4]; + + entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK; + } + + return 0; +} + +static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + return _mv88e6xxx_vtu_stu_data_read(chip, entry, 0); +} + +static int mv88e6xxx_stu_data_read(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + return _mv88e6xxx_vtu_stu_data_read(chip, entry, 2); +} + +static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_stu_entry *entry, + unsigned int nibble_offset) +{ + u16 regs[3] = { 0 }; + int i; + int ret; + + for (i = 0; i < chip->info->num_ports; ++i) { + unsigned int shift = (i % 4) * 4 + nibble_offset; + u8 data = entry->data[i]; + + regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift; + } + + for (i = 0; i < 3; ++i) { + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, + GLOBAL_VTU_DATA_0_3 + i, regs[i]); + if (ret < 0) + return ret; + } + + return 0; +} + +static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + return _mv88e6xxx_vtu_stu_data_write(chip, entry, 0); +} + +static int mv88e6xxx_stu_data_write(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + return _mv88e6xxx_vtu_stu_data_write(chip, entry, 2); +} + +static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_chip *chip, u16 vid) +{ + return _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, + vid & GLOBAL_VTU_VID_MASK); +} + +static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + struct mv88e6xxx_vtu_stu_entry next = { 0 }; + int ret; + + ret = _mv88e6xxx_vtu_wait(chip); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_VTU_GET_NEXT); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID); + if (ret < 0) + return ret; + + next.vid = ret & GLOBAL_VTU_VID_MASK; + next.valid = !!(ret & GLOBAL_VTU_VID_VALID); + + if (next.valid) { + ret = mv88e6xxx_vtu_data_read(chip, &next); + if (ret < 0) + return ret; + + if (mv88e6xxx_has_fid_reg(chip)) { + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, + GLOBAL_VTU_FID); + if (ret < 0) + return ret; + + next.fid = ret & GLOBAL_VTU_FID_MASK; + } else if (mv88e6xxx_num_databases(chip) == 256) { + /* VTU DBNum[7:4] are located in VTU Operation 11:8, and + * VTU DBNum[3:0] are located in VTU Operation 3:0 + */ + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, + GLOBAL_VTU_OP); + if (ret < 0) + return ret; + + next.fid = (ret & 0xf00) >> 4; + next.fid |= ret & 0xf; + } + + if (mv88e6xxx_has(chip, 
MV88E6XXX_FLAG_STU)) { + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, + GLOBAL_VTU_SID); + if (ret < 0) + return ret; + + next.sid = ret & GLOBAL_VTU_SID_MASK; + } + } + + *entry = next; + return 0; +} + +static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_vlan *vlan, + int (*cb)(struct switchdev_obj *obj)) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry next; + u16 pvid; + int err; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) + return -EOPNOTSUPP; + + mutex_lock(&chip->reg_lock); + + err = _mv88e6xxx_port_pvid_get(chip, port, &pvid); + if (err) + goto unlock; + + err = _mv88e6xxx_vtu_vid_write(chip, GLOBAL_VTU_VID_MASK); + if (err) + goto unlock; + + do { + err = _mv88e6xxx_vtu_getnext(chip, &next); + if (err) + break; + + if (!next.valid) + break; + + if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) + continue; + + /* reinit and dump this VLAN obj */ + vlan->vid_begin = next.vid; + vlan->vid_end = next.vid; + vlan->flags = 0; + + if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED) + vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; + + if (next.vid == pvid) + vlan->flags |= BRIDGE_VLAN_INFO_PVID; + + err = cb(&vlan->obj); + if (err) + break; + } while (next.vid < GLOBAL_VTU_VID_MASK); + +unlock: + mutex_unlock(&chip->reg_lock); + + return err; +} + +static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE; + u16 reg = 0; + int ret; + + ret = _mv88e6xxx_vtu_wait(chip); + if (ret < 0) + return ret; + + if (!entry->valid) + goto loadpurge; + + /* Write port member tags */ + ret = mv88e6xxx_vtu_data_write(chip, entry); + if (ret < 0) + return ret; + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_STU)) { + reg = entry->sid & GLOBAL_VTU_SID_MASK; + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID, + reg); + if (ret < 0) + return ret; + } + + if (mv88e6xxx_has_fid_reg(chip)) { + reg = entry->fid & GLOBAL_VTU_FID_MASK; + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_FID, + reg); + if (ret < 0) + return ret; + } else if (mv88e6xxx_num_databases(chip) == 256) { + /* VTU DBNum[7:4] are located in VTU Operation 11:8, and + * VTU DBNum[3:0] are located in VTU Operation 3:0 + */ + op |= (entry->fid & 0xf0) << 8; + op |= entry->fid & 0xf; + } + + reg = GLOBAL_VTU_VID_VALID; +loadpurge: + reg |= entry->vid & GLOBAL_VTU_VID_MASK; + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg); + if (ret < 0) + return ret; + + return _mv88e6xxx_vtu_cmd(chip, op); +} + +static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_chip *chip, u8 sid, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + struct mv88e6xxx_vtu_stu_entry next = { 0 }; + int ret; + + ret = _mv88e6xxx_vtu_wait(chip); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID, + sid & GLOBAL_VTU_SID_MASK); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_GET_NEXT); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_SID); + if (ret < 0) + return ret; + + next.sid = ret & GLOBAL_VTU_SID_MASK; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_VTU_VID); + if (ret < 0) + return ret; + + next.valid = !!(ret & GLOBAL_VTU_VID_VALID); + + if (next.valid) { + ret = mv88e6xxx_stu_data_read(chip, &next); + if (ret < 0) + return ret; + } + + *entry = next; + return 0; +} + +static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_chip *chip, + struct 
mv88e6xxx_vtu_stu_entry *entry) +{ + u16 reg = 0; + int ret; + + ret = _mv88e6xxx_vtu_wait(chip); + if (ret < 0) + return ret; + + if (!entry->valid) + goto loadpurge; + + /* Write port states */ + ret = mv88e6xxx_stu_data_write(chip, entry); + if (ret < 0) + return ret; + + reg = GLOBAL_VTU_VID_VALID; +loadpurge: + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_VID, reg); + if (ret < 0) + return ret; + + reg = entry->sid & GLOBAL_VTU_SID_MASK; + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_VTU_SID, reg); + if (ret < 0) + return ret; + + return _mv88e6xxx_vtu_cmd(chip, GLOBAL_VTU_OP_STU_LOAD_PURGE); +} + +static int _mv88e6xxx_port_fid(struct mv88e6xxx_chip *chip, int port, + u16 *new, u16 *old) +{ + struct dsa_switch *ds = chip->ds; + u16 upper_mask; + u16 fid; + int ret; + + if (mv88e6xxx_num_databases(chip) == 4096) + upper_mask = 0xff; + else if (mv88e6xxx_num_databases(chip) == 256) + upper_mask = 0xf; + else + return -EOPNOTSUPP; + + /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */ + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_BASE_VLAN); + if (ret < 0) + return ret; + + fid = (ret & PORT_BASE_VLAN_FID_3_0_MASK) >> 12; + + if (new) { + ret &= ~PORT_BASE_VLAN_FID_3_0_MASK; + ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK; + + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_BASE_VLAN, + ret); + if (ret < 0) + return ret; + } + + /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */ + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_1); + if (ret < 0) + return ret; + + fid |= (ret & upper_mask) << 4; + + if (new) { + ret &= ~upper_mask; + ret |= (*new >> 4) & upper_mask; + + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1, + ret); + if (ret < 0) + return ret; + + netdev_dbg(ds->ports[port].netdev, + "FID %d (was %d)\n", *new, fid); + } + + if (old) + *old = fid; + + return 0; +} + +static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_chip *chip, + int port, u16 *fid) +{ + return _mv88e6xxx_port_fid(chip, port, NULL, fid); +} + +static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_chip *chip, + int port, u16 fid) +{ + return _mv88e6xxx_port_fid(chip, port, &fid, NULL); +} + +static int _mv88e6xxx_fid_new(struct mv88e6xxx_chip *chip, u16 *fid) +{ + DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID); + struct mv88e6xxx_vtu_stu_entry vlan; + int i, err; + + bitmap_zero(fid_bitmap, MV88E6XXX_N_FID); + + /* Set every FID bit used by the (un)bridged ports */ + for (i = 0; i < chip->info->num_ports; ++i) { + err = _mv88e6xxx_port_fid_get(chip, i, fid); + if (err) + return err; + + set_bit(*fid, fid_bitmap); + } + + /* Set every FID bit used by the VLAN entries */ + err = _mv88e6xxx_vtu_vid_write(chip, GLOBAL_VTU_VID_MASK); + if (err) + return err; + + do { + err = _mv88e6xxx_vtu_getnext(chip, &vlan); + if (err) + return err; + + if (!vlan.valid) + break; + + set_bit(vlan.fid, fid_bitmap); + } while (vlan.vid < GLOBAL_VTU_VID_MASK); + + /* The reset value 0x000 is used to indicate that multiple address + * databases are not needed. Return the next positive available. 
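+	 * (Hence the bitmap search below starts at bit 1, so FID 0 is
+	 * never handed out.)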
+ */ + *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1); + if (unlikely(*fid >= mv88e6xxx_num_databases(chip))) + return -ENOSPC; + + /* Clear the database */ + return _mv88e6xxx_atu_flush(chip, *fid, true); +} + +static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid, + struct mv88e6xxx_vtu_stu_entry *entry) +{ + struct dsa_switch *ds = chip->ds; + struct mv88e6xxx_vtu_stu_entry vlan = { + .valid = true, + .vid = vid, + }; + int i, err; + + err = _mv88e6xxx_fid_new(chip, &vlan.fid); + if (err) + return err; + + /* exclude all ports except the CPU and DSA ports */ + for (i = 0; i < chip->info->num_ports; ++i) + vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i) + ? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED + : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; + + if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) || + mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) { + struct mv88e6xxx_vtu_stu_entry vstp; + + /* Adding a VTU entry requires a valid STU entry. As VSTP is not + * implemented, only one STU entry is needed to cover all VTU + * entries. Thus, validate the SID 0. + */ + vlan.sid = 0; + err = _mv88e6xxx_stu_getnext(chip, GLOBAL_VTU_SID_MASK, &vstp); + if (err) + return err; + + if (vstp.sid != vlan.sid || !vstp.valid) { + memset(&vstp, 0, sizeof(vstp)); + vstp.valid = true; + vstp.sid = vlan.sid; + + err = _mv88e6xxx_stu_loadpurge(chip, &vstp); + if (err) + return err; + } + } + + *entry = vlan; + return 0; +} + +static int _mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid, + struct mv88e6xxx_vtu_stu_entry *entry, bool creat) +{ + int err; + + if (!vid) + return -EINVAL; + + err = _mv88e6xxx_vtu_vid_write(chip, vid - 1); + if (err) + return err; + + err = _mv88e6xxx_vtu_getnext(chip, entry); + if (err) + return err; + + if (entry->vid != vid || !entry->valid) { + if (!creat) + return -EOPNOTSUPP; + /* -ENOENT would've been more appropriate, but switchdev expects + * -EOPNOTSUPP to inform bridge about an eventual software VLAN. 
+ */ + + err = _mv88e6xxx_vtu_new(chip, vid, entry); + } + + return err; +} + +static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, + u16 vid_begin, u16 vid_end) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry vlan; + int i, err; + + if (!vid_begin) + return -EOPNOTSUPP; + + mutex_lock(&chip->reg_lock); + + err = _mv88e6xxx_vtu_vid_write(chip, vid_begin - 1); + if (err) + goto unlock; + + do { + err = _mv88e6xxx_vtu_getnext(chip, &vlan); + if (err) + goto unlock; + + if (!vlan.valid) + break; + + if (vlan.vid > vid_end) + break; + + for (i = 0; i < chip->info->num_ports; ++i) { + if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i)) + continue; + + if (vlan.data[i] == + GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) + continue; + + if (chip->ports[i].bridge_dev == + chip->ports[port].bridge_dev) + break; /* same bridge, check next VLAN */ + + netdev_warn(ds->ports[port].netdev, + "hardware VLAN %d already used by %s\n", + vlan.vid, + netdev_name(chip->ports[i].bridge_dev)); + err = -EOPNOTSUPP; + goto unlock; + } + } while (vlan.vid < vid_end); + +unlock: + mutex_unlock(&chip->reg_lock); + + return err; +} + +static const char * const mv88e6xxx_port_8021q_mode_names[] = { + [PORT_CONTROL_2_8021Q_DISABLED] = "Disabled", + [PORT_CONTROL_2_8021Q_FALLBACK] = "Fallback", + [PORT_CONTROL_2_8021Q_CHECK] = "Check", + [PORT_CONTROL_2_8021Q_SECURE] = "Secure", +}; + +static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE : + PORT_CONTROL_2_8021Q_DISABLED; + int ret; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) + return -EOPNOTSUPP; + + mutex_lock(&chip->reg_lock); + + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_CONTROL_2); + if (ret < 0) + goto unlock; + + old = ret & PORT_CONTROL_2_8021Q_MASK; + + if (new != old) { + ret &= ~PORT_CONTROL_2_8021Q_MASK; + ret |= new & PORT_CONTROL_2_8021Q_MASK; + + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_2, + ret); + if (ret < 0) + goto unlock; + + netdev_dbg(ds->ports[port].netdev, "802.1Q Mode %s (was %s)\n", + mv88e6xxx_port_8021q_mode_names[new], + mv88e6xxx_port_8021q_mode_names[old]); + } + + ret = 0; +unlock: + mutex_unlock(&chip->reg_lock); + + return ret; +} + +static int +mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int err; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) + return -EOPNOTSUPP; + + /* If the requested port doesn't belong to the same bridge as the VLAN + * members, do not support it (yet) and fallback to software VLAN. + */ + err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin, + vlan->vid_end); + if (err) + return err; + + /* We don't need any dynamic resource from the kernel (yet), + * so skip the prepare phase. + */ + return 0; +} + +static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_chip *chip, int port, + u16 vid, bool untagged) +{ + struct mv88e6xxx_vtu_stu_entry vlan; + int err; + + err = _mv88e6xxx_vtu_get(chip, vid, &vlan, true); + if (err) + return err; + + vlan.data[port] = untagged ? 
+ GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED : + GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED; + + return _mv88e6xxx_vtu_loadpurge(chip, &vlan); +} + +static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + u16 vid; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) + return; + + mutex_lock(&chip->reg_lock); + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) + if (_mv88e6xxx_port_vlan_add(chip, port, vid, untagged)) + netdev_err(ds->ports[port].netdev, + "failed to add VLAN %d%c\n", + vid, untagged ? 'u' : 't'); + + if (pvid && _mv88e6xxx_port_pvid_set(chip, port, vlan->vid_end)) + netdev_err(ds->ports[port].netdev, "failed to set PVID %d\n", + vlan->vid_end); + + mutex_unlock(&chip->reg_lock); +} + +static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_chip *chip, + int port, u16 vid) +{ + struct dsa_switch *ds = chip->ds; + struct mv88e6xxx_vtu_stu_entry vlan; + int i, err; + + err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false); + if (err) + return err; + + /* Tell switchdev if this VLAN is handled in software */ + if (vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) + return -EOPNOTSUPP; + + vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; + + /* keep the VLAN unless all ports are excluded */ + vlan.valid = false; + for (i = 0; i < chip->info->num_ports; ++i) { + if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)) + continue; + + if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) { + vlan.valid = true; + break; + } + } + + err = _mv88e6xxx_vtu_loadpurge(chip, &vlan); + if (err) + return err; + + return _mv88e6xxx_atu_remove(chip, vlan.fid, port, false); +} + +static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + u16 pvid, vid; + int err = 0; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_VTU)) + return -EOPNOTSUPP; + + mutex_lock(&chip->reg_lock); + + err = _mv88e6xxx_port_pvid_get(chip, port, &pvid); + if (err) + goto unlock; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) { + err = _mv88e6xxx_port_vlan_del(chip, port, vid); + if (err) + goto unlock; + + if (vid == pvid) { + err = _mv88e6xxx_port_pvid_set(chip, port, 0); + if (err) + goto unlock; + } + } + +unlock: + mutex_unlock(&chip->reg_lock); + + return err; +} + +static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_chip *chip, + const unsigned char *addr) +{ + int i, ret; + + for (i = 0; i < 3; i++) { + ret = _mv88e6xxx_reg_write( + chip, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i, + (addr[i * 2] << 8) | addr[i * 2 + 1]); + if (ret < 0) + return ret; + } + + return 0; +} + +static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_chip *chip, + unsigned char *addr) +{ + int i, ret; + + for (i = 0; i < 3; i++) { + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, + GLOBAL_ATU_MAC_01 + i); + if (ret < 0) + return ret; + addr[i * 2] = ret >> 8; + addr[i * 2 + 1] = ret & 0xff; + } + + return 0; +} + +static int _mv88e6xxx_atu_load(struct mv88e6xxx_chip *chip, + struct mv88e6xxx_atu_entry *entry) +{ + int ret; + + ret = _mv88e6xxx_atu_wait(chip); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_atu_mac_write(chip, entry->mac); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_atu_data_write(chip, entry); + if (ret < 0) + return ret; + + return _mv88e6xxx_atu_cmd(chip, 
entry->fid, GLOBAL_ATU_OP_LOAD_DB); +} + +static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_chip *chip, int port, + const unsigned char *addr, u16 vid, + u8 state) +{ + struct mv88e6xxx_atu_entry entry = { 0 }; + struct mv88e6xxx_vtu_stu_entry vlan; + int err; + + /* Null VLAN ID corresponds to the port private database */ + if (vid == 0) + err = _mv88e6xxx_port_fid_get(chip, port, &vlan.fid); + else + err = _mv88e6xxx_vtu_get(chip, vid, &vlan, false); + if (err) + return err; + + entry.fid = vlan.fid; + entry.state = state; + ether_addr_copy(entry.mac, addr); + if (state != GLOBAL_ATU_DATA_STATE_UNUSED) { + entry.trunk = false; + entry.portv_trunkid = BIT(port); + } + + return _mv88e6xxx_atu_load(chip, &entry); +} + +static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + /* We don't need any dynamic resource from the kernel (yet), + * so skip the prepare phase. + */ + return 0; +} + +static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + int state = is_multicast_ether_addr(fdb->addr) ? + GLOBAL_ATU_DATA_STATE_MC_STATIC : + GLOBAL_ATU_DATA_STATE_UC_STATIC; + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + + mutex_lock(&chip->reg_lock); + if (_mv88e6xxx_port_fdb_load(chip, port, fdb->addr, fdb->vid, state)) + netdev_err(ds->ports[port].netdev, + "failed to load MAC address\n"); + mutex_unlock(&chip->reg_lock); +} + +static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_fdb *fdb) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int ret; + + mutex_lock(&chip->reg_lock); + ret = _mv88e6xxx_port_fdb_load(chip, port, fdb->addr, fdb->vid, + GLOBAL_ATU_DATA_STATE_UNUSED); + mutex_unlock(&chip->reg_lock); + + return ret; +} + +static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_chip *chip, u16 fid, + struct mv88e6xxx_atu_entry *entry) +{ + struct mv88e6xxx_atu_entry next = { 0 }; + int ret; + + next.fid = fid; + + ret = _mv88e6xxx_atu_wait(chip); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_atu_cmd(chip, fid, GLOBAL_ATU_OP_GET_NEXT_DB); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_atu_mac_read(chip, next.mac); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_ATU_DATA); + if (ret < 0) + return ret; + + next.state = ret & GLOBAL_ATU_DATA_STATE_MASK; + if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) { + unsigned int mask, shift; + + if (ret & GLOBAL_ATU_DATA_TRUNK) { + next.trunk = true; + mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK; + shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT; + } else { + next.trunk = false; + mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK; + shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT; + } + + next.portv_trunkid = (ret & mask) >> shift; + } + + *entry = next; + return 0; +} + +static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_chip *chip, + u16 fid, u16 vid, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)) +{ + struct mv88e6xxx_atu_entry addr = { + .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + }; + int err; + + err = _mv88e6xxx_atu_mac_write(chip, addr.mac); + if (err) + return err; + + do { + err = _mv88e6xxx_atu_getnext(chip, fid, &addr); + if (err) + break; + + if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED) + break; + + if (!addr.trunk && addr.portv_trunkid & BIT(port)) { + bool is_static = addr.state == + (is_multicast_ether_addr(addr.mac) ? 
+ GLOBAL_ATU_DATA_STATE_MC_STATIC : + GLOBAL_ATU_DATA_STATE_UC_STATIC); + + fdb->vid = vid; + ether_addr_copy(fdb->addr, addr.mac); + fdb->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE; + + err = cb(&fdb->obj); + if (err) + break; + } + } while (!is_broadcast_ether_addr(addr.mac)); + + return err; +} + +static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry vlan = { + .vid = GLOBAL_VTU_VID_MASK, /* all ones */ + }; + u16 fid; + int err; + + mutex_lock(&chip->reg_lock); + + /* Dump port's default Filtering Information Database (VLAN ID 0) */ + err = _mv88e6xxx_port_fid_get(chip, port, &fid); + if (err) + goto unlock; + + err = _mv88e6xxx_port_fdb_dump_one(chip, fid, 0, port, fdb, cb); + if (err) + goto unlock; + + /* Dump VLANs' Filtering Information Databases */ + err = _mv88e6xxx_vtu_vid_write(chip, vlan.vid); + if (err) + goto unlock; + + do { + err = _mv88e6xxx_vtu_getnext(chip, &vlan); + if (err) + break; + + if (!vlan.valid) + break; + + err = _mv88e6xxx_port_fdb_dump_one(chip, vlan.fid, vlan.vid, + port, fdb, cb); + if (err) + break; + } while (vlan.vid < GLOBAL_VTU_VID_MASK); + +unlock: + mutex_unlock(&chip->reg_lock); + + return err; +} + +static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, + struct net_device *bridge) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int i, err = 0; + + mutex_lock(&chip->reg_lock); + + /* Assign the bridge and remap each port's VLANTable */ + chip->ports[port].bridge_dev = bridge; + + for (i = 0; i < chip->info->num_ports; ++i) { + if (chip->ports[i].bridge_dev == bridge) { + err = _mv88e6xxx_port_based_vlan_map(chip, i); + if (err) + break; + } + } + + mutex_unlock(&chip->reg_lock); + + return err; +} + +static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + struct net_device *bridge = chip->ports[port].bridge_dev; + int i; + + mutex_lock(&chip->reg_lock); + + /* Unassign the bridge and remap each port's VLANTable */ + chip->ports[port].bridge_dev = NULL; + + for (i = 0; i < chip->info->num_ports; ++i) + if (i == port || chip->ports[i].bridge_dev == bridge) + if (_mv88e6xxx_port_based_vlan_map(chip, i)) + netdev_warn(ds->ports[i].netdev, + "failed to remap\n"); + + mutex_unlock(&chip->reg_lock); +} + +static int _mv88e6xxx_mdio_page_write(struct mv88e6xxx_chip *chip, + int port, int page, int reg, int val) +{ + int ret; + + ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page); + if (ret < 0) + goto restore_page_0; + + ret = mv88e6xxx_mdio_write_indirect(chip, port, reg, val); +restore_page_0: + mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0); + + return ret; +} + +static int _mv88e6xxx_mdio_page_read(struct mv88e6xxx_chip *chip, + int port, int page, int reg) +{ + int ret; + + ret = mv88e6xxx_mdio_write_indirect(chip, port, 0x16, page); + if (ret < 0) + goto restore_page_0; + + ret = mv88e6xxx_mdio_read_indirect(chip, port, reg); +restore_page_0: + mv88e6xxx_mdio_write_indirect(chip, port, 0x16, 0x0); + + return ret; +} + +static int mv88e6xxx_switch_reset(struct mv88e6xxx_chip *chip) +{ + bool ppu_active = mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE); + u16 is_reset = (ppu_active ? 0x8800 : 0xc800); + struct gpio_desc *gpiod = chip->reset; + unsigned long timeout; + int ret; + int i; + + /* Set all ports to the disabled state. 
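+ * The per-port STP state lives in the low two bits of PORT_CONTROL + * (PORT_CONTROL_STATE_MASK), so masking each port's value with 0xfffc + * forces PORT_CONTROL_STATE_DISABLED while leaving the remaining + * control bits untouched. 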
*/ + for (i = 0; i < chip->info->num_ports; i++) { + ret = _mv88e6xxx_reg_read(chip, REG_PORT(i), PORT_CONTROL); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_write(chip, REG_PORT(i), PORT_CONTROL, + ret & 0xfffc); + if (ret) + return ret; + } + + /* Wait for transmit queues to drain. */ + usleep_range(2000, 4000); + + /* If there is a gpio connected to the reset pin, toggle it */ + if (gpiod) { + gpiod_set_value_cansleep(gpiod, 1); + usleep_range(10000, 20000); + gpiod_set_value_cansleep(gpiod, 0); + usleep_range(10000, 20000); + } + + /* Reset the switch. Keep the PPU active if requested. The PPU + * needs to be active to support indirect phy register access + * through global registers 0x18 and 0x19. + */ + if (ppu_active) + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc000); + else + ret = _mv88e6xxx_reg_write(chip, REG_GLOBAL, 0x04, 0xc400); + if (ret) + return ret; + + /* Wait up to one second for reset to complete. */ + timeout = jiffies + 1 * HZ; + while (time_before(jiffies, timeout)) { + ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, 0x00); + if (ret < 0) + return ret; + + if ((ret & is_reset) == is_reset) + break; + usleep_range(1000, 2000); + } + if (time_after(jiffies, timeout)) + ret = -ETIMEDOUT; + else + ret = 0; + + return ret; +} + +static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_chip *chip) +{ + int ret; + + ret = _mv88e6xxx_mdio_page_read(chip, REG_FIBER_SERDES, + PAGE_FIBER_SERDES, MII_BMCR); + if (ret < 0) + return ret; + + if (ret & BMCR_PDOWN) { + ret &= ~BMCR_PDOWN; + ret = _mv88e6xxx_mdio_page_write(chip, REG_FIBER_SERDES, + PAGE_FIBER_SERDES, MII_BMCR, + ret); + } + + return ret; +} + +static int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, + int reg, u16 *val) +{ + int addr = chip->info->port_base_addr + port; + + if (port >= chip->info->num_ports) + return -EINVAL; + + return mv88e6xxx_read(chip, addr, reg, val); +} + +static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) +{ + struct dsa_switch *ds = chip->ds; + int ret; + u16 reg; + + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || + mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip) || + mv88e6xxx_6065_family(chip) || mv88e6xxx_6320_family(chip)) { + /* MAC Forcing register: don't force link, speed, + * duplex or flow control state to any particular + * values on physical ports, but force the CPU port + * and all DSA ports to their maximum bandwidth and + * full duplex. + */ + reg = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_PCS_CTRL); + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { + reg &= ~PORT_PCS_CTRL_UNFORCED; + reg |= PORT_PCS_CTRL_FORCE_LINK | + PORT_PCS_CTRL_LINK_UP | + PORT_PCS_CTRL_DUPLEX_FULL | + PORT_PCS_CTRL_FORCE_DUPLEX; + if (mv88e6xxx_6065_family(chip)) + reg |= PORT_PCS_CTRL_100; + else + reg |= PORT_PCS_CTRL_1000; + } else { + reg |= PORT_PCS_CTRL_UNFORCED; + } + + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_PCS_CTRL, reg); + if (ret) + return ret; + } + + /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock, + * disable Header mode, enable IGMP/MLD snooping, disable VLAN + * tunneling, determine priority by looking at 802.1p and IP + * priority fields (IP prio has precedence), and set STP state + * to Forwarding. + * + * If this is the CPU link, use DSA or EDSA tagging depending + * on which tagging mode was configured. + * + * If this is a link to another switch, use DSA tagging mode. 
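+ * + * (With plain DSA tagging the 4-byte Marvell tag takes the place of + * the Ethertype, so frames are no longer well-formed Ethernet; EDSA + * keeps them parseable by preceding the tag with the ETH_P_EDSA + * Ethertype that is programmed into PORT_ETH_TYPE further down.) 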
+ * + * If this is the upstream port for this switch, enable + * forwarding of unknown unicasts and multicasts. + */ + reg = 0; + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || + mv88e6xxx_6095_family(chip) || mv88e6xxx_6065_family(chip) || + mv88e6xxx_6185_family(chip) || mv88e6xxx_6320_family(chip)) + reg = PORT_CONTROL_IGMP_MLD_SNOOP | + PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP | + PORT_CONTROL_STATE_FORWARDING; + if (dsa_is_cpu_port(ds, port)) { + if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) + reg |= PORT_CONTROL_DSA_TAG; + if (mv88e6xxx_6352_family(chip) || + mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || + mv88e6xxx_6097_family(chip) || + mv88e6xxx_6320_family(chip)) { + reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA | + PORT_CONTROL_FORWARD_UNKNOWN | + PORT_CONTROL_FORWARD_UNKNOWN_MC; + } + + if (mv88e6xxx_6352_family(chip) || + mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || + mv88e6xxx_6097_family(chip) || + mv88e6xxx_6095_family(chip) || + mv88e6xxx_6065_family(chip) || + mv88e6xxx_6185_family(chip) || + mv88e6xxx_6320_family(chip)) { + reg |= PORT_CONTROL_EGRESS_ADD_TAG; + } + } + if (dsa_is_dsa_port(ds, port)) { + if (mv88e6xxx_6095_family(chip) || + mv88e6xxx_6185_family(chip)) + reg |= PORT_CONTROL_DSA_TAG; + if (mv88e6xxx_6352_family(chip) || + mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || + mv88e6xxx_6097_family(chip) || + mv88e6xxx_6320_family(chip)) { + reg |= PORT_CONTROL_FRAME_MODE_DSA; + } + + if (port == dsa_upstream_port(ds)) + reg |= PORT_CONTROL_FORWARD_UNKNOWN | + PORT_CONTROL_FORWARD_UNKNOWN_MC; + } + if (reg) { + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_CONTROL, reg); + if (ret) + return ret; + } + + /* If this port is connected to a SerDes, make sure the SerDes is not + * powered down. + */ + if (mv88e6xxx_6352_family(chip)) { + ret = _mv88e6xxx_reg_read(chip, REG_PORT(port), PORT_STATUS); + if (ret < 0) + return ret; + ret &= PORT_STATUS_CMODE_MASK; + if ((ret == PORT_STATUS_CMODE_100BASE_X) || + (ret == PORT_STATUS_CMODE_1000BASE_X) || + (ret == PORT_STATUS_CMODE_SGMII)) { + ret = mv88e6xxx_power_on_serdes(chip); + if (ret < 0) + return ret; + } + } + + /* Port Control 2: don't force a good FCS, set the maximum frame size to + * 10240 bytes, disable 802.1q tags checking, don't discard tagged or + * untagged frames on this port, do a destination address lookup on all + * received packets as usual, disable ARP mirroring and don't send a + * copy of all transmitted/received frames on this port to the CPU. 
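+ * + * 802.1Q is left disabled at this point; when VLAN filtering is + * requested on the port, the port_vlan_filtering operation switches + * the mode to PORT_CONTROL_2_8021Q_SECURE. 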
+ */ + reg = 0; + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || + mv88e6xxx_6095_family(chip) || mv88e6xxx_6320_family(chip) || + mv88e6xxx_6185_family(chip)) + reg = PORT_CONTROL_2_MAP_DA; + + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6320_family(chip)) + reg |= PORT_CONTROL_2_JUMBO_10240; + + if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) { + /* Set the upstream port this port should use */ + reg |= dsa_upstream_port(ds); + /* enable forwarding of unknown multicast addresses to + * the upstream port + */ + if (port == dsa_upstream_port(ds)) + reg |= PORT_CONTROL_2_FORWARD_UNKNOWN; + } + + reg |= PORT_CONTROL_2_8021Q_DISABLED; + + if (reg) { + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_CONTROL_2, reg); + if (ret) + return ret; + } + + /* Port Association Vector: when learning source addresses + * of packets, add the address to the address database using + * a port bitmap that has only the bit for this port set and + * the other bits clear. + */ + reg = 1 << port; + /* Disable learning for CPU port */ + if (dsa_is_cpu_port(ds, port)) + reg = 0; + + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_ASSOC_VECTOR, + reg); + if (ret) + return ret; + + /* Egress rate control 2: disable egress rate control. */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_RATE_CONTROL_2, + 0x0000); + if (ret) + return ret; + + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || + mv88e6xxx_6320_family(chip)) { + /* Do not limit the period of time that this port can + * be paused for by the remote end or the period of + * time that this port can pause the remote end. + */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_PAUSE_CTRL, 0x0000); + if (ret) + return ret; + + /* Port ATU control: disable limiting the number of + * address database entries that this port is allowed + * to use. + */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_ATU_CONTROL, 0x0000); + if (ret) + return ret; + + /* Priority Override: disable DA, SA and VTU priority + * override. + */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_PRI_OVERRIDE, 0x0000); + if (ret) + return ret; + + /* Port Ethertype: use the Ethertype DSA Ethertype + * value. + */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_ETH_TYPE, ETH_P_EDSA); + if (ret) + return ret; + /* Tag Remap: use an identity 802.1p prio -> switch + * prio mapping. + */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_TAG_REGMAP_0123, 0x3210); + if (ret) + return ret; + + /* Tag Remap 2: use an identity 802.1p prio -> switch + * prio mapping. + */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_TAG_REGMAP_4567, 0x7654); + if (ret) + return ret; + } + + /* Rate Control: disable ingress rate limiting. */ + if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) || + mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) || + mv88e6xxx_6320_family(chip)) { + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_RATE_CONTROL, 0x0001); + if (ret) + return ret; + } else if (mv88e6xxx_6185_family(chip) || mv88e6xxx_6095_family(chip)) { + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), + PORT_RATE_CONTROL, 0x0000); + if (ret) + return ret; + } + + /* Port Control 1: disable trunking, disable sending + * learning messages to this port. 
+ */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_CONTROL_1, + 0x0000); + if (ret) + return ret; + + /* Port based VLAN map: give each port the same default address + * database, and allow bidirectional communication between the + * CPU and DSA port(s), and the other ports. + */ + ret = _mv88e6xxx_port_fid_set(chip, port, 0); + if (ret) + return ret; + + ret = _mv88e6xxx_port_based_vlan_map(chip, port); + if (ret) + return ret; + + /* Default VLAN ID and priority: don't set a default VLAN + * ID, and set the default packet priority to zero. + */ + ret = _mv88e6xxx_reg_write(chip, REG_PORT(port), PORT_DEFAULT_VLAN, + 0x0000); + if (ret) + return ret; + + return 0; +} + +static int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr) +{ + int err; + + err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_01, + (addr[0] << 8) | addr[1]); + if (err) + return err; + + err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_23, + (addr[2] << 8) | addr[3]); + if (err) + return err; + + return mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_MAC_45, + (addr[4] << 8) | addr[5]); +} + +static int mv88e6xxx_g1_set_age_time(struct mv88e6xxx_chip *chip, + unsigned int msecs) +{ + const unsigned int coeff = chip->info->age_time_coeff; + const unsigned int min = 0x01 * coeff; + const unsigned int max = 0xff * coeff; + u8 age_time; + u16 val; + int err; + + if (msecs < min || msecs > max) + return -ERANGE; + + /* Round to nearest multiple of coeff */ + age_time = (msecs + coeff / 2) / coeff; + + err = mv88e6xxx_read(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, &val); + if (err) + return err; + + /* AgeTime is 11:4 bits */ + val &= ~0xff0; + val |= age_time << 4; + + return mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, val); +} + +static int mv88e6xxx_set_ageing_time(struct dsa_switch *ds, + unsigned int ageing_time) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int err; + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_g1_set_age_time(chip, ageing_time); + mutex_unlock(&chip->reg_lock); + + return err; +} + +static int mv88e6xxx_g1_setup(struct mv88e6xxx_chip *chip) +{ + struct dsa_switch *ds = chip->ds; + u32 upstream_port = dsa_upstream_port(ds); + u16 reg; + int err; + + /* Enable the PHY Polling Unit if present, don't discard any packets, + * and mask all interrupt sources. + */ + reg = 0; + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU) || + mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU_ACTIVE)) + reg |= GLOBAL_CONTROL_PPU_ENABLE; + + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL, reg); + if (err) + return err; + + /* Configure the upstream port, and configure it as the port to which + * ingress and egress and ARP monitor frames are to be sent. + */ + reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT | + upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | + upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT; + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, + reg); + if (err) + return err; + + /* Disable remote management, and set the switch's DSA device number. */ + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_CONTROL_2, + GLOBAL_CONTROL_2_MULTIPLE_CASCADE | + (ds->index & 0x1f)); + if (err) + return err; + + /* Clear all the VTU and STU entries */ + err = _mv88e6xxx_vtu_stu_flush(chip); + if (err < 0) + return err; + + /* Set the default address aging time to 5 minutes, and + * enable address learn messages to be sent to all message + * ports. 
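+ * + * With age_time_coeff at 15000 ms on these chips, the 300000 ms + * default below rounds to an AgeTime field of 300000 / 15000 = 20 + * (0x14), stored in bits 11:4 of GLOBAL_ATU_CONTROL. 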
+ */ + err = mv88e6xxx_write(chip, REG_GLOBAL, GLOBAL_ATU_CONTROL, + GLOBAL_ATU_CONTROL_LEARN2ALL); + if (err) + return err; + + err = mv88e6xxx_g1_set_age_time(chip, 300000); + if (err) + return err; + + /* Clear all ATU entries */ + err = _mv88e6xxx_atu_flush(chip, 0, true); + if (err) + return err; + + /* Configure the IP ToS mapping registers. */ + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000); + if (err) + return err; + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000); + if (err) + return err; + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555); + if (err) + return err; + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555); + if (err) + return err; + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa); + if (err) + return err; + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa); + if (err) + return err; + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff); + if (err) + return err; + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff); + if (err) + return err; + + /* Configure the IEEE 802.1p priority mapping register. */ + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41); + if (err) + return err; + + /* Clear the statistics counters for all ports */ + err = _mv88e6xxx_reg_write(chip, REG_GLOBAL, GLOBAL_STATS_OP, + GLOBAL_STATS_OP_FLUSH_ALL); + if (err) + return err; + + /* Wait for the flush to complete. */ + err = _mv88e6xxx_stats_wait(chip); + if (err) + return err; + + return 0; +} + +static int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, + int target, int port) +{ + u16 val = (target << 8) | (port & 0xf); + + return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING, val); +} + +static int mv88e6xxx_g2_set_device_mapping(struct mv88e6xxx_chip *chip) +{ + int target, port; + int err; + + /* Initialize the routing port to the 32 possible target devices */ + for (target = 0; target < 32; ++target) { + port = 0xf; + + if (target < DSA_MAX_SWITCHES) { + port = chip->ds->rtable[target]; + if (port == DSA_RTABLE_NONE) + port = 0xf; + } + + err = mv88e6xxx_g2_device_mapping_write(chip, target, port); + if (err) + break; + } + + return err; +} + +static int mv88e6xxx_g2_trunk_mask_write(struct mv88e6xxx_chip *chip, int num, + bool hask, u16 mask) +{ + const u16 port_mask = BIT(chip->info->num_ports) - 1; + u16 val = (num << 12) | (mask & port_mask); + + if (hask) + val |= GLOBAL2_TRUNK_MASK_HASK; + + return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_TRUNK_MASK, val); +} + +static int mv88e6xxx_g2_trunk_mapping_write(struct mv88e6xxx_chip *chip, int id, + u16 map) +{ + const u16 port_mask = BIT(chip->info->num_ports) - 1; + u16 val = (id << 11) | (map & port_mask); + + return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING, val); +} + +static int mv88e6xxx_g2_clear_trunk(struct mv88e6xxx_chip *chip) +{ + const u16 port_mask = BIT(chip->info->num_ports) - 1; + int i, err; + + /* Clear all eight possible Trunk Mask vectors */ + for (i = 0; i < 8; ++i) { + err = mv88e6xxx_g2_trunk_mask_write(chip, i, false, port_mask); + if (err) + return err; + } + + /* Clear all sixteen possible Trunk ID routing vectors */ + for (i = 0; i < 16; ++i) { + err = mv88e6xxx_g2_trunk_mapping_write(chip, i, 0); + if (err) + return err; + } + + return 0; +} + +static int mv88e6xxx_g2_clear_irl(struct mv88e6xxx_chip *chip) +{ + int port, err; + + /* Init all Ingress Rate Limit resources of all ports */ + 
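/* One pass per port: the port number rides at bit 8 of the IRL + * command word (port << 8), next to the InitAll opcode. */ + 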
for (port = 0; port < chip->info->num_ports; ++port) { + /* XXX newer chips (like 88E6390) have different 2-bit ops */ + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_IRL_CMD, + GLOBAL2_IRL_CMD_OP_INIT_ALL | + (port << 8)); + if (err) + break; + + /* Wait for the operation to complete */ + err = _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_IRL_CMD, + GLOBAL2_IRL_CMD_BUSY); + if (err) + break; + } + + return err; +} + +/* Indirect write to the Switch MAC/WoL/WoF register */ +static int mv88e6xxx_g2_switch_mac_write(struct mv88e6xxx_chip *chip, + unsigned int pointer, u8 data) +{ + u16 val = (pointer << 8) | data; + + return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MAC, val); +} + +static int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr) +{ + int i, err; + + for (i = 0; i < 6; i++) { + err = mv88e6xxx_g2_switch_mac_write(chip, i, addr[i]); + if (err) + break; + } + + return err; +} + +static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer, + u8 data) +{ + u16 val = (pointer << 8) | (data & 0x7); + + return mv88e6xxx_update(chip, REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE, val); +} + +static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip) +{ + int i, err; + + /* Clear all sixteen possible Priority Override entries */ + for (i = 0; i < 16; i++) { + err = mv88e6xxx_g2_pot_write(chip, i, 0); + if (err) + break; + } + + return err; +} + +static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip) +{ + return _mv88e6xxx_wait(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, + GLOBAL2_EEPROM_CMD_BUSY | + GLOBAL2_EEPROM_CMD_RUNNING); +} + +static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd) +{ + int err; + + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, cmd); + if (err) + return err; + + return mv88e6xxx_g2_eeprom_wait(chip); +} + +static int mv88e6xxx_g2_eeprom_read16(struct mv88e6xxx_chip *chip, + u8 addr, u16 *data) +{ + u16 cmd = GLOBAL2_EEPROM_CMD_OP_READ | addr; + int err; + + err = mv88e6xxx_g2_eeprom_wait(chip); + if (err) + return err; + + err = mv88e6xxx_g2_eeprom_cmd(chip, cmd); + if (err) + return err; + + return mv88e6xxx_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data); +} + +static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip, + u8 addr, u16 data) +{ + u16 cmd = GLOBAL2_EEPROM_CMD_OP_WRITE | addr; + int err; + + err = mv88e6xxx_g2_eeprom_wait(chip); + if (err) + return err; + + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data); + if (err) + return err; + + return mv88e6xxx_g2_eeprom_cmd(chip, cmd); +} + +static int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) +{ + u16 reg; + int err; + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X)) { + /* Consider the frames with reserved multicast destination + * addresses matching 01:80:c2:00:00:2x as MGMT. + */ + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, + 0xffff); + if (err) + return err; + } + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X)) { + /* Consider the frames with reserved multicast destination + * addresses matching 01:80:c2:00:00:0x as MGMT. + */ + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, + 0xffff); + if (err) + return err; + } + + /* Ignore removed tag data on doubly tagged packets, disable + * flow control messages, force flow control priority to the + * highest, and send all special multicast frames to the CPU + * port at the highest priority. 
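+ * + * In the value written below, (0x7 << 4) is the forced flow control + * priority field paired with GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI, + * and the low 0x7 added together with GLOBAL2_SWITCH_MGMT_RSVD2CPU is + * the priority given to those CPU-bound management frames. 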
+ */ + reg = GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI | (0x7 << 4); + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X) || + mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X)) + reg |= GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x7; + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT, reg); + if (err) + return err; + + /* Program the DSA routing table. */ + err = mv88e6xxx_g2_set_device_mapping(chip); + if (err) + return err; + + /* Clear all trunk masks and mapping. */ + err = mv88e6xxx_g2_clear_trunk(chip); + if (err) + return err; + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_IRL)) { + /* Disable ingress rate limiting by resetting all per port + * ingress rate limit resources to their initial state. + */ + err = mv88e6xxx_g2_clear_irl(chip); + if (err) + return err; + } + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_PVT)) { + /* Initialize Cross-chip Port VLAN Table to reset defaults */ + err = mv88e6xxx_write(chip, REG_GLOBAL2, GLOBAL2_PVT_ADDR, + GLOBAL2_PVT_ADDR_OP_INIT_ONES); + if (err) + return err; + } + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_POT)) { + /* Clear the priority override table. */ + err = mv88e6xxx_g2_clear_pot(chip); + if (err) + return err; + } + + return 0; +} + +static int mv88e6xxx_setup(struct dsa_switch *ds) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int err; + int i; + + chip->ds = ds; + ds->slave_mii_bus = chip->mdio_bus; + + mutex_lock(&chip->reg_lock); + + err = mv88e6xxx_switch_reset(chip); + if (err) + goto unlock; + + /* Setup Switch Port Registers */ + for (i = 0; i < chip->info->num_ports; i++) { + err = mv88e6xxx_setup_port(chip, i); + if (err) + goto unlock; + } + + /* Setup Switch Global 1 Registers */ + err = mv88e6xxx_g1_setup(chip); + if (err) + goto unlock; + + /* Setup Switch Global 2 Registers */ + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_GLOBAL2)) { + err = mv88e6xxx_g2_setup(chip); + if (err) + goto unlock; + } + +unlock: + mutex_unlock(&chip->reg_lock); + + return err; +} + +static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int err; + + mutex_lock(&chip->reg_lock); + + /* Has an indirect Switch MAC/WoL/WoF register in Global 2? 
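+ * If so, the address is programmed one byte at a time through the + * pointer/data pairs of GLOBAL2_SWITCH_MAC; otherwise it is written + * into the three 16-bit GLOBAL_MAC_01/23/45 registers in Global 1. 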
*/ + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_SWITCH_MAC)) + err = mv88e6xxx_g2_set_switch_mac(chip, addr); + else + err = mv88e6xxx_g1_set_switch_mac(chip, addr); + + mutex_unlock(&chip->reg_lock); + + return err; +} + +#ifdef CONFIG_NET_DSA_HWMON +static int mv88e6xxx_mdio_page_read(struct dsa_switch *ds, int port, int page, + int reg) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int ret; + + mutex_lock(&chip->reg_lock); + ret = _mv88e6xxx_mdio_page_read(chip, port, page, reg); + mutex_unlock(&chip->reg_lock); + + return ret; +} + +static int mv88e6xxx_mdio_page_write(struct dsa_switch *ds, int port, int page, + int reg, int val) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int ret; + + mutex_lock(&chip->reg_lock); + ret = _mv88e6xxx_mdio_page_write(chip, port, page, reg, val); + mutex_unlock(&chip->reg_lock); + + return ret; +} +#endif + +static int mv88e6xxx_port_to_mdio_addr(struct mv88e6xxx_chip *chip, int port) +{ + if (port >= 0 && port < chip->info->num_ports) + return port; + return -EINVAL; +} + +static int mv88e6xxx_mdio_read(struct mii_bus *bus, int port, int regnum) +{ + struct mv88e6xxx_chip *chip = bus->priv; + int addr = mv88e6xxx_port_to_mdio_addr(chip, port); + int ret; + + if (addr < 0) + return 0xffff; + + mutex_lock(&chip->reg_lock); + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) + ret = mv88e6xxx_mdio_read_ppu(chip, addr, regnum); + else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY)) + ret = mv88e6xxx_mdio_read_indirect(chip, addr, regnum); + else + ret = mv88e6xxx_mdio_read_direct(chip, addr, regnum); + + mutex_unlock(&chip->reg_lock); + return ret; +} + +static int mv88e6xxx_mdio_write(struct mii_bus *bus, int port, int regnum, + u16 val) +{ + struct mv88e6xxx_chip *chip = bus->priv; + int addr = mv88e6xxx_port_to_mdio_addr(chip, port); + int ret; + + if (addr < 0) + return 0xffff; + + mutex_lock(&chip->reg_lock); + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) + ret = mv88e6xxx_mdio_write_ppu(chip, addr, regnum, val); + else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_SMI_PHY)) + ret = mv88e6xxx_mdio_write_indirect(chip, addr, regnum, val); + else + ret = mv88e6xxx_mdio_write_direct(chip, addr, regnum, val); + + mutex_unlock(&chip->reg_lock); + return ret; +} + +static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, + struct device_node *np) +{ + static int index; + struct mii_bus *bus; + int err; + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_PPU)) + mv88e6xxx_ppu_state_init(chip); + + if (np) + chip->mdio_np = of_get_child_by_name(np, "mdio"); + + bus = devm_mdiobus_alloc(chip->dev); + if (!bus) + return -ENOMEM; + + bus->priv = (void *)chip; + if (np) { + bus->name = np->full_name; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", np->full_name); + } else { + bus->name = "mv88e6xxx SMI"; + snprintf(bus->id, MII_BUS_ID_SIZE, "mv88e6xxx-%d", index++); + } + + bus->read = mv88e6xxx_mdio_read; + bus->write = mv88e6xxx_mdio_write; + bus->parent = chip->dev; + + if (chip->mdio_np) + err = of_mdiobus_register(bus, chip->mdio_np); + else + err = mdiobus_register(bus); + if (err) { + dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err); + goto out; + } + chip->mdio_bus = bus; + + return 0; + +out: + if (chip->mdio_np) + of_node_put(chip->mdio_np); + + return err; +} + +static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_chip *chip) + +{ + struct mii_bus *bus = chip->mdio_bus; + + mdiobus_unregister(bus); + + if (chip->mdio_np) + of_node_put(chip->mdio_np); +} + +#ifdef CONFIG_NET_DSA_HWMON + +static int mv88e61xx_get_temp(struct dsa_switch *ds, 
int *temp) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int ret; + int val; + + *temp = 0; + + mutex_lock(&chip->reg_lock); + + ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x6); + if (ret < 0) + goto error; + + /* Enable temperature sensor */ + ret = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a); + if (ret < 0) + goto error; + + ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, ret | (1 << 5)); + if (ret < 0) + goto error; + + /* Wait for temperature to stabilize */ + usleep_range(10000, 12000); + + val = mv88e6xxx_mdio_read_direct(chip, 0x0, 0x1a); + if (val < 0) { + ret = val; + goto error; + } + + /* Disable temperature sensor */ + ret = mv88e6xxx_mdio_write_direct(chip, 0x0, 0x1a, val & ~(1 << 5)); + if (ret < 0) + goto error; + + *temp = ((val & 0x1f) - 5) * 5; + +error: + mv88e6xxx_mdio_write_direct(chip, 0x0, 0x16, 0x0); + mutex_unlock(&chip->reg_lock); + return ret; +} + +static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; + int ret; + + *temp = 0; + + ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 27); + if (ret < 0) + return ret; + + *temp = (ret & 0xff) - 25; + + return 0; +} + +static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP)) + return -EOPNOTSUPP; + + if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip)) + return mv88e63xx_get_temp(ds, temp); + + return mv88e61xx_get_temp(ds, temp); +} + +static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; + int ret; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) + return -EOPNOTSUPP; + + *temp = 0; + + ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26); + if (ret < 0) + return ret; + + *temp = (((ret >> 8) & 0x1f) * 5) - 25; + + return 0; +} + +static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int phy = mv88e6xxx_6320_family(chip) ? 3 : 0; + int ret; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) + return -EOPNOTSUPP; + + ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26); + if (ret < 0) + return ret; + temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f); + return mv88e6xxx_mdio_page_write(ds, phy, 6, 26, + (ret & 0xe0ff) | (temp << 8)); +} + +static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int phy = mv88e6xxx_6320_family(chip) ? 
3 : 0; + int ret; + + if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT)) + return -EOPNOTSUPP; + + *alarm = false; + + ret = mv88e6xxx_mdio_page_read(ds, phy, 6, 26); + if (ret < 0) + return ret; + + *alarm = !!(ret & 0x40); + + return 0; +} +#endif /* CONFIG_NET_DSA_HWMON */ + +static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + + return chip->eeprom_len; +} + +static int mv88e6xxx_get_eeprom16(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, u8 *data) +{ + unsigned int offset = eeprom->offset; + unsigned int len = eeprom->len; + u16 val; + int err; + + eeprom->len = 0; + + if (offset & 1) { + err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val); + if (err) + return err; + + *data++ = (val >> 8) & 0xff; + + offset++; + len--; + eeprom->len++; + } + + while (len >= 2) { + err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val); + if (err) + return err; + + *data++ = val & 0xff; + *data++ = (val >> 8) & 0xff; + + offset += 2; + len -= 2; + eeprom->len += 2; + } + + if (len) { + err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val); + if (err) + return err; + + *data++ = val & 0xff; + + offset++; + len--; + eeprom->len++; + } + + return 0; +} + +static int mv88e6xxx_get_eeprom(struct dsa_switch *ds, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int err; + + mutex_lock(&chip->reg_lock); + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16)) + err = mv88e6xxx_get_eeprom16(chip, eeprom, data); + else + err = -EOPNOTSUPP; + + mutex_unlock(&chip->reg_lock); + + if (err) + return err; + + eeprom->magic = 0xc3ec4951; + + return 0; +} + +static int mv88e6xxx_set_eeprom16(struct mv88e6xxx_chip *chip, + struct ethtool_eeprom *eeprom, u8 *data) +{ + unsigned int offset = eeprom->offset; + unsigned int len = eeprom->len; + u16 val; + int err; + + /* Ensure the RO WriteEn bit is set */ + err = mv88e6xxx_read(chip, REG_GLOBAL2, GLOBAL2_EEPROM_CMD, &val); + if (err) + return err; + + if (!(val & GLOBAL2_EEPROM_CMD_WRITE_EN)) + return -EROFS; + + eeprom->len = 0; + + if (offset & 1) { + err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val); + if (err) + return err; + + val = (*data++ << 8) | (val & 0xff); + + err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val); + if (err) + return err; + + offset++; + len--; + eeprom->len++; + } + + while (len >= 2) { + val = *data++; + val |= *data++ << 8; + + err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val); + if (err) + return err; + + offset += 2; + len -= 2; + eeprom->len += 2; + } + + if (len) { + err = mv88e6xxx_g2_eeprom_read16(chip, offset >> 1, &val); + if (err) + return err; + + val = (val & 0xff00) | *data++; + + err = mv88e6xxx_g2_eeprom_write16(chip, offset >> 1, val); + if (err) + return err; + + offset++; + len--; + eeprom->len++; + } + + return 0; +} + +static int mv88e6xxx_set_eeprom(struct dsa_switch *ds, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + int err; + + if (eeprom->magic != 0xc3ec4951) + return -EINVAL; + + mutex_lock(&chip->reg_lock); + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16)) + err = mv88e6xxx_set_eeprom16(chip, eeprom, data); + else + err = -EOPNOTSUPP; + + mutex_unlock(&chip->reg_lock); + + return err; +} + +static const struct mv88e6xxx_info mv88e6xxx_table[] = { + [MV88E6085] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6085, + .family = MV88E6XXX_FAMILY_6097, + .name = "Marvell 88E6085", + .num_databases = 4096, + .num_ports = 
10, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6097, + }, + + [MV88E6095] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6095, + .family = MV88E6XXX_FAMILY_6095, + .name = "Marvell 88E6095/88E6095F", + .num_databases = 256, + .num_ports = 11, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6095, + }, + + [MV88E6123] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6123, + .family = MV88E6XXX_FAMILY_6165, + .name = "Marvell 88E6123", + .num_databases = 4096, + .num_ports = 3, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6165, + }, + + [MV88E6131] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6131, + .family = MV88E6XXX_FAMILY_6185, + .name = "Marvell 88E6131", + .num_databases = 256, + .num_ports = 8, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6185, + }, + + [MV88E6161] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6161, + .family = MV88E6XXX_FAMILY_6165, + .name = "Marvell 88E6161", + .num_databases = 4096, + .num_ports = 6, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6165, + }, + + [MV88E6165] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6165, + .family = MV88E6XXX_FAMILY_6165, + .name = "Marvell 88E6165", + .num_databases = 4096, + .num_ports = 6, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6165, + }, + + [MV88E6171] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6171, + .family = MV88E6XXX_FAMILY_6351, + .name = "Marvell 88E6171", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6351, + }, + + [MV88E6172] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6172, + .family = MV88E6XXX_FAMILY_6352, + .name = "Marvell 88E6172", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6352, + }, + + [MV88E6175] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6175, + .family = MV88E6XXX_FAMILY_6351, + .name = "Marvell 88E6175", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6351, + }, + + [MV88E6176] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6176, + .family = MV88E6XXX_FAMILY_6352, + .name = "Marvell 88E6176", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6352, + }, + + [MV88E6185] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6185, + .family = MV88E6XXX_FAMILY_6185, + .name = "Marvell 88E6185", + .num_databases = 256, + .num_ports = 10, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6185, + }, + + [MV88E6240] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6240, + .family = MV88E6XXX_FAMILY_6352, + .name = "Marvell 88E6240", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6352, + }, + + [MV88E6320] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6320, + .family = MV88E6XXX_FAMILY_6320, + .name = "Marvell 88E6320", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6320, + }, + + [MV88E6321] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6321, + .family = MV88E6XXX_FAMILY_6320, + .name = "Marvell 88E6321", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 
15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6320, + }, + + [MV88E6350] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6350, + .family = MV88E6XXX_FAMILY_6351, + .name = "Marvell 88E6350", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6351, + }, + + [MV88E6351] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6351, + .family = MV88E6XXX_FAMILY_6351, + .name = "Marvell 88E6351", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6351, + }, + + [MV88E6352] = { + .prod_num = PORT_SWITCH_ID_PROD_NUM_6352, + .family = MV88E6XXX_FAMILY_6352, + .name = "Marvell 88E6352", + .num_databases = 4096, + .num_ports = 7, + .port_base_addr = 0x10, + .age_time_coeff = 15000, + .flags = MV88E6XXX_FLAGS_FAMILY_6352, + }, +}; + +static const struct mv88e6xxx_info *mv88e6xxx_lookup_info(unsigned int prod_num) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mv88e6xxx_table); ++i) + if (mv88e6xxx_table[i].prod_num == prod_num) + return &mv88e6xxx_table[i]; + + return NULL; +} + +static int mv88e6xxx_detect(struct mv88e6xxx_chip *chip) +{ + const struct mv88e6xxx_info *info; + unsigned int prod_num, rev; + u16 id; + int err; + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_port_read(chip, 0, PORT_SWITCH_ID, &id); + mutex_unlock(&chip->reg_lock); + if (err) + return err; + + prod_num = (id & 0xfff0) >> 4; + rev = id & 0x000f; + + info = mv88e6xxx_lookup_info(prod_num); + if (!info) + return -ENODEV; + + /* Update the compatible info with the probed one */ + chip->info = info; + + dev_info(chip->dev, "switch 0x%x detected: %s, revision %u\n", + chip->info->prod_num, chip->info->name, rev); + + return 0; +} + +static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev) +{ + struct mv88e6xxx_chip *chip; + + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return NULL; + + chip->dev = dev; + + mutex_init(&chip->reg_lock); + + return chip; +} + +static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, + struct mii_bus *bus, int sw_addr) +{ + /* ADDR[0] pin is unavailable externally and considered zero */ + if (sw_addr & 0x1) + return -EINVAL; + + if (sw_addr == 0) + chip->smi_ops = &mv88e6xxx_smi_single_chip_ops; + else if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_MULTI_CHIP)) + chip->smi_ops = &mv88e6xxx_smi_multi_chip_ops; + else + return -EINVAL; + + chip->bus = bus; + chip->sw_addr = sw_addr; + + return 0; +} + +static const char *mv88e6xxx_drv_probe(struct device *dsa_dev, + struct device *host_dev, int sw_addr, + void **priv) +{ + struct mv88e6xxx_chip *chip; + struct mii_bus *bus; + int err; + + bus = dsa_host_dev_to_mii_bus(host_dev); + if (!bus) + return NULL; + + chip = mv88e6xxx_alloc_chip(dsa_dev); + if (!chip) + return NULL; + + /* Legacy SMI probing will only support chips similar to 88E6085 */ + chip->info = &mv88e6xxx_table[MV88E6085]; + + err = mv88e6xxx_smi_init(chip, bus, sw_addr); + if (err) + goto free; + + err = mv88e6xxx_detect(chip); + if (err) + goto free; + + err = mv88e6xxx_mdio_register(chip, NULL); + if (err) + goto free; + + *priv = chip; + + return chip->info->name; +free: + devm_kfree(dsa_dev, chip); + + return NULL; +} + +static struct dsa_switch_driver mv88e6xxx_switch_driver = { + .tag_protocol = DSA_TAG_PROTO_EDSA, + .probe = mv88e6xxx_drv_probe, + .setup = mv88e6xxx_setup, + .set_addr = mv88e6xxx_set_addr, + .adjust_link = mv88e6xxx_adjust_link, + .get_strings = mv88e6xxx_get_strings, + .get_ethtool_stats = 
mv88e6xxx_get_ethtool_stats, + .get_sset_count = mv88e6xxx_get_sset_count, + .set_eee = mv88e6xxx_set_eee, + .get_eee = mv88e6xxx_get_eee, +#ifdef CONFIG_NET_DSA_HWMON + .get_temp = mv88e6xxx_get_temp, + .get_temp_limit = mv88e6xxx_get_temp_limit, + .set_temp_limit = mv88e6xxx_set_temp_limit, + .get_temp_alarm = mv88e6xxx_get_temp_alarm, +#endif + .get_eeprom_len = mv88e6xxx_get_eeprom_len, + .get_eeprom = mv88e6xxx_get_eeprom, + .set_eeprom = mv88e6xxx_set_eeprom, + .get_regs_len = mv88e6xxx_get_regs_len, + .get_regs = mv88e6xxx_get_regs, + .set_ageing_time = mv88e6xxx_set_ageing_time, + .port_bridge_join = mv88e6xxx_port_bridge_join, + .port_bridge_leave = mv88e6xxx_port_bridge_leave, + .port_stp_state_set = mv88e6xxx_port_stp_state_set, + .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, + .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, + .port_vlan_add = mv88e6xxx_port_vlan_add, + .port_vlan_del = mv88e6xxx_port_vlan_del, + .port_vlan_dump = mv88e6xxx_port_vlan_dump, + .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, + .port_fdb_add = mv88e6xxx_port_fdb_add, + .port_fdb_del = mv88e6xxx_port_fdb_del, + .port_fdb_dump = mv88e6xxx_port_fdb_dump, +}; + +static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip, + struct device_node *np) +{ + struct device *dev = chip->dev; + struct dsa_switch *ds; + + ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL); + if (!ds) + return -ENOMEM; + + ds->dev = dev; + ds->priv = chip; + ds->drv = &mv88e6xxx_switch_driver; + + dev_set_drvdata(dev, ds); + + return dsa_register_switch(ds, np); +} + +static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip) +{ + dsa_unregister_switch(chip->ds); +} + +static int mv88e6xxx_probe(struct mdio_device *mdiodev) +{ + struct device *dev = &mdiodev->dev; + struct device_node *np = dev->of_node; + const struct mv88e6xxx_info *compat_info; + struct mv88e6xxx_chip *chip; + u32 eeprom_len; + int err; + + compat_info = of_device_get_match_data(dev); + if (!compat_info) + return -EINVAL; + + chip = mv88e6xxx_alloc_chip(dev); + if (!chip) + return -ENOMEM; + + chip->info = compat_info; + + err = mv88e6xxx_smi_init(chip, mdiodev->bus, mdiodev->addr); + if (err) + return err; + + err = mv88e6xxx_detect(chip); + if (err) + return err; + + chip->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS); + if (IS_ERR(chip->reset)) + return PTR_ERR(chip->reset); + + if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_EEPROM16) && + !of_property_read_u32(np, "eeprom-length", &eeprom_len)) + chip->eeprom_len = eeprom_len; + + err = mv88e6xxx_mdio_register(chip, np); + if (err) + return err; + + err = mv88e6xxx_register_switch(chip, np); + if (err) { + mv88e6xxx_mdio_unregister(chip); + return err; + } + + return 0; +} + +static void mv88e6xxx_remove(struct mdio_device *mdiodev) +{ + struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev); + struct mv88e6xxx_chip *chip = ds_to_priv(ds); + + mv88e6xxx_unregister_switch(chip); + mv88e6xxx_mdio_unregister(chip); +} + +static const struct of_device_id mv88e6xxx_of_match[] = { + { + .compatible = "marvell,mv88e6085", + .data = &mv88e6xxx_table[MV88E6085], + }, + { /* sentinel */ }, +}; + +MODULE_DEVICE_TABLE(of, mv88e6xxx_of_match); + +static struct mdio_driver mv88e6xxx_driver = { + .probe = mv88e6xxx_probe, + .remove = mv88e6xxx_remove, + .mdiodrv.driver = { + .name = "mv88e6085", + .of_match_table = mv88e6xxx_of_match, + }, +}; + +static int __init mv88e6xxx_init(void) +{ + register_switch_driver(&mv88e6xxx_switch_driver); + return mdio_driver_register(&mv88e6xxx_driver); +} 
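+ +/* Note that the driver registers with two frameworks: the legacy DSA + * probe path (register_switch_driver) and the MDIO device driver model + * (mdio_driver_register) used with device tree. Both paths converge on + * the same mv88e6xxx_chip code. + */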
+module_init(mv88e6xxx_init); + +static void __exit mv88e6xxx_cleanup(void) +{ + mdio_driver_unregister(&mv88e6xxx_driver); + unregister_switch_driver(&mv88e6xxx_switch_driver); +} +module_exit(mv88e6xxx_cleanup); + +MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>"); +MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h new file mode 100644 index 000000000..48d6ea77f --- /dev/null +++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h @@ -0,0 +1,678 @@ +/* + * Marvell 88e6xxx common definitions + * + * Copyright (c) 2008 Marvell Semiconductor + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __MV88E6XXX_H +#define __MV88E6XXX_H + +#include <linux/if_vlan.h> +#include <linux/gpio/consumer.h> + +#ifndef UINT64_MAX +#define UINT64_MAX (u64)(~((u64)0)) +#endif + +#define SMI_CMD 0x00 +#define SMI_CMD_BUSY BIT(15) +#define SMI_CMD_CLAUSE_22 BIT(12) +#define SMI_CMD_OP_22_WRITE ((1 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22) +#define SMI_CMD_OP_22_READ ((2 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22) +#define SMI_CMD_OP_45_WRITE_ADDR ((0 << 10) | SMI_CMD_BUSY) +#define SMI_CMD_OP_45_WRITE_DATA ((1 << 10) | SMI_CMD_BUSY) +#define SMI_CMD_OP_45_READ_DATA ((2 << 10) | SMI_CMD_BUSY) +#define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY) +#define SMI_DATA 0x01 + +/* Fiber/SERDES Registers are located at SMI address F, page 1 */ +#define REG_FIBER_SERDES 0x0f +#define PAGE_FIBER_SERDES 0x01 + +#define REG_PORT(p) (0x10 + (p)) +#define PORT_STATUS 0x00 +#define PORT_STATUS_PAUSE_EN BIT(15) +#define PORT_STATUS_MY_PAUSE BIT(14) +#define PORT_STATUS_HD_FLOW BIT(13) +#define PORT_STATUS_PHY_DETECT BIT(12) +#define PORT_STATUS_LINK BIT(11) +#define PORT_STATUS_DUPLEX BIT(10) +#define PORT_STATUS_SPEED_MASK 0x0300 +#define PORT_STATUS_SPEED_10 0x0000 +#define PORT_STATUS_SPEED_100 0x0100 +#define PORT_STATUS_SPEED_1000 0x0200 +#define PORT_STATUS_EEE BIT(6) /* 6352 */ +#define PORT_STATUS_AM_DIS BIT(6) /* 6165 */ +#define PORT_STATUS_MGMII BIT(6) /* 6185 */ +#define PORT_STATUS_TX_PAUSED BIT(5) +#define PORT_STATUS_FLOW_CTRL BIT(4) +#define PORT_STATUS_CMODE_MASK 0x0f +#define PORT_STATUS_CMODE_100BASE_X 0x8 +#define PORT_STATUS_CMODE_1000BASE_X 0x9 +#define PORT_STATUS_CMODE_SGMII 0xa +#define PORT_PCS_CTRL 0x01 +#define PORT_PCS_CTRL_RGMII_DELAY_RXCLK BIT(15) +#define PORT_PCS_CTRL_RGMII_DELAY_TXCLK BIT(14) +#define PORT_PCS_CTRL_FC BIT(7) +#define PORT_PCS_CTRL_FORCE_FC BIT(6) +#define PORT_PCS_CTRL_LINK_UP BIT(5) +#define PORT_PCS_CTRL_FORCE_LINK BIT(4) +#define PORT_PCS_CTRL_DUPLEX_FULL BIT(3) +#define PORT_PCS_CTRL_FORCE_DUPLEX BIT(2) +#define PORT_PCS_CTRL_10 0x00 +#define PORT_PCS_CTRL_100 0x01 +#define PORT_PCS_CTRL_1000 0x02 +#define PORT_PCS_CTRL_UNFORCED 0x03 +#define PORT_PAUSE_CTRL 0x02 +#define PORT_SWITCH_ID 0x03 +#define PORT_SWITCH_ID_PROD_NUM_6085 0x04a +#define PORT_SWITCH_ID_PROD_NUM_6095 0x095 +#define PORT_SWITCH_ID_PROD_NUM_6131 0x106 +#define PORT_SWITCH_ID_PROD_NUM_6320 0x115 +#define PORT_SWITCH_ID_PROD_NUM_6123 0x121 +#define PORT_SWITCH_ID_PROD_NUM_6161 0x161 +#define PORT_SWITCH_ID_PROD_NUM_6165 0x165 +#define PORT_SWITCH_ID_PROD_NUM_6171 0x171 +#define PORT_SWITCH_ID_PROD_NUM_6172 0x172 +#define 
PORT_SWITCH_ID_PROD_NUM_6175 0x175 +#define PORT_SWITCH_ID_PROD_NUM_6176 0x176 +#define PORT_SWITCH_ID_PROD_NUM_6185 0x1a7 +#define PORT_SWITCH_ID_PROD_NUM_6240 0x240 +#define PORT_SWITCH_ID_PROD_NUM_6321 0x310 +#define PORT_SWITCH_ID_PROD_NUM_6352 0x352 +#define PORT_SWITCH_ID_PROD_NUM_6350 0x371 +#define PORT_SWITCH_ID_PROD_NUM_6351 0x375 +#define PORT_CONTROL 0x04 +#define PORT_CONTROL_USE_CORE_TAG BIT(15) +#define PORT_CONTROL_DROP_ON_LOCK BIT(14) +#define PORT_CONTROL_EGRESS_UNMODIFIED (0x0 << 12) +#define PORT_CONTROL_EGRESS_UNTAGGED (0x1 << 12) +#define PORT_CONTROL_EGRESS_TAGGED (0x2 << 12) +#define PORT_CONTROL_EGRESS_ADD_TAG (0x3 << 12) +#define PORT_CONTROL_HEADER BIT(11) +#define PORT_CONTROL_IGMP_MLD_SNOOP BIT(10) +#define PORT_CONTROL_DOUBLE_TAG BIT(9) +#define PORT_CONTROL_FRAME_MODE_NORMAL (0x0 << 8) +#define PORT_CONTROL_FRAME_MODE_DSA (0x1 << 8) +#define PORT_CONTROL_FRAME_MODE_PROVIDER (0x2 << 8) +#define PORT_CONTROL_FRAME_ETHER_TYPE_DSA (0x3 << 8) +#define PORT_CONTROL_DSA_TAG BIT(8) +#define PORT_CONTROL_VLAN_TUNNEL BIT(7) +#define PORT_CONTROL_TAG_IF_BOTH BIT(6) +#define PORT_CONTROL_USE_IP BIT(5) +#define PORT_CONTROL_USE_TAG BIT(4) +#define PORT_CONTROL_FORWARD_UNKNOWN_MC BIT(3) +#define PORT_CONTROL_FORWARD_UNKNOWN BIT(2) +#define PORT_CONTROL_STATE_MASK 0x03 +#define PORT_CONTROL_STATE_DISABLED 0x00 +#define PORT_CONTROL_STATE_BLOCKING 0x01 +#define PORT_CONTROL_STATE_LEARNING 0x02 +#define PORT_CONTROL_STATE_FORWARDING 0x03 +#define PORT_CONTROL_1 0x05 +#define PORT_CONTROL_1_FID_11_4_MASK (0xff << 0) +#define PORT_BASE_VLAN 0x06 +#define PORT_BASE_VLAN_FID_3_0_MASK (0xf << 12) +#define PORT_DEFAULT_VLAN 0x07 +#define PORT_DEFAULT_VLAN_MASK 0xfff +#define PORT_CONTROL_2 0x08 +#define PORT_CONTROL_2_IGNORE_FCS BIT(15) +#define PORT_CONTROL_2_VTU_PRI_OVERRIDE BIT(14) +#define PORT_CONTROL_2_SA_PRIO_OVERRIDE BIT(13) +#define PORT_CONTROL_2_DA_PRIO_OVERRIDE BIT(12) +#define PORT_CONTROL_2_JUMBO_1522 (0x00 << 12) +#define PORT_CONTROL_2_JUMBO_2048 (0x01 << 12) +#define PORT_CONTROL_2_JUMBO_10240 (0x02 << 12) +#define PORT_CONTROL_2_8021Q_MASK (0x03 << 10) +#define PORT_CONTROL_2_8021Q_DISABLED (0x00 << 10) +#define PORT_CONTROL_2_8021Q_FALLBACK (0x01 << 10) +#define PORT_CONTROL_2_8021Q_CHECK (0x02 << 10) +#define PORT_CONTROL_2_8021Q_SECURE (0x03 << 10) +#define PORT_CONTROL_2_DISCARD_TAGGED BIT(9) +#define PORT_CONTROL_2_DISCARD_UNTAGGED BIT(8) +#define PORT_CONTROL_2_MAP_DA BIT(7) +#define PORT_CONTROL_2_DEFAULT_FORWARD BIT(6) +#define PORT_CONTROL_2_FORWARD_UNKNOWN BIT(6) +#define PORT_CONTROL_2_EGRESS_MONITOR BIT(5) +#define PORT_CONTROL_2_INGRESS_MONITOR BIT(4) +#define PORT_RATE_CONTROL 0x09 +#define PORT_RATE_CONTROL_2 0x0a +#define PORT_ASSOC_VECTOR 0x0b +#define PORT_ASSOC_VECTOR_HOLD_AT_1 BIT(15) +#define PORT_ASSOC_VECTOR_INT_AGE_OUT BIT(14) +#define PORT_ASSOC_VECTOR_LOCKED_PORT BIT(13) +#define PORT_ASSOC_VECTOR_IGNORE_WRONG BIT(12) +#define PORT_ASSOC_VECTOR_REFRESH_LOCKED BIT(11) +#define PORT_ATU_CONTROL 0x0c +#define PORT_PRI_OVERRIDE 0x0d +#define PORT_ETH_TYPE 0x0f +#define PORT_IN_DISCARD_LO 0x10 +#define PORT_IN_DISCARD_HI 0x11 +#define PORT_IN_FILTERED 0x12 +#define PORT_OUT_FILTERED 0x13 +#define PORT_TAG_REGMAP_0123 0x18 +#define PORT_TAG_REGMAP_4567 0x19 + +#define REG_GLOBAL 0x1b +#define GLOBAL_STATUS 0x00 +#define GLOBAL_STATUS_PPU_STATE BIT(15) /* 6351 and 6171 */ +/* Two bits for 6165, 6185 etc */ +#define GLOBAL_STATUS_PPU_MASK (0x3 << 14) +#define GLOBAL_STATUS_PPU_DISABLED_RST (0x0 << 14) +#define GLOBAL_STATUS_PPU_INITIALIZING (0x1 
<< 14) +#define GLOBAL_STATUS_PPU_DISABLED (0x2 << 14) +#define GLOBAL_STATUS_PPU_POLLING (0x3 << 14) +#define GLOBAL_MAC_01 0x01 +#define GLOBAL_MAC_23 0x02 +#define GLOBAL_MAC_45 0x03 +#define GLOBAL_ATU_FID 0x01 /* 6097 6165 6351 6352 */ +#define GLOBAL_VTU_FID 0x02 /* 6097 6165 6351 6352 */ +#define GLOBAL_VTU_FID_MASK 0xfff +#define GLOBAL_VTU_SID 0x03 /* 6097 6165 6351 6352 */ +#define GLOBAL_VTU_SID_MASK 0x3f +#define GLOBAL_CONTROL 0x04 +#define GLOBAL_CONTROL_SW_RESET BIT(15) +#define GLOBAL_CONTROL_PPU_ENABLE BIT(14) +#define GLOBAL_CONTROL_DISCARD_EXCESS BIT(13) /* 6352 */ +#define GLOBAL_CONTROL_SCHED_PRIO BIT(11) /* 6152 */ +#define GLOBAL_CONTROL_MAX_FRAME_1632 BIT(10) /* 6152 */ +#define GLOBAL_CONTROL_RELOAD_EEPROM BIT(9) /* 6152 */ +#define GLOBAL_CONTROL_DEVICE_EN BIT(7) +#define GLOBAL_CONTROL_STATS_DONE_EN BIT(6) +#define GLOBAL_CONTROL_VTU_PROBLEM_EN BIT(5) +#define GLOBAL_CONTROL_VTU_DONE_EN BIT(4) +#define GLOBAL_CONTROL_ATU_PROBLEM_EN BIT(3) +#define GLOBAL_CONTROL_ATU_DONE_EN BIT(2) +#define GLOBAL_CONTROL_TCAM_EN BIT(1) +#define GLOBAL_CONTROL_EEPROM_DONE_EN BIT(0) +#define GLOBAL_VTU_OP 0x05 +#define GLOBAL_VTU_OP_BUSY BIT(15) +#define GLOBAL_VTU_OP_FLUSH_ALL ((0x01 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_OP_VTU_LOAD_PURGE ((0x03 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_OP_VTU_GET_NEXT ((0x04 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_OP_STU_LOAD_PURGE ((0x05 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_OP_STU_GET_NEXT ((0x06 << 12) | GLOBAL_VTU_OP_BUSY) +#define GLOBAL_VTU_VID 0x06 +#define GLOBAL_VTU_VID_MASK 0xfff +#define GLOBAL_VTU_VID_VALID BIT(12) +#define GLOBAL_VTU_DATA_0_3 0x07 +#define GLOBAL_VTU_DATA_4_7 0x08 +#define GLOBAL_VTU_DATA_8_11 0x09 +#define GLOBAL_VTU_STU_DATA_MASK 0x03 +#define GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED 0x00 +#define GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED 0x01 +#define GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED 0x02 +#define GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER 0x03 +#define GLOBAL_STU_DATA_PORT_STATE_DISABLED 0x00 +#define GLOBAL_STU_DATA_PORT_STATE_BLOCKING 0x01 +#define GLOBAL_STU_DATA_PORT_STATE_LEARNING 0x02 +#define GLOBAL_STU_DATA_PORT_STATE_FORWARDING 0x03 +#define GLOBAL_ATU_CONTROL 0x0a +#define GLOBAL_ATU_CONTROL_LEARN2ALL BIT(3) +#define GLOBAL_ATU_OP 0x0b +#define GLOBAL_ATU_OP_BUSY BIT(15) +#define GLOBAL_ATU_OP_NOP (0 << 12) +#define GLOBAL_ATU_OP_FLUSH_MOVE_ALL ((1 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC ((2 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_OP_LOAD_DB ((3 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_OP_GET_NEXT_DB ((4 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB ((5 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_OP_GET_CLR_VIOLATION ((7 << 12) | GLOBAL_ATU_OP_BUSY) +#define GLOBAL_ATU_DATA 0x0c +#define GLOBAL_ATU_DATA_TRUNK BIT(15) +#define GLOBAL_ATU_DATA_TRUNK_ID_MASK 0x00f0 +#define GLOBAL_ATU_DATA_TRUNK_ID_SHIFT 4 +#define GLOBAL_ATU_DATA_PORT_VECTOR_MASK 0x3ff0 +#define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT 4 +#define GLOBAL_ATU_DATA_STATE_MASK 0x0f +#define GLOBAL_ATU_DATA_STATE_UNUSED 0x00 +#define GLOBAL_ATU_DATA_STATE_UC_MGMT 0x0d +#define GLOBAL_ATU_DATA_STATE_UC_STATIC 0x0e +#define GLOBAL_ATU_DATA_STATE_UC_PRIO_OVER 0x0f +#define GLOBAL_ATU_DATA_STATE_MC_NONE_RATE 0x05 +#define GLOBAL_ATU_DATA_STATE_MC_STATIC 0x07 +#define GLOBAL_ATU_DATA_STATE_MC_MGMT 0x0e +#define GLOBAL_ATU_DATA_STATE_MC_PRIO_OVER 0x0f +#define 
GLOBAL_ATU_MAC_01 0x0d +#define GLOBAL_ATU_MAC_23 0x0e +#define GLOBAL_ATU_MAC_45 0x0f +#define GLOBAL_IP_PRI_0 0x10 +#define GLOBAL_IP_PRI_1 0x11 +#define GLOBAL_IP_PRI_2 0x12 +#define GLOBAL_IP_PRI_3 0x13 +#define GLOBAL_IP_PRI_4 0x14 +#define GLOBAL_IP_PRI_5 0x15 +#define GLOBAL_IP_PRI_6 0x16 +#define GLOBAL_IP_PRI_7 0x17 +#define GLOBAL_IEEE_PRI 0x18 +#define GLOBAL_CORE_TAG_TYPE 0x19 +#define GLOBAL_MONITOR_CONTROL 0x1a +#define GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT 12 +#define GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT 8 +#define GLOBAL_MONITOR_CONTROL_ARP_SHIFT 4 +#define GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT 0 +#define GLOBAL_MONITOR_CONTROL_ARP_DISABLED (0xf0) +#define GLOBAL_CONTROL_2 0x1c +#define GLOBAL_CONTROL_2_NO_CASCADE 0xe000 +#define GLOBAL_CONTROL_2_MULTIPLE_CASCADE 0xf000 + +#define GLOBAL_STATS_OP 0x1d +#define GLOBAL_STATS_OP_BUSY BIT(15) +#define GLOBAL_STATS_OP_NOP (0 << 12) +#define GLOBAL_STATS_OP_FLUSH_ALL ((1 << 12) | GLOBAL_STATS_OP_BUSY) +#define GLOBAL_STATS_OP_FLUSH_PORT ((2 << 12) | GLOBAL_STATS_OP_BUSY) +#define GLOBAL_STATS_OP_READ_CAPTURED ((4 << 12) | GLOBAL_STATS_OP_BUSY) +#define GLOBAL_STATS_OP_CAPTURE_PORT ((5 << 12) | GLOBAL_STATS_OP_BUSY) +#define GLOBAL_STATS_OP_HIST_RX ((1 << 10) | GLOBAL_STATS_OP_BUSY) +#define GLOBAL_STATS_OP_HIST_TX ((2 << 10) | GLOBAL_STATS_OP_BUSY) +#define GLOBAL_STATS_OP_HIST_RX_TX ((3 << 10) | GLOBAL_STATS_OP_BUSY) +#define GLOBAL_STATS_OP_BANK_1 BIT(9) +#define GLOBAL_STATS_COUNTER_32 0x1e +#define GLOBAL_STATS_COUNTER_01 0x1f + +#define REG_GLOBAL2 0x1c +#define GLOBAL2_INT_SOURCE 0x00 +#define GLOBAL2_INT_MASK 0x01 +#define GLOBAL2_MGMT_EN_2X 0x02 +#define GLOBAL2_MGMT_EN_0X 0x03 +#define GLOBAL2_FLOW_CONTROL 0x04 +#define GLOBAL2_SWITCH_MGMT 0x05 +#define GLOBAL2_SWITCH_MGMT_USE_DOUBLE_TAG_DATA BIT(15) +#define GLOBAL2_SWITCH_MGMT_PREVENT_LOOPS BIT(14) +#define GLOBAL2_SWITCH_MGMT_FLOW_CONTROL_MSG BIT(13) +#define GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI BIT(7) +#define GLOBAL2_SWITCH_MGMT_RSVD2CPU BIT(3) +#define GLOBAL2_DEVICE_MAPPING 0x06 +#define GLOBAL2_DEVICE_MAPPING_UPDATE BIT(15) +#define GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT 8 +#define GLOBAL2_DEVICE_MAPPING_PORT_MASK 0x0f +#define GLOBAL2_TRUNK_MASK 0x07 +#define GLOBAL2_TRUNK_MASK_UPDATE BIT(15) +#define GLOBAL2_TRUNK_MASK_NUM_SHIFT 12 +#define GLOBAL2_TRUNK_MASK_HASK BIT(11) +#define GLOBAL2_TRUNK_MAPPING 0x08 +#define GLOBAL2_TRUNK_MAPPING_UPDATE BIT(15) +#define GLOBAL2_TRUNK_MAPPING_ID_SHIFT 11 +#define GLOBAL2_IRL_CMD 0x09 +#define GLOBAL2_IRL_CMD_BUSY BIT(15) +#define GLOBAL2_IRL_CMD_OP_INIT_ALL ((0x001 << 12) | GLOBAL2_IRL_CMD_BUSY) +#define GLOBAL2_IRL_CMD_OP_INIT_SEL ((0x002 << 12) | GLOBAL2_IRL_CMD_BUSY) +#define GLOBAL2_IRL_CMD_OP_WRITE_SEL ((0x003 << 12) | GLOBAL2_IRL_CMD_BUSY) +#define GLOBAL2_IRL_CMD_OP_READ_SEL ((0x004 << 12) | GLOBAL2_IRL_CMD_BUSY) +#define GLOBAL2_IRL_DATA 0x0a +#define GLOBAL2_PVT_ADDR 0x0b +#define GLOBAL2_PVT_ADDR_BUSY BIT(15) +#define GLOBAL2_PVT_ADDR_OP_INIT_ONES ((0x01 << 12) | GLOBAL2_PVT_ADDR_BUSY) +#define GLOBAL2_PVT_ADDR_OP_WRITE_PVLAN ((0x03 << 12) | GLOBAL2_PVT_ADDR_BUSY) +#define GLOBAL2_PVT_ADDR_OP_READ ((0x04 << 12) | GLOBAL2_PVT_ADDR_BUSY) +#define GLOBAL2_PVT_DATA 0x0c +#define GLOBAL2_SWITCH_MAC 0x0d +#define GLOBAL2_ATU_STATS 0x0e +#define GLOBAL2_PRIO_OVERRIDE 0x0f +#define GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP BIT(7) +#define GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT 4 +#define GLOBAL2_PRIO_OVERRIDE_FORCE_ARP BIT(3) +#define GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT 0 +#define GLOBAL2_EEPROM_CMD 0x14 +#define GLOBAL2_EEPROM_CMD_BUSY 
BIT(15) +#define GLOBAL2_EEPROM_CMD_OP_WRITE ((0x3 << 12) | GLOBAL2_EEPROM_CMD_BUSY) +#define GLOBAL2_EEPROM_CMD_OP_READ ((0x4 << 12) | GLOBAL2_EEPROM_CMD_BUSY) +#define GLOBAL2_EEPROM_CMD_OP_LOAD ((0x6 << 12) | GLOBAL2_EEPROM_CMD_BUSY) +#define GLOBAL2_EEPROM_CMD_RUNNING BIT(11) +#define GLOBAL2_EEPROM_CMD_WRITE_EN BIT(10) +#define GLOBAL2_EEPROM_CMD_ADDR_MASK 0xff +#define GLOBAL2_EEPROM_DATA 0x15 +#define GLOBAL2_PTP_AVB_OP 0x16 +#define GLOBAL2_PTP_AVB_DATA 0x17 +#define GLOBAL2_SMI_OP 0x18 +#define GLOBAL2_SMI_OP_BUSY BIT(15) +#define GLOBAL2_SMI_OP_CLAUSE_22 BIT(12) +#define GLOBAL2_SMI_OP_22_WRITE ((1 << 10) | GLOBAL2_SMI_OP_BUSY | \ + GLOBAL2_SMI_OP_CLAUSE_22) +#define GLOBAL2_SMI_OP_22_READ ((2 << 10) | GLOBAL2_SMI_OP_BUSY | \ + GLOBAL2_SMI_OP_CLAUSE_22) +#define GLOBAL2_SMI_OP_45_WRITE_ADDR ((0 << 10) | GLOBAL2_SMI_OP_BUSY) +#define GLOBAL2_SMI_OP_45_WRITE_DATA ((1 << 10) | GLOBAL2_SMI_OP_BUSY) +#define GLOBAL2_SMI_OP_45_READ_DATA ((2 << 10) | GLOBAL2_SMI_OP_BUSY) +#define GLOBAL2_SMI_DATA 0x19 +#define GLOBAL2_SCRATCH_MISC 0x1a +#define GLOBAL2_SCRATCH_BUSY BIT(15) +#define GLOBAL2_SCRATCH_REGISTER_SHIFT 8 +#define GLOBAL2_SCRATCH_VALUE_MASK 0xff +#define GLOBAL2_WDOG_CONTROL 0x1b +#define GLOBAL2_QOS_WEIGHT 0x1c +#define GLOBAL2_MISC 0x1d + +#define MV88E6XXX_N_FID 4096 + +/* List of supported models */ +enum mv88e6xxx_model { + MV88E6085, + MV88E6095, + MV88E6123, + MV88E6131, + MV88E6161, + MV88E6165, + MV88E6171, + MV88E6172, + MV88E6175, + MV88E6176, + MV88E6185, + MV88E6240, + MV88E6320, + MV88E6321, + MV88E6350, + MV88E6351, + MV88E6352, +}; + +enum mv88e6xxx_family { + MV88E6XXX_FAMILY_NONE, + MV88E6XXX_FAMILY_6065, /* 6031 6035 6061 6065 */ + MV88E6XXX_FAMILY_6095, /* 6092 6095 */ + MV88E6XXX_FAMILY_6097, /* 6046 6085 6096 6097 */ + MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */ + MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */ + MV88E6XXX_FAMILY_6320, /* 6320 6321 */ + MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */ + MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */ +}; + +enum mv88e6xxx_cap { + /* Energy Efficient Ethernet. + */ + MV88E6XXX_CAP_EEE, + + /* Switch Global 2 Registers. + * The device contains a second set of global 16-bit registers. + */ + MV88E6XXX_CAP_GLOBAL2, + MV88E6XXX_CAP_G2_MGMT_EN_2X, /* (0x02) MGMT Enable Register 2x */ + MV88E6XXX_CAP_G2_MGMT_EN_0X, /* (0x03) MGMT Enable Register 0x */ + MV88E6XXX_CAP_G2_IRL_CMD, /* (0x09) Ingress Rate Command */ + MV88E6XXX_CAP_G2_IRL_DATA, /* (0x0a) Ingress Rate Data */ + MV88E6XXX_CAP_G2_PVT_ADDR, /* (0x0b) Cross Chip Port VLAN Addr */ + MV88E6XXX_CAP_G2_PVT_DATA, /* (0x0c) Cross Chip Port VLAN Data */ + MV88E6XXX_CAP_G2_SWITCH_MAC, /* (0x0d) Switch MAC/WoL/WoF */ + MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */ + MV88E6XXX_CAP_G2_EEPROM_CMD, /* (0x14) EEPROM Command */ + MV88E6XXX_CAP_G2_EEPROM_DATA, /* (0x15) EEPROM Data */ + + /* Multi-chip Addressing Mode. + * Some chips require an indirect SMI access when their SMI device + * address is not zero. See SMI_CMD and SMI_DATA. + */ + MV88E6XXX_CAP_MULTI_CHIP, + + /* PHY Polling Unit. + * See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING. + */ + MV88E6XXX_CAP_PPU, + MV88E6XXX_CAP_PPU_ACTIVE, + + /* SMI PHY Command and Data registers. + * This requires an indirect access to PHY registers through + * GLOBAL2_SMI_OP, otherwise direct access to PHY registers is done. + */ + MV88E6XXX_CAP_SMI_PHY, + + /* Per VLAN Spanning Tree Unit (STU). 
+ * The Port State database, if present, is accessed through VTU + * operations and dedicated SID registers. See GLOBAL_VTU_SID. + */ + MV88E6XXX_CAP_STU, + + /* Internal temperature sensor. + * Available from any enabled port's PHY register 26, page 6. + */ + MV88E6XXX_CAP_TEMP, + MV88E6XXX_CAP_TEMP_LIMIT, + + /* VLAN Table Unit. + * The VTU is used to program 802.1Q VLANs. See GLOBAL_VTU_OP. + */ + MV88E6XXX_CAP_VTU, +}; + +/* Bitmask of capabilities */ +#define MV88E6XXX_FLAG_EEE BIT(MV88E6XXX_CAP_EEE) +#define MV88E6XXX_FLAG_GLOBAL2 BIT(MV88E6XXX_CAP_GLOBAL2) +#define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT(MV88E6XXX_CAP_G2_MGMT_EN_2X) +#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT(MV88E6XXX_CAP_G2_MGMT_EN_0X) +#define MV88E6XXX_FLAG_G2_IRL_CMD BIT(MV88E6XXX_CAP_G2_IRL_CMD) +#define MV88E6XXX_FLAG_G2_IRL_DATA BIT(MV88E6XXX_CAP_G2_IRL_DATA) +#define MV88E6XXX_FLAG_G2_PVT_ADDR BIT(MV88E6XXX_CAP_G2_PVT_ADDR) +#define MV88E6XXX_FLAG_G2_PVT_DATA BIT(MV88E6XXX_CAP_G2_PVT_DATA) +#define MV88E6XXX_FLAG_G2_SWITCH_MAC BIT(MV88E6XXX_CAP_G2_SWITCH_MAC) +#define MV88E6XXX_FLAG_G2_POT BIT(MV88E6XXX_CAP_G2_POT) +#define MV88E6XXX_FLAG_G2_EEPROM_CMD BIT(MV88E6XXX_CAP_G2_EEPROM_CMD) +#define MV88E6XXX_FLAG_G2_EEPROM_DATA BIT(MV88E6XXX_CAP_G2_EEPROM_DATA) +#define MV88E6XXX_FLAG_MULTI_CHIP BIT(MV88E6XXX_CAP_MULTI_CHIP) +#define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU) +#define MV88E6XXX_FLAG_PPU_ACTIVE BIT(MV88E6XXX_CAP_PPU_ACTIVE) +#define MV88E6XXX_FLAG_SMI_PHY BIT(MV88E6XXX_CAP_SMI_PHY) +#define MV88E6XXX_FLAG_STU BIT(MV88E6XXX_CAP_STU) +#define MV88E6XXX_FLAG_TEMP BIT(MV88E6XXX_CAP_TEMP) +#define MV88E6XXX_FLAG_TEMP_LIMIT BIT(MV88E6XXX_CAP_TEMP_LIMIT) +#define MV88E6XXX_FLAG_VTU BIT(MV88E6XXX_CAP_VTU) + +/* EEPROM Programming via Global2 with 16-bit data */ +#define MV88E6XXX_FLAGS_EEPROM16 \ + (MV88E6XXX_FLAG_G2_EEPROM_CMD | \ + MV88E6XXX_FLAG_G2_EEPROM_DATA) + +/* Ingress Rate Limit unit */ +#define MV88E6XXX_FLAGS_IRL \ + (MV88E6XXX_FLAG_G2_IRL_CMD | \ + MV88E6XXX_FLAG_G2_IRL_DATA) + +/* Cross-chip Port VLAN Table */ +#define MV88E6XXX_FLAGS_PVT \ + (MV88E6XXX_FLAG_G2_PVT_ADDR | \ + MV88E6XXX_FLAG_G2_PVT_DATA) + +#define MV88E6XXX_FLAGS_FAMILY_6095 \ + (MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ + MV88E6XXX_FLAG_MULTI_CHIP | \ + MV88E6XXX_FLAG_PPU | \ + MV88E6XXX_FLAG_VTU) + +#define MV88E6XXX_FLAGS_FAMILY_6097 \ + (MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ + MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ + MV88E6XXX_FLAG_G2_POT | \ + MV88E6XXX_FLAG_MULTI_CHIP | \ + MV88E6XXX_FLAG_PPU | \ + MV88E6XXX_FLAG_STU | \ + MV88E6XXX_FLAG_VTU | \ + MV88E6XXX_FLAGS_IRL | \ + MV88E6XXX_FLAGS_PVT) + +#define MV88E6XXX_FLAGS_FAMILY_6165 \ + (MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ + MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ + MV88E6XXX_FLAG_G2_SWITCH_MAC | \ + MV88E6XXX_FLAG_G2_POT | \ + MV88E6XXX_FLAG_MULTI_CHIP | \ + MV88E6XXX_FLAG_STU | \ + MV88E6XXX_FLAG_TEMP | \ + MV88E6XXX_FLAG_VTU | \ + MV88E6XXX_FLAGS_IRL | \ + MV88E6XXX_FLAGS_PVT) + +#define MV88E6XXX_FLAGS_FAMILY_6185 \ + (MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ + MV88E6XXX_FLAG_MULTI_CHIP | \ + MV88E6XXX_FLAG_PPU | \ + MV88E6XXX_FLAG_VTU) + +#define MV88E6XXX_FLAGS_FAMILY_6320 \ + (MV88E6XXX_FLAG_EEE | \ + MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ + MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ + MV88E6XXX_FLAG_G2_SWITCH_MAC | \ + MV88E6XXX_FLAG_G2_POT | \ + MV88E6XXX_FLAG_MULTI_CHIP | \ + MV88E6XXX_FLAG_PPU_ACTIVE | \ + MV88E6XXX_FLAG_SMI_PHY | \ + MV88E6XXX_FLAG_TEMP | \ + 
MV88E6XXX_FLAG_TEMP_LIMIT | \ + MV88E6XXX_FLAG_VTU | \ + MV88E6XXX_FLAGS_EEPROM16 | \ + MV88E6XXX_FLAGS_IRL | \ + MV88E6XXX_FLAGS_PVT) + +#define MV88E6XXX_FLAGS_FAMILY_6351 \ + (MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ + MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ + MV88E6XXX_FLAG_G2_SWITCH_MAC | \ + MV88E6XXX_FLAG_G2_POT | \ + MV88E6XXX_FLAG_MULTI_CHIP | \ + MV88E6XXX_FLAG_PPU_ACTIVE | \ + MV88E6XXX_FLAG_SMI_PHY | \ + MV88E6XXX_FLAG_STU | \ + MV88E6XXX_FLAG_TEMP | \ + MV88E6XXX_FLAG_VTU | \ + MV88E6XXX_FLAGS_IRL | \ + MV88E6XXX_FLAGS_PVT) + +#define MV88E6XXX_FLAGS_FAMILY_6352 \ + (MV88E6XXX_FLAG_EEE | \ + MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ + MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ + MV88E6XXX_FLAG_G2_SWITCH_MAC | \ + MV88E6XXX_FLAG_G2_POT | \ + MV88E6XXX_FLAG_MULTI_CHIP | \ + MV88E6XXX_FLAG_PPU_ACTIVE | \ + MV88E6XXX_FLAG_SMI_PHY | \ + MV88E6XXX_FLAG_STU | \ + MV88E6XXX_FLAG_TEMP | \ + MV88E6XXX_FLAG_TEMP_LIMIT | \ + MV88E6XXX_FLAG_VTU | \ + MV88E6XXX_FLAGS_EEPROM16 | \ + MV88E6XXX_FLAGS_IRL | \ + MV88E6XXX_FLAGS_PVT) + +struct mv88e6xxx_info { + enum mv88e6xxx_family family; + u16 prod_num; + const char *name; + unsigned int num_databases; + unsigned int num_ports; + unsigned int port_base_addr; + unsigned int age_time_coeff; + unsigned long flags; +}; + +struct mv88e6xxx_atu_entry { + u16 fid; + u8 state; + bool trunk; + u16 portv_trunkid; + u8 mac[ETH_ALEN]; +}; + +struct mv88e6xxx_vtu_stu_entry { + /* VTU only */ + u16 vid; + u16 fid; + + /* VTU and STU */ + u8 sid; + bool valid; + u8 data[DSA_MAX_PORTS]; +}; + +struct mv88e6xxx_ops; + +struct mv88e6xxx_priv_port { + struct net_device *bridge_dev; +}; + +struct mv88e6xxx_chip { + const struct mv88e6xxx_info *info; + + /* The dsa_switch this private structure is related to */ + struct dsa_switch *ds; + + /* The device this structure is associated to */ + struct device *dev; + + /* This mutex protects the access to the switch registers */ + struct mutex reg_lock; + + /* The MII bus and the address on the bus that is used to + * communication with the switch + */ + const struct mv88e6xxx_ops *smi_ops; + struct mii_bus *bus; + int sw_addr; + + /* Handles automatic disabling and re-enabling of the PHY + * polling unit. + */ + struct mutex ppu_mutex; + int ppu_disabled; + struct work_struct ppu_work; + struct timer_list ppu_timer; + + /* This mutex serialises access to the statistics unit. + * Hold this mutex over snapshot + dump sequences. + */ + struct mutex stats_mutex; + + struct mv88e6xxx_priv_port ports[DSA_MAX_PORTS]; + + /* A switch may have a GPIO line tied to its reset pin. Parse + * this from the device tree, and use it before performing + * switch soft reset. 
+ */ + struct gpio_desc *reset; + + /* set to size of eeprom if supported by the switch */ + int eeprom_len; + + /* Device node for the MDIO bus */ + struct device_node *mdio_np; + + /* And the MDIO bus itself */ + struct mii_bus *mdio_bus; +}; + +struct mv88e6xxx_ops { + int (*read)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val); + int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val); +}; + +enum stat_type { + BANK0, + BANK1, + PORT, +}; + +struct mv88e6xxx_hw_stat { + char string[ETH_GSTRING_LEN]; + int sizeof_stat; + int reg; + enum stat_type type; +}; + +static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip, + unsigned long flags) +{ + return (chip->info->flags & flags) == flags; +} + +#endif diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index c89b9aeec..39ca9350d 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -84,7 +84,6 @@ static u32 ax_msg_enable; struct ax_device { struct mii_bus *mii_bus; struct mdiobb_ctrl bb_ctrl; - struct phy_device *phy_dev; void __iomem *addr_memr; u8 reg_memr; int link; @@ -320,7 +319,7 @@ static void ax_block_output(struct net_device *dev, int count, static void ax_handle_link_change(struct net_device *dev) { struct ax_device *ax = to_ax_dev(dev); - struct phy_device *phy_dev = ax->phy_dev; + struct phy_device *phy_dev = dev->phydev; int status_change = 0; if (phy_dev->link && ((ax->speed != phy_dev->speed) || @@ -369,8 +368,6 @@ static int ax_mii_probe(struct net_device *dev) phy_dev->supported &= PHY_BASIC_FEATURES; phy_dev->advertising = phy_dev->supported; - ax->phy_dev = phy_dev; - netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", phy_dev->drv->name, phydev_name(phy_dev), phy_dev->irq); @@ -410,7 +407,7 @@ static int ax_open(struct net_device *dev) ret = ax_mii_probe(dev); if (ret) goto failed_mii_probe; - phy_start(ax->phy_dev); + phy_start(dev->phydev); ret = ax_ei_open(dev); if (ret) @@ -421,7 +418,7 @@ static int ax_open(struct net_device *dev) return 0; failed_ax_ei_open: - phy_disconnect(ax->phy_dev); + phy_disconnect(dev->phydev); failed_mii_probe: ax_phy_switch(dev, 0); free_irq(dev->irq, dev); @@ -442,7 +439,7 @@ static int ax_close(struct net_device *dev) /* turn the phy off */ ax_phy_switch(dev, 0); - phy_disconnect(ax->phy_dev); + phy_disconnect(dev->phydev); free_irq(dev->irq, dev); return 0; @@ -450,8 +447,7 @@ static int ax_close(struct net_device *dev) static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { - struct ax_device *ax = to_ax_dev(dev); - struct phy_device *phy_dev = ax->phy_dev; + struct phy_device *phy_dev = dev->phydev; if (!netif_running(dev)) return -EINVAL; @@ -474,28 +470,6 @@ static void ax_get_drvinfo(struct net_device *dev, strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info)); } -static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct ax_device *ax = to_ax_dev(dev); - struct phy_device *phy_dev = ax->phy_dev; - - if (!phy_dev) - return -ENODEV; - - return phy_ethtool_gset(phy_dev, cmd); -} - -static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct ax_device *ax = to_ax_dev(dev); - struct phy_device *phy_dev = ax->phy_dev; - - if (!phy_dev) - return -ENODEV; - - return phy_ethtool_sset(phy_dev, cmd); -} - static u32 ax_get_msglevel(struct net_device *dev) { struct ei_device *ei_local = netdev_priv(dev); @@ -512,12 +486,12 @@ static void ax_set_msglevel(struct net_device *dev, u32 v) static 
const struct ethtool_ops ax_ethtool_ops = { .get_drvinfo = ax_get_drvinfo, - .get_settings = ax_get_settings, - .set_settings = ax_set_settings, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, .get_msglevel = ax_get_msglevel, .set_msglevel = ax_set_msglevel, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; #ifdef CONFIG_AX88796_93CX6 @@ -936,7 +910,8 @@ static int ax_probe(struct platform_device *pdev) iounmap(ax->map2); exit_mem2: - release_mem_region(mem2->start, mem2_size); + if (mem2) + release_mem_region(mem2->start, mem2_size); exit_mem1: iounmap(ei_local->mem); diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index 3d2245fdc..38eaea18d 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -310,7 +310,7 @@ static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum, static void bfin_mac_adjust_link(struct net_device *dev) { struct bfin_mac_local *lp = netdev_priv(dev); - struct phy_device *phydev = lp->phydev; + struct phy_device *phydev = dev->phydev; unsigned long flags; int new_state = 0; @@ -430,7 +430,6 @@ static int mii_probe(struct net_device *dev, int phy_mode) lp->old_link = 0; lp->old_speed = 0; lp->old_duplex = -1; - lp->phydev = phydev; phy_attached_print(phydev, "mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n", MDC_CLK, mdc_div, sclk / 1000000); @@ -450,31 +449,6 @@ static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static int -bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct bfin_mac_local *lp = netdev_priv(dev); - - if (lp->phydev) - return phy_ethtool_gset(lp->phydev, cmd); - - return -EINVAL; -} - -static int -bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct bfin_mac_local *lp = netdev_priv(dev); - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (lp->phydev) - return phy_ethtool_sset(lp->phydev, cmd); - - return -EINVAL; -} - static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -552,8 +526,6 @@ static int bfin_mac_ethtool_get_ts_info(struct net_device *dev, #endif static const struct ethtool_ops bfin_mac_ethtool_ops = { - .get_settings = bfin_mac_ethtool_getsettings, - .set_settings = bfin_mac_ethtool_setsettings, .get_link = ethtool_op_get_link, .get_drvinfo = bfin_mac_ethtool_getdrvinfo, .get_wol = bfin_mac_ethtool_getwol, @@ -561,6 +533,8 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = { #ifdef CONFIG_BFIN_MAC_USE_HWSTAMP .get_ts_info = bfin_mac_ethtool_get_ts_info, #endif + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; /**************************************************************************/ @@ -1427,7 +1401,7 @@ static void bfin_mac_timeout(struct net_device *dev) if (netif_queue_stopped(dev)) netif_wake_queue(dev); - bfin_mac_enable(lp->phydev); + bfin_mac_enable(dev->phydev); /* We can accept TX packets again */ netif_trans_update(dev); /* prevent tx timeout */ @@ -1491,8 +1465,6 @@ static void bfin_mac_set_multicast_list(struct net_device *dev) static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { - struct bfin_mac_local *lp = netdev_priv(netdev); - if (!netif_running(netdev)) return -EINVAL; @@ -1502,8 +1474,8 @@ static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) case 
SIOCGHWTSTAMP: return bfin_mac_hwtstamp_get(netdev, ifr); default: - if (lp->phydev) - return phy_mii_ioctl(lp->phydev, ifr, cmd); + if (netdev->phydev) + return phy_mii_ioctl(netdev->phydev, ifr, cmd); else return -EOPNOTSUPP; } @@ -1547,12 +1519,12 @@ static int bfin_mac_open(struct net_device *dev) if (ret) return ret; - phy_start(lp->phydev); + phy_start(dev->phydev); setup_system_regs(dev); setup_mac_addr(dev->dev_addr); bfin_mac_disable(); - ret = bfin_mac_enable(lp->phydev); + ret = bfin_mac_enable(dev->phydev); if (ret) return ret; pr_debug("hardware init finished\n"); @@ -1578,8 +1550,8 @@ static int bfin_mac_close(struct net_device *dev) napi_disable(&lp->napi); netif_carrier_off(dev); - phy_stop(lp->phydev); - phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN); + phy_stop(dev->phydev); + phy_write(dev->phydev, MII_BMCR, BMCR_PDOWN); /* clear everything */ bfin_mac_shutdown(dev); diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h index d1217db70..8c3b56198 100644 --- a/drivers/net/ethernet/adi/bfin_mac.h +++ b/drivers/net/ethernet/adi/bfin_mac.h @@ -92,7 +92,6 @@ struct bfin_mac_local { int old_speed; int old_duplex; - struct phy_device *phydev; struct mii_bus *mii_bus; #if defined(CONFIG_BFIN_MAC_USE_HWSTAMP) diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 821d86c38..c83ebae73 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -440,7 +440,6 @@ struct et131x_adapter { struct net_device *netdev; struct pci_dev *pdev; struct mii_bus *mii_bus; - struct phy_device *phydev; struct napi_struct napi; /* Flags that indicate current state of the adapter */ @@ -864,7 +863,7 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter) { int32_t delay = 0; struct mac_regs __iomem *mac = &adapter->regs->mac; - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev; u32 cfg1; u32 cfg2; u32 ifctrl; @@ -1035,7 +1034,7 @@ static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter) static void et1310_config_rxmac_regs(struct et131x_adapter *adapter) { struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev; u32 sa_lo; u32 sa_hi = 0; u32 pf_ctrl = 0; @@ -1230,7 +1229,7 @@ out: static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) { - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev; if (!phydev) return -EIO; @@ -1311,7 +1310,7 @@ static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter, static void et1310_config_flow_control(struct et131x_adapter *adapter) { - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev; if (phydev->duplex == DUPLEX_HALF) { adapter->flow = FLOW_NONE; @@ -1456,7 +1455,7 @@ static int et131x_mdio_write(struct mii_bus *bus, int phy_addr, static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down) { u16 data; - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev; et131x_mii_read(adapter, MII_BMCR, &data); data &= ~BMCR_PDOWN; @@ -1469,7 +1468,7 @@ static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down) static void et131x_xcvr_init(struct et131x_adapter *adapter) { u16 lcr2; - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev; /* 
Set the LED behavior such that LED 1 indicates speed (off = * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates @@ -2111,7 +2110,7 @@ static int et131x_init_recv(struct et131x_adapter *adapter) /* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */ static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter) { - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev; /* For version B silicon, we do not use the RxDMA timer for 10 and 100 * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing. @@ -2426,7 +2425,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) struct sk_buff *skb = tcb->skb; u32 nr_frags = skb_shinfo(skb)->nr_frags + 1; struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0]; - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev; dma_addr_t dma_addr; struct tx_ring *tx_ring = &adapter->tx_ring; @@ -2791,22 +2790,6 @@ static void et131x_handle_send_pkts(struct et131x_adapter *adapter) spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); } -static int et131x_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) -{ - struct et131x_adapter *adapter = netdev_priv(netdev); - - return phy_ethtool_gset(adapter->phydev, cmd); -} - -static int et131x_set_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) -{ - struct et131x_adapter *adapter = netdev_priv(netdev); - - return phy_ethtool_sset(adapter->phydev, cmd); -} - static int et131x_get_regs_len(struct net_device *netdev) { #define ET131X_REGS_LEN 256 @@ -2979,12 +2962,12 @@ static void et131x_get_drvinfo(struct net_device *netdev, } static struct ethtool_ops et131x_ethtool_ops = { - .get_settings = et131x_get_settings, - .set_settings = et131x_set_settings, .get_drvinfo = et131x_get_drvinfo, .get_regs_len = et131x_get_regs_len, .get_regs = et131x_get_regs, .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; /* et131x_hwaddr_init - set up the MAC Address */ @@ -3098,7 +3081,7 @@ err_out: static void et131x_error_timer_handler(unsigned long data) { struct et131x_adapter *adapter = (struct et131x_adapter *)data; - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = adapter->netdev->phydev; if (et1310_in_phy_coma(adapter)) { /* Bring the device immediately out of coma, to @@ -3168,7 +3151,7 @@ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) static void et131x_adjust_link(struct net_device *netdev) { struct et131x_adapter *adapter = netdev_priv(netdev); - struct phy_device *phydev = adapter->phydev; + struct phy_device *phydev = netdev->phydev; if (!phydev) return; @@ -3287,7 +3270,6 @@ static int et131x_mii_probe(struct net_device *netdev) phydev->advertising = phydev->supported; phydev->autoneg = AUTONEG_ENABLE; - adapter->phydev = phydev; phy_attached_info(phydev); @@ -3323,7 +3305,7 @@ static void et131x_pci_remove(struct pci_dev *pdev) unregister_netdev(netdev); netif_napi_del(&adapter->napi); - phy_disconnect(adapter->phydev); + phy_disconnect(netdev->phydev); mdiobus_unregister(adapter->mii_bus); mdiobus_free(adapter->mii_bus); @@ -3338,20 +3320,16 @@ static void et131x_pci_remove(struct pci_dev *pdev) static void et131x_up(struct net_device *netdev) { - struct et131x_adapter *adapter = netdev_priv(netdev); - et131x_enable_txrx(netdev); - phy_start(adapter->phydev); + 
phy_start(netdev->phydev); } static void et131x_down(struct net_device *netdev) { - struct et131x_adapter *adapter = netdev_priv(netdev); - /* Save the timestamp for the TX watchdog, prevent a timeout */ netif_trans_update(netdev); - phy_stop(adapter->phydev); + phy_stop(netdev->phydev); et131x_disable_txrx(netdev); } @@ -3684,12 +3662,10 @@ static int et131x_close(struct net_device *netdev) static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd) { - struct et131x_adapter *adapter = netdev_priv(netdev); - - if (!adapter->phydev) + if (!netdev->phydev) return -EINVAL; - return phy_mii_ioctl(adapter->phydev, reqbuf, cmd); + return phy_mii_ioctl(netdev->phydev, reqbuf, cmd); } /* et131x_set_packet_filter - Configures the Rx Packet filtering */ @@ -4073,7 +4049,7 @@ out: return rc; err_phy_disconnect: - phy_disconnect(adapter->phydev); + phy_disconnect(netdev->phydev); err_mdio_unregister: mdiobus_unregister(adapter->mii_bus); err_mdio_free: diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index de2c4bf5f..6ffdff68b 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -77,7 +77,6 @@ struct emac_board_info { int emacrx_completed_flag; - struct phy_device *phy_dev; struct device_node *phy_node; unsigned int link; unsigned int speed; @@ -115,7 +114,7 @@ static void emac_update_duplex(struct net_device *dev) static void emac_handle_link_change(struct net_device *dev) { struct emac_board_info *db = netdev_priv(dev); - struct phy_device *phydev = db->phy_dev; + struct phy_device *phydev = dev->phydev; unsigned long flags; int status_change = 0; @@ -154,21 +153,22 @@ static void emac_handle_link_change(struct net_device *dev) static int emac_mdio_probe(struct net_device *dev) { struct emac_board_info *db = netdev_priv(dev); + struct phy_device *phydev; /* to-do: PHY interrupts are currently not supported */ /* attach the mac to the phy */ - db->phy_dev = of_phy_connect(db->ndev, db->phy_node, - &emac_handle_link_change, 0, - db->phy_interface); - if (!db->phy_dev) { + phydev = of_phy_connect(db->ndev, db->phy_node, + &emac_handle_link_change, 0, + db->phy_interface); + if (!phydev) { netdev_err(db->ndev, "could not find the PHY\n"); return -ENODEV; } /* mask with MAC supported features */ - db->phy_dev->supported &= PHY_BASIC_FEATURES; - db->phy_dev->advertising = db->phy_dev->supported; + phydev->supported &= PHY_BASIC_FEATURES; + phydev->advertising = phydev->supported; db->link = 0; db->speed = 0; @@ -179,10 +179,7 @@ static int emac_mdio_probe(struct net_device *dev) static void emac_mdio_remove(struct net_device *dev) { - struct emac_board_info *db = netdev_priv(dev); - - phy_disconnect(db->phy_dev); - db->phy_dev = NULL; + phy_disconnect(dev->phydev); } static void emac_reset(struct emac_board_info *db) @@ -208,8 +205,7 @@ static void emac_inblk_32bit(void __iomem *reg, void *data, int count) static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct emac_board_info *dm = netdev_priv(dev); - struct phy_device *phydev = dm->phy_dev; + struct phy_device *phydev = dev->phydev; if (!netif_running(dev)) return -EINVAL; @@ -229,33 +225,11 @@ static void emac_get_drvinfo(struct net_device *dev, strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info)); } -static int emac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct emac_board_info *dm = netdev_priv(dev); - struct phy_device *phydev = dm->phy_dev; - - if 
(!phydev) - return -ENODEV; - - return phy_ethtool_gset(phydev, cmd); -} - -static int emac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct emac_board_info *dm = netdev_priv(dev); - struct phy_device *phydev = dm->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_sset(phydev, cmd); -} - static const struct ethtool_ops emac_ethtool_ops = { .get_drvinfo = emac_get_drvinfo, - .get_settings = emac_get_settings, - .set_settings = emac_set_settings, .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static unsigned int emac_setup(struct net_device *ndev) @@ -744,7 +718,7 @@ static int emac_open(struct net_device *dev) return ret; } - phy_start(db->phy_dev); + phy_start(dev->phydev); netif_start_queue(dev); return 0; @@ -781,7 +755,7 @@ static int emac_stop(struct net_device *ndev) netif_stop_queue(ndev); netif_carrier_off(ndev); - phy_stop(db->phy_dev); + phy_stop(ndev->phydev); emac_mdio_remove(ndev); diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h index 103c30ddd..e0052003d 100644 --- a/drivers/net/ethernet/altera/altera_tse.h +++ b/drivers/net/ethernet/altera/altera_tse.h @@ -473,7 +473,6 @@ struct altera_tse_private { int phy_addr; /* PHY's MDIO address, -1 for autodetection */ phy_interface_t phy_iface; struct mii_bus *mdio; - struct phy_device *phydev; int oldspeed; int oldduplex; int oldlink; diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c index be72e1e64..7c367713c 100644 --- a/drivers/net/ethernet/altera/altera_tse_ethtool.c +++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c @@ -233,40 +233,18 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs, buf[i] = csrrd32(priv->mac_dev, i * 4); } -static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct altera_tse_private *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; - - if (phydev == NULL) - return -ENODEV; - - return phy_ethtool_gset(phydev, cmd); -} - -static int tse_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct altera_tse_private *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; - - if (phydev == NULL) - return -ENODEV; - - return phy_ethtool_sset(phydev, cmd); -} - static const struct ethtool_ops tse_ethtool_ops = { .get_drvinfo = tse_get_drvinfo, .get_regs_len = tse_reglen, .get_regs = tse_get_regs, .get_link = ethtool_op_get_link, - .get_settings = tse_get_settings, - .set_settings = tse_set_settings, .get_strings = tse_gstrings, .get_sset_count = tse_sset_count, .get_ethtool_stats = tse_fill_stats, .get_msglevel = tse_get_msglevel, .set_msglevel = tse_set_msglevel, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; void altera_tse_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index f749e4d38..bda31f308 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -625,7 +625,7 @@ out: static void altera_tse_adjust_link(struct net_device *dev) { struct altera_tse_private *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; int new_state = 0; /* only change config if there is a link */ 
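
The hunks above all apply the same mechanical conversion: ax88796, bfin_mac, et131x, sun4i-emac and altera_tse each drop their driver-private struct phy_device pointer in favour of the one phylib already stores in net_device->phydev, which in turn lets the generic phy_ethtool_get_link_ksettings/phy_ethtool_set_link_ksettings helpers replace the per-driver get_settings/set_settings wrappers. A minimal sketch of the resulting driver shape; the foo_ names are placeholders, not from the patch:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static int foo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	if (!netif_running(dev))
		return -EINVAL;
	if (!dev->phydev)	/* no PHY attached yet */
		return -EINVAL;
	/* Use the phylib-owned pointer; no driver-private copy needed. */
	return phy_mii_ioctl(dev->phydev, rq, cmd);
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	/* The generic helpers operate on dev->phydev directly, replacing
	 * the old per-driver get_settings/set_settings wrappers.
	 */
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
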
@@ -815,6 +815,7 @@ static int init_phy(struct net_device *dev) phydev = of_phy_connect(dev, phynode, &altera_tse_adjust_link, 0, priv->phy_iface); } + of_node_put(phynode); if (!phydev) { netdev_err(dev, "Could not find the PHY\n"); @@ -845,7 +846,6 @@ static int init_phy(struct net_device *dev) netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n", phydev->mdio.addr, phydev->phy_id, phydev->link); - priv->phydev = phydev; return 0; } @@ -1172,8 +1172,8 @@ static int tse_open(struct net_device *dev) spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); - if (priv->phydev) - phy_start(priv->phydev); + if (dev->phydev) + phy_start(dev->phydev); napi_enable(&priv->napi); netif_start_queue(dev); @@ -1205,8 +1205,8 @@ static int tse_shutdown(struct net_device *dev) unsigned long int flags; /* Stop the PHY */ - if (priv->phydev) - phy_stop(priv->phydev); + if (dev->phydev) + phy_stop(dev->phydev); netif_stop_queue(dev); napi_disable(&priv->napi); @@ -1545,10 +1545,9 @@ err_free_netdev: static int altera_tse_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); - struct altera_tse_private *priv = netdev_priv(ndev); - if (priv->phydev) - phy_disconnect(priv->phydev); + if (ndev->phydev) + phy_disconnect(ndev->phydev); platform_set_drvdata(pdev, NULL); altera_tse_mdio_destroy(ndev); diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 20760e102..df664187c 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -412,13 +412,13 @@ static void au1000_adjust_link(struct net_device *dev) { struct au1000_private *aup = netdev_priv(dev); - struct phy_device *phydev = aup->phy_dev; + struct phy_device *phydev = dev->phydev; unsigned long flags; u32 reg; int status_change = 0; - BUG_ON(!aup->phy_dev); + BUG_ON(!phydev); spin_lock_irqsave(&aup->lock, flags); @@ -579,7 +579,6 @@ static int au1000_mii_probe(struct net_device *dev) aup->old_link = 0; aup->old_speed = 0; aup->old_duplex = -1; - aup->phy_dev = phydev; phy_attached_info(phydev); @@ -678,29 +677,6 @@ au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base) * ethtool operations */ -static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct au1000_private *aup = netdev_priv(dev); - - if (aup->phy_dev) - return phy_ethtool_gset(aup->phy_dev, cmd); - - return -EINVAL; -} - -static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct au1000_private *aup = netdev_priv(dev); - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (aup->phy_dev) - return phy_ethtool_sset(aup->phy_dev, cmd); - - return -EINVAL; -} - static void au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -725,12 +701,12 @@ static u32 au1000_get_msglevel(struct net_device *dev) } static const struct ethtool_ops au1000_ethtool_ops = { - .get_settings = au1000_get_settings, - .set_settings = au1000_set_settings, .get_drvinfo = au1000_get_drvinfo, .get_link = ethtool_op_get_link, .get_msglevel = au1000_get_msglevel, .set_msglevel = au1000_set_msglevel, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; @@ -778,8 +754,8 @@ static int au1000_init(struct net_device *dev) #ifndef CONFIG_CPU_LITTLE_ENDIAN control |= MAC_BIG_ENDIAN; #endif - if (aup->phy_dev) { - if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex)) + if (dev->phydev) { + if (dev->phydev->link && (DUPLEX_FULL == 
dev->phydev->duplex)) control |= MAC_FULL_DUPLEX; else control |= MAC_DISABLE_RX_OWN; @@ -891,11 +867,10 @@ static int au1000_rx(struct net_device *dev) static void au1000_update_tx_stats(struct net_device *dev, u32 status) { - struct au1000_private *aup = netdev_priv(dev); struct net_device_stats *ps = &dev->stats; if (status & TX_FRAME_ABORTED) { - if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) { + if (!dev->phydev || (DUPLEX_FULL == dev->phydev->duplex)) { if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) { /* any other tx errors are only valid * in half duplex mode @@ -975,10 +950,10 @@ static int au1000_open(struct net_device *dev) return retval; } - if (aup->phy_dev) { + if (dev->phydev) { /* cause the PHY state machine to schedule a link state check */ - aup->phy_dev->state = PHY_CHANGELINK; - phy_start(aup->phy_dev); + dev->phydev->state = PHY_CHANGELINK; + phy_start(dev->phydev); } netif_start_queue(dev); @@ -995,8 +970,8 @@ static int au1000_close(struct net_device *dev) netif_dbg(aup, drv, dev, "close: dev=%p\n", dev); - if (aup->phy_dev) - phy_stop(aup->phy_dev); + if (dev->phydev) + phy_stop(dev->phydev); spin_lock_irqsave(&aup->lock, flags); @@ -1110,15 +1085,13 @@ static void au1000_multicast_list(struct net_device *dev) static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct au1000_private *aup = netdev_priv(dev); - if (!netif_running(dev)) return -EINVAL; - if (!aup->phy_dev) + if (!dev->phydev) return -EINVAL; /* PHY not controllable */ - return phy_mii_ioctl(aup->phy_dev, rq, cmd); + return phy_mii_ioctl(dev->phydev, rq, cmd); } static const struct net_device_ops au1000_netdev_ops = { diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h index ca53024f0..4c47c2377 100644 --- a/drivers/net/ethernet/amd/au1000_eth.h +++ b/drivers/net/ethernet/amd/au1000_eth.h @@ -106,7 +106,6 @@ struct au1000_private { int old_speed; int old_duplex; - struct phy_device *phy_dev; struct mii_bus *mii_bus; /* PHY configuration */ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index ebf9224b2..a9b270956 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -154,7 +154,7 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata) goto err_rx_ring; for (i = 0, channel = channel_mem; i < count; i++, channel++) { - snprintf(channel->name, sizeof(channel->name), "channel-%d", i); + snprintf(channel->name, sizeof(channel->name), "channel-%u", i); channel->pdata = pdata; channel->queue_index = i; channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE + diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig index 19e38afbc..300e3b5c5 100644 --- a/drivers/net/ethernet/apm/xgene/Kconfig +++ b/drivers/net/ethernet/apm/xgene/Kconfig @@ -3,6 +3,7 @@ config NET_XGENE depends on HAS_DMA depends on ARCH_XGENE || COMPILE_TEST select PHYLIB + select MDIO_XGENE help This is the Ethernet driver for the on-chip ethernet interface on the APM X-Gene SoC. 
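
The altera_tse hunk above also adds a missing of_node_put(). Worth spelling out: of_parse_phandle() returns the node with an elevated refcount, and of_phy_connect() does not consume that reference, so the caller must drop it itself once the connect attempt has been made, on both the success and failure paths. A sketch under those assumptions; the foo_ names and the RGMII mode are illustrative, not from the patch:

#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static void foo_adjust_link(struct net_device *dev)
{
	/* react to dev->phydev link/speed/duplex changes here */
}

/* np is the MAC's device-tree node carrying the "phy-handle" property. */
static int foo_init_phy(struct net_device *dev, struct device_node *np)
{
	struct device_node *phynode;
	struct phy_device *phydev;

	phynode = of_parse_phandle(np, "phy-handle", 0);	/* takes a ref */
	if (!phynode)
		return -ENODEV;

	phydev = of_phy_connect(dev, phynode, foo_adjust_link, 0,
				PHY_INTERFACE_MODE_RGMII);
	of_node_put(phynode);	/* drop the of_parse_phandle() reference */
	if (!phydev)
		return -ENODEV;

	return 0;
}
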
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c index 416d6ebfc..22a7b26ca 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c @@ -65,8 +65,15 @@ static int xgene_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) return phy_ethtool_gset(phydev, cmd); } else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { - cmd->supported = SUPPORTED_1000baseT_Full | - SUPPORTED_Autoneg | SUPPORTED_MII; + if (pdata->mdio_driver) { + if (!phydev) + return -ENODEV; + + return phy_ethtool_gset(phydev, cmd); + } + + cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | + SUPPORTED_MII; cmd->advertising = cmd->supported; ethtool_cmd_speed_set(cmd, SPEED_1000); cmd->duplex = DUPLEX_FULL; @@ -92,12 +99,21 @@ static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) struct phy_device *phydev = pdata->phy_dev; if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) { - if (phydev == NULL) + if (!phydev) return -ENODEV; return phy_ethtool_sset(phydev, cmd); } + if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { + if (pdata->mdio_driver) { + if (!phydev) + return -ENODEV; + + return phy_ethtool_sset(phydev, cmd); + } + } + return -EINVAL; } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 2f5638f7f..18bb9556d 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -381,59 +381,6 @@ static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata, rd_addr); } -static int xgene_mii_phy_write(struct xgene_enet_pdata *pdata, int phy_id, - u32 reg, u16 data) -{ - u32 addr = 0, wr_data = 0; - u32 done; - u8 wait = 10; - - PHY_ADDR_SET(&addr, phy_id); - REG_ADDR_SET(&addr, reg); - xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr); - - PHY_CONTROL_SET(&wr_data, data); - xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONTROL_ADDR, wr_data); - do { - usleep_range(5, 10); - xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done); - } while ((done & BUSY_MASK) && wait--); - - if (done & BUSY_MASK) { - netdev_err(pdata->ndev, "MII_MGMT write failed\n"); - return -EBUSY; - } - - return 0; -} - -static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata, - u8 phy_id, u32 reg) -{ - u32 addr = 0; - u32 data, done; - u8 wait = 10; - - PHY_ADDR_SET(&addr, phy_id); - REG_ADDR_SET(&addr, reg); - xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr); - xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK); - do { - usleep_range(5, 10); - xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done); - } while ((done & BUSY_MASK) && wait--); - - if (done & BUSY_MASK) { - netdev_err(pdata->ndev, "MII_MGMT read failed\n"); - return -EBUSY; - } - - xgene_enet_rd_mcx_mac(pdata, MII_MGMT_STATUS_ADDR, &data); - xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, 0); - - return data; -} - static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata) { u32 addr0, addr1; @@ -512,14 +459,11 @@ static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata) #endif } -static void xgene_gmac_init(struct xgene_enet_pdata *pdata) +static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata) { struct device *dev = &pdata->pdev->dev; - u32 value, mc2; - u32 intf_ctl, rgmii; - u32 icm0, icm2; - - xgene_gmac_reset(pdata); + u32 icm0, icm2, mc2; + u32 intf_ctl, rgmii, value; xgene_enet_rd_mcx_csr(pdata, 
ICM_CONFIG0_REG_0_ADDR, &icm0); xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2); @@ -564,7 +508,21 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata) mc2 |= FULL_DUPLEX2 | PAD_CRC; xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2); xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl); + xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii); + xgene_enet_configure_clock(pdata); + + xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0); + xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2); +} + +static void xgene_gmac_init(struct xgene_enet_pdata *pdata) +{ + u32 value; + if (!pdata->mdio_driver) + xgene_gmac_reset(pdata); + + xgene_gmac_set_speed(pdata); xgene_gmac_set_mac_addr(pdata); /* Adjust MDC clock frequency */ @@ -579,15 +537,10 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata) /* Rtype should be copied from FP */ xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0); - xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii); - xgene_enet_configure_clock(pdata); /* Rx-Tx traffic resume */ xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0); - xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0); - xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2); - xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value); value &= ~TX_DV_GATE_EN0; value &= ~RX_DV_GATE_EN0; @@ -671,93 +624,155 @@ bool xgene_ring_mgr_init(struct xgene_enet_pdata *p) static int xgene_enet_reset(struct xgene_enet_pdata *pdata) { - u32 val; + struct device *dev = &pdata->pdev->dev; if (!xgene_ring_mgr_init(pdata)) return -ENODEV; - if (!IS_ERR(pdata->clk)) { + if (pdata->mdio_driver) { + xgene_enet_config_ring_if_assoc(pdata); + return 0; + } + + if (dev->of_node) { clk_prepare_enable(pdata->clk); + udelay(5); clk_disable_unprepare(pdata->clk); + udelay(5); clk_prepare_enable(pdata->clk); - xgene_enet_ecc_init(pdata); + udelay(5); + } else { +#ifdef CONFIG_ACPI + if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) { + acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), + "_RST", NULL, NULL); + } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), + "_INI")) { + acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), + "_INI", NULL, NULL); + } +#endif } - xgene_enet_config_ring_if_assoc(pdata); - /* Enable auto-incr for scanning */ - xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val); - val |= SCAN_AUTO_INCR; - MGMT_CLOCK_SEL_SET(&val, 1); - xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val); + xgene_enet_ecc_init(pdata); + xgene_enet_config_ring_if_assoc(pdata); return 0; } -static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata) +static void xgene_enet_clear(struct xgene_enet_pdata *pdata, + struct xgene_enet_desc_ring *ring) { - if (!IS_ERR(pdata->clk)) - clk_disable_unprepare(pdata->clk); -} + u32 addr, val, data; -static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) -{ - struct xgene_enet_pdata *pdata = bus->priv; - u32 val; + val = xgene_enet_ring_bufnum(ring->id); - val = xgene_mii_phy_read(pdata, mii_id, regnum); - netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n", - mii_id, regnum, val); + if (xgene_enet_is_bufpool(ring->id)) { + addr = ENET_CFGSSQMIFPRESET_ADDR; + data = BIT(val - 0x20); + } else { + addr = ENET_CFGSSQMIWQRESET_ADDR; + data = BIT(val); + } - return val; + xgene_enet_wr_ring_if(pdata, addr, data); } -static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, - u16 val) +static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata) 
{ - struct xgene_enet_pdata *pdata = bus->priv; + struct device *dev = &pdata->pdev->dev; + struct xgene_enet_desc_ring *ring; + u32 pb, val; + int i; + + pb = 0; + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]->buf_pool; + + val = xgene_enet_ring_bufnum(ring->id); + pb |= BIT(val - 0x20); + } + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb); + + pb = 0; + for (i = 0; i < pdata->txq_cnt; i++) { + ring = pdata->tx_ring[i]; + + val = xgene_enet_ring_bufnum(ring->id); + pb |= BIT(val); + } + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb); - netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n", - mii_id, regnum, val); - return xgene_mii_phy_write(pdata, mii_id, regnum, val); + if (dev->of_node) { + if (!IS_ERR(pdata->clk)) + clk_disable_unprepare(pdata->clk); + } } static void xgene_enet_adjust_link(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); + const struct xgene_mac_ops *mac_ops = pdata->mac_ops; struct phy_device *phydev = pdata->phy_dev; if (phydev->link) { if (pdata->phy_speed != phydev->speed) { pdata->phy_speed = phydev->speed; - xgene_gmac_init(pdata); - xgene_gmac_rx_enable(pdata); - xgene_gmac_tx_enable(pdata); + mac_ops->set_speed(pdata); + mac_ops->rx_enable(pdata); + mac_ops->tx_enable(pdata); phy_print_status(phydev); } } else { - xgene_gmac_rx_disable(pdata); - xgene_gmac_tx_disable(pdata); + mac_ops->rx_disable(pdata); + mac_ops->tx_disable(pdata); pdata->phy_speed = SPEED_UNKNOWN; phy_print_status(phydev); } } -static int xgene_enet_phy_connect(struct net_device *ndev) +#ifdef CONFIG_ACPI +static struct acpi_device *acpi_phy_find_device(struct device *dev) +{ + struct acpi_reference_args args; + struct fwnode_handle *fw_node; + int status; + + fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev)); + status = acpi_node_get_property_reference(fw_node, "phy-handle", 0, + &args); + if (ACPI_FAILURE(status)) { + dev_dbg(dev, "No matching phy in ACPI table\n"); + return NULL; + } + + return args.adev; +} +#endif + +int xgene_enet_phy_connect(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); - struct device_node *phy_np; + struct device_node *np; struct phy_device *phy_dev; struct device *dev = &pdata->pdev->dev; + int i; if (dev->of_node) { - phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0); - if (!phy_np) { + for (i = 0 ; i < 2; i++) { + np = of_parse_phandle(dev->of_node, "phy-handle", i); + if (np) + break; + } + + if (!np) { netdev_dbg(ndev, "No phy-handle found in DT\n"); return -ENODEV; } - phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link, + phy_dev = of_phy_connect(ndev, np, &xgene_enet_adjust_link, 0, pdata->phy_mode); + of_node_put(np); if (!phy_dev) { netdev_err(ndev, "Could not connect to PHY\n"); return -ENODEV; @@ -765,6 +780,11 @@ static int xgene_enet_phy_connect(struct net_device *ndev) pdata->phy_dev = phy_dev; } else { +#ifdef CONFIG_ACPI + struct acpi_device *adev = acpi_phy_find_device(dev); + if (adev) + pdata->phy_dev = adev->driver_data; + phy_dev = pdata->phy_dev; if (!phy_dev || @@ -773,6 +793,9 @@ static int xgene_enet_phy_connect(struct net_device *ndev) netdev_err(ndev, "Could not connect to PHY\n"); return -ENODEV; } +#else + return -ENODEV; +#endif } pdata->phy_speed = SPEED_UNKNOWN; @@ -792,8 +815,8 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata, struct phy_device *phy; struct device_node *child_np; struct device_node *mdio_np = NULL; + u32 phy_addr; int ret; - u32 phy_id; if (dev->of_node) { 
for_each_child_of_node(dev->of_node, child_np) { @@ -820,21 +843,17 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata, if (ret) return ret; - ret = device_property_read_u32(dev, "phy-channel", &phy_id); + ret = device_property_read_u32(dev, "phy-channel", &phy_addr); if (ret) - ret = device_property_read_u32(dev, "phy-addr", &phy_id); + ret = device_property_read_u32(dev, "phy-addr", &phy_addr); if (ret) return -EINVAL; - phy = get_phy_device(mdio, phy_id, false); - if (IS_ERR(phy)) + phy = xgene_enet_phy_register(mdio, phy_addr); + if (!phy) return -EIO; - ret = phy_device_register(phy); - if (ret) - phy_device_free(phy); - else - pdata->phy_dev = phy; + pdata->phy_dev = phy; return ret; } @@ -850,13 +869,13 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata) return -ENOMEM; mdio_bus->name = "APM X-Gene MDIO bus"; - mdio_bus->read = xgene_enet_mdio_read; - mdio_bus->write = xgene_enet_mdio_write; + mdio_bus->read = xgene_mdio_rgmii_read; + mdio_bus->write = xgene_mdio_rgmii_write; snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii", ndev->name); - mdio_bus->priv = pdata; - mdio_bus->parent = &ndev->dev; + mdio_bus->priv = (void __force *)pdata->mcx_mac_addr; + mdio_bus->parent = &pdata->pdev->dev; ret = xgene_mdiobus_register(pdata, mdio_bus); if (ret) { @@ -873,6 +892,12 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata) return ret; } +void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata) +{ + if (pdata->phy_dev) + phy_disconnect(pdata->phy_dev); +} + void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata) { if (pdata->phy_dev) @@ -890,11 +915,13 @@ const struct xgene_mac_ops xgene_gmac_ops = { .tx_enable = xgene_gmac_tx_enable, .rx_disable = xgene_gmac_rx_disable, .tx_disable = xgene_gmac_tx_disable, + .set_speed = xgene_gmac_set_speed, .set_mac_addr = xgene_gmac_set_mac_addr, }; const struct xgene_port_ops xgene_gport_ops = { .reset = xgene_enet_reset, + .clear = xgene_enet_clear, .cle_bypass = xgene_enet_cle_bypass, .shutdown = xgene_gport_shutdown, }; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 45220be31..179a44dce 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -104,6 +104,8 @@ enum xgene_enet_rm { #define RECOMBBUF BIT(27) #define MAC_OFFSET 0x30 +#define OFFSET_4 0x04 +#define OFFSET_8 0x08 #define BLOCK_ETH_CSR_OFFSET 0x2000 #define BLOCK_ETH_CLE_CSR_OFFSET 0x6000 @@ -165,6 +167,8 @@ enum xgene_enet_rm { #define TX_DV_GATE_EN0 BIT(2) #define RX_DV_GATE_EN0 BIT(1) #define RESUME_RX0 BIT(0) +#define ENET_CFGSSQMIFPRESET_ADDR 0x14 +#define ENET_CFGSSQMIWQRESET_ADDR 0x1c #define ENET_CFGSSQMIWQASSOC_ADDR 0xe0 #define ENET_CFGSSQMIFPQASSOC_ADDR 0xdc #define ENET_CFGSSQMIQMLITEFPQASSOC_ADDR 0xf0 @@ -297,11 +301,6 @@ enum xgene_enet_ring_bufnum { RING_BUFNUM_INVALID }; -enum xgene_enet_cmd { - XGENE_ENET_WR_CMD = BIT(31), - XGENE_ENET_RD_CMD = BIT(30) -}; - enum xgene_enet_err_code { HBF_READ_DATA = 3, HBF_LL_READ = 4, @@ -347,6 +346,8 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata); void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata); bool xgene_ring_mgr_init(struct xgene_enet_pdata *p); +int xgene_enet_phy_connect(struct net_device *ndev); +void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata); extern const struct xgene_mac_ops xgene_gmac_ops; extern const struct xgene_port_ops xgene_gport_ops; 
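
The xgene_enet_adjust_link() rework above replaces direct calls to the RGMII-specific xgene_gmac_* helpers with calls through pdata->mac_ops, and adds a set_speed hook so only the rate-dependent registers are reprogrammed on a link change. A reduced sketch of that ops-table pattern, with the structures trimmed to the fields the example needs and foo_ names standing in for the driver's:

#include <linux/ethtool.h>	/* SPEED_UNKNOWN */
#include <linux/types.h>

struct foo_pdata;

struct foo_mac_ops {
	void (*set_speed)(struct foo_pdata *p);
	void (*rx_enable)(struct foo_pdata *p);
	void (*tx_enable)(struct foo_pdata *p);
	void (*rx_disable)(struct foo_pdata *p);
	void (*tx_disable)(struct foo_pdata *p);
};

struct foo_pdata {
	const struct foo_mac_ops *mac_ops;	/* chosen per MAC variant at probe */
	int phy_speed;
};

static void foo_adjust_link(struct foo_pdata *p, bool link, int speed)
{
	const struct foo_mac_ops *ops = p->mac_ops;

	if (link) {
		if (p->phy_speed != speed) {
			p->phy_speed = speed;
			ops->set_speed(p);	/* rate-dependent registers only */
			ops->rx_enable(p);
			ops->tx_enable(p);
		}
	} else {
		ops->rx_disable(p);
		ops->tx_disable(p);
		p->phy_speed = SPEED_UNKNOWN;
	}
}
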
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index d208b172f..d1d6b5eeb 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -102,25 +102,13 @@ static u8 xgene_enet_hdr_len(const void *data) static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool) { - struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev); - struct xgene_enet_raw_desc16 *raw_desc; - u32 slots = buf_pool->slots - 1; - u32 tail = buf_pool->tail; - u32 userinfo; - int i, len; - - len = pdata->ring_ops->len(buf_pool); - for (i = 0; i < len; i++) { - tail = (tail - 1) & slots; - raw_desc = &buf_pool->raw_desc16[tail]; + int i; - /* Hardware stores descriptor in little endian format */ - userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); - dev_kfree_skb_any(buf_pool->rx_skb[userinfo]); + /* Free up the buffers held by hardware */ + for (i = 0; i < buf_pool->slots; i++) { + if (buf_pool->rx_skb[i]) + dev_kfree_skb_any(buf_pool->rx_skb[i]); } - - pdata->ring_ops->wr_cmd(buf_pool, -len); - buf_pool->tail = tail; } static irqreturn_t xgene_enet_rx_irq(const int irq, void *data) @@ -481,6 +469,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE); skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0)); skb = buf_pool->rx_skb[skb_index]; + buf_pool->rx_skb[skb_index] = NULL; /* checking for error */ status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) || @@ -619,6 +608,30 @@ static void xgene_enet_timeout(struct net_device *ndev) } } +static void xgene_enet_set_irq_name(struct net_device *ndev) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct xgene_enet_desc_ring *ring; + int i; + + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]; + if (!pdata->cq_cnt) { + snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc", + ndev->name); + } else { + snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d", + ndev->name, i); + } + } + + for (i = 0; i < pdata->cq_cnt; i++) { + ring = pdata->tx_ring[i]->cp_ring; + snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d", + ndev->name, i); + } +} + static int xgene_enet_register_irq(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); @@ -626,6 +639,7 @@ static int xgene_enet_register_irq(struct net_device *ndev) struct xgene_enet_desc_ring *ring; int ret = 0, i; + xgene_enet_set_irq_name(ndev); for (i = 0; i < pdata->rxq_cnt; i++) { ring = pdata->rx_ring[i]; irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); @@ -720,20 +734,21 @@ static int xgene_enet_open(struct net_device *ndev) if (ret) return ret; - mac_ops->tx_enable(pdata); - mac_ops->rx_enable(pdata); - xgene_enet_napi_enable(pdata); ret = xgene_enet_register_irq(ndev); if (ret) return ret; - if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + if (pdata->phy_dev) { phy_start(pdata->phy_dev); - else + } else { schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF); + netif_carrier_off(ndev); + } - netif_start_queue(ndev); + mac_ops->tx_enable(pdata); + mac_ops->rx_enable(pdata); + netif_tx_start_all_queues(ndev); return ret; } @@ -744,16 +759,15 @@ static int xgene_enet_close(struct net_device *ndev) const struct xgene_mac_ops *mac_ops = pdata->mac_ops; int i; - netif_stop_queue(ndev); + netif_tx_stop_all_queues(ndev); + mac_ops->tx_disable(pdata); + mac_ops->rx_disable(pdata); - if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + if (pdata->phy_dev) 
phy_stop(pdata->phy_dev); else cancel_delayed_work_sync(&pdata->link_work); - mac_ops->tx_disable(pdata); - mac_ops->rx_disable(pdata); - xgene_enet_free_irq(ndev); xgene_enet_napi_disable(pdata); for (i = 0; i < pdata->rxq_cnt; i++) @@ -761,7 +775,6 @@ static int xgene_enet_close(struct net_device *ndev) return 0; } - static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring) { struct xgene_enet_pdata *pdata; @@ -771,7 +784,7 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring) dev = ndev_to_dev(ring->ndev); pdata->ring_ops->clear(ring); - dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); + dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); } static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata) @@ -784,6 +797,9 @@ static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata) ring = pdata->tx_ring[i]; if (ring) { xgene_enet_delete_ring(ring); + pdata->port_ops->clear(pdata, ring); + if (pdata->cq_cnt) + xgene_enet_delete_ring(ring->cp_ring); pdata->tx_ring[i] = NULL; } } @@ -794,6 +810,7 @@ static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata) buf_pool = ring->buf_pool; xgene_enet_delete_bufpool(buf_pool); xgene_enet_delete_ring(buf_pool); + pdata->port_ops->clear(pdata, buf_pool); xgene_enet_delete_ring(ring); pdata->rx_ring[i] = NULL; } @@ -842,7 +859,7 @@ static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring) if (ring->desc_addr) { pdata->ring_ops->clear(ring); - dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); + dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma); } devm_kfree(dev, ring); } @@ -900,9 +917,10 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring( struct net_device *ndev, u32 ring_num, enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id) { - struct xgene_enet_desc_ring *ring; struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct device *dev = ndev_to_dev(ndev); + struct xgene_enet_desc_ring *ring; + void *irq_mbox_addr; int size; size = xgene_enet_get_ring_size(dev, cfgsize); @@ -919,8 +937,8 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring( ring->cfgsize = cfgsize; ring->id = ring_id; - ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma, - GFP_KERNEL); + ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma, + GFP_KERNEL | __GFP_ZERO); if (!ring->desc_addr) { devm_kfree(dev, ring); return NULL; @@ -928,14 +946,16 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring( ring->size = size; if (is_irq_mbox_required(pdata, ring)) { - ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE, - &ring->irq_mbox_dma, GFP_KERNEL); - if (!ring->irq_mbox_addr) { - dma_free_coherent(dev, size, ring->desc_addr, - ring->dma); + irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE, + &ring->irq_mbox_dma, + GFP_KERNEL | __GFP_ZERO); + if (!irq_mbox_addr) { + dmam_free_coherent(dev, size, ring->desc_addr, + ring->dma); devm_kfree(dev, ring); return NULL; } + ring->irq_mbox_addr = irq_mbox_addr; } ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring); @@ -996,6 +1016,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) u8 eth_bufnum = pdata->eth_bufnum; u8 bp_bufnum = pdata->bp_bufnum; u16 ring_num = pdata->ring_num; + __le64 *exp_bufs; u16 ring_id; int i, ret, size; @@ -1027,13 +1048,6 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) rx_ring->nbufpool = NUM_BUFPOOL; rx_ring->buf_pool = buf_pool; rx_ring->irq = pdata->irqs[i]; - if 
(!pdata->cq_cnt) { - snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc", - ndev->name); - } else { - snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx%d", - ndev->name, i); - } buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots, sizeof(struct sk_buff *), GFP_KERNEL); @@ -1060,13 +1074,13 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) } size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS; - tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, - &dma_exp_bufs, - GFP_KERNEL); - if (!tx_ring->exp_bufs) { + exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs, + GFP_KERNEL | __GFP_ZERO); + if (!exp_bufs) { ret = -ENOMEM; goto err; } + tx_ring->exp_bufs = exp_bufs; pdata->tx_ring[i] = tx_ring; @@ -1086,8 +1100,6 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i]; cp_ring->index = i; - snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc%d", - ndev->name, i); } cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots, @@ -1283,6 +1295,23 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata) return 0; } +static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata) +{ + int ret; + + if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) + return 0; + + if (!IS_ENABLED(CONFIG_MDIO_XGENE)) + return 0; + + ret = xgene_enet_phy_connect(pdata->ndev); + if (!ret) + pdata->mdio_driver = true; + + return 0; +} + static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) { struct platform_device *pdev; @@ -1368,6 +1397,10 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) if (ret) return ret; + ret = xgene_enet_check_phy_handle(pdata); + if (ret) + return ret; + pdata->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pdata->clk)) { /* Firmware may have set up the clock already. 
*/ @@ -1447,6 +1480,7 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id); } + pdata->phy_speed = SPEED_UNKNOWN; pdata->mac_ops->init(pdata); return ret; @@ -1556,28 +1590,12 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata) } } -static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata) -{ - struct napi_struct *napi; - int i; - - for (i = 0; i < pdata->rxq_cnt; i++) { - napi = &pdata->rx_ring[i]->napi; - netif_napi_del(napi); - } - - for (i = 0; i < pdata->cq_cnt; i++) { - napi = &pdata->tx_ring[i]->cp_ring->napi; - netif_napi_del(napi); - } -} - static int xgene_enet_probe(struct platform_device *pdev) { struct net_device *ndev; struct xgene_enet_pdata *pdata; struct device *dev = &pdev->dev; - const struct xgene_mac_ops *mac_ops; + void (*link_state)(struct work_struct *); const struct of_device_id *of_id; int ret; @@ -1635,27 +1653,31 @@ static int xgene_enet_probe(struct platform_device *pdev) goto err; } - ret = register_netdev(ndev); - if (ret) { - netdev_err(ndev, "Failed to register netdev\n"); - goto err; - } - ret = xgene_enet_init_hw(pdata); if (ret) goto err_netdev; - mac_ops = pdata->mac_ops; - if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) { - ret = xgene_enet_mdio_config(pdata); - if (ret) - goto err_netdev; - } else { - INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); + link_state = pdata->mac_ops->link_state; + if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { + INIT_DELAYED_WORK(&pdata->link_work, link_state); + } else if (!pdata->mdio_driver) { + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + ret = xgene_enet_mdio_config(pdata); + else + INIT_DELAYED_WORK(&pdata->link_work, link_state); } + if (ret) + goto err; xgene_enet_napi_add(pdata); + ret = register_netdev(ndev); + if (ret) { + netdev_err(ndev, "Failed to register netdev\n"); + goto err; + } + return 0; + err_netdev: unregister_netdev(ndev); err: @@ -1673,20 +1695,38 @@ static int xgene_enet_remove(struct platform_device *pdev) mac_ops = pdata->mac_ops; ndev = pdata->ndev; - mac_ops->rx_disable(pdata); - mac_ops->tx_disable(pdata); + rtnl_lock(); + if (netif_running(ndev)) + dev_close(ndev); + rtnl_unlock(); - xgene_enet_napi_del(pdata); - if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + if (pdata->mdio_driver) + xgene_enet_phy_disconnect(pdata); + else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) xgene_enet_mdio_remove(pdata); + unregister_netdev(ndev); - xgene_enet_delete_desc_rings(pdata); pdata->port_ops->shutdown(pdata); + xgene_enet_delete_desc_rings(pdata); free_netdev(ndev); return 0; } +static void xgene_enet_shutdown(struct platform_device *pdev) +{ + struct xgene_enet_pdata *pdata; + + pdata = platform_get_drvdata(pdev); + if (!pdata) + return; + + if (!pdata->ndev) + return; + + xgene_enet_remove(pdev); +} + #ifdef CONFIG_ACPI static const struct acpi_device_id xgene_enet_acpi_match[] = { { "APMC0D05", XGENE_ENET1}, @@ -1721,6 +1761,7 @@ static struct platform_driver xgene_enet_driver = { }, .probe = xgene_enet_probe, .remove = xgene_enet_remove, + .shutdown = xgene_enet_shutdown, }; module_platform_driver(xgene_enet_driver); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 092fbecca..217546e57 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -38,6 +38,7 @@ #include "xgene_enet_hw.h" #include "xgene_enet_cle.h" #include "xgene_enet_ring2.h" 
+#include "../../../phy/mdio-xgene.h" #define XGENE_DRV_VERSION "v1.0" #define XGENE_ENET_MAX_MTU 1536 @@ -140,6 +141,7 @@ struct xgene_mac_ops { void (*rx_enable)(struct xgene_enet_pdata *pdata); void (*tx_disable)(struct xgene_enet_pdata *pdata); void (*rx_disable)(struct xgene_enet_pdata *pdata); + void (*set_speed)(struct xgene_enet_pdata *pdata); void (*set_mac_addr)(struct xgene_enet_pdata *pdata); void (*set_mss)(struct xgene_enet_pdata *pdata); void (*link_state)(struct work_struct *work); @@ -147,6 +149,8 @@ struct xgene_mac_ops { struct xgene_port_ops { int (*reset)(struct xgene_enet_pdata *pdata); + void (*clear)(struct xgene_enet_pdata *pdata, + struct xgene_enet_desc_ring *ring); void (*cle_bypass)(struct xgene_enet_pdata *pdata, u32 dst_ring_num, u16 bufpool_id); void (*shutdown)(struct xgene_enet_pdata *pdata); @@ -211,6 +215,7 @@ struct xgene_enet_pdata { u32 mss; u8 tx_delay; u8 rx_delay; + bool mdio_driver; }; struct xgene_indirect_ctl { @@ -220,34 +225,6 @@ struct xgene_indirect_ctl { void __iomem *cmd_done; }; -/* Set the specified value into a bit-field defined by its starting position - * and length within a single u64. - */ -static inline u64 xgene_enet_set_field_value(int pos, int len, u64 val) -{ - return (val & ((1ULL << len) - 1)) << pos; -} - -#define SET_VAL(field, val) \ - xgene_enet_set_field_value(field ## _POS, field ## _LEN, val) - -#define SET_BIT(field) \ - xgene_enet_set_field_value(field ## _POS, 1, 1) - -/* Get the value from a bit-field defined by its starting position - * and length within the specified u64. - */ -static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src) -{ - return (src >> pos) & ((1ULL << len) - 1); -} - -#define GET_VAL(field, src) \ - xgene_enet_get_field_value(field ## _POS, field ## _LEN, src) - -#define GET_BIT(field, src) \ - xgene_enet_get_field_value(field ## _POS, 1, src) - static inline struct device *ndev_to_dev(struct net_device *ndev) { return ndev->dev.parent; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c index 78475512b..d12e9cbae 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c @@ -28,6 +28,12 @@ static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val) iowrite32(val, p->eth_csr_addr + offset); } +static void xgene_enet_wr_clkrst_csr(struct xgene_enet_pdata *p, u32 offset, + u32 val) +{ + iowrite32(val, p->base_addr + offset); +} + static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p, u32 offset, u32 val) { @@ -93,6 +99,11 @@ static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset) return ioread32(p->eth_diag_csr_addr + offset); } +static u32 xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *p, u32 offset) +{ + return ioread32(p->mcx_mac_csr_addr + offset); +} + static u32 xgene_enet_rd_indirect(struct xgene_indirect_ctl *ctl, u32 rd_addr) { u32 rd_data; @@ -132,9 +143,17 @@ static u32 xgene_enet_rd_mac(struct xgene_enet_pdata *p, u32 rd_addr) static int xgene_enet_ecc_init(struct xgene_enet_pdata *p) { struct net_device *ndev = p->ndev; - u32 data; + u32 data, shutdown; int i = 0; + shutdown = xgene_enet_rd_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR); + data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR); + + if (!shutdown && data == ~0U) { + netdev_dbg(ndev, "+ ecc_init done, skipping\n"); + return 0; + } + xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0); do { usleep_range(100, 110); @@ -230,21 
+249,105 @@ static u32 xgene_enet_link_status(struct xgene_enet_pdata *p) data = xgene_mii_phy_read(p, INT_PHY_ADDR, SGMII_BASE_PAGE_ABILITY_ADDR >> 2); + if (LINK_SPEED(data) == PHY_SPEED_1000) + p->phy_speed = SPEED_1000; + else if (LINK_SPEED(data) == PHY_SPEED_100) + p->phy_speed = SPEED_100; + else + p->phy_speed = SPEED_10; + return data & LINK_UP; } -static void xgene_sgmac_init(struct xgene_enet_pdata *p) +static void xgene_sgmii_configure(struct xgene_enet_pdata *p) { - u32 data, loop = 10; - u32 offset = p->port_id * 4; - u32 enet_spare_cfg_reg, rsif_config_reg; - u32 cfg_bypass_reg, rx_dv_gate_reg; - - xgene_sgmac_reset(p); + xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, + 0x8000); + xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x9000); + xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0); +} - /* Enable auto-negotiation */ - xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x1000); +static void xgene_sgmii_tbi_control_reset(struct xgene_enet_pdata *p) +{ + xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, + 0x8000); xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0); +} + +static void xgene_sgmii_reset(struct xgene_enet_pdata *p) +{ + u32 value; + + if (p->phy_speed == SPEED_UNKNOWN) + return; + + value = xgene_mii_phy_read(p, INT_PHY_ADDR, + SGMII_BASE_PAGE_ABILITY_ADDR >> 2); + if (!(value & LINK_UP)) + xgene_sgmii_tbi_control_reset(p); +} + +static void xgene_sgmac_set_speed(struct xgene_enet_pdata *p) +{ + u32 icm0_addr, icm2_addr, debug_addr; + u32 icm0, icm2, intf_ctl; + u32 mc2, value; + + xgene_sgmii_reset(p); + + if (p->enet_id == XGENE_ENET1) { + icm0_addr = ICM_CONFIG0_REG_0_ADDR + p->port_id * OFFSET_8; + icm2_addr = ICM_CONFIG2_REG_0_ADDR + p->port_id * OFFSET_4; + debug_addr = DEBUG_REG_ADDR; + } else { + icm0_addr = XG_MCX_ICM_CONFIG0_REG_0_ADDR; + icm2_addr = XG_MCX_ICM_CONFIG2_REG_0_ADDR; + debug_addr = XG_DEBUG_REG_ADDR; + } + + icm0 = xgene_enet_rd_mcx_csr(p, icm0_addr); + icm2 = xgene_enet_rd_mcx_csr(p, icm2_addr); + mc2 = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR); + intf_ctl = xgene_enet_rd_mac(p, INTERFACE_CONTROL_ADDR); + + switch (p->phy_speed) { + case SPEED_10: + ENET_INTERFACE_MODE2_SET(&mc2, 1); + intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE); + CFG_MACMODE_SET(&icm0, 0); + CFG_WAITASYNCRD_SET(&icm2, 500); + break; + case SPEED_100: + ENET_INTERFACE_MODE2_SET(&mc2, 1); + intf_ctl &= ~ENET_GHD_MODE; + intf_ctl |= ENET_LHD_MODE; + CFG_MACMODE_SET(&icm0, 1); + CFG_WAITASYNCRD_SET(&icm2, 80); + break; + default: + ENET_INTERFACE_MODE2_SET(&mc2, 2); + intf_ctl &= ~ENET_LHD_MODE; + intf_ctl |= ENET_GHD_MODE; + CFG_MACMODE_SET(&icm0, 2); + CFG_WAITASYNCRD_SET(&icm2, 16); + value = xgene_enet_rd_csr(p, debug_addr); + value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX; + xgene_enet_wr_csr(p, debug_addr, value); + break; + } + + mc2 |= FULL_DUPLEX2 | PAD_CRC; + xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, mc2); + xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, intf_ctl); + xgene_enet_wr_mcx_csr(p, icm0_addr, icm0); + xgene_enet_wr_mcx_csr(p, icm2_addr, icm2); +} + +static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata *p) +{ + u32 data, loop = 10; + + xgene_sgmii_configure(p); while (loop--) { data = xgene_mii_phy_read(p, INT_PHY_ADDR, @@ -255,17 +358,27 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p) } if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS)) netdev_err(p->ndev, "Auto-negotiation failed\n"); +} + +static void 
xgene_sgmac_init(struct xgene_enet_pdata *p) +{ + u32 enet_spare_cfg_reg, rsif_config_reg; + u32 cfg_bypass_reg, rx_dv_gate_reg; + u32 data, offset; - data = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR); - ENET_INTERFACE_MODE2_SET(&data, 2); - xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2); - xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE); + if (!(p->enet_id == XGENE_ENET2 && p->mdio_driver)) + xgene_sgmac_reset(p); + + xgene_sgmii_enable_autoneg(p); + xgene_sgmac_set_speed(p); + xgene_sgmac_set_mac_addr(p); if (p->enet_id == XGENE_ENET1) { enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR; rsif_config_reg = RSIF_CONFIG_REG_ADDR; cfg_bypass_reg = CFG_BYPASS_ADDR; - rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR; + offset = p->port_id * OFFSET_4; + rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR + offset; } else { enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR; rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR; @@ -277,8 +390,6 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p) data |= MPA_IDLE_WITH_QMI_EMPTY; xgene_enet_wr_csr(p, enet_spare_cfg_reg, data); - xgene_sgmac_set_mac_addr(p); - /* Adjust MDC clock frequency */ data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR); MGMT_CLOCK_SEL_SET(&data, 7); @@ -292,7 +403,7 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p) /* Bypass traffic gating */ xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84); xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX); - xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg + offset, RESUME_RX0); + xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg, RESUME_RX0); } static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set) @@ -331,17 +442,43 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p) static int xgene_enet_reset(struct xgene_enet_pdata *p) { + struct device *dev = &p->pdev->dev; + if (!xgene_ring_mgr_init(p)) return -ENODEV; - if (!IS_ERR(p->clk)) { - clk_prepare_enable(p->clk); - clk_disable_unprepare(p->clk); - clk_prepare_enable(p->clk); + if (p->mdio_driver && p->enet_id == XGENE_ENET2) { + xgene_enet_config_ring_if_assoc(p); + return 0; } - xgene_enet_ecc_init(p); - xgene_enet_config_ring_if_assoc(p); + if (p->enet_id == XGENE_ENET2) + xgene_enet_wr_clkrst_csr(p, XGENET_CONFIG_REG_ADDR, SGMII_EN); + + if (dev->of_node) { + if (!IS_ERR(p->clk)) { + clk_prepare_enable(p->clk); + udelay(5); + clk_disable_unprepare(p->clk); + udelay(5); + clk_prepare_enable(p->clk); + udelay(5); + } + } else { +#ifdef CONFIG_ACPI + if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_RST")) + acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev), + "_RST", NULL, NULL); + else if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_INI")) + acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev), + "_INI", NULL, NULL); +#endif + } + + if (!p->port_id) { + xgene_enet_ecc_init(p); + xgene_enet_config_ring_if_assoc(p); + } return 0; } @@ -369,10 +506,53 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p, xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data); } +static void xgene_enet_clear(struct xgene_enet_pdata *pdata, + struct xgene_enet_desc_ring *ring) +{ + u32 addr, val, data; + + val = xgene_enet_ring_bufnum(ring->id); + + if (xgene_enet_is_bufpool(ring->id)) { + addr = ENET_CFGSSQMIFPRESET_ADDR; + data = BIT(val - 0x20); + } else { + addr = ENET_CFGSSQMIWQRESET_ADDR; + data = BIT(val); + } + + xgene_enet_wr_ring_if(pdata, addr, data); +} + static void xgene_enet_shutdown(struct xgene_enet_pdata *p) { - if (!IS_ERR(p->clk)) - clk_disable_unprepare(p->clk); + struct device *dev = &p->pdev->dev; + struct 
xgene_enet_desc_ring *ring; + u32 pb, val; + int i; + + pb = 0; + for (i = 0; i < p->rxq_cnt; i++) { + ring = p->rx_ring[i]->buf_pool; + + val = xgene_enet_ring_bufnum(ring->id); + pb |= BIT(val - 0x20); + } + xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPRESET_ADDR, pb); + + pb = 0; + for (i = 0; i < p->txq_cnt; i++) { + ring = p->tx_ring[i]; + + val = xgene_enet_ring_bufnum(ring->id); + pb |= BIT(val); + } + xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQRESET_ADDR, pb); + + if (dev->of_node) { + if (!IS_ERR(p->clk)) + clk_disable_unprepare(p->clk); + } } static void xgene_enet_link_state(struct work_struct *work) @@ -386,10 +566,11 @@ static void xgene_enet_link_state(struct work_struct *work) if (link) { if (!netif_carrier_ok(ndev)) { netif_carrier_on(ndev); - xgene_sgmac_init(p); + xgene_sgmac_set_speed(p); xgene_sgmac_rx_enable(p); xgene_sgmac_tx_enable(p); - netdev_info(ndev, "Link is Up - 1Gbps\n"); + netdev_info(ndev, "Link is Up - %dMbps\n", + p->phy_speed); } poll_interval = PHY_POLL_LINK_ON; } else { @@ -412,12 +593,14 @@ const struct xgene_mac_ops xgene_sgmac_ops = { .tx_enable = xgene_sgmac_tx_enable, .rx_disable = xgene_sgmac_rx_disable, .tx_disable = xgene_sgmac_tx_disable, + .set_speed = xgene_sgmac_set_speed, .set_mac_addr = xgene_sgmac_set_mac_addr, .link_state = xgene_enet_link_state }; const struct xgene_port_ops xgene_sgport_ops = { .reset = xgene_enet_reset, + .clear = xgene_enet_clear, .cle_bypass = xgene_enet_cle_bypass, .shutdown = xgene_enet_shutdown }; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h index 002df5a67..3d0ba3744 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h @@ -24,6 +24,7 @@ #define PHY_ADDR(src) (((src)<<8) & GENMASK(12, 8)) #define REG_ADDR(src) ((src) & GENMASK(4, 0)) #define PHY_CONTROL(src) ((src) & GENMASK(15, 0)) +#define LINK_SPEED(src) (((src) & GENMASK(11, 10)) >> 10) #define INT_PHY_ADDR 0x1e #define SGMII_TBI_CONTROL_ADDR 0x44 #define SGMII_CONTROL_ADDR 0x00 @@ -34,6 +35,13 @@ #define LINK_UP BIT(15) #define MPA_IDLE_WITH_QMI_EMPTY BIT(12) #define SG_RX_DV_GATE_REG_0_ADDR 0x05fc +#define SGMII_EN 0x1 + +enum xgene_phy_speed { + PHY_SPEED_10, + PHY_SPEED_100, + PHY_SPEED_1000 +}; extern const struct xgene_mac_ops xgene_sgmac_ops; extern const struct xgene_port_ops xgene_sgport_ops; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c index ba030dc19..9c6ad0dce 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c @@ -258,13 +258,29 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata) static int xgene_enet_reset(struct xgene_enet_pdata *pdata) { + struct device *dev = &pdata->pdev->dev; + if (!xgene_ring_mgr_init(pdata)) return -ENODEV; - if (!IS_ERR(pdata->clk)) { + if (dev->of_node) { clk_prepare_enable(pdata->clk); + udelay(5); clk_disable_unprepare(pdata->clk); + udelay(5); clk_prepare_enable(pdata->clk); + udelay(5); + } else { +#ifdef CONFIG_ACPI + if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) { + acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), + "_RST", NULL, NULL); + } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), + "_INI")) { + acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev), + "_INI", NULL, NULL); + } +#endif } xgene_enet_ecc_init(pdata); @@ -292,8 +308,51 @@ static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata, 
static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata) { - if (!IS_ERR(pdata->clk)) - clk_disable_unprepare(pdata->clk); + struct device *dev = &pdata->pdev->dev; + struct xgene_enet_desc_ring *ring; + u32 pb, val; + int i; + + pb = 0; + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]->buf_pool; + + val = xgene_enet_ring_bufnum(ring->id); + pb |= BIT(val - 0x20); + } + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb); + + pb = 0; + for (i = 0; i < pdata->txq_cnt; i++) { + ring = pdata->tx_ring[i]; + + val = xgene_enet_ring_bufnum(ring->id); + pb |= BIT(val); + } + xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb); + + if (dev->of_node) { + if (!IS_ERR(pdata->clk)) + clk_disable_unprepare(pdata->clk); + } +} + +static void xgene_enet_clear(struct xgene_enet_pdata *pdata, + struct xgene_enet_desc_ring *ring) +{ + u32 addr, val, data; + + val = xgene_enet_ring_bufnum(ring->id); + + if (xgene_enet_is_bufpool(ring->id)) { + addr = ENET_CFGSSQMIFPRESET_ADDR; + data = BIT(val - 0x20); + } else { + addr = ENET_CFGSSQMIWQRESET_ADDR; + data = BIT(val); + } + + xgene_enet_wr_ring_if(pdata, addr, data); } static void xgene_enet_link_state(struct work_struct *work) @@ -340,6 +399,7 @@ const struct xgene_mac_ops xgene_xgmac_ops = { const struct xgene_port_ops xgene_xgport_ops = { .reset = xgene_enet_reset, + .clear = xgene_enet_clear, .cle_bypass = xgene_enet_xgcle_bypass, .shutdown = xgene_enet_shutdown, }; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h index 0a2dca8a1..f1ea485f9 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h @@ -65,9 +65,12 @@ #define XG_CFG_LINK_AGGR_RESUME_0_ADDR 0x0214 #define XG_LINK_STATUS_ADDR 0x0228 #define XG_TSIF_MSS_REG0_ADDR 0x02a4 +#define XG_DEBUG_REG_ADDR 0x0400 #define XG_ENET_SPARE_CFG_REG_ADDR 0x040c #define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410 #define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804 +#define XG_MCX_ICM_CONFIG0_REG_0_ADDR 0x00e0 +#define XG_MCX_ICM_CONFIG2_REG_0_ADDR 0x00e8 extern const struct xgene_mac_ops xgene_xgmac_ops; extern const struct xgene_port_ops xgene_xgport_ops; diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h index ca562bc03..e4feb712d 100644 --- a/drivers/net/ethernet/arc/emac.h +++ b/drivers/net/ethernet/arc/emac.h @@ -134,7 +134,6 @@ struct arc_emac_priv { /* Devices */ struct device *dev; - struct phy_device *phy_dev; struct mii_bus *bus; struct arc_emac_mdio_bus_data bus_data; diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index a3a9392a4..b0da9693f 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -47,7 +47,7 @@ static inline int arc_emac_tx_avail(struct arc_emac_priv *priv) static void arc_emac_adjust_link(struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); - struct phy_device *phy_dev = priv->phy_dev; + struct phy_device *phy_dev = ndev->phydev; unsigned int reg, state_changed = 0; if (priv->link != phy_dev->link) { @@ -80,46 +80,6 @@ static void arc_emac_adjust_link(struct net_device *ndev) } /** - * arc_emac_get_settings - Get PHY settings. - * @ndev: Pointer to net_device structure. - * @cmd: Pointer to ethtool_cmd structure. - * - * This implements ethtool command for getting PHY settings. If PHY could - * not be found, the function returns -ENODEV. 
This function calls the - * relevant PHY ethtool API to get the PHY settings. - * Issue "ethtool ethX" under linux prompt to execute this function. - */ -static int arc_emac_get_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) -{ - struct arc_emac_priv *priv = netdev_priv(ndev); - - return phy_ethtool_gset(priv->phy_dev, cmd); -} - -/** - * arc_emac_set_settings - Set PHY settings as passed in the argument. - * @ndev: Pointer to net_device structure. - * @cmd: Pointer to ethtool_cmd structure. - * - * This implements ethtool command for setting various PHY settings. If PHY - * could not be found, the function returns -ENODEV. This function calls the - * relevant PHY ethtool API to set the PHY. - * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this - * function. - */ -static int arc_emac_set_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) -{ - struct arc_emac_priv *priv = netdev_priv(ndev); - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - return phy_ethtool_sset(priv->phy_dev, cmd); -} - -/** * arc_emac_get_drvinfo - Get EMAC driver information. * @ndev: Pointer to net_device structure. * @info: Pointer to ethtool_drvinfo structure. @@ -137,10 +97,10 @@ static void arc_emac_get_drvinfo(struct net_device *ndev, } static const struct ethtool_ops arc_emac_ethtool_ops = { - .get_settings = arc_emac_get_settings, - .set_settings = arc_emac_set_settings, .get_drvinfo = arc_emac_get_drvinfo, .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; #define FIRST_OR_LAST_MASK (FIRST_MASK | LAST_MASK) @@ -403,7 +363,7 @@ static void arc_emac_poll_controller(struct net_device *dev) static int arc_emac_open(struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); - struct phy_device *phy_dev = priv->phy_dev; + struct phy_device *phy_dev = ndev->phydev; int i; phy_dev->autoneg = AUTONEG_ENABLE; @@ -474,7 +434,7 @@ static int arc_emac_open(struct net_device *ndev) /* Enable EMAC */ arc_reg_or(priv, R_CTRL, EN_MASK); - phy_start_aneg(priv->phy_dev); + phy_start_aneg(ndev->phydev); netif_start_queue(ndev); @@ -772,6 +732,7 @@ int arc_emac_probe(struct net_device *ndev, int interface) struct device *dev = ndev->dev.parent; struct resource res_regs; struct device_node *phy_node; + struct phy_device *phydev = NULL; struct arc_emac_priv *priv; const char *mac_addr; unsigned int id, clock_frequency, irq; @@ -788,14 +749,16 @@ int arc_emac_probe(struct net_device *ndev, int interface) err = of_address_to_resource(dev->of_node, 0, &res_regs); if (err) { dev_err(dev, "failed to retrieve registers base from device tree\n"); - return -ENODEV; + err = -ENODEV; + goto out_put_node; } /* Get IRQ from device tree */ irq = irq_of_parse_and_map(dev->of_node, 0); if (!irq) { dev_err(dev, "failed to retrieve <irq> value from device tree\n"); - return -ENODEV; + err = -ENODEV; + goto out_put_node; } ndev->netdev_ops = &arc_emac_netdev_ops; @@ -808,8 +771,10 @@ int arc_emac_probe(struct net_device *ndev, int interface) priv->dev = dev; priv->regs = devm_ioremap_resource(dev, &res_regs); - if (IS_ERR(priv->regs)) - return PTR_ERR(priv->regs); + if (IS_ERR(priv->regs)) { + err = PTR_ERR(priv->regs); + goto out_put_node; + } dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs); @@ -817,7 +782,7 @@ int arc_emac_probe(struct net_device *ndev, int interface) err = clk_prepare_enable(priv->clk); if (err) { dev_err(dev, "failed to enable clock\n"); - return err; + 
goto out_put_node; } clock_frequency = clk_get_rate(priv->clk); @@ -826,7 +791,8 @@ int arc_emac_probe(struct net_device *ndev, int interface) if (of_property_read_u32(dev->of_node, "clock-frequency", &clock_frequency)) { dev_err(dev, "failed to retrieve <clock-frequency> from device tree\n"); - return -EINVAL; + err = -EINVAL; + goto out_put_node; } } @@ -887,16 +853,16 @@ int arc_emac_probe(struct net_device *ndev, int interface) goto out_clken; } - priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0, - interface); - if (!priv->phy_dev) { + phydev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0, + interface); + if (!phydev) { dev_err(dev, "of_phy_connect() failed\n"); err = -ENODEV; goto out_mdio; } dev_info(dev, "connected to %s phy with id 0x%x\n", - priv->phy_dev->drv->name, priv->phy_dev->phy_id); + phydev->drv->name, phydev->phy_id); netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT); @@ -906,17 +872,20 @@ int arc_emac_probe(struct net_device *ndev, int interface) goto out_netif_api; } + of_node_put(phy_node); return 0; out_netif_api: netif_napi_del(&priv->napi); - phy_disconnect(priv->phy_dev); - priv->phy_dev = NULL; + phy_disconnect(phydev); out_mdio: arc_mdio_remove(priv); out_clken: if (priv->clk) clk_disable_unprepare(priv->clk); +out_put_node: + of_node_put(phy_node); + return err; } EXPORT_SYMBOL_GPL(arc_emac_probe); @@ -925,8 +894,7 @@ int arc_emac_remove(struct net_device *ndev) { struct arc_emac_priv *priv = netdev_priv(ndev); - phy_disconnect(priv->phy_dev); - priv->phy_dev = NULL; + phy_disconnect(ndev->phydev); arc_mdio_remove(priv); unregister_netdev(ndev); netif_napi_del(&priv->napi); diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index e708e360a..4eb17daef 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1251,7 +1251,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct alx_priv *alx; struct alx_hw *hw; bool phy_configured; - int bars, err; + int err; err = pci_enable_device_mem(pdev); if (err) @@ -1271,11 +1271,10 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } - bars = pci_select_bars(pdev, IORESOURCE_MEM); - err = pci_request_selected_regions(pdev, bars, alx_drv_name); + err = pci_request_mem_regions(pdev, alx_drv_name); if (err) { dev_err(&pdev->dev, - "pci_request_selected_regions failed(bars:%d)\n", bars); + "pci_request_mem_regions failed\n"); goto out_pci_disable; } @@ -1401,7 +1400,7 @@ out_unmap: out_free_netdev: free_netdev(netdev); out_pci_release: - pci_release_selected_regions(pdev, bars); + pci_release_mem_regions(pdev); out_pci_disable: pci_disable_device(pdev); return err; @@ -1420,8 +1419,7 @@ static void alx_remove(struct pci_dev *pdev) unregister_netdev(alx->dev); iounmap(hw->hw_addr); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); @@ -1547,6 +1545,8 @@ static const struct pci_device_id alx_pci_tbl[] = { .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400), .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, + { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500), + .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162), .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG }, { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) }, diff --git 
a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h index 0959e6824..1fc2d8522 100644 --- a/drivers/net/ethernet/atheros/alx/reg.h +++ b/drivers/net/ethernet/atheros/alx/reg.h @@ -38,6 +38,7 @@ #define ALX_DEV_ID_AR8161 0x1091 #define ALX_DEV_ID_E2200 0xe091 #define ALX_DEV_ID_E2400 0xe0a1 +#define ALX_DEV_ID_E2500 0xe0b1 #define ALX_DEV_ID_AR8162 0x1090 #define ALX_DEV_ID_AR8171 0x10A1 #define ALX_DEV_ID_AR8172 0x10A0 diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index 1a3555d03..b047fd607 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -632,7 +632,7 @@ static void nb8800_mac_config(struct net_device *dev) static void nb8800_pause_config(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; u32 rxcr; if (priv->pause_aneg) { @@ -665,7 +665,7 @@ static void nb8800_pause_config(struct net_device *dev) static void nb8800_link_reconfigure(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; int change = 0; if (phydev->link) { @@ -691,7 +691,7 @@ static void nb8800_link_reconfigure(struct net_device *dev) } if (change) - phy_print_status(priv->phydev); + phy_print_status(phydev); } static void nb8800_update_mac_addr(struct net_device *dev) @@ -936,9 +936,10 @@ static int nb8800_dma_stop(struct net_device *dev) static void nb8800_pause_adv(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; u32 adv = 0; - if (!priv->phydev) + if (!phydev) return; if (priv->pause_rx) @@ -946,13 +947,14 @@ static void nb8800_pause_adv(struct net_device *dev) if (priv->pause_tx) adv ^= ADVERTISED_Asym_Pause; - priv->phydev->supported |= adv; - priv->phydev->advertising |= adv; + phydev->supported |= adv; + phydev->advertising |= adv; } static int nb8800_open(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev; int err; /* clear any pending interrupts */ @@ -970,10 +972,10 @@ static int nb8800_open(struct net_device *dev) nb8800_mac_rx(dev, true); nb8800_mac_tx(dev, true); - priv->phydev = of_phy_connect(dev, priv->phy_node, - nb8800_link_reconfigure, 0, - priv->phy_mode); - if (!priv->phydev) + phydev = of_phy_connect(dev, priv->phy_node, + nb8800_link_reconfigure, 0, + priv->phy_mode); + if (!phydev) goto err_free_irq; nb8800_pause_adv(dev); @@ -983,7 +985,7 @@ static int nb8800_open(struct net_device *dev) netif_start_queue(dev); nb8800_start_rx(dev); - phy_start(priv->phydev); + phy_start(phydev); return 0; @@ -998,8 +1000,9 @@ err_free_dma: static int nb8800_stop(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; - phy_stop(priv->phydev); + phy_stop(phydev); netif_stop_queue(dev); napi_disable(&priv->napi); @@ -1008,8 +1011,7 @@ static int nb8800_stop(struct net_device *dev) nb8800_mac_rx(dev, false); nb8800_mac_tx(dev, false); - phy_disconnect(priv->phydev); - priv->phydev = NULL; + phy_disconnect(phydev); free_irq(dev->irq, dev); @@ -1020,9 +1022,7 @@ static int nb8800_stop(struct net_device *dev) static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct nb8800_priv *priv = netdev_priv(dev); - - return phy_mii_ioctl(priv->phydev, rq, cmd); + return phy_mii_ioctl(dev->phydev, rq, cmd); } 
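The nb8800 hunk above follows the same mechanical conversion already applied to arc emac and repeated for bcmsysport below: the driver-private phy_device pointer is dropped, all accesses go through dev->phydev (attached by of_phy_connect() at open time), and the driver-specific get_settings/set_settings ethtool hooks are replaced by the generic phylib helpers. A minimal sketch of the resulting shape, using illustrative names not taken from any one driver:

	static int example_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
	{
		/* dev->phydev is attached by of_phy_connect() in ndo_open */
		if (!dev->phydev)
			return -ENODEV;

		return phy_mii_ioctl(dev->phydev, rq, cmd);
	}

	static const struct ethtool_ops example_ethtool_ops = {
		.get_link		= ethtool_op_get_link,
		/* phylib supplies the generic link_ksettings handlers */
		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	};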
static const struct net_device_ops nb8800_netdev_ops = { @@ -1036,34 +1036,14 @@ static const struct net_device_ops nb8800_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -static int nb8800_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct nb8800_priv *priv = netdev_priv(dev); - - if (!priv->phydev) - return -ENODEV; - - return phy_ethtool_gset(priv->phydev, cmd); -} - -static int nb8800_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct nb8800_priv *priv = netdev_priv(dev); - - if (!priv->phydev) - return -ENODEV; - - return phy_ethtool_sset(priv->phydev, cmd); -} - static int nb8800_nway_reset(struct net_device *dev) { - struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; - if (!priv->phydev) + if (!phydev) return -ENODEV; - return genphy_restart_aneg(priv->phydev); + return genphy_restart_aneg(phydev); } static void nb8800_get_pauseparam(struct net_device *dev, @@ -1080,6 +1060,7 @@ static int nb8800_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pp) { struct nb8800_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; priv->pause_aneg = pp->autoneg; priv->pause_rx = pp->rx_pause; @@ -1089,8 +1070,8 @@ static int nb8800_set_pauseparam(struct net_device *dev, if (!priv->pause_aneg) nb8800_pause_config(dev); - else if (priv->phydev) - phy_start_aneg(priv->phydev); + else if (phydev) + phy_start_aneg(phydev); return 0; } @@ -1183,8 +1164,6 @@ static void nb8800_get_ethtool_stats(struct net_device *dev, } static const struct ethtool_ops nb8800_ethtool_ops = { - .get_settings = nb8800_get_settings, - .set_settings = nb8800_set_settings, .nway_reset = nb8800_nway_reset, .get_link = ethtool_op_get_link, .get_pauseparam = nb8800_get_pauseparam, @@ -1192,6 +1171,8 @@ static const struct ethtool_ops nb8800_ethtool_ops = { .get_sset_count = nb8800_get_sset_count, .get_strings = nb8800_get_strings, .get_ethtool_stats = nb8800_get_ethtool_stats, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int nb8800_hw_init(struct net_device *dev) @@ -1438,7 +1419,7 @@ static int nb8800_probe(struct platform_device *pdev) if (ops && ops->reset) { ret = ops->reset(dev); if (ret) - goto err_free_dev; + goto err_disable_clk; } bus = devm_mdiobus_alloc(&pdev->dev); @@ -1523,6 +1504,7 @@ static int nb8800_probe(struct platform_device *pdev) err_free_dma: nb8800_dma_free(dev); err_free_bus: + of_node_put(priv->phy_node); mdiobus_unregister(bus); err_disable_clk: clk_disable_unprepare(priv->clk); @@ -1538,6 +1520,7 @@ static int nb8800_remove(struct platform_device *pdev) struct nb8800_priv *priv = netdev_priv(ndev); unregister_netdev(ndev); + of_node_put(priv->phy_node); mdiobus_unregister(priv->mii_bus); diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h index e5adbc2aa..6ec4a956e 100644 --- a/drivers/net/ethernet/aurora/nb8800.h +++ b/drivers/net/ethernet/aurora/nb8800.h @@ -284,7 +284,6 @@ struct nb8800_priv { struct mii_bus *mii_bus; struct device_node *phy_node; - struct phy_device *phydev; /* PHY connection type from DT */ int phy_mode; diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 18042c246..bd8c80c0b 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -139,31 +139,19 @@ config BNX2X_SRIOV Virtualization support in the 578xx and 57712 products. 
This allows for virtual function acceleration in virtual environments. -config BNX2X_VXLAN - bool "Virtual eXtensible Local Area Network support" - default n - depends on BNX2X && VXLAN && !(BNX2X=y && VXLAN=m) - ---help--- - This enables hardward offload support for VXLAN protocol over the - NetXtremeII series adapters. - Say Y here if you want to enable hardware offload support for - Virtual eXtensible Local Area Network (VXLAN) in the driver. - -config BNX2X_GENEVE - bool "Generic Network Virtualization Encapsulation (GENEVE) support" - depends on BNX2X && GENEVE && !(BNX2X=y && GENEVE=m) - ---help--- - This allows one to create GENEVE virtual interfaces that provide - Layer 2 Networks over Layer 3 Networks. GENEVE is often used - to tunnel virtual network infrastructure in virtualized environments. - Say Y here if you want to enable hardware offload support for - Generic Network Virtualization Encapsulation (GENEVE) in the driver. - config BGMAC - tristate "BCMA bus GBit core support" + tristate + help + This enables the integrated ethernet controller support for many + Broadcom (mostly iProc) SoCs. An appropriate bus interface driver + needs to be enabled to select this. + +config BGMAC_BCMA + tristate "Broadcom iProc GBit BCMA support" depends on BCMA && BCMA_HOST_SOC depends on HAS_DMA depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST + select BGMAC select PHYLIB select FIXED_PHY ---help--- @@ -172,6 +160,19 @@ config BGMAC In case of using this driver on BCM4706 it's also requires to enable BCMA_DRIVER_GMAC_CMN to make it work. +config BGMAC_PLATFORM + tristate "Broadcom iProc GBit platform support" + depends on HAS_DMA + depends on ARCH_BCM_IPROC || COMPILE_TEST + depends on OF + select BGMAC + select PHYLIB + select FIXED_PHY + default ARCH_BCM_IPROC + ---help--- + Say Y here if you want to use the Broadcom iProc Gigabit Ethernet + controller through the generic platform interface + config SYSTEMPORT tristate "Broadcom SYSTEMPORT internal MAC support" depends on OF @@ -186,7 +187,6 @@ config SYSTEMPORT config BNXT tristate "Broadcom NetXtreme-C/E support" depends on PCI - depends on VXLAN || VXLAN=n select FW_LOADER select LIBCRC32C ---help--- diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile index 00584d78b..79f2372c6 100644 --- a/drivers/net/ethernet/broadcom/Makefile +++ b/drivers/net/ethernet/broadcom/Makefile @@ -11,5 +11,7 @@ obj-$(CONFIG_BNX2X) += bnx2x/ obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o obj-$(CONFIG_TIGON3) += tg3.o obj-$(CONFIG_BGMAC) += bgmac.o +obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o +obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o obj-$(CONFIG_BNXT) += bnxt/ diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 87c6b5bdd..6c8bc5fad 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1859,7 +1859,7 @@ static int bcm_enet_probe(struct platform_device *pdev) } else { /* run platform code to initialize PHY device */ - if (pd->mii_config && + if (pd && pd->mii_config && pd->mii_config(dev, 1, bcm_enet_mdio_read_mii, bcm_enet_mdio_write_mii)) { dev_err(&pdev->dev, "unable to configure mdio bus\n"); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index bfa26a259..b2d30863c 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -96,28 +96,6 @@ 
static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv, } /* Ethtool operations */ -static int bcm_sysport_set_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct bcm_sysport_priv *priv = netdev_priv(dev); - - if (!netif_running(dev)) - return -EINVAL; - - return phy_ethtool_sset(priv->phydev, cmd); -} - -static int bcm_sysport_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct bcm_sysport_priv *priv = netdev_priv(dev); - - if (!netif_running(dev)) - return -EINVAL; - - return phy_ethtool_gset(priv->phydev, cmd); -} - static int bcm_sysport_set_rx_csum(struct net_device *dev, netdev_features_t wanted) { @@ -1127,7 +1105,7 @@ static void bcm_sysport_tx_timeout(struct net_device *dev) static void bcm_sysport_adj_link(struct net_device *dev) { struct bcm_sysport_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; unsigned int changed = 0; u32 cmd_bits = 0, reg; @@ -1182,7 +1160,7 @@ static void bcm_sysport_adj_link(struct net_device *dev) umac_writel(priv, reg, UMAC_CMD); } - phy_print_status(priv->phydev); + phy_print_status(phydev); } static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, @@ -1525,7 +1503,7 @@ static void bcm_sysport_netif_start(struct net_device *dev) /* Enable RX interrupt and TX ring full interrupt */ intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL); - phy_start(priv->phydev); + phy_start(dev->phydev); /* Enable TX interrupts for the 32 TXQs */ intrl2_1_mask_clear(priv, 0xffffffff); @@ -1546,6 +1524,7 @@ static void rbuf_init(struct bcm_sysport_priv *priv) static int bcm_sysport_open(struct net_device *dev) { struct bcm_sysport_priv *priv = netdev_priv(dev); + struct phy_device *phydev; unsigned int i; int ret; @@ -1570,9 +1549,9 @@ static int bcm_sysport_open(struct net_device *dev) /* Read CRC forward */ priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); - priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, - 0, priv->phy_interface); - if (!priv->phydev) { + phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, + 0, priv->phy_interface); + if (!phydev) { netdev_err(dev, "could not attach to PHY\n"); return -ENODEV; } @@ -1650,7 +1629,7 @@ out_free_tx_ring: out_free_irq0: free_irq(priv->irq0, dev); out_phy_disconnect: - phy_disconnect(priv->phydev); + phy_disconnect(phydev); return ret; } @@ -1661,7 +1640,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev) /* stop all software from updating hardware */ netif_tx_stop_all_queues(dev); napi_disable(&priv->napi); - phy_stop(priv->phydev); + phy_stop(dev->phydev); /* mask all interrupts */ intrl2_0_mask_set(priv, 0xffffffff); @@ -1708,14 +1687,12 @@ static int bcm_sysport_stop(struct net_device *dev) free_irq(priv->irq1, dev); /* Disconnect from PHY */ - phy_disconnect(priv->phydev); + phy_disconnect(dev->phydev); return 0; } static struct ethtool_ops bcm_sysport_ethtool_ops = { - .get_settings = bcm_sysport_get_settings, - .set_settings = bcm_sysport_set_settings, .get_drvinfo = bcm_sysport_get_drvinfo, .get_msglevel = bcm_sysport_get_msglvl, .set_msglevel = bcm_sysport_set_msglvl, @@ -1727,6 +1704,8 @@ static struct ethtool_ops bcm_sysport_ethtool_ops = { .set_wol = bcm_sysport_set_wol, .get_coalesce = bcm_sysport_get_coalesce, .set_coalesce = bcm_sysport_set_coalesce, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct 
net_device_ops bcm_sysport_netdev_ops = { @@ -1929,7 +1908,7 @@ static int bcm_sysport_suspend(struct device *d) bcm_sysport_netif_stop(dev); - phy_suspend(priv->phydev); + phy_suspend(dev->phydev); netif_device_detach(dev); @@ -2055,7 +2034,7 @@ static int bcm_sysport_resume(struct device *d) goto out_free_rx_ring; } - phy_resume(priv->phydev); + phy_resume(dev->phydev); bcm_sysport_netif_start(dev); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index f28bf545d..1c82e3da6 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -670,7 +670,6 @@ struct bcm_sysport_priv { /* PHY device */ struct device_node *phy_dn; - struct phy_device *phydev; phy_interface_t phy_interface; int old_pause; int old_link; diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c new file mode 100644 index 000000000..7c19c8e2b --- /dev/null +++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c @@ -0,0 +1,266 @@ +/* + * Driver for (BCM4706)? GBit MAC core on BCMA bus. + * + * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com> + * + * Licensed under the GNU/GPL. See COPYING for details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/bcma/bcma.h> +#include <linux/brcmphy.h> +#include "bgmac.h" + +struct bcma_mdio { + struct bcma_device *core; + u8 phyaddr; +}; + +static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask, + u32 value, int timeout) +{ + u32 val; + int i; + + for (i = 0; i < timeout / 10; i++) { + val = bcma_read32(core, reg); + if ((val & mask) == value) + return true; + udelay(10); + } + dev_err(&core->dev, "Timeout waiting for reg 0x%X\n", reg); + return false; +} + +/************************************************** + * PHY ops + **************************************************/ + +static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg) +{ + struct bcma_device *core; + u16 phy_access_addr; + u16 phy_ctl_addr; + u32 tmp; + + BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK); + BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK); + BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT); + BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK); + BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT); + BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE); + BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START); + BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK); + BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK); + BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT); + BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE); + + if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) { + core = bcma_mdio->core->bus->drv_gmac_cmn.core; + phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; + phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; + } else { + core = bcma_mdio->core; + phy_access_addr = BGMAC_PHY_ACCESS; + phy_ctl_addr = BGMAC_PHY_CNTL; + } + + tmp = bcma_read32(core, phy_ctl_addr); + tmp &= ~BGMAC_PC_EPA_MASK; + tmp |= phyaddr; + bcma_write32(core, phy_ctl_addr, tmp); + + tmp = BGMAC_PA_START; + tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT; + tmp |= reg << BGMAC_PA_REG_SHIFT; + bcma_write32(core, phy_access_addr, tmp); + + if (!bcma_mdio_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, + 1000)) { + dev_err(&core->dev, "Reading PHY %d register 0x%X failed\n", + phyaddr, reg); + return 0xffff; + } + + return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK; +}
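The read path above captures the whole indirect MDIO handshake: select the PHY address in the control register, kick off a transaction in the access register with BGMAC_PA_START, then poll until the hardware clears the START bit. Host drivers consume the resulting mii_bus through the two entry points exported further down; a minimal usage sketch, mirroring what the BCMA host glue added later in this patch does (error paths abbreviated):

	struct mii_bus *bus;

	/* in the host driver's probe; core and bgmac->phyaddr already set up */
	bus = bcma_mdio_mii_register(core, bgmac->phyaddr);
	if (IS_ERR(bus))
		return PTR_ERR(bus);
	bgmac->mii_bus = bus;

	/* ... and on remove, or on a later probe failure ... */
	bcma_mdio_mii_unregister(bgmac->mii_bus);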
+ +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */ +static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg, + u16 value) +{ + struct bcma_device *core; + u16 phy_access_addr; + u16 phy_ctl_addr; + u32 tmp; + + if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) { + core = bcma_mdio->core->bus->drv_gmac_cmn.core; + phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; + phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; + } else { + core = bcma_mdio->core; + phy_access_addr = BGMAC_PHY_ACCESS; + phy_ctl_addr = BGMAC_PHY_CNTL; + } + + tmp = bcma_read32(core, phy_ctl_addr); + tmp &= ~BGMAC_PC_EPA_MASK; + tmp |= phyaddr; + bcma_write32(core, phy_ctl_addr, tmp); + + bcma_write32(bcma_mdio->core, BGMAC_INT_STATUS, BGMAC_IS_MDIO); + if (bcma_read32(bcma_mdio->core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO) + dev_warn(&core->dev, "Error setting MDIO int\n"); + + tmp = BGMAC_PA_START; + tmp |= BGMAC_PA_WRITE; + tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT; + tmp |= reg << BGMAC_PA_REG_SHIFT; + tmp |= value; + bcma_write32(core, phy_access_addr, tmp); + + if (!bcma_mdio_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, + 1000)) { + dev_err(&core->dev, "Writing to PHY %d register 0x%X failed\n", + phyaddr, reg); + return -ETIMEDOUT; + } + + return 0; +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */ +static void bcma_mdio_phy_init(struct bcma_mdio *bcma_mdio) +{ + struct bcma_chipinfo *ci = &bcma_mdio->core->bus->chipinfo; + u8 i; + + if (ci->id == BCMA_CHIP_ID_BCM5356) { + for (i = 0; i < 5; i++) { + bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x008b); + bcma_mdio_phy_write(bcma_mdio, i, 0x15, 0x0100); + bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); + bcma_mdio_phy_write(bcma_mdio, i, 0x12, 0x2aaa); + bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); + } + } + if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) || + (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) || + (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) { + struct bcma_drv_cc *cc = &bcma_mdio->core->bus->drv_cc; + + bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0); + bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0); + for (i = 0; i < 5; i++) { + bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); + bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5284); + bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); + bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x0010); + bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f); + bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5296); + bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x1073); + bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9073); + bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x52b6); + bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9273); + bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b); + } + } +} + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */ +static int bcma_mdio_phy_reset(struct mii_bus *bus) +{ + struct bcma_mdio *bcma_mdio = bus->priv; + u8 phyaddr = bcma_mdio->phyaddr; + + if (bcma_mdio->phyaddr == BGMAC_PHY_NOREGS) + return 0; + + bcma_mdio_phy_write(bcma_mdio, phyaddr, MII_BMCR, BMCR_RESET); + udelay(100); + if (bcma_mdio_phy_read(bcma_mdio, phyaddr, MII_BMCR) & BMCR_RESET) + dev_err(&bcma_mdio->core->dev, "PHY reset failed\n"); + bcma_mdio_phy_init(bcma_mdio); + + return 0; +} + +/************************************************** + * MII + **************************************************/ + +static int bcma_mdio_mii_read(struct mii_bus *bus, int mii_id, int regnum) +{ + return bcma_mdio_phy_read(bus->priv, 
mii_id, regnum); +} + +static int bcma_mdio_mii_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) +{ + return bcma_mdio_phy_write(bus->priv, mii_id, regnum, value); +} + +struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr) +{ + struct bcma_mdio *bcma_mdio; + struct mii_bus *mii_bus; + int err; + + bcma_mdio = kzalloc(sizeof(*bcma_mdio), GFP_KERNEL); + if (!bcma_mdio) + return ERR_PTR(-ENOMEM); + + mii_bus = mdiobus_alloc(); + if (!mii_bus) { + err = -ENOMEM; + goto err; + } + + mii_bus->name = "bcma_mdio mii bus"; + sprintf(mii_bus->id, "%s-%d-%d", "bcma_mdio", core->bus->num, + core->core_unit); + mii_bus->priv = bcma_mdio; + mii_bus->read = bcma_mdio_mii_read; + mii_bus->write = bcma_mdio_mii_write; + mii_bus->reset = bcma_mdio_phy_reset; + mii_bus->parent = &core->dev; + mii_bus->phy_mask = ~(1 << phyaddr); + + bcma_mdio->core = core; + bcma_mdio->phyaddr = phyaddr; + + err = mdiobus_register(mii_bus); + if (err) { + dev_err(&core->dev, "Registration of mii bus failed\n"); + goto err_free_bus; + } + + return mii_bus; + +err_free_bus: + mdiobus_free(mii_bus); +err: + kfree(bcma_mdio); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(bcma_mdio_mii_register); + +void bcma_mdio_mii_unregister(struct mii_bus *mii_bus) +{ + struct bcma_mdio *bcma_mdio; + + if (!mii_bus) + return; + + bcma_mdio = mii_bus->priv; + + mdiobus_unregister(mii_bus); + mdiobus_free(mii_bus); + kfree(bcma_mdio); +} +EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister); + +MODULE_AUTHOR("Rafał Miłecki"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c new file mode 100644 index 000000000..625235db6 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -0,0 +1,315 @@ +/* + * Driver for (BCM4706)? GBit MAC core on BCMA bus. + * + * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com> + * + * Licensed under the GNU/GPL. See COPYING for details.
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/bcma/bcma.h> +#include <linux/brcmphy.h> +#include <linux/etherdevice.h> +#include "bgmac.h" + +static inline bool bgmac_is_bcm4707_family(struct bcma_device *core) +{ + switch (core->bus->chipinfo.id) { + case BCMA_CHIP_ID_BCM4707: + case BCMA_CHIP_ID_BCM47094: + case BCMA_CHIP_ID_BCM53018: + return true; + default: + return false; + } +} + +/************************************************** + * BCMA bus ops + **************************************************/ + +static u32 bcma_bgmac_read(struct bgmac *bgmac, u16 offset) +{ + return bcma_read32(bgmac->bcma.core, offset); +} + +static void bcma_bgmac_write(struct bgmac *bgmac, u16 offset, u32 value) +{ + bcma_write32(bgmac->bcma.core, offset, value); +} + +static u32 bcma_bgmac_idm_read(struct bgmac *bgmac, u16 offset) +{ + return bcma_aread32(bgmac->bcma.core, offset); +} + +static void bcma_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value) +{ + return bcma_awrite32(bgmac->bcma.core, offset, value); +} + +static bool bcma_bgmac_clk_enabled(struct bgmac *bgmac) +{ + return bcma_core_is_enabled(bgmac->bcma.core); +} + +static void bcma_bgmac_clk_enable(struct bgmac *bgmac, u32 flags) +{ + bcma_core_enable(bgmac->bcma.core, flags); +} + +static void bcma_bgmac_cco_ctl_maskset(struct bgmac *bgmac, u32 offset, + u32 mask, u32 set) +{ + struct bcma_drv_cc *cc = &bgmac->bcma.core->bus->drv_cc; + + bcma_chipco_chipctl_maskset(cc, offset, mask, set); +} + +static u32 bcma_bgmac_get_bus_clock(struct bgmac *bgmac) +{ + struct bcma_drv_cc *cc = &bgmac->bcma.core->bus->drv_cc; + + return bcma_pmu_get_bus_clock(cc); +} + +static void bcma_bgmac_cmn_maskset32(struct bgmac *bgmac, u16 offset, u32 mask, + u32 set) +{ + bcma_maskset32(bgmac->bcma.cmn, offset, mask, set); +} + +static const struct bcma_device_id bgmac_bcma_tbl[] = { + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, + BCMA_ANY_REV, BCMA_ANY_CLASS), + BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, + BCMA_ANY_CLASS), + {}, +}; +MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl); + +/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */ +static int bgmac_probe(struct bcma_device *core) +{ + struct ssb_sprom *sprom = &core->bus->sprom; + struct mii_bus *mii_bus; + struct bgmac *bgmac; + u8 *mac; + int err; + + bgmac = kzalloc(sizeof(*bgmac), GFP_KERNEL); + if (!bgmac) + return -ENOMEM; + + bgmac->bcma.core = core; + bgmac->dev = &core->dev; + bgmac->dma_dev = core->dma_dev; + bgmac->irq = core->irq; + + bcma_set_drvdata(core, bgmac); + + switch (core->core_unit) { + case 0: + mac = sprom->et0mac; + break; + case 1: + mac = sprom->et1mac; + break; + case 2: + mac = sprom->et2mac; + break; + default: + dev_err(bgmac->dev, "Unsupported core_unit %d\n", + core->core_unit); + err = -ENOTSUPP; + goto err; + } + + ether_addr_copy(bgmac->mac_addr, mac); + + /* On BCM4706 we need common core to access PHY */ + if (core->id.id == BCMA_CORE_4706_MAC_GBIT && + !core->bus->drv_gmac_cmn.core) { + dev_err(bgmac->dev, "GMAC CMN core not found (required for BCM4706)\n"); + err = -ENODEV; + goto err; + } + bgmac->bcma.cmn = core->bus->drv_gmac_cmn.core; + + switch (core->core_unit) { + case 0: + bgmac->phyaddr = sprom->et0phyaddr; + break; + case 1: + bgmac->phyaddr = sprom->et1phyaddr; + break; + case 2: + bgmac->phyaddr = sprom->et2phyaddr; + break; + } + bgmac->phyaddr &= BGMAC_PHY_MASK; + if (bgmac->phyaddr == BGMAC_PHY_MASK) { + dev_err(bgmac->dev, "No PHY found\n"); + err = -ENODEV; + goto err; + } + 
dev_info(bgmac->dev, "Found PHY addr: %d%s\n", bgmac->phyaddr, + bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : ""); + + if (!bgmac_is_bcm4707_family(core)) { + mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr); + if (IS_ERR(mii_bus)) { + err = PTR_ERR(mii_bus); + goto err; + } + + bgmac->mii_bus = mii_bus; + } + + if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) { + dev_err(bgmac->dev, "PCI setup not implemented\n"); + err = -ENOTSUPP; + goto err1; + } + + bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo & + BGMAC_BFL_ENETROBO); + if (bgmac->has_robosw) + dev_warn(bgmac->dev, "Support for Roboswitch not implemented\n"); + + if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM) + dev_warn(bgmac->dev, "Support for ADMtek ethernet switch not implemented\n"); + + /* Feature Flags */ + switch (core->bus->chipinfo.id) { + case BCMA_CHIP_ID_BCM5357: + bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1; + bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY; + if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47186) { + bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; + bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII; + } + if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM5358) + bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII; + break; + case BCMA_CHIP_ID_BCM53572: + bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1; + bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY; + if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47188) { + bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII; + bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; + } + break; + case BCMA_CHIP_ID_BCM4749: + bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1; + bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY; + if (core->bus->chipinfo.pkg == 10) { + bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII; + bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; + } + break; + case BCMA_CHIP_ID_BCM4716: + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + /* fallthrough */ + case BCMA_CHIP_ID_BCM47162: + bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2; + bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; + break; + /* bcm4707_family */ + case BCMA_CHIP_ID_BCM4707: + case BCMA_CHIP_ID_BCM47094: + case BCMA_CHIP_ID_BCM53018: + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + bgmac->feature_flags |= BGMAC_FEAT_NO_RESET; + bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500; + break; + default: + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; + } + + if (!bgmac_is_bcm4707_family(core) && core->id.rev > 2) + bgmac->feature_flags |= BGMAC_FEAT_MISC_PLL_REQ; + + if (core->id.id == BCMA_CORE_4706_MAC_GBIT) { + bgmac->feature_flags |= BGMAC_FEAT_CMN_PHY_CTL; + bgmac->feature_flags |= BGMAC_FEAT_NO_CLR_MIB; + } + + if (core->id.rev >= 4) { + bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4; + bgmac->feature_flags |= BGMAC_FEAT_TX_MASK_SETUP; + bgmac->feature_flags |= BGMAC_FEAT_RX_MASK_SETUP; + } + + bgmac->read = bcma_bgmac_read; + bgmac->write = bcma_bgmac_write; + bgmac->idm_read = bcma_bgmac_idm_read; + bgmac->idm_write = bcma_bgmac_idm_write; + bgmac->clk_enabled = bcma_bgmac_clk_enabled; + bgmac->clk_enable = bcma_bgmac_clk_enable; + bgmac->cco_ctl_maskset = bcma_bgmac_cco_ctl_maskset; + bgmac->get_bus_clock = bcma_bgmac_get_bus_clock; + 
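The feature flags set above are what lets the shared bgmac.c below shed its chip-ID and core-revision checks: the bus front-end decides each capability once at probe time. Condensed from the bgmac_dma_tx_enable() hunk further down (illustrative; the real hunk also rewrites the burst-length field):

	/* before: bus-specific revision test inside the core driver */
	if (bgmac->core->id.rev >= 4)
		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;

	/* after: a feature bit, filled in by the probe code above */
	if (bgmac->feature_flags & BGMAC_FEAT_TX_MASK_SETUP)
		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;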
bgmac->cmn_maskset32 = bcma_bgmac_cmn_maskset32; + + err = bgmac_enet_probe(bgmac); + if (err) + goto err1; + + return 0; + +err1: + bcma_mdio_mii_unregister(bgmac->mii_bus); +err: + kfree(bgmac); + bcma_set_drvdata(core, NULL); + + return err; +} + +static void bgmac_remove(struct bcma_device *core) +{ + struct bgmac *bgmac = bcma_get_drvdata(core); + + bcma_mdio_mii_unregister(bgmac->mii_bus); + bgmac_enet_remove(bgmac); + bcma_set_drvdata(core, NULL); + kfree(bgmac); +} + +static struct bcma_driver bgmac_bcma_driver = { + .name = KBUILD_MODNAME, + .id_table = bgmac_bcma_tbl, + .probe = bgmac_probe, + .remove = bgmac_remove, +}; + +static int __init bgmac_init(void) +{ + int err; + + err = bcma_driver_register(&bgmac_bcma_driver); + if (err) + return err; + pr_info("Broadcom 47xx GBit MAC driver loaded\n"); + + return 0; +} + +static void __exit bgmac_exit(void) +{ + bcma_driver_unregister(&bgmac_bcma_driver); +} + +module_init(bgmac_init) +module_exit(bgmac_exit) + +MODULE_AUTHOR("Rafał Miłecki"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c new file mode 100644 index 000000000..be52f270c --- /dev/null +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -0,0 +1,185 @@ +/* + * Copyright (C) 2016 Broadcom + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/bcma/bcma.h> +#include <linux/etherdevice.h> +#include <linux/of_address.h> +#include <linux/of_net.h> +#include "bgmac.h" + +static u32 platform_bgmac_read(struct bgmac *bgmac, u16 offset) +{ + return readl(bgmac->plat.base + offset); +} + +static void platform_bgmac_write(struct bgmac *bgmac, u16 offset, u32 value) +{ + writel(value, bgmac->plat.base + offset); +} + +static u32 platform_bgmac_idm_read(struct bgmac *bgmac, u16 offset) +{ + return readl(bgmac->plat.idm_base + offset); +} + +static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value) +{ + return writel(value, bgmac->plat.idm_base + offset); +} + +static bool platform_bgmac_clk_enabled(struct bgmac *bgmac) +{ + if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & + (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC)) != BCMA_IOCTL_CLK) + return false; + if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET) + return false; + return true; +} + +static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags) +{ + bgmac_idm_write(bgmac, BCMA_IOCTL, + (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC | flags)); + bgmac_idm_read(bgmac, BCMA_IOCTL); + + bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0); + bgmac_idm_read(bgmac, BCMA_RESET_CTL); + udelay(1); + + bgmac_idm_write(bgmac, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags)); + bgmac_idm_read(bgmac, BCMA_IOCTL); + udelay(1); +} + +static void platform_bgmac_cco_ctl_maskset(struct bgmac *bgmac, u32 offset, + u32 mask, u32 set) +{ + /* This shouldn't be encountered */ + WARN_ON(1); +} + +static u32 platform_bgmac_get_bus_clock(struct bgmac *bgmac) +{ + /* This shouldn't be encountered */ + WARN_ON(1); + + return 0; +} + +static void platform_bgmac_cmn_maskset32(struct bgmac *bgmac, u16 offset,
+ u32 mask, u32 set) +{ + /* This shouldn't be encountered */ + WARN_ON(1); +} + +static int bgmac_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct bgmac *bgmac; + struct resource *regs; + const u8 *mac_addr; + + bgmac = devm_kzalloc(&pdev->dev, sizeof(*bgmac), GFP_KERNEL); + if (!bgmac) + return -ENOMEM; + + platform_set_drvdata(pdev, bgmac); + + /* Set the features of the 4707 family */ + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + bgmac->feature_flags |= BGMAC_FEAT_NO_RESET; + bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500; + bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4; + bgmac->feature_flags |= BGMAC_FEAT_TX_MASK_SETUP; + bgmac->feature_flags |= BGMAC_FEAT_RX_MASK_SETUP; + + bgmac->dev = &pdev->dev; + bgmac->dma_dev = &pdev->dev; + + mac_addr = of_get_mac_address(np); + if (mac_addr) + ether_addr_copy(bgmac->mac_addr, mac_addr); + else + dev_warn(&pdev->dev, "MAC address not present in device tree\n"); + + bgmac->irq = platform_get_irq(pdev, 0); + if (bgmac->irq < 0) { + dev_err(&pdev->dev, "Unable to obtain IRQ\n"); + return bgmac->irq; + } + + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "amac_base"); + if (!regs) { + dev_err(&pdev->dev, "Unable to obtain base resource\n"); + return -EINVAL; + } + + bgmac->plat.base = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(bgmac->plat.base)) + return PTR_ERR(bgmac->plat.base); + + regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base"); + if (!regs) { + dev_err(&pdev->dev, "Unable to obtain idm resource\n"); + return -EINVAL; + } + + bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(bgmac->plat.idm_base)) + return PTR_ERR(bgmac->plat.idm_base); + + bgmac->read = platform_bgmac_read; + bgmac->write = platform_bgmac_write; + bgmac->idm_read = platform_bgmac_idm_read; + bgmac->idm_write = platform_bgmac_idm_write; + bgmac->clk_enabled = platform_bgmac_clk_enabled; + bgmac->clk_enable = platform_bgmac_clk_enable; + bgmac->cco_ctl_maskset = platform_bgmac_cco_ctl_maskset; + bgmac->get_bus_clock = platform_bgmac_get_bus_clock; + bgmac->cmn_maskset32 = platform_bgmac_cmn_maskset32; + + return bgmac_enet_probe(bgmac); +} + +static int bgmac_remove(struct platform_device *pdev) +{ + struct bgmac *bgmac = platform_get_drvdata(pdev); + + bgmac_enet_remove(bgmac); + + return 0; +} + +static const struct of_device_id bgmac_of_enet_match[] = { + {.compatible = "brcm,amac",}, + {.compatible = "brcm,nsp-amac",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, bgmac_of_enet_match); + +static struct platform_driver bgmac_enet_driver = { + .driver = { + .name = "bgmac-enet", + .of_match_table = bgmac_of_enet_match, + }, + .probe = bgmac_probe, + .remove = bgmac_remove, +}; + +module_platform_driver(bgmac_enet_driver); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 25bbae592..c4751ece7 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -6,51 +6,27 @@ * Licensed under the GNU/GPL. See COPYING for details. 
*/ -#include "bgmac.h" -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/delay.h> +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/bcma/bcma.h> #include <linux/etherdevice.h> -#include <linux/mii.h> -#include <linux/phy.h> -#include <linux/phy_fixed.h> -#include <linux/interrupt.h> -#include <linux/dma-mapping.h> #include <linux/bcm47xx_nvram.h> +#include "bgmac.h" -static const struct bcma_device_id bgmac_bcma_tbl[] = { - BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS), - BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS), - {}, -}; -MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl); - -static inline bool bgmac_is_bcm4707_family(struct bgmac *bgmac) -{ - switch (bgmac->core->bus->chipinfo.id) { - case BCMA_CHIP_ID_BCM4707: - case BCMA_CHIP_ID_BCM47094: - case BCMA_CHIP_ID_BCM53018: - return true; - default: - return false; - } -} - -static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask, +static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask, u32 value, int timeout) { u32 val; int i; for (i = 0; i < timeout / 10; i++) { - val = bcma_read32(core, reg); + val = bgmac_read(bgmac, reg); if ((val & mask) == value) return true; udelay(10); } - pr_err("Timeout waiting for reg 0x%X\n", reg); + dev_err(bgmac->dev, "Timeout waiting for reg 0x%X\n", reg); return false; } @@ -84,22 +60,22 @@ static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring) udelay(10); } if (i) - bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n", - ring->mmio_base, val); + dev_err(bgmac->dev, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n", + ring->mmio_base, val); /* Remove SUSPEND bit */ bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0); - if (!bgmac_wait_value(bgmac->core, + if (!bgmac_wait_value(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS, BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED, 10000)) { - bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n", - ring->mmio_base); + dev_warn(bgmac->dev, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n", + ring->mmio_base); udelay(300); val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS); if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED) - bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n", - ring->mmio_base); + dev_err(bgmac->dev, "Reset of DMA TX ring 0x%X failed\n", + ring->mmio_base); } } @@ -109,7 +85,7 @@ static void bgmac_dma_tx_enable(struct bgmac *bgmac, u32 ctl; ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL); - if (bgmac->core->id.rev >= 4) { + if (bgmac->feature_flags & BGMAC_FEAT_TX_MASK_SETUP) { ctl &= ~BGMAC_DMA_TX_BL_MASK; ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT; @@ -152,7 +128,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, struct bgmac_dma_ring *ring, struct sk_buff *skb) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; struct net_device *net_dev = bgmac->net_dev; int index = ring->end % BGMAC_TX_RING_SLOTS; struct bgmac_slot_info *slot = &ring->slots[index]; @@ -161,7 +137,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, int i; if (skb->len > BGMAC_DESC_CTL1_LEN) { - bgmac_err(bgmac, "Too long skb (%d)\n", skb->len); + netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len); goto err_drop; } @@ -174,7 +150,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, * even when 
ring->end overflows */ if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) { - bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n"); + netdev_err(bgmac->net_dev, "TX ring is full, queue should be stopped!\n"); netif_stop_queue(net_dev); return NETDEV_TX_BUSY; } @@ -241,18 +217,20 @@ err_dma: } err_dma_head: - bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n", - ring->mmio_base); + netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n", + ring->mmio_base); err_drop: dev_kfree_skb(skb); + net_dev->stats.tx_dropped++; + net_dev->stats.tx_errors++; return NETDEV_TX_OK; } /* Free transmitted packets */ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; int empty_slot; bool freed = false; unsigned bytes_compl = 0, pkts_compl = 0; @@ -285,6 +263,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) DMA_TO_DEVICE); if (slot->skb) { + bgmac->net_dev->stats.tx_bytes += slot->skb->len; + bgmac->net_dev->stats.tx_packets++; bytes_compl += slot->skb->len; pkts_compl++; @@ -313,12 +293,12 @@ static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring) return; bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0); - if (!bgmac_wait_value(bgmac->core, + if (!bgmac_wait_value(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS, BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED, 10000)) - bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n", - ring->mmio_base); + dev_err(bgmac->dev, "Reset of ring 0x%X RX failed\n", + ring->mmio_base); } static void bgmac_dma_rx_enable(struct bgmac *bgmac, @@ -327,7 +307,7 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac, u32 ctl; ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); - if (bgmac->core->id.rev >= 4) { + if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) { ctl &= ~BGMAC_DMA_RX_BL_MASK; ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT; @@ -348,7 +328,7 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac, static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, struct bgmac_slot_info *slot) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; dma_addr_t dma_addr; struct bgmac_rx_header *rx; void *buf; @@ -367,7 +347,7 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET, BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(dma_dev, dma_addr)) { - bgmac_err(bgmac, "DMA mapping error\n"); + netdev_err(bgmac->net_dev, "DMA mapping error\n"); put_page(virt_to_head_page(buf)); return -ENOMEM; } @@ -437,7 +417,7 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, end_slot /= sizeof(struct bgmac_dma_desc); while (ring->start != end_slot) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; struct bgmac_slot_info *slot = &ring->slots[ring->start]; struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET; struct sk_buff *skb; @@ -462,16 +442,19 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, /* Check for poison and drop or pass the packet */ if (len == 0xdead && flags == 0xbeef) { - bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", - ring->start); + netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n", + ring->start); put_page(virt_to_head_page(buf)); + bgmac->net_dev->stats.rx_errors++; 
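One subtlety in the drop accounting added above: the err_drop path frees the skb and still returns NETDEV_TX_OK, because NETDEV_TX_BUSY would tell the stack to requeue a buffer that no longer exists. Condensed from the hunk:

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;	/* frame never reached the wire */
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;		/* skb consumed; do not requeue */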
break; } if (len > BGMAC_RX_ALLOC_SIZE) { - bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n", - ring->start); + netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n", + ring->start); put_page(virt_to_head_page(buf)); + bgmac->net_dev->stats.rx_length_errors++; + bgmac->net_dev->stats.rx_errors++; break; } @@ -480,8 +463,9 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE); if (unlikely(!skb)) { - bgmac_err(bgmac, "build_skb failed\n"); + netdev_err(bgmac->net_dev, "build_skb failed\n"); put_page(virt_to_head_page(buf)); + bgmac->net_dev->stats.rx_errors++; break; } skb_put(skb, BGMAC_RX_FRAME_OFFSET + @@ -491,6 +475,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring, skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, bgmac->net_dev); + bgmac->net_dev->stats.rx_bytes += len; + bgmac->net_dev->stats.rx_packets++; napi_gro_receive(&bgmac->napi, skb); handled++; } while (0); @@ -534,7 +520,7 @@ static bool bgmac_dma_unaligned(struct bgmac *bgmac, static void bgmac_dma_tx_ring_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; struct bgmac_dma_desc *dma_desc = ring->cpu_base; struct bgmac_slot_info *slot; int i; @@ -560,7 +546,7 @@ static void bgmac_dma_tx_ring_free(struct bgmac *bgmac, static void bgmac_dma_rx_ring_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; struct bgmac_slot_info *slot; int i; @@ -581,7 +567,7 @@ static void bgmac_dma_ring_desc_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring, int num_slots) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; int size; if (!ring->cpu_base) @@ -619,7 +605,7 @@ static void bgmac_dma_free(struct bgmac *bgmac) static int bgmac_dma_alloc(struct bgmac *bgmac) { - struct device *dma_dev = bgmac->core->dma_dev; + struct device *dma_dev = bgmac->dma_dev; struct bgmac_dma_ring *ring; static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1, BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, }; @@ -630,8 +616,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base)); BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base)); - if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) { - bgmac_err(bgmac, "Core does not report 64-bit DMA\n"); + if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) { + dev_err(bgmac->dev, "Core does not report 64-bit DMA\n"); return -ENOTSUPP; } @@ -645,8 +631,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) &ring->dma_base, GFP_KERNEL); if (!ring->cpu_base) { - bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n", - ring->mmio_base); + dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n", + ring->mmio_base); goto err_dma_free; } @@ -670,8 +656,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) &ring->dma_base, GFP_KERNEL); if (!ring->cpu_base) { - bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n", - ring->mmio_base); + dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n", + ring->mmio_base); err = -ENOMEM; goto err_dma_free; } @@ -746,150 +732,6 @@ error: return err; } -/************************************************** - * PHY ops - **************************************************/ - -static u16 bgmac_phy_read(struct bgmac *bgmac, u8 
phyaddr, u8 reg) -{ - struct bcma_device *core; - u16 phy_access_addr; - u16 phy_ctl_addr; - u32 tmp; - - BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK); - BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK); - BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT); - BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK); - BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT); - BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE); - BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START); - BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK); - BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK); - BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT); - BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE); - - if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) { - core = bgmac->core->bus->drv_gmac_cmn.core; - phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; - phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; - } else { - core = bgmac->core; - phy_access_addr = BGMAC_PHY_ACCESS; - phy_ctl_addr = BGMAC_PHY_CNTL; - } - - tmp = bcma_read32(core, phy_ctl_addr); - tmp &= ~BGMAC_PC_EPA_MASK; - tmp |= phyaddr; - bcma_write32(core, phy_ctl_addr, tmp); - - tmp = BGMAC_PA_START; - tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT; - tmp |= reg << BGMAC_PA_REG_SHIFT; - bcma_write32(core, phy_access_addr, tmp); - - if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) { - bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n", - phyaddr, reg); - return 0xffff; - } - - return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK; -} - -/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */ -static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value) -{ - struct bcma_device *core; - u16 phy_access_addr; - u16 phy_ctl_addr; - u32 tmp; - - if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) { - core = bgmac->core->bus->drv_gmac_cmn.core; - phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS; - phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL; - } else { - core = bgmac->core; - phy_access_addr = BGMAC_PHY_ACCESS; - phy_ctl_addr = BGMAC_PHY_CNTL; - } - - tmp = bcma_read32(core, phy_ctl_addr); - tmp &= ~BGMAC_PC_EPA_MASK; - tmp |= phyaddr; - bcma_write32(core, phy_ctl_addr, tmp); - - bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO); - if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO) - bgmac_warn(bgmac, "Error setting MDIO int\n"); - - tmp = BGMAC_PA_START; - tmp |= BGMAC_PA_WRITE; - tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT; - tmp |= reg << BGMAC_PA_REG_SHIFT; - tmp |= value; - bcma_write32(core, phy_access_addr, tmp); - - if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) { - bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n", - phyaddr, reg); - return -ETIMEDOUT; - } - - return 0; -} - -/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */ -static void bgmac_phy_init(struct bgmac *bgmac) -{ - struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo; - struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc; - u8 i; - - if (ci->id == BCMA_CHIP_ID_BCM5356) { - for (i = 0; i < 5; i++) { - bgmac_phy_write(bgmac, i, 0x1f, 0x008b); - bgmac_phy_write(bgmac, i, 0x15, 0x0100); - bgmac_phy_write(bgmac, i, 0x1f, 0x000f); - bgmac_phy_write(bgmac, i, 0x12, 0x2aaa); - bgmac_phy_write(bgmac, i, 0x1f, 0x000b); - } - } - if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) || - (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) || - (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) { - 
bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0); - bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0); - for (i = 0; i < 5; i++) { - bgmac_phy_write(bgmac, i, 0x1f, 0x000f); - bgmac_phy_write(bgmac, i, 0x16, 0x5284); - bgmac_phy_write(bgmac, i, 0x1f, 0x000b); - bgmac_phy_write(bgmac, i, 0x17, 0x0010); - bgmac_phy_write(bgmac, i, 0x1f, 0x000f); - bgmac_phy_write(bgmac, i, 0x16, 0x5296); - bgmac_phy_write(bgmac, i, 0x17, 0x1073); - bgmac_phy_write(bgmac, i, 0x17, 0x9073); - bgmac_phy_write(bgmac, i, 0x16, 0x52b6); - bgmac_phy_write(bgmac, i, 0x17, 0x9273); - bgmac_phy_write(bgmac, i, 0x1f, 0x000b); - } - } -} - -/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */ -static void bgmac_phy_reset(struct bgmac *bgmac) -{ - if (bgmac->phyaddr == BGMAC_PHY_NOREGS) - return; - - bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET); - udelay(100); - if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET) - bgmac_err(bgmac, "PHY reset failed\n"); - bgmac_phy_init(bgmac); -} /************************************************** * Chip ops @@ -903,14 +745,20 @@ static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set, { u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG); u32 new_val = (cmdcfg & mask) | set; + u32 cmdcfg_sr; + + if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4) + cmdcfg_sr = BGMAC_CMDCFG_SR_REV4; + else + cmdcfg_sr = BGMAC_CMDCFG_SR_REV0; - bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev)); + bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr); udelay(2); if (new_val != cmdcfg || force) bgmac_write(bgmac, BGMAC_CMDCFG, new_val); - bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev)); + bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr); udelay(2); } @@ -939,7 +787,7 @@ static void bgmac_chip_stats_update(struct bgmac *bgmac) { int i; - if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) { + if (!(bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)) { for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++) bgmac->mib_tx_regs[i] = bgmac_read(bgmac, @@ -958,7 +806,7 @@ static void bgmac_clear_mib(struct bgmac *bgmac) { int i; - if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) + if (bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB) return; bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR); @@ -988,7 +836,8 @@ static void bgmac_mac_speed(struct bgmac *bgmac) set |= BGMAC_CMDCFG_ES_2500; break; default: - bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed); + dev_err(bgmac->dev, "Unsupported speed: %d\n", + bgmac->mac_speed); } if (bgmac->mac_duplex == DUPLEX_HALF) @@ -999,17 +848,16 @@ static void bgmac_mac_speed(struct bgmac *bgmac) static void bgmac_miiconfig(struct bgmac *bgmac) { - struct bcma_device *core = bgmac->core; - u8 imode; - - if (bgmac_is_bcm4707_family(bgmac)) { - bcma_awrite32(core, BCMA_IOCTL, - bcma_aread32(core, BCMA_IOCTL) | 0x40 | - BGMAC_BCMA_IOCTL_SW_CLKEN); + if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) { + bgmac_idm_write(bgmac, BCMA_IOCTL, + bgmac_idm_read(bgmac, BCMA_IOCTL) | 0x40 | + BGMAC_BCMA_IOCTL_SW_CLKEN); bgmac->mac_speed = SPEED_2500; bgmac->mac_duplex = DUPLEX_FULL; bgmac_mac_speed(bgmac); } else { + u8 imode; + imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT; if (imode == 0 || imode == 1) { @@ -1023,14 +871,11 @@ static void bgmac_miiconfig(struct bgmac *bgmac) /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */ static void bgmac_chip_reset(struct bgmac *bgmac) { - struct bcma_device *core = bgmac->core; - struct bcma_bus *bus = core->bus; - struct 
bcma_chipinfo *ci = &bus->chipinfo; - u32 flags; + u32 cmdcfg_sr; u32 iost; int i; - if (bcma_core_is_enabled(core)) { + if (bgmac_clk_enabled(bgmac)) { if (!bgmac->stats_grabbed) { /* bgmac_chip_stats_update(bgmac); */ bgmac->stats_grabbed = true; } @@ -1048,38 +893,32 @@ static void bgmac_chip_reset(struct bgmac *bgmac) /* TODO: Clear software multicast filter list */ } - iost = bcma_aread32(core, BCMA_IOST); - if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) || - (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) || - (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) + iost = bgmac_idm_read(bgmac, BCMA_IOST); + if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED) iost &= ~BGMAC_BCMA_IOST_ATTACHED; /* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */ - if (ci->id != BCMA_CHIP_ID_BCM4707 && - ci->id != BCMA_CHIP_ID_BCM47094) { - flags = 0; + if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) { + u32 flags = 0; if (iost & BGMAC_BCMA_IOST_ATTACHED) { flags = BGMAC_BCMA_IOCTL_SW_CLKEN; if (!bgmac->has_robosw) flags |= BGMAC_BCMA_IOCTL_SW_RESET; } - bcma_core_enable(core, flags); + bgmac_clk_enable(bgmac, flags); } /* Request Misc PLL for corerev > 2 */ - if (core->id.rev > 2 && !bgmac_is_bcm4707_family(bgmac)) { + if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) { bgmac_set(bgmac, BCMA_CLKCTLST, BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ); - bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, + bgmac_wait_value(bgmac, BCMA_CLKCTLST, BGMAC_BCMA_CLKCTLST_MISC_PLL_ST, BGMAC_BCMA_CLKCTLST_MISC_PLL_ST, 1000); } - if (ci->id == BCMA_CHIP_ID_BCM5357 || - ci->id == BCMA_CHIP_ID_BCM4749 || - ci->id == BCMA_CHIP_ID_BCM53572) { - struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc; + if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_PHY) { u8 et_swtype = 0; u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY | BGMAC_CHIPCTL_1_IF_TYPE_MII; @@ -1087,35 +926,37 @@ static void bgmac_chip_reset(struct bgmac *bgmac) if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) { if (kstrtou8(buf, 0, &et_swtype)) - bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n", - buf); + dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n", + buf); et_swtype &= 0x0f; et_swtype <<= 4; sw_type = et_swtype; - } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) { + } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) { sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII; - } else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) || - (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) || - (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) { + } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) { sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII | BGMAC_CHIPCTL_1_SW_TYPE_RGMII; } - bcma_chipco_chipctl_maskset(cc, 1, - ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK | - BGMAC_CHIPCTL_1_SW_TYPE_MASK), - sw_type); + bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK | + BGMAC_CHIPCTL_1_SW_TYPE_MASK), + sw_type); } if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw) - bcma_awrite32(core, BCMA_IOCTL, - bcma_aread32(core, BCMA_IOCTL) & - ~BGMAC_BCMA_IOCTL_SW_RESET); + bgmac_idm_write(bgmac, BCMA_IOCTL, + bgmac_idm_read(bgmac, BCMA_IOCTL) & + ~BGMAC_BCMA_IOCTL_SW_RESET); /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset * Specs don't say anything about using BGMAC_CMDCFG_SR, but in this routine * BGMAC_CMDCFG is read _after_ putting the chip in a reset. So it has to * be kept until taking the MAC out of the reset.
*/ + if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4) + cmdcfg_sr = BGMAC_CMDCFG_SR_REV4; + else + cmdcfg_sr = BGMAC_CMDCFG_SR_REV0; + bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE | @@ -1133,19 +974,20 @@ static void bgmac_chip_reset(struct bgmac *bgmac) BGMAC_CMDCFG_PROM | BGMAC_CMDCFG_NLC | BGMAC_CMDCFG_CFE | - BGMAC_CMDCFG_SR(core->id.rev), + cmdcfg_sr, false); bgmac->mac_speed = SPEED_UNKNOWN; bgmac->mac_duplex = DUPLEX_UNKNOWN; bgmac_clear_mib(bgmac); - if (core->id.id == BCMA_CORE_4706_MAC_GBIT) - bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0, - BCMA_GMAC_CMN_PC_MTE); + if (bgmac->feature_flags & BGMAC_FEAT_CMN_PHY_CTL) + bgmac_cmn_maskset32(bgmac, BCMA_GMAC_CMN_PHY_CTL, ~0, + BCMA_GMAC_CMN_PC_MTE); else bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE); bgmac_miiconfig(bgmac); - bgmac_phy_init(bgmac); + if (bgmac->mii_bus) + bgmac->mii_bus->reset(bgmac->mii_bus); netdev_reset_queue(bgmac->net_dev); } @@ -1164,50 +1006,51 @@ static void bgmac_chip_intrs_off(struct bgmac *bgmac) /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */ static void bgmac_enable(struct bgmac *bgmac) { - struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo; + u32 cmdcfg_sr; u32 cmdcfg; u32 mode; - u32 rxq_ctl; - u32 fl_ctl; - u16 bp_clk; - u8 mdp; + + if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4) + cmdcfg_sr = BGMAC_CMDCFG_SR_REV4; + else + cmdcfg_sr = BGMAC_CMDCFG_SR_REV0; cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG); bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE), - BGMAC_CMDCFG_SR(bgmac->core->id.rev), true); + cmdcfg_sr, true); udelay(2); cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE; bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg); mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT; - if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0) + if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0) bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT); - if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2) - bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0, - BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); - - switch (ci->id) { - case BCMA_CHIP_ID_BCM5357: - case BCMA_CHIP_ID_BCM4749: - case BCMA_CHIP_ID_BCM53572: - case BCMA_CHIP_ID_BCM4716: - case BCMA_CHIP_ID_BCM47162: - fl_ctl = 0x03cb04cb; - if (ci->id == BCMA_CHIP_ID_BCM5357 || - ci->id == BCMA_CHIP_ID_BCM4749 || - ci->id == BCMA_CHIP_ID_BCM53572) + if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2) + bgmac_cco_ctl_maskset(bgmac, 1, ~0, + BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); + + if (bgmac->feature_flags & (BGMAC_FEAT_FLW_CTRL1 | + BGMAC_FEAT_FLW_CTRL2)) { + u32 fl_ctl; + + if (bgmac->feature_flags & BGMAC_FEAT_FLW_CTRL1) fl_ctl = 0x2300e1; + else + fl_ctl = 0x03cb04cb; + bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl); bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff); - break; } - if (!bgmac_is_bcm4707_family(bgmac)) { + if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) { + u32 rxq_ctl; + u16 bp_clk; + u8 mdp; + rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL); rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK; - bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / - 1000000; + bp_clk = bgmac_get_bus_clock(bgmac) / 1000000; mdp = (bp_clk * 128 / 1000) - 3; rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT); bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl); @@ -1251,7 +1094,7 @@ static irqreturn_t bgmac_interrupt(int irq, void *dev_id) int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX); if (int_status) - bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", int_status); + dev_err(bgmac->dev, "Unknown IRQs: 
0x%08X\n", int_status); /* Disable new interrupts until handling existing ones */ bgmac_chip_intrs_off(bgmac); @@ -1302,16 +1145,16 @@ static int bgmac_open(struct net_device *net_dev) /* Specs say about reclaiming rings here, but we do that in DMA init */ bgmac_chip_init(bgmac); - err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED, + err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED, KBUILD_MODNAME, net_dev); if (err < 0) { - bgmac_err(bgmac, "IRQ request error: %d!\n", err); + dev_err(bgmac->dev, "IRQ request error: %d!\n", err); bgmac_dma_cleanup(bgmac); return err; } napi_enable(&bgmac->napi); - phy_start(bgmac->phy_dev); + phy_start(net_dev->phydev); netif_start_queue(net_dev); @@ -1324,11 +1167,11 @@ static int bgmac_stop(struct net_device *net_dev) netif_carrier_off(net_dev); - phy_stop(bgmac->phy_dev); + phy_stop(net_dev->phydev); napi_disable(&bgmac->napi); bgmac_chip_intrs_off(bgmac); - free_irq(bgmac->core->irq, net_dev); + free_irq(bgmac->irq, net_dev); bgmac_chip_reset(bgmac); bgmac_dma_cleanup(bgmac); @@ -1362,12 +1205,10 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr) static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) { - struct bgmac *bgmac = netdev_priv(net_dev); - if (!netif_running(net_dev)) return -EINVAL; - return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd); + return phy_mii_ioctl(net_dev->phydev, ifr, cmd); } static const struct net_device_ops bgmac_netdev_ops = { @@ -1384,54 +1225,151 @@ static const struct net_device_ops bgmac_netdev_ops = { * ethtool_ops **************************************************/ -static int bgmac_get_settings(struct net_device *net_dev, - struct ethtool_cmd *cmd) +struct bgmac_stat { + u8 size; + u32 offset; + const char *name; +}; + +static struct bgmac_stat bgmac_get_strings_stats[] = { + { 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" }, + { 4, BGMAC_TX_GOOD_PKTS, "tx_good" }, + { 8, BGMAC_TX_OCTETS, "tx_octets" }, + { 4, BGMAC_TX_PKTS, "tx_pkts" }, + { 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" }, + { 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" }, + { 4, BGMAC_TX_LEN_64, "tx_64" }, + { 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" }, + { 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" }, + { 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" }, + { 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" }, + { 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" }, + { 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" }, + { 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" }, + { 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" }, + { 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" }, + { 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" }, + { 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" }, + { 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" }, + { 4, BGMAC_TX_UNDERRUNS, "tx_underruns" }, + { 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" }, + { 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" }, + { 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" }, + { 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" }, + { 4, BGMAC_TX_LATE_COLS, "tx_late_cols" }, + { 4, BGMAC_TX_DEFERED, "tx_defered" }, + { 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" }, + { 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" }, + { 4, BGMAC_TX_UNI_PKTS, "tx_unicast" }, + { 4, BGMAC_TX_Q0_PKTS, "tx_q0" }, + { 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" }, + { 4, BGMAC_TX_Q1_PKTS, "tx_q1" }, + { 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" }, + { 4, BGMAC_TX_Q2_PKTS, "tx_q2" }, + { 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" }, + { 4, BGMAC_TX_Q3_PKTS, "tx_q3" }, + { 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" }, + { 8, 
BGMAC_RX_GOOD_OCTETS, "rx_good_octets" }, + { 4, BGMAC_RX_GOOD_PKTS, "rx_good" }, + { 8, BGMAC_RX_OCTETS, "rx_octets" }, + { 4, BGMAC_RX_PKTS, "rx_pkts" }, + { 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" }, + { 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" }, + { 4, BGMAC_RX_LEN_64, "rx_64" }, + { 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" }, + { 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" }, + { 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" }, + { 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" }, + { 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" }, + { 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" }, + { 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" }, + { 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" }, + { 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" }, + { 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" }, + { 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" }, + { 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" }, + { 4, BGMAC_RX_MISSED_PKTS, "rx_missed" }, + { 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" }, + { 4, BGMAC_RX_UNDERSIZE, "rx_undersize" }, + { 4, BGMAC_RX_CRC_ERRS, "rx_crc" }, + { 4, BGMAC_RX_ALIGN_ERRS, "rx_align" }, + { 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" }, + { 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" }, + { 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" }, + { 4, BGMAC_RX_SACHANGES, "rx_sa_changes" }, + { 4, BGMAC_RX_UNI_PKTS, "rx_unicast" }, +}; + +#define BGMAC_STATS_LEN ARRAY_SIZE(bgmac_get_strings_stats) + +static int bgmac_get_sset_count(struct net_device *dev, int string_set) { - struct bgmac *bgmac = netdev_priv(net_dev); + switch (string_set) { + case ETH_SS_STATS: + return BGMAC_STATS_LEN; + } - return phy_ethtool_gset(bgmac->phy_dev, cmd); + return -EOPNOTSUPP; } -static int bgmac_set_settings(struct net_device *net_dev, - struct ethtool_cmd *cmd) +static void bgmac_get_strings(struct net_device *dev, u32 stringset, + u8 *data) { - struct bgmac *bgmac = netdev_priv(net_dev); + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < BGMAC_STATS_LEN; i++) + strlcpy(data + i * ETH_GSTRING_LEN, + bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN); +} + +static void bgmac_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *ss, uint64_t *data) +{ + struct bgmac *bgmac = netdev_priv(dev); + const struct bgmac_stat *s; + unsigned int i; + u64 val; + + if (!netif_running(dev)) + return; - return phy_ethtool_sset(bgmac->phy_dev, cmd); + for (i = 0; i < BGMAC_STATS_LEN; i++) { + s = &bgmac_get_strings_stats[i]; + val = 0; + if (s->size == 8) + val = (u64)bgmac_read(bgmac, s->offset + 4) << 32; + val |= bgmac_read(bgmac, s->offset); + data[i] = val; + } } static void bgmac_get_drvinfo(struct net_device *net_dev, struct ethtool_drvinfo *info) { strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); - strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info)); + strlcpy(info->bus_info, "AXI", sizeof(info->bus_info)); } static const struct ethtool_ops bgmac_ethtool_ops = { - .get_settings = bgmac_get_settings, - .set_settings = bgmac_set_settings, + .get_strings = bgmac_get_strings, + .get_sset_count = bgmac_get_sset_count, + .get_ethtool_stats = bgmac_get_ethtool_stats, .get_drvinfo = bgmac_get_drvinfo, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; /************************************************** * MII **************************************************/ -static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum) -{ - return bgmac_phy_read(bus->priv, mii_id, regnum); -} - -static int bgmac_mii_write(struct mii_bus *bus, 
int mii_id, int regnum, - u16 value) -{ - return bgmac_phy_write(bus->priv, mii_id, regnum, value); -} - static void bgmac_adjust_link(struct net_device *net_dev) { struct bgmac *bgmac = netdev_priv(net_dev); - struct phy_device *phy_dev = bgmac->phy_dev; + struct phy_device *phy_dev = net_dev->phydev; bool update = false; if (phy_dev->link) { @@ -1452,7 +1390,7 @@ static void bgmac_adjust_link(struct net_device *net_dev) } } -static int bgmac_fixed_phy_register(struct bgmac *bgmac) +static int bgmac_phy_connect_direct(struct bgmac *bgmac) { struct fixed_phy_status fphy_status = { .link = 1, @@ -1464,196 +1402,76 @@ static int bgmac_fixed_phy_register(struct bgmac *bgmac) phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL); if (!phy_dev || IS_ERR(phy_dev)) { - bgmac_err(bgmac, "Failed to register fixed PHY device\n"); + dev_err(bgmac->dev, "Failed to register fixed PHY device\n"); return -ENODEV; } err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link, PHY_INTERFACE_MODE_MII); if (err) { - bgmac_err(bgmac, "Connecting PHY failed\n"); + dev_err(bgmac->dev, "Connecting PHY failed\n"); return err; } - bgmac->phy_dev = phy_dev; - return err; } -static int bgmac_mii_register(struct bgmac *bgmac) +static int bgmac_phy_connect(struct bgmac *bgmac) { - struct mii_bus *mii_bus; struct phy_device *phy_dev; char bus_id[MII_BUS_ID_SIZE + 3]; - int err = 0; - - if (bgmac_is_bcm4707_family(bgmac)) - return bgmac_fixed_phy_register(bgmac); - - mii_bus = mdiobus_alloc(); - if (!mii_bus) - return -ENOMEM; - - mii_bus->name = "bgmac mii bus"; - sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num, - bgmac->core->core_unit); - mii_bus->priv = bgmac; - mii_bus->read = bgmac_mii_read; - mii_bus->write = bgmac_mii_write; - mii_bus->parent = &bgmac->core->dev; - mii_bus->phy_mask = ~(1 << bgmac->phyaddr); - - err = mdiobus_register(mii_bus); - if (err) { - bgmac_err(bgmac, "Registration of mii bus failed\n"); - goto err_free_bus; - } - - bgmac->mii_bus = mii_bus; /* Connect to the PHY */ - snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id, + snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id, bgmac->phyaddr); phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phy_dev)) { - bgmac_err(bgmac, "PHY connection failed\n"); - err = PTR_ERR(phy_dev); - goto err_unregister_bus; + dev_err(bgmac->dev, "PHY connection failed\n"); + return PTR_ERR(phy_dev); } - bgmac->phy_dev = phy_dev; - - return err; -err_unregister_bus: - mdiobus_unregister(mii_bus); -err_free_bus: - mdiobus_free(mii_bus); - return err; -} - -static void bgmac_mii_unregister(struct bgmac *bgmac) -{ - struct mii_bus *mii_bus = bgmac->mii_bus; - - mdiobus_unregister(mii_bus); - mdiobus_free(mii_bus); + return 0; } -/************************************************** - * BCMA bus ops - **************************************************/ - -/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */ -static int bgmac_probe(struct bcma_device *core) +int bgmac_enet_probe(struct bgmac *info) { struct net_device *net_dev; struct bgmac *bgmac; - struct ssb_sprom *sprom = &core->bus->sprom; - u8 *mac; int err; - switch (core->core_unit) { - case 0: - mac = sprom->et0mac; - break; - case 1: - mac = sprom->et1mac; - break; - case 2: - mac = sprom->et2mac; - break; - default: - pr_err("Unsupported core_unit %d\n", core->core_unit); - return -ENOTSUPP; - } - - if (!is_valid_ether_addr(mac)) { - dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac); -
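bgmac_phy_connect_direct() above covers the BCM4707 family, which has no PHY reachable over MDIO: a fixed, always-up link is registered and connected in place of a real one. Which path runs is decided solely by whether the bus front-end supplied an mii_bus, as in bgmac_enet_probe() below (condensed):

	if (!bgmac->mii_bus)
		err = bgmac_phy_connect_direct(bgmac);	/* fixed link, no MDIO */
	else
		err = bgmac_phy_connect(bgmac);		/* real PHY on the MDIO bus */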
eth_random_addr(mac); - dev_warn(&core->dev, "Using random MAC: %pM\n", mac); - } - - /* This (reset &) enable is not present in specs or reference driver but - * Broadcom does it in arch PCI code when enabling fake PCI device. - */ - bcma_core_enable(core, 0); - /* Allocation and references */ net_dev = alloc_etherdev(sizeof(*bgmac)); if (!net_dev) return -ENOMEM; + net_dev->netdev_ops = &bgmac_netdev_ops; - net_dev->irq = core->irq; net_dev->ethtool_ops = &bgmac_ethtool_ops; bgmac = netdev_priv(net_dev); + memcpy(bgmac, info, sizeof(*bgmac)); bgmac->net_dev = net_dev; - bgmac->core = core; - bcma_set_drvdata(core, bgmac); - - /* Defaults */ - memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN); - - /* On BCM4706 we need common core to access PHY */ - if (core->id.id == BCMA_CORE_4706_MAC_GBIT && - !core->bus->drv_gmac_cmn.core) { - bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n"); - err = -ENODEV; - goto err_netdev_free; - } - bgmac->cmn = core->bus->drv_gmac_cmn.core; - - switch (core->core_unit) { - case 0: - bgmac->phyaddr = sprom->et0phyaddr; - break; - case 1: - bgmac->phyaddr = sprom->et1phyaddr; - break; - case 2: - bgmac->phyaddr = sprom->et2phyaddr; - break; - } - bgmac->phyaddr &= BGMAC_PHY_MASK; - if (bgmac->phyaddr == BGMAC_PHY_MASK) { - bgmac_err(bgmac, "No PHY found\n"); - err = -ENODEV; - goto err_netdev_free; + net_dev->irq = bgmac->irq; + SET_NETDEV_DEV(net_dev, bgmac->dev); + + if (!is_valid_ether_addr(bgmac->mac_addr)) { + dev_err(bgmac->dev, "Invalid MAC addr: %pM\n", + bgmac->mac_addr); + eth_random_addr(bgmac->mac_addr); + dev_warn(bgmac->dev, "Using random MAC: %pM\n", + bgmac->mac_addr); } - bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr, - bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : ""); + ether_addr_copy(net_dev->dev_addr, bgmac->mac_addr); - if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) { - bgmac_err(bgmac, "PCI setup not implemented\n"); - err = -ENOTSUPP; - goto err_netdev_free; - } + /* This (reset &) enable is not present in specs or reference driver but * Broadcom does it in arch PCI code when enabling fake PCI device. */ + bgmac_clk_enable(bgmac, 0); bgmac_chip_reset(bgmac); - /* For Northstar, we have to take all GMAC core out of reset */ - if (bgmac_is_bcm4707_family(bgmac)) { - struct bcma_device *ns_core; - int ns_gmac; - - /* Northstar has 4 GMAC cores */ - for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) { - /* As Northstar requirement, we have to reset all GMACs - * before accessing one. bgmac_chip_reset() call - * bcma_core_enable() for this core. Then the other - * three GMACs didn't reset. We do it here. - */ - ns_core = bcma_find_core_unit(core->bus, - BCMA_CORE_MAC_GBIT, - ns_gmac); - if (ns_core && !bcma_core_is_enabled(ns_core)) - bcma_core_enable(ns_core, 0); - } - } - err = bgmac_dma_alloc(bgmac); if (err) { - bgmac_err(bgmac, "Unable to alloc memory for DMA\n"); + dev_err(bgmac->dev, "Unable to alloc memory for DMA\n"); goto err_netdev_free; } @@ -1661,22 +1479,14 @@ static int bgmac_probe(struct bcma_device *core) if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0) bgmac->int_mask &= ~BGMAC_IS_TX_MASK; - /* TODO: reset the external phy.
Specs are needed */ - bgmac_phy_reset(bgmac); - - bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo & - BGMAC_BFL_ENETROBO); - if (bgmac->has_robosw) - bgmac_warn(bgmac, "Support for Roboswitch not implemented\n"); - - if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM) - bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n"); - netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT); - err = bgmac_mii_register(bgmac); + if (!bgmac->mii_bus) + err = bgmac_phy_connect_direct(bgmac); + else + err = bgmac_phy_connect(bgmac); if (err) { - bgmac_err(bgmac, "Cannot register MDIO\n"); + dev_err(bgmac->dev, "Cannot connect to phy\n"); goto err_dma_free; } @@ -1686,64 +1496,34 @@ static int bgmac_probe(struct bcma_device *core) err = register_netdev(bgmac->net_dev); if (err) { - bgmac_err(bgmac, "Cannot register net device\n"); - goto err_mii_unregister; + dev_err(bgmac->dev, "Cannot register net device\n"); + goto err_phy_disconnect; } netif_carrier_off(net_dev); return 0; -err_mii_unregister: - bgmac_mii_unregister(bgmac); +err_phy_disconnect: + phy_disconnect(net_dev->phydev); err_dma_free: bgmac_dma_free(bgmac); - err_netdev_free: - bcma_set_drvdata(core, NULL); free_netdev(net_dev); return err; } +EXPORT_SYMBOL_GPL(bgmac_enet_probe); -static void bgmac_remove(struct bcma_device *core) +void bgmac_enet_remove(struct bgmac *bgmac) { - struct bgmac *bgmac = bcma_get_drvdata(core); - unregister_netdev(bgmac->net_dev); - bgmac_mii_unregister(bgmac); + phy_disconnect(bgmac->net_dev->phydev); netif_napi_del(&bgmac->napi); bgmac_dma_free(bgmac); - bcma_set_drvdata(core, NULL); free_netdev(bgmac->net_dev); } - -static struct bcma_driver bgmac_bcma_driver = { - .name = KBUILD_MODNAME, - .id_table = bgmac_bcma_tbl, - .probe = bgmac_probe, - .remove = bgmac_remove, -}; - -static int __init bgmac_init(void) -{ - int err; - - err = bcma_driver_register(&bgmac_bcma_driver); - if (err) - return err; - pr_info("Broadcom 47xx GBit MAC driver loaded\n"); - - return 0; -} - -static void __exit bgmac_exit(void) -{ - bcma_driver_unregister(&bgmac_bcma_driver); -} - -module_init(bgmac_init) -module_exit(bgmac_exit) +EXPORT_SYMBOL_GPL(bgmac_enet_remove); MODULE_AUTHOR("Rafał Miłecki"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 9a03c142b..24a250267 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -1,19 +1,6 @@ #ifndef _BGMAC_H #define _BGMAC_H -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#define bgmac_err(bgmac, fmt, ...) \ - dev_err(&(bgmac)->core->dev, fmt, ##__VA_ARGS__) -#define bgmac_warn(bgmac, fmt, ...) \ - dev_warn(&(bgmac)->core->dev, fmt, ##__VA_ARGS__) -#define bgmac_info(bgmac, fmt, ...) \ - dev_info(&(bgmac)->core->dev, fmt, ##__VA_ARGS__) -#define bgmac_dbg(bgmac, fmt, ...)
\ - dev_dbg(&(bgmac)->core->dev, fmt, ##__VA_ARGS__) - -#include <linux/bcma/bcma.h> -#include <linux/brcmphy.h> #include <linux/netdevice.h> #define BGMAC_DEV_CTL 0x000 @@ -123,7 +110,7 @@ #define BGMAC_TX_LEN_1024_TO_1522 0x334 #define BGMAC_TX_LEN_1523_TO_2047 0x338 #define BGMAC_TX_LEN_2048_TO_4095 0x33c -#define BGMAC_TX_LEN_4095_TO_8191 0x340 +#define BGMAC_TX_LEN_4096_TO_8191 0x340 #define BGMAC_TX_LEN_8192_TO_MAX 0x344 #define BGMAC_TX_JABBER_PKTS 0x348 /* Error */ #define BGMAC_TX_OVERSIZE_PKTS 0x34c /* Error */ @@ -166,7 +153,7 @@ #define BGMAC_RX_LEN_1024_TO_1522 0x3e4 #define BGMAC_RX_LEN_1523_TO_2047 0x3e8 #define BGMAC_RX_LEN_2048_TO_4095 0x3ec -#define BGMAC_RX_LEN_4095_TO_8191 0x3f0 +#define BGMAC_RX_LEN_4096_TO_8191 0x3f0 #define BGMAC_RX_LEN_8192_TO_MAX 0x3f4 #define BGMAC_RX_JABBER_PKTS 0x3f8 /* Error */ #define BGMAC_RX_OVERSIZE_PKTS 0x3fc /* Error */ @@ -201,7 +188,6 @@ #define BGMAC_CMDCFG_HD_SHIFT 10 #define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */ #define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */ -#define BGMAC_CMDCFG_SR(rev) ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0) #define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */ #define BGMAC_CMDCFG_AE 0x00400000 #define BGMAC_CMDCFG_CFE 0x00800000 @@ -387,6 +373,24 @@ #define ETHER_MAX_LEN 1518 +/* Feature Flags */ +#define BGMAC_FEAT_TX_MASK_SETUP BIT(0) +#define BGMAC_FEAT_RX_MASK_SETUP BIT(1) +#define BGMAC_FEAT_IOST_ATTACHED BIT(2) +#define BGMAC_FEAT_NO_RESET BIT(3) +#define BGMAC_FEAT_MISC_PLL_REQ BIT(4) +#define BGMAC_FEAT_SW_TYPE_PHY BIT(5) +#define BGMAC_FEAT_SW_TYPE_EPHYRMII BIT(6) +#define BGMAC_FEAT_SW_TYPE_RGMII BIT(7) +#define BGMAC_FEAT_CMN_PHY_CTL BIT(8) +#define BGMAC_FEAT_FLW_CTRL1 BIT(9) +#define BGMAC_FEAT_FLW_CTRL2 BIT(10) +#define BGMAC_FEAT_SET_RXQ_CLK BIT(11) +#define BGMAC_FEAT_CLKCTLST BIT(12) +#define BGMAC_FEAT_NO_CLR_MIB BIT(13) +#define BGMAC_FEAT_FORCE_SPEED_2500 BIT(14) +#define BGMAC_FEAT_CMDCFG_SR_REV4 BIT(15) + struct bgmac_slot_info { union { struct sk_buff *skb; @@ -436,12 +440,26 @@ struct bgmac_rx_header { }; struct bgmac { - struct bcma_device *core; - struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */ + union { + struct { + void *base; + void *idm_base; + } plat; + struct { + struct bcma_device *core; + /* Reference to CMN core for BCM4706 */ + struct bcma_device *cmn; + } bcma; + }; + + struct device *dev; + struct device *dma_dev; + unsigned char mac_addr[ETH_ALEN]; + u32 feature_flags; + struct net_device *net_dev; struct napi_struct napi; struct mii_bus *mii_bus; - struct phy_device *phy_dev; /* DMA */ struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS]; @@ -453,6 +471,7 @@ struct bgmac { u32 mib_rx_regs[BGMAC_NUM_MIB_RX_REGS]; /* Int */ + int irq; u32 int_mask; /* Current MAC state */ @@ -463,16 +482,71 @@ struct bgmac { bool has_robosw; bool loopback; + + u32 (*read)(struct bgmac *bgmac, u16 offset); + void (*write)(struct bgmac *bgmac, u16 offset, u32 value); + u32 (*idm_read)(struct bgmac *bgmac, u16 offset); + void (*idm_write)(struct bgmac *bgmac, u16 offset, u32 value); + bool (*clk_enabled)(struct bgmac *bgmac); + void (*clk_enable)(struct bgmac *bgmac, u32 flags); + void (*cco_ctl_maskset)(struct bgmac *bgmac, u32 offset, u32 mask, + u32 set); + u32 (*get_bus_clock)(struct bgmac *bgmac); + void (*cmn_maskset32)(struct bgmac *bgmac, u16 offset, u32 mask, + u32 set); }; +int bgmac_enet_probe(struct bgmac *info); +void bgmac_enet_remove(struct bgmac *bgmac); + 
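All register traffic in the core driver now funnels through the accessors declared above, and the read-modify-write helpers at the bottom of this header are built from the same two calls. Spelled out, bgmac_maskset(bgmac, offset, mask, set) amounts to the following sketch (the helper body itself is not shown in this hunk):

	u32 value;

	value = bgmac_read(bgmac, offset);	/* one indirect MMIO read */
	value &= mask;				/* keep only the bits in mask */
	value |= set;				/* merge in the new bits */
	bgmac_write(bgmac, offset, value);	/* one indirect MMIO write */

bgmac_set(bgmac, offset, set) is then just maskset with a mask of ~0, as its definition at the end of the header shows.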
+struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr); +void bcma_mdio_mii_unregister(struct mii_bus *mii_bus); + static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset) { - return bcma_read32(bgmac->core, offset); + return bgmac->read(bgmac, offset); } static inline void bgmac_write(struct bgmac *bgmac, u16 offset, u32 value) { - bcma_write32(bgmac->core, offset, value); + bgmac->write(bgmac, offset, value); +} + +static inline u32 bgmac_idm_read(struct bgmac *bgmac, u16 offset) +{ + return bgmac->idm_read(bgmac, offset); +} + +static inline void bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value) +{ + bgmac->idm_write(bgmac, offset, value); +} + +static inline bool bgmac_clk_enabled(struct bgmac *bgmac) +{ + return bgmac->clk_enabled(bgmac); +} + +static inline void bgmac_clk_enable(struct bgmac *bgmac, u32 flags) +{ + bgmac->clk_enable(bgmac, flags); +} + +static inline void bgmac_cco_ctl_maskset(struct bgmac *bgmac, u32 offset, + u32 mask, u32 set) +{ + bgmac->cco_ctl_maskset(bgmac, offset, mask, set); +} + +static inline u32 bgmac_get_bus_clock(struct bgmac *bgmac) +{ + return bgmac->get_bus_clock(bgmac); +} + +static inline void bgmac_cmn_maskset32(struct bgmac *bgmac, u16 offset, + u32 mask, u32 set) +{ + bgmac->cmn_maskset32(bgmac, offset, mask, set); } static inline void bgmac_maskset(struct bgmac *bgmac, u16 offset, u32 mask, @@ -490,5 +564,4 @@ static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set) { bgmac_maskset(bgmac, offset, ~0, set); } - #endif /* _BGMAC_H */ diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 8e1231521..8c2220b59 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -6352,10 +6352,6 @@ bnx2_open(struct net_device *dev) struct bnx2 *bp = netdev_priv(dev); int rc; - rc = bnx2_request_firmware(bp); - if (rc < 0) - goto out; - netif_carrier_off(dev); bnx2_disable_int(bp); @@ -6424,7 +6420,6 @@ open_err: bnx2_free_irq(bp); bnx2_free_mem(bp); bnx2_del_napi(bp); - bnx2_release_firmware(bp); goto out; } @@ -8571,6 +8566,12 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, dev); + rc = bnx2_request_firmware(bp); + if (rc < 0) + goto error; + + + bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | @@ -8603,6 +8604,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; error: + bnx2_release_firmware(bp); pci_iounmap(pdev, bp->regview); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 9089404bb..e447c8ef4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -59,9 +59,6 @@ #include <linux/semaphore.h> #include <linux/stringify.h> #include <linux/vmalloc.h> -#if IS_ENABLED(CONFIG_BNX2X_GENEVE) -#include <net/geneve.h> -#endif #include "bnx2x.h" #include "bnx2x_init.h" #include "bnx2x_init_ops.h" @@ -769,6 +766,11 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl) (bp->common.bc_ver & 0xff00) >> 8, (bp->common.bc_ver & 0xff)); + if (pci_channel_offline(bp->pdev)) { + BNX2X_ERR("Cannot dump MCP info while in PCI error\n"); + return; + } + val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER); if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER)) BNX2X_ERR("%s" "MCP PC at 
0x%x\n", lvl, val); @@ -9412,10 +9414,16 @@ unload_error: /* Release IRQs */ bnx2x_free_irq(bp); - /* Reset the chip */ - rc = bnx2x_reset_hw(bp, reset_code); - if (rc) - BNX2X_ERR("HW_RESET failed\n"); + /* Reset the chip, unless PCI function is offline. If we reach this + * point following a PCI error handling, it means device is really + * in a bad state and we're about to remove it, so reset the chip + * is not a good idea. + */ + if (!pci_channel_offline(bp->pdev)) { + rc = bnx2x_reset_hw(bp, reset_code); + if (rc) + BNX2X_ERR("HW_RESET failed\n"); + } /* Report UNLOAD_DONE to MCP */ bnx2x_send_unload_done(bp, keep_link); @@ -10070,7 +10078,6 @@ static void bnx2x_parity_recover(struct bnx2x *bp) } } -#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE) static int bnx2x_udp_port_update(struct bnx2x *bp) { struct bnx2x_func_switch_update_params *switch_update_params; @@ -10171,47 +10178,42 @@ static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port, DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n", type, port); } -#endif - -#ifdef CONFIG_BNX2X_VXLAN -static void bnx2x_add_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) -{ - struct bnx2x *bp = netdev_priv(netdev); - u16 t_port = ntohs(port); - - __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN); -} - -static void bnx2x_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) -{ - struct bnx2x *bp = netdev_priv(netdev); - u16 t_port = ntohs(port); - - __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN); -} -#endif -#if IS_ENABLED(CONFIG_BNX2X_GENEVE) -static void bnx2x_add_geneve_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) +static void bnx2x_udp_tunnel_add(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct bnx2x *bp = netdev_priv(netdev); - u16 t_port = ntohs(port); + u16 t_port = ntohs(ti->port); - __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE); + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN); + break; + case UDP_TUNNEL_TYPE_GENEVE: + __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE); + break; + default: + break; + } } -static void bnx2x_del_geneve_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) +static void bnx2x_udp_tunnel_del(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct bnx2x *bp = netdev_priv(netdev); - u16 t_port = ntohs(port); + u16 t_port = ntohs(ti->port); - __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE); + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN); + break; + case UDP_TUNNEL_TYPE_GENEVE: + __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE); + break; + default: + break; + } } -#endif static int bnx2x_close(struct net_device *dev); @@ -10319,7 +10321,6 @@ sp_rtnl_not_reset: &bp->sp_rtnl_state)) bnx2x_update_mng_version(bp); -#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE) if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT, &bp->sp_rtnl_state)) { if (bnx2x_udp_port_update(bp)) { @@ -10329,20 +10330,14 @@ sp_rtnl_not_reset: BNX2X_UDP_PORT_MAX); } else { /* Since we don't store additional port information, - * if no port is configured for any feature ask for + * if no ports are configured for any feature ask for * information about currently configured ports. 
*/ -#ifdef CONFIG_BNX2X_VXLAN - if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) - vxlan_get_rx_port(bp->dev); -#endif -#if IS_ENABLED(CONFIG_BNX2X_GENEVE) - if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) - geneve_get_rx_port(bp->dev); -#endif + if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count && + !bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) + udp_tunnel_get_rx_info(bp->dev); } } -#endif /* work which needs rtnl lock not-taken (as it takes the lock itself and * can be called from other contexts as well) @@ -12545,14 +12540,8 @@ static int bnx2x_open(struct net_device *dev) if (rc) return rc; -#ifdef CONFIG_BNX2X_VXLAN if (IS_PF(bp)) - vxlan_get_rx_port(dev); -#endif -#if IS_ENABLED(CONFIG_BNX2X_GENEVE) - if (IS_PF(bp)) - geneve_get_rx_port(dev); -#endif + udp_tunnel_get_rx_info(dev); return 0; } @@ -13039,14 +13028,8 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_get_phys_port_id = bnx2x_get_phys_port_id, .ndo_set_vf_link_state = bnx2x_set_vf_link_state, .ndo_features_check = bnx2x_features_check, -#ifdef CONFIG_BNX2X_VXLAN - .ndo_add_vxlan_port = bnx2x_add_vxlan_port, - .ndo_del_vxlan_port = bnx2x_del_vxlan_port, -#endif -#if IS_ENABLED(CONFIG_BNX2X_GENEVE) - .ndo_add_geneve_port = bnx2x_add_geneve_port, - .ndo_del_geneve_port = bnx2x_del_geneve_port, -#endif + .ndo_udp_tunnel_add = bnx2x_udp_tunnel_add, + .ndo_udp_tunnel_del = bnx2x_udp_tunnel_del, }; static int bnx2x_set_coherency_mask(struct bnx2x *bp) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e655b76e8..228c964e7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -37,9 +37,7 @@ #include <net/udp.h> #include <net/checksum.h> #include <net/ip6_checksum.h> -#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE) -#include <net/vxlan.h> -#endif +#include <net/udp_tunnel.h> #ifdef CONFIG_NET_RX_BUSY_POLL #include <net/busy_poll.h> #endif @@ -75,12 +73,32 @@ enum board_idx { BCM57301, BCM57302, BCM57304, + BCM57417_NPAR, + BCM58700, + BCM57311, + BCM57312, BCM57402, BCM57404, BCM57406, + BCM57402_NPAR, + BCM57407, + BCM57412, + BCM57414, + BCM57416, + BCM57417, + BCM57412_NPAR, BCM57314, + BCM57417_SFP, + BCM57416_SFP, + BCM57404_NPAR, + BCM57406_NPAR, + BCM57407_SFP, + BCM57414_NPAR, + BCM57416_NPAR, BCM57304_VF, BCM57404_VF, + BCM57414_VF, + BCM57314_VF, }; /* indexed by enum above */ @@ -90,25 +108,65 @@ static const struct { { "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" }, { "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" }, { "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" }, + { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" }, + { "Broadcom BCM58700 Nitro 4-port 1Gb/2.5Gb/10Gb Ethernet" }, + { "Broadcom BCM57311 NetXtreme-C Single-port 10Gb Ethernet" }, + { "Broadcom BCM57312 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" }, { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" }, { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" }, { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" }, + { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" }, + { "Broadcom BCM57407 NetXtreme-E Dual-port 10GBase-T Ethernet" }, + { "Broadcom BCM57412 NetXtreme-E Dual-port 10Gb Ethernet" }, + { "Broadcom BCM57414 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" }, + { "Broadcom BCM57416 NetXtreme-E Dual-port 10GBase-T Ethernet" }, + { "Broadcom BCM57417 NetXtreme-E Dual-port 10GBase-T Ethernet" }, + { "Broadcom BCM57412 
NetXtreme-E Ethernet Partition" }, { "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" }, + { "Broadcom BCM57417 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" }, + { "Broadcom BCM57416 NetXtreme-E Dual-port 10Gb Ethernet" }, + { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" }, + { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" }, + { "Broadcom BCM57407 NetXtreme-E Dual-port 25Gb Ethernet" }, + { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" }, + { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" }, { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" }, { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" }, + { "Broadcom BCM57414 NetXtreme-E Ethernet Virtual Function" }, + { "Broadcom BCM57314 NetXtreme-E Ethernet Virtual Function" }, }; static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 }, { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 }, { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 }, + { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 }, + { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 }, + { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 }, { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, + { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 }, + { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 }, + { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 }, + { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 }, + { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 }, + { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR }, { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, + { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP }, + { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP }, + { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP }, + { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR }, + { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR }, #ifdef CONFIG_BNXT_SRIOV { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF }, { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF }, + { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = BCM57414_VF }, + { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = BCM57314_VF }, #endif { 0 } }; @@ -125,12 +183,14 @@ static const u16 bnxt_async_events_arr[] = { HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, + HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, }; static bool bnxt_vf_pciid(enum board_idx idx) { - return (idx == BCM57304_VF || idx == BCM57404_VF); + return (idx == BCM57304_VF || idx == BCM57404_VF || + idx == BCM57314_VF || idx == BCM57414_VF); } #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) @@ -920,6 +980,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, } tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2); tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata); + 
tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info); rxr->rx_prod = NEXT_RX(prod); cons = NEXT_RX(cons); @@ -938,32 +999,102 @@ static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi, bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs); } +static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info, + int payload_off, int tcp_ts, + struct sk_buff *skb) +{ +#ifdef CONFIG_INET + struct tcphdr *th; + int len, nw_off; + u16 outer_ip_off, inner_ip_off, inner_mac_off; + u32 hdr_info = tpa_info->hdr_info; + bool loopback = false; + + inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info); + inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info); + outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info); + + /* If the packet is an internal loopback packet, the offsets will + * have an extra 4 bytes. + */ + if (inner_mac_off == 4) { + loopback = true; + } else if (inner_mac_off > 4) { + __be16 proto = *((__be16 *)(skb->data + inner_ip_off - + ETH_HLEN - 2)); + + /* We only support inner iPv4/ipv6. If we don't see the + * correct protocol ID, it must be a loopback packet where + * the offsets are off by 4. + */ + if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6)) + loopback = true; + } + if (loopback) { + /* internal loopback packet, subtract all offsets by 4 */ + inner_ip_off -= 4; + inner_mac_off -= 4; + outer_ip_off -= 4; + } + + nw_off = inner_ip_off - ETH_HLEN; + skb_set_network_header(skb, nw_off); + if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) { + struct ipv6hdr *iph = ipv6_hdr(skb); + + skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr)); + len = skb->len - skb_transport_offset(skb); + th = tcp_hdr(skb); + th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0); + } else { + struct iphdr *iph = ip_hdr(skb); + + skb_set_transport_header(skb, nw_off + sizeof(struct iphdr)); + len = skb->len - skb_transport_offset(skb); + th = tcp_hdr(skb); + th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0); + } + + if (inner_mac_off) { /* tunnel */ + struct udphdr *uh = NULL; + __be16 proto = *((__be16 *)(skb->data + outer_ip_off - + ETH_HLEN - 2)); + + if (proto == htons(ETH_P_IP)) { + struct iphdr *iph = (struct iphdr *)skb->data; + + if (iph->protocol == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } else { + struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; + + if (iph->nexthdr == IPPROTO_UDP) + uh = (struct udphdr *)(iph + 1); + } + if (uh) { + if (uh->check) + skb_shinfo(skb)->gso_type |= + SKB_GSO_UDP_TUNNEL_CSUM; + else + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; + } + } +#endif + return skb; +} + #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr)) #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr)) -static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info, - struct rx_tpa_end_cmp *tpa_end, - struct rx_tpa_end_cmp_ext *tpa_end1, +static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info, + int payload_off, int tcp_ts, struct sk_buff *skb) { #ifdef CONFIG_INET struct tcphdr *th; - int payload_off, tcp_opt_len = 0; - int len, nw_off; - u16 segs; - - segs = TPA_END_TPA_SEGS(tpa_end); - if (segs == 1) - return skb; + int len, nw_off, tcp_opt_len; - NAPI_GRO_CB(skb)->count = segs; - skb_shinfo(skb)->gso_size = - le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); - skb_shinfo(skb)->gso_type = tpa_info->gso_type; - payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & - RX_TPA_END_CMP_PAYLOAD_OFFSET) >> - RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT; - if 
(TPA_END_GRO_TS(tpa_end)) + if (tcp_ts) tcp_opt_len = 12; if (tpa_info->gso_type == SKB_GSO_TCPV4) { @@ -1020,6 +1151,32 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info, return skb; } +static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, + struct bnxt_tpa_info *tpa_info, + struct rx_tpa_end_cmp *tpa_end, + struct rx_tpa_end_cmp_ext *tpa_end1, + struct sk_buff *skb) +{ +#ifdef CONFIG_INET + int payload_off; + u16 segs; + + segs = TPA_END_TPA_SEGS(tpa_end); + if (segs == 1) + return skb; + + NAPI_GRO_CB(skb)->count = segs; + skb_shinfo(skb)->gso_size = + le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len); + skb_shinfo(skb)->gso_type = tpa_info->gso_type; + payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & + RX_TPA_END_CMP_PAYLOAD_OFFSET) >> + RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT; + skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb); +#endif + return skb; +} + static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, @@ -1130,7 +1287,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, } if (TPA_END_GRO(tpa_end)) - skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb); + skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb); return skb; } @@ -1358,6 +1515,11 @@ static int bnxt_async_event_process(struct bnxt *bp, set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); break; } + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: + if (BNXT_PF(bp)) + goto async_event_process_exit; + set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event); + break; default: netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n", event_id); @@ -1536,6 +1698,76 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) return rx_pkts; } +static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget) +{ + struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); + struct bnxt *bp = bnapi->bp; + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; + struct tx_cmp *txcmp; + struct rx_cmp_ext *rxcmp1; + u32 cp_cons, tmp_raw_cons; + u32 raw_cons = cpr->cp_raw_cons; + u32 rx_pkts = 0; + bool agg_event = false; + + while (1) { + int rc; + + cp_cons = RING_CMP(raw_cons); + txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + if (!TX_CMP_VALID(txcmp, raw_cons)) + break; + + if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) { + tmp_raw_cons = NEXT_RAW_CMP(raw_cons); + cp_cons = RING_CMP(tmp_raw_cons); + rxcmp1 = (struct rx_cmp_ext *) + &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)]; + + if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons)) + break; + + /* force an error to recycle the buffer */ + rxcmp1->rx_cmp_cfa_code_errors_v2 |= + cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR); + + rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event); + if (likely(rc == -EIO)) + rx_pkts++; + else if (rc == -EBUSY) /* partial completion */ + break; + } else if (unlikely(TX_CMP_TYPE(txcmp) == + CMPL_BASE_TYPE_HWRM_DONE)) { + bnxt_hwrm_handler(bp, txcmp); + } else { + netdev_err(bp->dev, + "Invalid completion received on special ring\n"); + } + raw_cons = NEXT_RAW_CMP(raw_cons); + + if (rx_pkts == budget) + break; + } + + cpr->cp_raw_cons = raw_cons; + BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); + writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); + writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell); + + if (agg_event) { + writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell); + writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell); 
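/* The writel() calls just above post each doorbell twice, back to back.
 * This poll handler only runs on the Nitro A0 special completion ring, so
 * the duplicated write is presumably an A0-specific workaround to make
 * sure the producer index actually reaches the hardware; the patch itself
 * does not state the reason, so treat that reading as an assumption.
 */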
+ } + + if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) { + napi_complete(napi); + BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons); + } + return rx_pkts; +} + static int bnxt_poll(struct napi_struct *napi, int budget) { struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi); @@ -2208,6 +2440,9 @@ static int bnxt_alloc_vnics(struct bnxt *bp) num_vnics += bp->rx_nr_rings; #endif + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + num_vnics++; + bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info), GFP_KERNEL); if (!bp->vnic_info) @@ -2225,7 +2460,8 @@ static void bnxt_init_vnics(struct bnxt *bp) struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; vnic->fw_vnic_id = INVALID_HW_RING_ID; - vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; + vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; + vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID; vnic->fw_l2_ctx_id = INVALID_HW_RING_ID; if (bp->vnic_info[i].rss_hash_key) { @@ -2262,7 +2498,7 @@ static void bnxt_set_tpa_flags(struct bnxt *bp) bp->flags &= ~BNXT_FLAG_TPA; if (bp->dev->features & NETIF_F_LRO) bp->flags |= BNXT_FLAG_LRO; - if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0)) + if (bp->dev->features & NETIF_F_GRO) bp->flags |= BNXT_FLAG_GRO; } @@ -2529,7 +2765,7 @@ static int bnxt_alloc_stats(struct bnxt *bp) cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; } - if (BNXT_PF(bp)) { + if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) { bp->hw_port_stats_size = sizeof(struct rx_port_stats) + sizeof(struct tx_port_stats) + 1024; @@ -3031,7 +3267,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); - req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0]; + req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); @@ -3068,8 +3304,10 @@ static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); - req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX | - CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); + req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); + if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) + req.flags |= + cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); req.enables = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | @@ -3176,7 +3414,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_rss_cfg_input req = {0}; - if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID) + if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) return 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); @@ -3188,10 +3426,14 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) req.hash_type = cpu_to_le32(vnic->hash_type); - if (vnic->flags & BNXT_VNIC_RSS_FLAG) - max_rings = bp->rx_nr_rings; - else + if (vnic->flags & BNXT_VNIC_RSS_FLAG) { + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + max_rings = bp->rx_nr_rings - 1; + else + max_rings = bp->rx_nr_rings; + } else { max_rings = 1; + } /* Fill the RSS indirection table with ring group ids */ for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) { @@ -3204,7 +3446,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool 
set_rss) req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); } - req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx); + req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } @@ -3227,32 +3469,35 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } -static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id) +static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, + u16 ctx_idx) { struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); req.rss_cos_lb_ctx_id = - cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx); + cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; + bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; } static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) { - int i; + int i, j; for (i = 0; i < bp->nr_vnics; i++) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; - if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID) - bnxt_hwrm_vnic_ctx_free_one(bp, i); + for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) { + if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID) + bnxt_hwrm_vnic_ctx_free_one(bp, i, j); + } } bp->rsscos_nr_ctxs = 0; } -static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id) +static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) { int rc; struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; @@ -3265,7 +3510,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id) mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = + bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = le16_to_cpu(resp->rss_cos_lb_ctx_id); mutex_unlock(&bp->hwrm_cmd_lock); @@ -3277,17 +3522,34 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) unsigned int ring = 0, grp_idx; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_cfg_input req = {0}; + u16 def_vlan = 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); + + req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); /* Only RSS support for now TBD: COS & LB */ - req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP | - VNIC_CFG_REQ_ENABLES_RSS_RULE); - req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx); - req.cos_rule = cpu_to_le16(0xffff); + if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { + req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); + req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | + VNIC_CFG_REQ_ENABLES_MRU); + } else { + req.rss_rule = cpu_to_le16(0xffff); + } + + if (BNXT_CHIP_TYPE_NITRO_A0(bp) && + (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { + req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); + req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); + } else { + req.cos_rule = cpu_to_le16(0xffff); + } + if (vnic->flags & BNXT_VNIC_RSS_FLAG) ring = 0; else if (vnic->flags & BNXT_VNIC_RFS_FLAG) ring = vnic_id - 1; + else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp)) + ring = bp->rx_nr_rings - 1; grp_idx = bp->rx_ring[ring].bnapi->index; req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); @@ -3297,7 +3559,11 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) 
req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); - if (bp->flags & BNXT_FLAG_STRIP_VLAN) +#ifdef CONFIG_BNXT_SRIOV + if (BNXT_VF(bp)) + def_vlan = bp->vf.vlan; +#endif + if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -3351,7 +3617,8 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, bp->grp_info[grp_idx].fw_grp_id; } - bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; + bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID; + bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID; if (vnic_id == 0) req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); @@ -3784,6 +4051,9 @@ static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp) if (!bp->bnapi) return 0; + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + return 0; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); mutex_lock(&bp->hwrm_cmd_lock); @@ -3812,9 +4082,12 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) struct hwrm_stat_ctx_alloc_input req = {0}; struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + return 0; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); - req.update_period_ms = cpu_to_le32(1000); + req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); mutex_lock(&bp->hwrm_cmd_lock); for (i = 0; i < bp->cp_nr_rings; i++) { @@ -3836,6 +4109,39 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) return 0; } +static int bnxt_hwrm_func_qcfg(struct bnxt *bp) +{ + struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); + req.fid = cpu_to_le16(0xffff); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + goto func_qcfg_exit; + +#ifdef CONFIG_BNXT_SRIOV + if (BNXT_VF(bp)) { + struct bnxt_vf_info *vf = &bp->vf; + + vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; + } +#endif + switch (resp->port_partition_type) { + case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: + case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5: + case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0: + bp->port_partition_type = resp->port_partition_type; + break; + } + +func_qcfg_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + int bnxt_hwrm_func_qcaps(struct bnxt *bp) { int rc = 0; @@ -3855,6 +4161,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) pf->fw_fid = le16_to_cpu(resp->fid); pf->port_id = le16_to_cpu(resp->port_id); + bp->dev->dev_port = pf->port_id; memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN); pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); @@ -3990,6 +4297,11 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) if (resp->hwrm_intf_maj >= 1) bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len); + bp->chip_num = le16_to_cpu(resp->chip_num); + if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev && + !resp->chip_metal) + bp->flags |= BNXT_FLAG_CHIP_NITRO_A0; + hwrm_ver_get_exit: mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -4078,7 +4390,7 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) int rc; /* allocate context for vnic */ - rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id); + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0); if (rc) { netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n", vnic_id, rc); @@ -4086,6 
+4398,16 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) } bp->rsscos_nr_ctxs++; + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1); + if (rc) { + netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n", + vnic_id, rc); + goto vnic_setup_err; + } + bp->rsscos_nr_ctxs++; + } + /* configure default vnic, ring grp */ rc = bnxt_hwrm_vnic_cfg(bp, vnic_id); if (rc) { @@ -4143,6 +4465,36 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) #endif } +/* Allow PF and VF with default VLAN to be in promiscuous mode */ +static bool bnxt_promisc_ok(struct bnxt *bp) +{ +#ifdef CONFIG_BNXT_SRIOV + if (BNXT_VF(bp) && !bp->vf.vlan) + return false; +#endif + return true; +} + +static int bnxt_setup_nitroa0_vnic(struct bnxt *bp) +{ + unsigned int rc = 0; + + rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1); + if (rc) { + netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", + rc); + return rc; + } + + rc = bnxt_hwrm_vnic_cfg(bp, 1); + if (rc) { + netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n", + rc); + return rc; + } + return rc; +} + static int bnxt_cfg_rx_mode(struct bnxt *); static bool bnxt_mc_list_updated(struct bnxt *, u32 *); @@ -4150,6 +4502,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) { struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; int rc = 0; + unsigned int rx_nr_rings = bp->rx_nr_rings; if (irq_re_init) { rc = bnxt_hwrm_stat_ctx_alloc(bp); @@ -4172,8 +4525,11 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) goto err_out; } + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + rx_nr_rings--; + /* default vnic 0 */ - rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings); + rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings); if (rc) { netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc); goto err_out; @@ -4208,7 +4564,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; - if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp)) + if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; if (bp->dev->flags & IFF_ALLMULTI) { @@ -4228,7 +4584,19 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) rc = bnxt_hwrm_set_coal(bp); if (rc) netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n", - rc); + rc); + + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { + rc = bnxt_setup_nitroa0_vnic(bp); + if (rc) + netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n", + rc); + } + + if (BNXT_VF(bp)) { + bnxt_hwrm_func_qcfg(bp); + netdev_update_features(bp->dev); + } return 0; @@ -4532,14 +4900,23 @@ static void bnxt_del_napi(struct bnxt *bp) static void bnxt_init_napi(struct bnxt *bp) { int i; + unsigned int cp_nr_rings = bp->cp_nr_rings; struct bnxt_napi *bnapi; if (bp->flags & BNXT_FLAG_USING_MSIX) { - for (i = 0; i < bp->cp_nr_rings; i++) { + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + cp_nr_rings--; + for (i = 0; i < cp_nr_rings; i++) { bnapi = bp->bnapi[i]; netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); } + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { + bnapi = bp->bnapi[cp_nr_rings]; + netif_napi_add(bp->dev, &bnapi->napi, + bnxt_poll_nitroa0, 64); + napi_hash_add(&bnapi->napi); + } } else { bnapi = bp->bnapi[0]; netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64); @@ -4580,9 +4957,7 @@ static void bnxt_tx_disable(struct bnxt *bp) for (i = 0; i < bp->tx_nr_rings; i++) { txr = &bp->tx_ring[i]; txq = netdev_get_tx_queue(bp->dev, i); - __netif_tx_lock(txq, 
smp_processor_id()); txr->dev_state = BNXT_DEV_STATE_CLOSING; - __netif_tx_unlock(txq); } } /* Stop all TX queues */ @@ -4644,6 +5019,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) int rc = 0; struct hwrm_port_phy_qcaps_input req = {0}; struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_link_info *link_info = &bp->link_info; if (bp->hwrm_spec_code < 0x10201) return 0; @@ -4666,6 +5042,8 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; } + link_info->support_auto_speeds = + le16_to_cpu(resp->supported_speeds_auto_mode); hwrm_phy_qcaps_exit: mutex_unlock(&bp->hwrm_cmd_lock); @@ -4923,7 +5301,7 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp) { struct hwrm_port_phy_cfg_input req = {0}; - if (BNXT_VF(bp)) + if (!BNXT_SINGLE_PF(bp)) return 0; if (pci_num_vf(bp->pdev)) @@ -5073,15 +5451,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) netdev_warn(bp->dev, "failed to update phy settings\n"); } - if (irq_re_init) { -#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE) - vxlan_get_rx_port(bp->dev); -#endif - if (!bnxt_hwrm_tunnel_dst_port_alloc( - bp, htons(0x17c1), - TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE)) - bp->nge_port_cnt = 1; - } + if (irq_re_init) + udp_tunnel_get_rx_info(bp->dev); set_bit(BNXT_STATE_OPEN, &bp->state); bnxt_enable_int(bp); @@ -5122,12 +5493,19 @@ static int bnxt_open(struct net_device *dev) struct bnxt *bp = netdev_priv(dev); int rc = 0; - rc = bnxt_hwrm_func_reset(bp); - if (rc) { - netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n", - rc); - rc = -1; - return rc; + if (!test_bit(BNXT_STATE_FN_RST_DONE, &bp->state)) { + rc = bnxt_hwrm_func_reset(bp); + if (rc) { + netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n", + rc); + rc = -EBUSY; + return rc; + } + /* Do func_reset during the 1st PF open only to prevent killing + * the VFs when the PF is brought down and up. 
+ */ + if (BNXT_PF(bp)) + set_bit(BNXT_STATE_FN_RST_DONE, &bp->state); } return __bnxt_open_nic(bp, true, true); } @@ -5347,8 +5725,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST); - /* Only allow PF to be in promiscuous mode */ - if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp)) + if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; uc_update = bnxt_uc_list_updated(bp); @@ -5440,8 +5817,12 @@ static bool bnxt_rfs_capable(struct bnxt *bp) return false; vnics = 1 + bp->rx_nr_rings; - if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) + if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) { + netdev_warn(bp->dev, + "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n", + min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1)); return false; + } return true; #else @@ -5454,7 +5835,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, { struct bnxt *bp = netdev_priv(dev); - if (!bnxt_rfs_capable(bp)) + if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp)) features &= ~NETIF_F_NTUPLE; /* Both CTAG and STAG VLAN accelaration on the RX side have to be @@ -5469,7 +5850,14 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; } - +#ifdef CONFIG_BNXT_SRIOV + if (BNXT_VF(bp)) { + if (bp->vf.vlan) { + features &= ~(NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_RX); + } + } +#endif return features; } @@ -5483,7 +5871,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features) bool update_tpa = false; flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS; - if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0)) + if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) flags |= BNXT_FLAG_GRO; if (features & NETIF_F_LRO) flags |= BNXT_FLAG_LRO; @@ -5585,9 +5973,10 @@ static void bnxt_dbg_dump_states(struct bnxt *bp) } } -static void bnxt_reset_task(struct bnxt *bp) +static void bnxt_reset_task(struct bnxt *bp, bool silent) { - bnxt_dbg_dump_states(bp); + if (!silent) + bnxt_dbg_dump_states(bp); if (netif_running(bp->dev)) { bnxt_close_nic(bp, false, false); bnxt_open_nic(bp, false, false); @@ -5638,6 +6027,23 @@ bnxt_restart_timer: mod_timer(&bp->timer, jiffies + bp->current_interval); } +/* Only called from bnxt_sp_task() */ +static void bnxt_reset(struct bnxt *bp, bool silent) +{ + /* bnxt_reset_task() calls bnxt_close_nic() which waits + * for BNXT_STATE_IN_SP_TASK to clear. + * If there is a parallel dev_close(), bnxt_close() may be holding + * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we + * must clear BNXT_STATE_IN_SP_TASK before holding rtnl(). + */ + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + rtnl_lock(); + if (test_bit(BNXT_STATE_OPEN, &bp->state)) + bnxt_reset_task(bp, silent); + set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + rtnl_unlock(); +} + static void bnxt_cfg_ntp_filters(struct bnxt *); static void bnxt_sp_task(struct work_struct *work) @@ -5674,16 +6080,20 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_hwrm_tunnel_dst_port_free( bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); } - if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) { - /* bnxt_reset_task() calls bnxt_close_nic() which waits - * for BNXT_STATE_IN_SP_TASK to clear. 
- */ - clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); - rtnl_lock(); - bnxt_reset_task(bp); - set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); - rtnl_unlock(); + if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) { + bnxt_hwrm_tunnel_dst_port_alloc( + bp, bp->nge_port, + TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); + } + if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) { + bnxt_hwrm_tunnel_dst_port_free( + bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); } + if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) + bnxt_reset(bp, false); + + if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event)) + bnxt_reset(bp, true); if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) bnxt_get_port_module_status(bp); @@ -5774,6 +6184,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) bp->tx_coal_ticks_irq = 2; bp->tx_coal_bufs_irq = 2; + bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; + init_timer(&bp->timer); bp->timer.data = (unsigned long)bp; bp->timer.function = bnxt_timer; @@ -5839,7 +6251,7 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu) { struct bnxt *bp = netdev_priv(dev); - if (new_mtu < 60 || new_mtu > 9000) + if (new_mtu < 60 || new_mtu > 9500) return -EINVAL; if (netif_running(dev)) @@ -5918,7 +6330,8 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1, keys1->ports.ports == keys2->ports.ports && keys1->basic.ip_proto == keys2->basic.ip_proto && keys1->basic.n_proto == keys2->basic.n_proto && - ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr)) + ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) && + ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr)) return true; return false; @@ -5931,12 +6344,28 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, struct bnxt_ntuple_filter *fltr, *new_fltr; struct flow_keys *fkeys; struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); - int rc = 0, idx, bit_id; + int rc = 0, idx, bit_id, l2_idx = 0; struct hlist_head *head; if (skb->encapsulation) return -EPROTONOSUPPORT; + if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + int off = 0, j; + + netif_addr_lock_bh(dev); + for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) { + if (ether_addr_equal(eth->h_dest, + vnic->uc_list + off)) { + l2_idx = j + 1; + break; + } + } + netif_addr_unlock_bh(dev); + if (!l2_idx) + return -EINVAL; + } new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC); if (!new_fltr) return -ENOMEM; @@ -5954,6 +6383,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, goto err_free; } + memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN); memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN); idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK; @@ -5979,6 +6409,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, new_fltr->sw_id = (u16)bit_id; new_fltr->flow_id = flow_id; + new_fltr->l2_fltr_idx = l2_idx; new_fltr->rxq = rxq_index; hlist_add_head_rcu(&new_fltr->hash, head); bp->ntp_fltr_count++; @@ -6048,47 +6479,83 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp) #endif /* CONFIG_RFS_ACCEL */ -static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, - __be16 port) +static void bnxt_udp_tunnel_add(struct net_device *dev, + struct udp_tunnel_info *ti) { struct bnxt *bp = netdev_priv(dev); - if (!netif_running(dev)) + if (ti->sa_family != AF_INET6 && 
ti->sa_family != AF_INET) return; - if (sa_family != AF_INET6 && sa_family != AF_INET) + if (!netif_running(dev)) return; - if (bp->vxlan_port_cnt && bp->vxlan_port != port) - return; + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port) + return; - bp->vxlan_port_cnt++; - if (bp->vxlan_port_cnt == 1) { - bp->vxlan_port = port; - set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); + bp->vxlan_port_cnt++; + if (bp->vxlan_port_cnt == 1) { + bp->vxlan_port = ti->port; + set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); + } + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (bp->nge_port_cnt && bp->nge_port != ti->port) + return; + + bp->nge_port_cnt++; + if (bp->nge_port_cnt == 1) { + bp->nge_port = ti->port; + set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event); + } + break; + default: + return; } + + schedule_work(&bp->sp_task); } -static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, - __be16 port) +static void bnxt_udp_tunnel_del(struct net_device *dev, + struct udp_tunnel_info *ti) { struct bnxt *bp = netdev_priv(dev); - if (!netif_running(dev)) + if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET) return; - if (sa_family != AF_INET6 && sa_family != AF_INET) + if (!netif_running(dev)) return; - if (bp->vxlan_port_cnt && bp->vxlan_port == port) { + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port) + return; bp->vxlan_port_cnt--; - if (bp->vxlan_port_cnt == 0) { - set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); - } + if (bp->vxlan_port_cnt != 0) + return; + + set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event); + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (!bp->nge_port_cnt || bp->nge_port != ti->port) + return; + bp->nge_port_cnt--; + + if (bp->nge_port_cnt != 0) + return; + + set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event); + break; + default: + return; } + + schedule_work(&bp->sp_task); } static const struct net_device_ops bnxt_netdev_ops = { @@ -6119,8 +6586,8 @@ static const struct net_device_ops bnxt_netdev_ops = { #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = bnxt_rx_flow_steer, #endif - .ndo_add_vxlan_port = bnxt_add_vxlan_port, - .ndo_del_vxlan_port = bnxt_del_vxlan_port, + .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, + .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, #ifdef CONFIG_NET_RX_BUSY_POLL .ndo_busy_poll = bnxt_busy_poll, #endif @@ -6169,6 +6636,12 @@ static int bnxt_probe_phy(struct bnxt *bp) return rc; } + /* Older firmware does not have supported_auto_speeds, so assume + * that all supported speeds can be autonegotiated. + */ + if (link_info->auto_link_speeds && !link_info->support_auto_speeds) + link_info->support_auto_speeds = link_info->support_speeds; + /*initialize the ethool setting copy with NVM settings */ if (BNXT_AUTO_MODE(link_info->auto_mode)) { link_info->autoneg = BNXT_AUTONEG_SPEED; @@ -6224,7 +6697,10 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs); max_ring_grps = bp->pf.max_hw_ring_grps; } - + if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) { + *max_cp -= 1; + *max_rx -= 2; + } if (bp->flags & BNXT_FLAG_AGG_RINGS) *max_rx >>= 1; *max_rx = min_t(int, *max_rx, max_ring_grps); @@ -6260,6 +6736,10 @@ static int bnxt_set_dflt_rings(struct bnxt *bp) bp->cp_nr_rings = sh ? 
max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : bp->tx_nr_rings + bp->rx_nr_rings; bp->num_stat_ctxs = bp->cp_nr_rings; + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { + bp->rx_nr_rings++; + bp->cp_nr_rings++; + } return rc; } @@ -6286,6 +6766,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) struct bnxt *bp; int rc, max_irqs; + if (pdev->device == 0x16cd && pci_is_bridge(pdev)) + return -ENODEV; + if (version_printed++ == 0) pr_info("%s", version); @@ -6312,13 +6795,25 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, dev); + rc = bnxt_alloc_hwrm_resources(bp); + if (rc) + goto init_err; + + mutex_init(&bp->hwrm_cmd_lock); + rc = bnxt_hwrm_ver_get(bp); + if (rc) + goto init_err; + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | - NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO; + NETIF_F_RXCSUM | NETIF_F_GRO; + + if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) + dev->hw_features |= NETIF_F_LRO; dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | @@ -6337,12 +6832,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) #ifdef CONFIG_BNXT_SRIOV init_waitqueue_head(&bp->sriov_cfg_wait); #endif - rc = bnxt_alloc_hwrm_resources(bp); - if (rc) - goto init_err; - - mutex_init(&bp->hwrm_cmd_lock); - bnxt_hwrm_ver_get(bp); + bp->gro_func = bnxt_gro_func_5730x; + if (BNXT_CHIP_NUM_57X1X(bp->chip_num)) + bp->gro_func = bnxt_gro_func_5731x; rc = bnxt_hwrm_func_drv_rgtr(bp); if (rc) @@ -6365,6 +6857,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto init_err; } + bnxt_hwrm_func_qcfg(bp); + bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); if (BNXT_PF(bp)) @@ -6375,7 +6869,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) #endif bnxt_set_dflt_rings(bp); - if (BNXT_PF(bp)) { + if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) { dev->hw_features |= NETIF_F_NTUPLE; if (bnxt_rfs_capable(bp)) { bp->flags |= BNXT_FLAG_RFS; @@ -6424,6 +6918,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); + struct bnxt *bp = netdev_priv(netdev); netdev_info(netdev, "PCI I/O error detected\n"); @@ -6438,6 +6933,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, if (netif_running(netdev)) bnxt_close(netdev); + /* So that func_reset will be done during slot_reset */ + clear_bit(BNXT_STATE_FN_RST_DONE, &bp->state); pci_disable_device(pdev); rtnl_unlock(); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 2824d65b2..23e04a614 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -11,10 +11,10 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.2.0" +#define DRV_MODULE_VERSION "1.3.0" #define DRV_VER_MAJ 1 -#define DRV_VER_MIN 0 +#define DRV_VER_MIN 3 #define DRV_VER_UPD 0 struct tx_bd { @@ -298,13 +298,14 @@ struct rx_tpa_start_cmp_ext { #define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC (0x1 << 1) #define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2) #define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) + #define RX_TPA_START_CMP_FLAGS2_IP_TYPE (0x1 << 8) __le32 rx_tpa_start_cmp_metadata; 
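/* The hunk below repurposes the former rx_tpa_start_cmp_unused5 word as
 * rx_tpa_start_cmp_hdr_info: a packed field carrying the outer L3, inner
 * L2 and inner L3 offsets (decoded by the BNXT_TPA_*_OFF() masks added to
 * struct bnxt_tpa_info further down), which the new bnxt_gro_func_5731x()
 * path uses to locate the headers of tunnelled TPA packets.
 */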
__le32 rx_tpa_start_cmp_cfa_code_v2; #define RX_TPA_START_CMP_V2 (0x1 << 0) #define RX_TPA_START_CMP_CFA_CODE (0xffff << 16) #define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16 - __le32 rx_tpa_start_cmp_unused5; + __le32 rx_tpa_start_cmp_hdr_info; }; struct rx_tpa_end_cmp { @@ -358,7 +359,8 @@ struct rx_tpa_end_cmp { RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO) #define TPA_END_GRO_TS(rx_tpa_end) \ - ((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & cpu_to_le32(RX_TPA_END_GRO_TS)) + (!!((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & \ + cpu_to_le32(RX_TPA_END_GRO_TS))) struct rx_tpa_end_cmp_ext { __le32 rx_tpa_end_cmp_dup_acks; @@ -584,6 +586,19 @@ struct bnxt_tpa_info { u32 metadata; enum pkt_hash_types hash_type; u32 rss_hash; + u32 hdr_info; + +#define BNXT_TPA_L4_SIZE(hdr_info) \ + (((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32) + +#define BNXT_TPA_INNER_L3_OFF(hdr_info) \ + (((hdr_info) >> 18) & 0x1ff) + +#define BNXT_TPA_INNER_L2_OFF(hdr_info) \ + (((hdr_info) >> 9) & 0x1ff) + +#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \ + ((hdr_info) & 0x1ff) }; struct bnxt_rx_ring_info { @@ -680,7 +695,8 @@ struct bnxt_ring_grp_info { struct bnxt_vnic_info { u16 fw_vnic_id; /* returned by Chimp during alloc */ - u16 fw_rss_cos_lb_ctx; +#define BNXT_MAX_CTX_PER_VNIC 2 + u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC]; u16 fw_l2_ctx_id; #define BNXT_MAX_UC_ADDRS 4 __le64 fw_l2_filter_id[BNXT_MAX_UC_ADDRS]; @@ -739,8 +755,8 @@ struct bnxt_vf_info { struct bnxt_pf_info { #define BNXT_FIRST_PF_FID 1 #define BNXT_FIRST_VF_FID 128 - u32 fw_fid; - u8 port_id; + u16 fw_fid; + u16 port_id; u8 mac_addr[ETH_ALEN]; u16 max_rsscos_ctxs; u16 max_cp_rings; @@ -769,10 +785,12 @@ struct bnxt_pf_info { struct bnxt_ntuple_filter { struct hlist_node hash; + u8 dst_mac_addr[ETH_ALEN]; u8 src_mac_addr[ETH_ALEN]; struct flow_keys fkeys; __le64 filter_id; u16 sw_id; + u8 l2_fltr_idx; u16 rxq; u32 flow_id; unsigned long state; @@ -835,6 +853,7 @@ struct bnxt_link_info { #define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB #define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB #define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB + u16 support_auto_speeds; u16 lp_auto_link_speeds; u16 force_link_speed; u32 preemphasis; @@ -873,6 +892,45 @@ struct bnxt { void __iomem *bar2; u32 reg_base; + u16 chip_num; +#define CHIP_NUM_57301 0x16c8 +#define CHIP_NUM_57302 0x16c9 +#define CHIP_NUM_57304 0x16ca +#define CHIP_NUM_58700 0x16cd +#define CHIP_NUM_57402 0x16d0 +#define CHIP_NUM_57404 0x16d1 +#define CHIP_NUM_57406 0x16d2 + +#define CHIP_NUM_57311 0x16ce +#define CHIP_NUM_57312 0x16cf +#define CHIP_NUM_57314 0x16df +#define CHIP_NUM_57412 0x16d6 +#define CHIP_NUM_57414 0x16d7 +#define CHIP_NUM_57416 0x16d8 +#define CHIP_NUM_57417 0x16d9 + +#define BNXT_CHIP_NUM_5730X(chip_num) \ + ((chip_num) >= CHIP_NUM_57301 && \ + (chip_num) <= CHIP_NUM_57304) + +#define BNXT_CHIP_NUM_5740X(chip_num) \ + ((chip_num) >= CHIP_NUM_57402 && \ + (chip_num) <= CHIP_NUM_57406) + +#define BNXT_CHIP_NUM_5731X(chip_num) \ + ((chip_num) == CHIP_NUM_57311 || \ + (chip_num) == CHIP_NUM_57312 || \ + (chip_num) == CHIP_NUM_57314) + +#define BNXT_CHIP_NUM_5741X(chip_num) \ + ((chip_num) >= CHIP_NUM_57412 && \ + (chip_num) <= CHIP_NUM_57417) + +#define BNXT_CHIP_NUM_57X0X(chip_num) \ + (BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num)) + +#define BNXT_CHIP_NUM_57X1X(chip_num) \ + (BNXT_CHIP_NUM_5731X(chip_num) || BNXT_CHIP_NUM_5741X(chip_num)) struct net_device *dev; struct pci_dev *pdev; @@ -900,6 +958,7 @@ struct bnxt { 
#define BNXT_FLAG_SHARED_RINGS 0x200 #define BNXT_FLAG_PORT_STATS 0x400 #define BNXT_FLAG_EEE_CAP 0x1000 + #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ BNXT_FLAG_RFS | \ @@ -907,12 +966,18 @@ struct bnxt { #define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF)) #define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF) +#define BNXT_NPAR(bp) ((bp)->port_partition_type) +#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp)) +#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0) struct bnxt_napi **bnapi; struct bnxt_rx_ring_info *rx_ring; struct bnxt_tx_ring_info *tx_ring; + struct sk_buff * (*gro_func)(struct bnxt_tpa_info *, int, int, + struct sk_buff *); + u32 rx_buf_size; u32 rx_buf_use_size; /* useable size */ u32 rx_ring_size; @@ -959,6 +1024,7 @@ struct bnxt { unsigned long state; #define BNXT_STATE_OPEN 0 #define BNXT_STATE_IN_SP_TASK 1 +#define BNXT_STATE_FN_RST_DONE 2 struct bnxt_irq *irq_tbl; u8 mac_addr[ETH_ALEN]; @@ -991,8 +1057,10 @@ struct bnxt { __be16 vxlan_port; u8 vxlan_port_cnt; __le16 vxlan_fw_dst_port_id; + __be16 nge_port; u8 nge_port_cnt; __le16 nge_fw_dst_port_id; + u8 port_partition_type; u16 rx_coal_ticks; u16 rx_coal_ticks_irq; @@ -1005,6 +1073,11 @@ struct bnxt { #define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2) + u32 stats_coal_ticks; +#define BNXT_DEF_STATS_COAL_TICKS 1000000 +#define BNXT_MIN_STATS_COAL_TICKS 250000 +#define BNXT_MAX_STATS_COAL_TICKS 1000000 + struct work_struct sp_task; unsigned long sp_event; #define BNXT_RX_MASK_SP_EVENT 0 @@ -1018,6 +1091,9 @@ struct bnxt { #define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8 #define BNXT_PERIODIC_STATS_SP_EVENT 9 #define BNXT_HWRM_PORT_MODULE_SP_EVENT 10 +#define BNXT_RESET_TASK_SILENT_SP_EVENT 11 +#define BNXT_GENEVE_ADD_PORT_SP_EVENT 12 +#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13 struct bnxt_pf_info pf; #ifdef CONFIG_BNXT_SRIOV diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1b0ae4a72..b83e17403 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -56,6 +56,8 @@ static int bnxt_get_coalesce(struct net_device *dev, coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq; coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq; + coal->stats_block_coalesce_usecs = bp->stats_coal_ticks; + return 0; } @@ -63,6 +65,7 @@ static int bnxt_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) { struct bnxt *bp = netdev_priv(dev); + bool update_stats = false; int rc = 0; bp->rx_coal_ticks = coal->rx_coalesce_usecs; @@ -76,8 +79,26 @@ static int bnxt_set_coalesce(struct net_device *dev, bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq; bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq; - if (netif_running(dev)) - rc = bnxt_hwrm_set_coal(bp); + if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) { + u32 stats_ticks = coal->stats_block_coalesce_usecs; + + stats_ticks = clamp_t(u32, stats_ticks, + BNXT_MIN_STATS_COAL_TICKS, + BNXT_MAX_STATS_COAL_TICKS); + stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS); + bp->stats_coal_ticks = stats_ticks; + update_stats = true; + } + + if (netif_running(dev)) { + if (update_stats) { + rc = bnxt_close_nic(bp, true, false); + if (!rc) + rc = bnxt_open_nic(bp, true, false); + } else { + rc = bnxt_hwrm_set_coal(bp); + } + } return rc; } @@ -341,9 +362,13 @@ static void bnxt_get_channels(struct net_device *dev, channel->max_other = 0; if (bp->flags & 
BNXT_FLAG_SHARED_RINGS) { channel->combined_count = bp->rx_nr_rings; + if (BNXT_CHIP_TYPE_NITRO_A0(bp)) + channel->combined_count--; } else { - channel->rx_count = bp->rx_nr_rings; - channel->tx_count = bp->tx_nr_rings_per_tc; + if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) { + channel->rx_count = bp->rx_nr_rings; + channel->tx_count = bp->tx_nr_rings_per_tc; + } } } @@ -366,6 +391,10 @@ static int bnxt_set_channels(struct net_device *dev, (channel->rx_count || channel->tx_count)) return -EINVAL; + if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count || + channel->tx_count)) + return -EINVAL; + if (channel->combined_count) sh = true; @@ -628,7 +657,66 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) return speed_mask; } -static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info) +#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\ +{ \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 100baseT_Full); \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 1000baseT_Full); \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 10000baseT_Full); \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 25000baseCR_Full); \ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 40000baseCR4_Full);\ + if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + 50000baseCR2_Full);\ + if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + Pause); \ + if (!((fw_pause) & BNXT_LINK_PAUSE_TX)) \ + ethtool_link_ksettings_add_link_mode( \ + lk_ksettings, name, Asym_Pause);\ + } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) { \ + ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\ + Asym_Pause); \ + } \ +} + +#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name) \ +{ \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 100baseT_Full) || \ + ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 100baseT_Half)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 1000baseT_Full) || \ + ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 1000baseT_Half)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 10000baseT_Full)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 25000baseCR_Full)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 40000baseCR4_Full)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB; \ + if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \ + 50000baseCR2_Full)) \ + (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \ +} + +static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *lk_ksettings) { u16 fw_speeds = link_info->auto_link_speeds; u8 fw_pause = 0; @@ -636,10 +724,11 @@ static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info) if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) fw_pause = link_info->auto_pause_setting; - return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); + 
BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising); } -static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info) +static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *lk_ksettings) { u16 fw_speeds = link_info->lp_auto_link_speeds; u8 fw_pause = 0; @@ -647,16 +736,24 @@ static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info) if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) fw_pause = link_info->lp_pause; - return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); + BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, + lp_advertising); } -static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info) +static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info, + struct ethtool_link_ksettings *lk_ksettings) { u16 fw_speeds = link_info->support_speeds; - u32 supported; - supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); - return supported | SUPPORTED_Pause | SUPPORTED_Asym_Pause; + BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported); + + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause); + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + Asym_Pause); + + if (link_info->support_auto_speeds) + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + Autoneg); } u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) @@ -683,65 +780,62 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) } } -static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int bnxt_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *lk_ksettings) { struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; - u16 ethtool_speed; + struct ethtool_link_settings *base = &lk_ksettings->base; + u32 ethtool_speed; - cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info); - - if (link_info->auto_link_speeds) - cmd->supported |= SUPPORTED_Autoneg; + ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); + bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings); + ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); if (link_info->autoneg) { - cmd->advertising = - bnxt_fw_to_ethtool_advertised_spds(link_info); - cmd->advertising |= ADVERTISED_Autoneg; - cmd->autoneg = AUTONEG_ENABLE; + bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings); + ethtool_link_ksettings_add_link_mode(lk_ksettings, + advertising, Autoneg); + base->autoneg = AUTONEG_ENABLE; if (link_info->phy_link_status == BNXT_LINK_LINK) - cmd->lp_advertising = - bnxt_fw_to_ethtool_lp_adv(link_info); + bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings); ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); if (!netif_carrier_ok(dev)) - cmd->duplex = DUPLEX_UNKNOWN; + base->duplex = DUPLEX_UNKNOWN; else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) - cmd->duplex = DUPLEX_FULL; + base->duplex = DUPLEX_FULL; else - cmd->duplex = DUPLEX_HALF; + base->duplex = DUPLEX_HALF; } else { - cmd->autoneg = AUTONEG_DISABLE; - cmd->advertising = 0; + base->autoneg = AUTONEG_DISABLE; ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed); - cmd->duplex = DUPLEX_HALF; + base->duplex = DUPLEX_HALF; if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) - cmd->duplex = DUPLEX_FULL; + base->duplex = DUPLEX_FULL; } - ethtool_cmd_speed_set(cmd, ethtool_speed); + base->speed = ethtool_speed; - cmd->port = PORT_NONE; + base->port = PORT_NONE; if (link_info->media_type == 
PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { - cmd->port = PORT_TP; - cmd->supported |= SUPPORTED_TP; - cmd->advertising |= ADVERTISED_TP; + base->port = PORT_TP; + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + TP); + ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, + TP); } else { - cmd->supported |= SUPPORTED_FIBRE; - cmd->advertising |= ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, + FIBRE); + ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising, + FIBRE); if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC) - cmd->port = PORT_DA; + base->port = PORT_DA; else if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE) - cmd->port = PORT_FIBRE; + base->port = PORT_FIBRE; } - - if (link_info->transceiver == - PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL) - cmd->transceiver = XCVR_INTERNAL; - else - cmd->transceiver = XCVR_EXTERNAL; - cmd->phy_address = link_info->phy_addr; + base->phy_address = link_info->phy_addr; return 0; } @@ -815,37 +909,25 @@ u16 bnxt_get_fw_auto_link_speeds(u32 advertising) return fw_speed_mask; } -static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int bnxt_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *lk_ksettings) { - int rc = 0; struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; + const struct ethtool_link_settings *base = &lk_ksettings->base; u32 speed, fw_advertising = 0; bool set_pause = false; + int rc = 0; - if (BNXT_VF(bp)) - return rc; - - if (cmd->autoneg == AUTONEG_ENABLE) { - u32 supported_spds = bnxt_fw_to_ethtool_support_spds(link_info); + if (!BNXT_SINGLE_PF(bp)) + return -EOPNOTSUPP; - if (cmd->advertising & ~(supported_spds | ADVERTISED_Autoneg | - ADVERTISED_TP | ADVERTISED_FIBRE)) { - netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n", - cmd->advertising); - rc = -EINVAL; - goto set_setting_exit; - } - fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising); - if (fw_advertising & ~link_info->support_speeds) { - netdev_err(dev, "Advertising parameters are not supported! 
(adv: 0x%x)\n", - cmd->advertising); - rc = -EINVAL; - goto set_setting_exit; - } + if (base->autoneg == AUTONEG_ENABLE) { + BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings, + advertising); link_info->autoneg |= BNXT_AUTONEG_SPEED; if (!fw_advertising) - link_info->advertising = link_info->support_speeds; + link_info->advertising = link_info->support_auto_speeds; else link_info->advertising = fw_advertising; /* any change to autoneg will cause link change, therefore the @@ -863,16 +945,12 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) rc = -EINVAL; goto set_setting_exit; } - /* TODO: currently don't support half duplex */ - if (cmd->duplex == DUPLEX_HALF) { + if (base->duplex == DUPLEX_HALF) { netdev_err(dev, "HALF DUPLEX is not supported!\n"); rc = -EINVAL; goto set_setting_exit; } - /* If received a request for an unknown duplex, assume full*/ - if (cmd->duplex == DUPLEX_UNKNOWN) - cmd->duplex = DUPLEX_FULL; - speed = ethtool_cmd_speed(cmd); + speed = base->speed; fw_speed = bnxt_get_fw_speed(dev, speed); if (!fw_speed) { rc = -EINVAL; @@ -911,8 +989,8 @@ static int bnxt_set_pauseparam(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info = &bp->link_info; - if (BNXT_VF(bp)) - return rc; + if (!BNXT_SINGLE_PF(bp)) + return -EOPNOTSUPP; if (epause->autoneg) { if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) @@ -1010,6 +1088,8 @@ static int bnxt_firmware_reset(struct net_device *dev, case BNX_DIR_TYPE_APE_FW: case BNX_DIR_TYPE_APE_PATCH: req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT; + /* Self-reset APE upon next PCIe reset: */ + req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST; break; case BNX_DIR_TYPE_KONG_FW: case BNX_DIR_TYPE_KONG_PATCH: @@ -1043,9 +1123,27 @@ static int bnxt_flash_firmware(struct net_device *dev, case BNX_DIR_TYPE_BOOTCODE_2: code_type = CODE_BOOT; break; + case BNX_DIR_TYPE_CHIMP_PATCH: + code_type = CODE_CHIMP_PATCH; + break; case BNX_DIR_TYPE_APE_FW: code_type = CODE_MCTP_PASSTHRU; break; + case BNX_DIR_TYPE_APE_PATCH: + code_type = CODE_APE_PATCH; + break; + case BNX_DIR_TYPE_KONG_FW: + code_type = CODE_KONG_FW; + break; + case BNX_DIR_TYPE_KONG_PATCH: + code_type = CODE_KONG_PATCH; + break; + case BNX_DIR_TYPE_BONO_FW: + code_type = CODE_BONO_FW; + break; + case BNX_DIR_TYPE_BONO_PATCH: + code_type = CODE_BONO_PATCH; + break; default: netdev_err(dev, "Unsupported directory entry type: %u\n", dir_type); @@ -1100,6 +1198,8 @@ static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type) case BNX_DIR_TYPE_APE_PATCH: case BNX_DIR_TYPE_KONG_FW: case BNX_DIR_TYPE_KONG_PATCH: + case BNX_DIR_TYPE_BONO_FW: + case BNX_DIR_TYPE_BONO_PATCH: return true; } @@ -1137,7 +1237,8 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev, const struct firmware *fw; int rc; - if (bnxt_dir_type_is_executable(dir_type) == false) + if (dir_type != BNX_DIR_TYPE_UPDATE && + bnxt_dir_type_is_executable(dir_type) == false) return -EINVAL; rc = request_firmware(&fw, filename, &dev->dev); @@ -1433,8 +1534,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); int rc = 0; - if (BNXT_VF(bp)) - return 0; + if (!BNXT_SINGLE_PF(bp)) + return -EOPNOTSUPP; if (!(bp->flags & BNXT_FLAG_EEE_CAP)) return -EOPNOTSUPP; @@ -1618,8 +1719,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev, } const struct ethtool_ops bnxt_ethtool_ops = { - .get_settings = bnxt_get_settings, - .set_settings = 
bnxt_set_settings, + .get_link_ksettings = bnxt_get_link_ksettings, + .set_link_ksettings = bnxt_set_link_ksettings, .get_pauseparam = bnxt_get_pauseparam, .set_pauseparam = bnxt_set_pauseparam, .get_drvinfo = bnxt_get_drvinfo, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h index 461675caa..82bf44ab8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h @@ -70,6 +70,7 @@ enum SUPPORTED_CODE { CODE_KONG_PATCH, /* 18 - KONG Patch firmware */ CODE_BONO_FW, /* 19 - BONO firmware */ CODE_BONO_PATCH, /* 20 - BONO Patch firmware */ + CODE_CHIMP_PATCH, /* 21 - ChiMP Patch firmware */ MAX_CODE_TYPE, }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 05e3c49a7..517567f6d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -105,6 +105,7 @@ struct hwrm_async_event_cmpl { #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE (0x7UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0) @@ -484,12 +485,12 @@ struct hwrm_async_event_cmpl_hwrm_error { #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL }; -/* HW Resource Manager Specification 1.2.2 */ +/* HW Resource Manager Specification 1.3.0 */ #define HWRM_VERSION_MAJOR 1 -#define HWRM_VERSION_MINOR 2 -#define HWRM_VERSION_UPDATE 2 +#define HWRM_VERSION_MINOR 3 +#define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_STR "1.2.2" +#define HWRM_VERSION_STR "1.3.0" /* * Following is the signature for HWRM message field that indicates not * applicable (All F's). Need to cast it the size of the field if needed. 
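The bnxt_ethtool.c hunks above swap the legacy .get_settings/.set_settings pair for .get_link_ksettings/.set_link_ksettings. The sketch below is a minimal illustration of that API pattern using only the generic helpers from <linux/ethtool.h>; the foo_* name is a hypothetical placeholder, not code from this diff. The motivation for the conversion is visible in the BNXT_FW_TO_ETHTOOL_SPDS macro: link modes now live in arbitrarily wide bitmaps rather than a fixed u32 mask, so 25G and 50G modes can be reported at all.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *ks)
{
	/* Start from empty link-mode bitmaps, then set one bit per mode. */
	ethtool_link_ksettings_zero_link_mode(ks, supported);
	ethtool_link_ksettings_zero_link_mode(ks, advertising);

	ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(ks, supported, 25000baseCR_Full);
	ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
	ethtool_link_ksettings_add_link_mode(ks, advertising, 25000baseCR_Full);

	/* Scalar settings move into the embedded "base" struct. */
	ks->base.speed = SPEED_25000;
	ks->base.duplex = DUPLEX_FULL;
	ks->base.autoneg = AUTONEG_ENABLE;
	ks->base.port = PORT_DA;
	return 0;
}

bnxt_get_link_ksettings() above is the full-featured version of the same shape: zero the bitmaps, translate firmware speed masks into link-mode bits, then fill base.speed/duplex/autoneg/port.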
@@ -611,6 +612,9 @@ struct cmd_nums { #define HWRM_FWD_RESP (0xd2UL) #define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL) #define HWRM_TEMP_MONITOR_QUERY (0xe0UL) + #define HWRM_WOL_FILTER_ALLOC (0xf0UL) + #define HWRM_WOL_FILTER_FREE (0xf1UL) + #define HWRM_WOL_FILTER_QCFG (0xf2UL) #define HWRM_DBG_READ_DIRECT (0xff10UL) #define HWRM_DBG_READ_INDIRECT (0xff11UL) #define HWRM_DBG_WRITE_DIRECT (0xff12UL) @@ -1020,6 +1024,10 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL + #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL + #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL + #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL + #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL u8 mac_address[6]; __le16 max_rsscos_ctx; __le16 max_cmpl_rings; @@ -1066,8 +1074,9 @@ struct hwrm_func_qcfg_output { __le16 fid; __le16 port_id; __le16 vlan; - u8 unused_0; - u8 unused_1; + __le16 flags; + #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL + #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL u8 mac_address[6]; __le16 pci_id; __le16 alloc_rsscos_ctx; @@ -1086,23 +1095,23 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 (0x3UL << 0) #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 (0x4UL << 0) #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN (0xffUL << 0) - u8 unused_2; + u8 unused_0; __le16 dflt_vnic_id; - u8 unused_3; - u8 unused_4; + u8 unused_1; + u8 unused_2; __le32 min_bw; __le32 max_bw; u8 evb_mode; #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB (0x0UL << 0) #define FUNC_QCFG_RESP_EVB_MODE_VEB (0x1UL << 0) #define FUNC_QCFG_RESP_EVB_MODE_VEPA (0x2UL << 0) - u8 unused_5; - __le16 unused_6; + u8 unused_3; + __le16 unused_4; __le32 alloc_mcast_filters; __le32 alloc_hw_ring_grps; + u8 unused_5; + u8 unused_6; u8 unused_7; - u8 unused_8; - u8 unused_9; u8 valid; }; @@ -1410,8 +1419,8 @@ struct hwrm_func_buf_rgtr_input { #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K (0xcUL << 0) #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K (0xdUL << 0) #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K (0x10UL << 0) - #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x16UL << 0) - #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x17UL << 0) + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x15UL << 0) + #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x16UL << 0) #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G (0x1eUL << 0) __le16 req_buf_len; __le16 resp_buf_len; @@ -1499,6 +1508,12 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL + #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL __le32 enables; #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL @@ -1815,13 +1830,22 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24) #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24) #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 
24) - __le32 unused_1; + __le16 fec_cfg; + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL + #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL + u8 unused_1; + u8 unused_2; char phy_vendor_name[16]; char phy_vendor_partnumber[16]; - __le32 unused_2; - u8 unused_3; + __le32 unused_3; u8 unused_4; u8 unused_5; + u8 unused_6; u8 valid; }; @@ -1842,6 +1866,8 @@ struct hwrm_port_mac_cfg_input { #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL + #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL + #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL __le32 enables; #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL @@ -2127,6 +2153,7 @@ struct hwrm_port_phy_i2c_read_output { u8 valid; }; +/* hwrm_queue_qportcfg */ /* Input (24 bytes) */ struct hwrm_queue_qportcfg_input { __le16 req_type; @@ -2382,7 +2409,7 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0) u8 queue_id0_pri_lvl; u8 queue_id0_bw_weight; u8 queue_id1; @@ -2392,7 +2419,7 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0) u8 queue_id1_pri_lvl; u8 queue_id1_bw_weight; u8 queue_id2; @@ -2402,7 +2429,7 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0) u8 queue_id2_pri_lvl; u8 queue_id2_bw_weight; u8 queue_id3; @@ -2412,7 +2439,7 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0) u8 queue_id3_pri_lvl; u8 queue_id3_bw_weight; u8 queue_id4; @@ -2422,7 +2449,7 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 
(0x2UL << 0) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0) u8 queue_id4_pri_lvl; u8 queue_id4_bw_weight; u8 queue_id5; @@ -2432,7 +2459,7 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0) u8 queue_id5_pri_lvl; u8 queue_id5_bw_weight; u8 queue_id6; @@ -2442,7 +2469,7 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0) u8 queue_id6_pri_lvl; u8 queue_id6_bw_weight; u8 queue_id7; @@ -2452,7 +2479,7 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0) #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0) u8 queue_id7_pri_lvl; u8 queue_id7_bw_weight; u8 unused_1[5]; @@ -3150,7 +3177,7 @@ struct hwrm_cfa_l2_filter_cfg_output { }; /* hwrm_cfa_l2_set_rx_mask */ -/* Input (40 bytes) */ +/* Input (56 bytes) */ struct hwrm_cfa_l2_set_rx_mask_input { __le16 req_type; __le16 cmpl_ring; @@ -3165,9 +3192,15 @@ struct hwrm_cfa_l2_set_rx_mask_input { #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL + #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL __le64 mc_tbl_addr; __le32 num_mc_entries; __le32 unused_0; + __le64 vlan_tag_tbl_addr; + __le32 num_vlan_tags; + __le32 unused_1; }; /* Output (16 bytes) */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h index 40a7b0e09..73f224955 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h @@ -13,6 +13,7 @@ enum bnxt_nvm_directory_type { BNX_DIR_TYPE_UNUSED = 0, BNX_DIR_TYPE_PKG_LOG = 1, + BNX_DIR_TYPE_UPDATE = 2, BNX_DIR_TYPE_CHIMP_PATCH = 3, BNX_DIR_TYPE_BOOTCODE = 4, BNX_DIR_TYPE_VPD = 5, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 363884dd9..50d2007a2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -143,6 +143,9 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos) u16 vlan_tag; int rc; + if (bp->hwrm_spec_code < 0x10201) + return -ENOTSUPP; + rc = bnxt_vf_ndo_prep(bp, vf_id); if (rc) return rc; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 6ad2b4a51..6c4652b52 100644 
--- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -12543,10 +12543,6 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, info->data = TG3_RSS_MAX_NUM_QS; } - /* The first interrupt vector only - * handles link interrupts. - */ - info->data -= 1; return 0; default: @@ -14005,7 +14001,9 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) } if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) || + (!ec->rx_coalesce_usecs) || (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) || + (!ec->tx_coalesce_usecs) || (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) || (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) || (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) || @@ -14016,16 +14014,6 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) (ec->stats_block_coalesce_usecs < min_stat_coal_ticks)) return -EINVAL; - /* No rx interrupts will be generated if both are zero */ - if ((ec->rx_coalesce_usecs == 0) && - (ec->rx_max_coalesced_frames == 0)) - return -EINVAL; - - /* No tx interrupts will be generated if both are zero */ - if ((ec->tx_coalesce_usecs == 0) && - (ec->tx_max_coalesced_frames == 0)) - return -EINVAL; - /* Only copy relevant parameters, ignore all others. */ tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs; tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs; @@ -18125,14 +18113,14 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, rtnl_lock(); - /* We needn't recover from permanent error */ - if (state == pci_channel_io_frozen) - tp->pcierr_recovery = true; - /* We probably don't have netdev yet */ if (!netdev || !netif_running(netdev)) goto done; + /* We needn't recover from permanent error */ + if (state == pci_channel_io_frozen) + tp->pcierr_recovery = true; + tg3_phy_stop(tp); tg3_netif_stop(tp); @@ -18229,7 +18217,7 @@ static void tg3_io_resume(struct pci_dev *pdev) rtnl_lock(); - if (!netif_running(netdev)) + if (!netdev || !netif_running(netdev)) goto done; tg3_full_lock(tp, 0); diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index 8fc246ea1..05c1c1dd7 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -312,7 +312,8 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf, struct bnad_debug_info *regrd_debug = file->private_data; struct bnad *bnad = (struct bnad *)regrd_debug->i_private; struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc; - int addr, len, rc, i; + int rc, i; + u32 addr, len; u32 *regbuf; void __iomem *rb, *reg_addr; unsigned long flags; @@ -372,7 +373,8 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf, struct bnad_debug_info *debug = file->private_data; struct bnad *bnad = (struct bnad *)debug->i_private; struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc; - int addr, val, rc; + int rc; + u32 addr, val; void __iomem *reg_addr; unsigned long flags; void *kern_buf; diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index d95098223..47b1bbb95 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -31,15 +31,10 @@ #define BNAD_NUM_TXF_COUNTERS 12 #define BNAD_NUM_RXF_COUNTERS 10 #define BNAD_NUM_CQ_COUNTERS (3 + 5) -#define BNAD_NUM_RXQ_COUNTERS 6 +#define BNAD_NUM_RXQ_COUNTERS 7 #define BNAD_NUM_TXQ_COUNTERS 5 -#define BNAD_ETHTOOL_STATS_NUM \ - (sizeof(struct 
rtnl_link_stats64) / sizeof(u64) + \ - sizeof(struct bnad_drv_stats) / sizeof(u64) + \ - offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64)) - -static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { +static const char *bnad_net_stats_strings[] = { "rx_packets", "tx_packets", "rx_bytes", @@ -50,22 +45,10 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { "tx_dropped", "multicast", "collisions", - "rx_length_errors", - "rx_over_errors", "rx_crc_errors", "rx_frame_errors", - "rx_fifo_errors", - "rx_missed_errors", - - "tx_aborted_errors", - "tx_carrier_errors", "tx_fifo_errors", - "tx_heartbeat_errors", - "tx_window_errors", - - "rx_compressed", - "tx_compressed", "netif_queue_stop", "netif_queue_wakeup", @@ -254,6 +237,8 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = { "fc_tx_fid_parity_errors", }; +#define BNAD_ETHTOOL_STATS_NUM ARRAY_SIZE(bnad_net_stats_strings) + static int bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) { @@ -658,6 +643,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string) string += ETH_GSTRING_LEN; sprintf(string, "rxq%d_allocbuf_failed", q_num); string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_mapbuf_failed", q_num); + string += ETH_GSTRING_LEN; sprintf(string, "rxq%d_producer_index", q_num); string += ETH_GSTRING_LEN; sprintf(string, "rxq%d_consumer_index", q_num); @@ -678,6 +665,9 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string) sprintf(string, "rxq%d_allocbuf_failed", q_num); string += ETH_GSTRING_LEN; + sprintf(string, "rxq%d_mapbuf_failed", + q_num); + string += ETH_GSTRING_LEN; sprintf(string, "rxq%d_producer_index", q_num); string += ETH_GSTRING_LEN; @@ -854,9 +844,9 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *buf) { struct bnad *bnad = netdev_priv(netdev); - int i, j, bi; + int i, j, bi = 0; unsigned long flags; - struct rtnl_link_stats64 *net_stats64; + struct rtnl_link_stats64 net_stats64; u64 *stats64; u32 bmap; @@ -871,14 +861,25 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, * under the same lock */ spin_lock_irqsave(&bnad->bna_lock, flags); - bi = 0; - memset(buf, 0, stats->n_stats * sizeof(u64)); - - net_stats64 = (struct rtnl_link_stats64 *)buf; - bnad_netdev_qstats_fill(bnad, net_stats64); - bnad_netdev_hwstats_fill(bnad, net_stats64); - bi = sizeof(*net_stats64) / sizeof(u64); + memset(&net_stats64, 0, sizeof(net_stats64)); + bnad_netdev_qstats_fill(bnad, &net_stats64); + bnad_netdev_hwstats_fill(bnad, &net_stats64); + + buf[bi++] = net_stats64.rx_packets; + buf[bi++] = net_stats64.tx_packets; + buf[bi++] = net_stats64.rx_bytes; + buf[bi++] = net_stats64.tx_bytes; + buf[bi++] = net_stats64.rx_errors; + buf[bi++] = net_stats64.tx_errors; + buf[bi++] = net_stats64.rx_dropped; + buf[bi++] = net_stats64.tx_dropped; + buf[bi++] = net_stats64.multicast; + buf[bi++] = net_stats64.collisions; + buf[bi++] = net_stats64.rx_length_errors; + buf[bi++] = net_stats64.rx_crc_errors; + buf[bi++] = net_stats64.rx_frame_errors; + buf[bi++] = net_stats64.tx_fifo_errors; /* Get netif_queue_stopped from stack */ bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev); diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index cb07d95e3..d954a97b0 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -304,7 +304,7 @@ static void macb_set_tx_clk(struct clk *clk, int 
speed, struct net_device *dev) static void macb_handle_link_change(struct net_device *dev) { struct macb *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; + struct phy_device *phydev = dev->phydev; unsigned long flags; int status_change = 0; @@ -414,7 +414,6 @@ static int macb_mii_probe(struct net_device *dev) bp->link = 0; bp->speed = 0; bp->duplex = -1; - bp->phy_dev = phydev; return 0; } @@ -1324,6 +1323,24 @@ dma_error: return 0; } +static inline int macb_clear_csum(struct sk_buff *skb) +{ + /* no change for packets without checksum offloading */ + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + /* make sure we can modify the header */ + if (unlikely(skb_cow_head(skb, 0))) + return -1; + + /* initialize checksum field + * This is required - at least for Zynq, which otherwise calculates + * wrong UDP header checksums for UDP packets with UDP data len <=2 + */ + *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; + return 0; +} + static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) { u16 queue_index = skb_get_queue_mapping(skb); @@ -1363,6 +1380,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } + if (macb_clear_csum(skb)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + /* Map socket buffer for DMA transfer */ if (!macb_tx_map(bp, queue, skb)) { dev_kfree_skb_any(skb); @@ -1886,7 +1908,7 @@ static int macb_open(struct net_device *dev) netif_carrier_off(dev); /* if the phy is not yet register, retry later*/ - if (!bp->phy_dev) + if (!dev->phydev) return -EAGAIN; /* RX buffers initialization */ @@ -1905,7 +1927,7 @@ static int macb_open(struct net_device *dev) macb_init_hw(bp); /* schedule a link state check */ - phy_start(bp->phy_dev); + phy_start(dev->phydev); netif_tx_start_all_queues(dev); @@ -1920,8 +1942,8 @@ static int macb_close(struct net_device *dev) netif_tx_stop_all_queues(dev); napi_disable(&bp->napi); - if (bp->phy_dev) - phy_stop(bp->phy_dev); + if (dev->phydev) + phy_stop(dev->phydev); spin_lock_irqsave(&bp->lock, flags); macb_reset_hw(bp); @@ -2092,28 +2114,6 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev) return nstat; } -static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct macb *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_gset(phydev, cmd); -} - -static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct macb *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_sset(phydev, cmd); -} - static int macb_get_regs_len(struct net_device *netdev) { return MACB_GREGS_NBR * sizeof(u32); @@ -2186,19 +2186,17 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) } static const struct ethtool_ops macb_ethtool_ops = { - .get_settings = macb_get_settings, - .set_settings = macb_set_settings, .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, .get_wol = macb_get_wol, .set_wol = macb_set_wol, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct ethtool_ops gem_ethtool_ops = { - .get_settings = macb_get_settings, - .set_settings = macb_set_settings, .get_regs_len = macb_get_regs_len, .get_regs = macb_get_regs, .get_link = ethtool_op_get_link, 
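The macb hunks above delete the driver-private bp->phy_dev in favor of the core's dev->phydev, which is what allows macb_get_settings()/macb_set_settings() to be removed outright: phylib's generic ksettings helpers already perform the NULL-PHY check those wrappers did. A minimal sketch of the resulting idiom (the foo_ name is a hypothetical placeholder, not code from this diff):

#include <linux/ethtool.h>
#include <linux/phy.h>	/* phy_ethtool_{get,set}_link_ksettings() */

static const struct ethtool_ops foo_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	/* Both helpers operate on dev->phydev directly and return
	 * -ENODEV when no PHY is attached, so no driver wrapper or
	 * private PHY pointer is needed. */
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

The macb.h hunk below, which removes struct phy_device *phy_dev, is the other half of the same cleanup.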
@@ -2206,12 +2204,13 @@ static const struct ethtool_ops gem_ethtool_ops = { .get_ethtool_stats = gem_get_ethtool_stats, .get_strings = gem_get_ethtool_strings, .get_sset_count = gem_get_sset_count, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct macb *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; + struct phy_device *phydev = dev->phydev; if (!netif_running(dev)) return -EINVAL; @@ -2570,7 +2569,7 @@ static int at91ether_open(struct net_device *dev) MACB_BIT(HRESP)); /* schedule a link state check */ - phy_start(lp->phy_dev); + phy_start(dev->phydev); netif_start_queue(dev); @@ -3010,7 +3009,7 @@ static int macb_probe(struct platform_device *pdev) if (err) goto err_out_free_netdev; - phydev = bp->phy_dev; + phydev = dev->phydev; netif_carrier_off(dev); @@ -3029,7 +3028,7 @@ static int macb_probe(struct platform_device *pdev) return 0; err_out_unregister_mdio: - phy_disconnect(bp->phy_dev); + phy_disconnect(dev->phydev); mdiobus_unregister(bp->mii_bus); mdiobus_free(bp->mii_bus); @@ -3057,8 +3056,8 @@ static int macb_remove(struct platform_device *pdev) if (dev) { bp = netdev_priv(dev); - if (bp->phy_dev) - phy_disconnect(bp->phy_dev); + if (dev->phydev) + phy_disconnect(dev->phydev); mdiobus_unregister(bp->mii_bus); mdiobus_free(bp->mii_bus); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 644743c9c..b6fcf1062 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -823,7 +823,6 @@ struct macb { struct macb_or_gem_ops macbgem_ops; struct mii_bus *mii_bus; - struct phy_device *phy_dev; int link; int speed; int duplex; diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c index 8ad7425f8..c03d37016 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c @@ -19,26 +19,16 @@ * This file may also be available under a different license from Cavium. * Contact Cavium, Inc. 
for more information **********************************************************************/ -#include <linux/version.h> -#include <linux/types.h> -#include <linux/list.h> -#include <linux/interrupt.h> #include <linux/pci.h> -#include <linux/kthread.h> #include <linux/netdevice.h> -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" #include "response_manager.h" #include "octeon_device.h" -#include "octeon_nic.h" #include "octeon_main.h" -#include "octeon_network.h" #include "cn66xx_regs.h" #include "cn66xx_device.h" -#include "liquidio_image.h" -#include "octeon_mem_ops.h" int lio_cn6xxx_soft_reset(struct octeon_device *oct) { @@ -74,9 +64,9 @@ void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct) u32 val; pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val); - if (val & 0x000f0000) { + if (val & 0x000c0000) { dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n", - val & 0x000f0000); + val & 0x000c0000); } val |= 0xf; /* Enable Link error reporting */ @@ -229,7 +219,7 @@ void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct) /* / Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0); - /* / Select ES,RO,NS setting from register for Output Queue Packet + /* Select ES, RO, NS setting from register for Output Queue Packet * Address */ octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF); @@ -367,7 +357,8 @@ void lio_cn6xxx_enable_io_queues(struct octeon_device *oct) void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) { - u32 mask, i, loop = HZ; + int i; + u32 mask, loop = HZ; u32 d32; /* Reset the Enable bits for Input Queues. */ @@ -376,7 +367,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask); /* Wait until hardware indicates that the queues are out of reset. */ - mask = oct->io_qmask.iq; + mask = (u32)oct->io_qmask.iq; d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ); while (((d32 & mask) != mask) && loop--) { d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ); @@ -384,8 +375,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) } /* Reset the doorbell register for each Input queue. */ - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { - if (!(oct->io_qmask.iq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & (1ULL << i))) continue; octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF); d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i)); @@ -398,7 +389,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) /* Wait until hardware indicates that the queues are out of reset. */ loop = HZ; - mask = oct->io_qmask.oq; + mask = (u32)oct->io_qmask.oq; d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ); while (((d32 & mask) != mask) && loop--) { d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ); @@ -408,8 +399,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) /* Reset the doorbell register for each Output queue. 
*/ /* for (i = 0; i < oct->num_oqs; i++) { */ - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - if (!(oct->io_qmask.oq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & (1ULL << i))) continue; octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF); d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i)); @@ -429,16 +420,16 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) void lio_cn6xxx_reinit_regs(struct octeon_device *oct) { - u32 i; + int i; - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { - if (!(oct->io_qmask.iq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & (1ULL << i))) continue; oct->fn_list.setup_iq_regs(oct, i); } - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - if (!(oct->io_qmask.oq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & (1ULL << i))) continue; oct->fn_list.setup_oq_regs(oct, i); } @@ -450,8 +441,8 @@ void lio_cn6xxx_reinit_regs(struct octeon_device *oct) oct->fn_list.enable_io_queues(oct); /* for (i = 0; i < oct->num_oqs; i++) { */ - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - if (!(oct->io_qmask.oq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & (1ULL << i))) continue; writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg); } @@ -495,8 +486,7 @@ u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx) } u32 -lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)), - struct octeon_instr_queue *iq) +lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq) { u32 new_idx = readl(iq->inst_cnt_reg); @@ -547,17 +537,18 @@ static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct) dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port); } -void +static void lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64) { dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n", CVM_CAST64(intr64)); } -int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct) +static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct) { struct octeon_droq *droq; - u32 oq_no, pkt_count, droq_time_mask, droq_mask, droq_int_enb; + int oq_no; + u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb; u32 droq_cnt_enb, droq_cnt_mask; droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB); @@ -573,12 +564,12 @@ int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct) oct->droq_intr = 0; /* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */ - for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) { - if (!(droq_mask & (1 << oq_no))) + for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) { + if (!(droq_mask & (1ULL << oq_no))) continue; droq = oct->droq[oq_no]; - pkt_count = octeon_droq_check_hw_for_pkts(oct, droq); + pkt_count = octeon_droq_check_hw_for_pkts(droq); if (pkt_count) { oct->droq_intr |= (1ULL << oq_no); if (droq->ops.poll_mode) { diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h index f77918779..28c472242 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h @@ -82,8 +82,6 @@ void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no); void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no); void lio_cn6xxx_enable_io_queues(struct octeon_device *oct); void 
lio_cn6xxx_disable_io_queues(struct octeon_device *oct); -void lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64); -int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct); irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev); void lio_cn6xxx_reinit_regs(struct octeon_device *oct); void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr, @@ -91,8 +89,7 @@ void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr, void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask); u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx); u32 -lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)), - struct octeon_instr_queue *iq); +lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq); void lio_cn6xxx_enable_interrupt(void *chip); void lio_cn6xxx_disable_interrupt(void *chip); void cn6xxx_get_pcie_qlmport(struct octeon_device *oct); diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c index 8e830d0c0..29755bc68 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c @@ -19,28 +19,17 @@ * This file may also be available under a different license from Cavium. * Contact Cavium, Inc. for more information **********************************************************************/ -#include <linux/version.h> -#include <linux/types.h> -#include <linux/list.h> -#include <linux/interrupt.h> #include <linux/pci.h> -#include <linux/kthread.h> #include <linux/netdevice.h> -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" #include "response_manager.h" #include "octeon_device.h" -#include "octeon_nic.h" #include "octeon_main.h" -#include "octeon_network.h" #include "cn66xx_regs.h" #include "cn66xx_device.h" #include "cn68xx_regs.h" -#include "cn68xx_device.h" -#include "liquidio_image.h" -#include "octeon_mem_ops.h" static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct) { @@ -129,7 +118,7 @@ static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct) pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val); } -int lio_is_210nv(struct octeon_device *oct) +static int lio_is_210nv(struct octeon_device *oct) { u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG); diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h index d4e1c9fb0..ea7bdcce6 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h @@ -28,6 +28,5 @@ #define __CN68XX_DEVICE_H__ int lio_setup_cn68xx_octeon_device(struct octeon_device *oct); -int lio_is_210nv(struct octeon_device *oct); #endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h index 38cddbd10..d45a0f4aa 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h +++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h @@ -29,7 +29,6 @@ #ifndef __CN68XX_REGS_H__ #define __CN68XX_REGS_H__ -#include "cn66xx_regs.h" /*###################### REQUEST QUEUE #########################*/ diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 245c063ed..289eb8907 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ 
-19,13 +19,9 @@ * This file may also be available under a different license from Cavium. * Contact Cavium, Inc. for more information **********************************************************************/ -#include <linux/version.h> #include <linux/netdevice.h> #include <linux/net_tstamp.h> -#include <linux/ethtool.h> -#include <linux/dma-mapping.h> #include <linux/pci.h> -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" @@ -36,9 +32,8 @@ #include "octeon_network.h" #include "cn66xx_regs.h" #include "cn66xx_device.h" -#include "cn68xx_regs.h" -#include "cn68xx_device.h" -#include "liquidio_image.h" + +static int octnet_get_link_stats(struct net_device *netdev); struct oct_mdio_cmd_context { int octeon_id; @@ -71,34 +66,126 @@ enum { INTERFACE_MODE_RXAUI, INTERFACE_MODE_QSGMII, INTERFACE_MODE_AGL, + INTERFACE_MODE_XLAUI, + INTERFACE_MODE_XFI, + INTERFACE_MODE_10G_KR, + INTERFACE_MODE_40G_KR4, + INTERFACE_MODE_MIXED, }; #define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0])) #define OCT_ETHTOOL_REGDUMP_LEN 4096 #define OCT_ETHTOOL_REGSVER 1 +/* statistics of PF */ +static const char oct_stats_strings[][ETH_GSTRING_LEN] = { + "rx_packets", + "tx_packets", + "rx_bytes", + "tx_bytes", + "rx_errors", /*jabber_err+l2_err+frame_err */ + "tx_errors", /*fw_err_pko+fw_err_link+fw_err_drop */ + "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd + *+st->fromwire.dmac_drop + st->fromwire.fw_err_drop + */ + "tx_dropped", + + "tx_total_sent", + "tx_total_fwd", + "tx_err_pko", + "tx_err_link", + "tx_err_drop", + + "tx_tso", + "tx_tso_packets", + "tx_tso_err", + "tx_vxlan", + + "mac_tx_total_pkts", + "mac_tx_total_bytes", + "mac_tx_mcast_pkts", + "mac_tx_bcast_pkts", + "mac_tx_ctl_packets", /*oct->link_stats.fromhost.ctl_sent */ + "mac_tx_total_collisions", + "mac_tx_one_collision", + "mac_tx_multi_collison", + "mac_tx_max_collision_fail", + "mac_tx_max_deferal_fail", + "mac_tx_fifo_err", + "mac_tx_runts", + + "rx_total_rcvd", + "rx_total_fwd", + "rx_jabber_err", + "rx_l2_err", + "rx_frame_err", + "rx_err_pko", + "rx_err_link", + "rx_err_drop", + + "rx_vxlan", + "rx_vxlan_err", + + "rx_lro_pkts", + "rx_lro_bytes", + "rx_total_lro", + + "rx_lro_aborts", + "rx_lro_aborts_port", + "rx_lro_aborts_seq", + "rx_lro_aborts_tsval", + "rx_lro_aborts_timer", + "rx_fwd_rate", + + "mac_rx_total_rcvd", + "mac_rx_bytes", + "mac_rx_total_bcst", + "mac_rx_total_mcst", + "mac_rx_runts", + "mac_rx_ctl_packets", + "mac_rx_fifo_err", + "mac_rx_dma_drop", + "mac_rx_fcs_err", + + "link_state_changes", +}; + +/* statistics of host tx queue */ static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { - "Instr posted", - "Instr processed", - "Instr dropped", - "Bytes Sent", - "Sgentry_sent", - "Inst cntreg", - "Tx done", - "Tx Iq busy", - "Tx dropped", - "Tx bytes", + "packets", /*oct->instr_queue[iq_no]->stats.tx_done*/ + "bytes", /*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/ + "dropped", + "iq_busy", + "sgentry_sent", + + "fw_instr_posted", + "fw_instr_processed", + "fw_instr_dropped", + "fw_bytes_sent", + + "tso", + "vxlan", + "txq_restart", }; +/* statistics of host rx queue */ static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = { - "OQ Pkts Received", - "OQ Bytes Received", - "Dropped no dispatch", - "Dropped nomem", - "Dropped toomany", - "Stack RX cnt", - "Stack RX Bytes", - "RX dropped", + "packets", /*oct->droq[oq_no]->stats.rx_pkts_received */ + "bytes", /*oct->droq[oq_no]->stats.rx_bytes_received */ + "dropped", 
/*oct->droq[oq_no]->stats.rx_dropped+ + *oct->droq[oq_no]->stats.dropped_nodispatch+ + *oct->droq[oq_no]->stats.dropped_toomany+ + *oct->droq[oq_no]->stats.dropped_nomem + */ + "dropped_nomem", + "dropped_toomany", + "fw_dropped", + "fw_pkts_received", + "fw_bytes_received", + "fw_dropped_nodispatch", + + "vxlan", + "buffer_alloc_failure", }; #define OCTNIC_NCMD_AUTONEG_ON 0x1 @@ -112,8 +199,9 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) linfo = &lio->linfo; - if (linfo->link.s.interface == INTERFACE_MODE_XAUI || - linfo->link.s.interface == INTERFACE_MODE_RXAUI) { + if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || + linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || + linfo->link.s.if_mode == INTERFACE_MODE_XFI) { ecmd->port = PORT_FIBRE; ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE | @@ -124,10 +212,11 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->autoneg = AUTONEG_DISABLE; } else { - dev_err(&oct->pci_dev->dev, "Unknown link interface reported\n"); + dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n", + linfo->link.s.if_mode); } - if (linfo->link.s.status) { + if (linfo->link.s.link_up) { ethtool_cmd_speed_set(ecmd, linfo->link.s.speed); ecmd->duplex = linfo->link.s.duplex; } else { @@ -222,23 +311,20 @@ static int octnet_gpio_access(struct net_device *netdev, int addr, int val) struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; - struct octnic_ctrl_params nparams; int ret = 0; memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS; - nctrl.ncmd.s.param1 = lio->linfo.ifidx; - nctrl.ncmd.s.param2 = addr; - nctrl.ncmd.s.param3 = val; + nctrl.ncmd.s.param1 = addr; + nctrl.ncmd.s.param2 = val; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - nparams.resp_order = OCTEON_RESP_ORDERED; - - ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); return -EINVAL; @@ -253,20 +339,18 @@ static void octnet_mdio_resp_callback(struct octeon_device *oct, u32 status, void *buf) { - struct oct_mdio_cmd_resp *mdio_cmd_rsp; struct oct_mdio_cmd_context *mdio_cmd_ctx; struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr; mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr; oct = lio_get_device(mdio_cmd_ctx->octeon_id); if (status) { dev_err(&oct->pci_dev->dev, "MIDO instruction failed. 
Status: %llx\n", CVM_CAST64(status)); - ACCESS_ONCE(mdio_cmd_ctx->cond) = -1; + WRITE_ONCE(mdio_cmd_ctx->cond, -1); } else { - ACCESS_ONCE(mdio_cmd_ctx->cond) = 1; + WRITE_ONCE(mdio_cmd_ctx->cond, 1); } wake_up_interruptible(&mdio_cmd_ctx->wc); } @@ -297,15 +381,16 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr; mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr; - ACCESS_ONCE(mdio_cmd_ctx->cond) = 0; + WRITE_ONCE(mdio_cmd_ctx->cond, 0); mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev); mdio_cmd->op = op; mdio_cmd->mdio_addr = loc; if (op) mdio_cmd->value1 = *value; - mdio_cmd->value2 = lio->linfo.ifidx; octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8); + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45, 0, 0, 0); @@ -317,7 +402,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) retval = octeon_send_soft_command(oct_dev, sc); - if (retval) { + if (retval == IQ_SEND_FAILED) { dev_err(&oct_dev->pci_dev->dev, "octnet_mdio45_access instruction failed status: %x\n", retval); @@ -335,7 +420,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp), sizeof(struct oct_mdio_cmd) / 8); - if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) { + if (READ_ONCE(mdio_cmd_ctx->cond) == 1) { if (!op) *value = mdio_cmd_rsp->resp.value1; } else { @@ -379,18 +464,16 @@ static int lio_set_phys_id(struct net_device *netdev, /* Configure Beacon values */ value = LIO68XX_LED_BEACON_CFGON; - ret = - octnet_mdio45_access(lio, 1, - LIO68XX_LED_BEACON_ADDR, - &value); + ret = octnet_mdio45_access(lio, 1, + LIO68XX_LED_BEACON_ADDR, + &value); if (ret) return ret; value = LIO68XX_LED_CTRL_CFGON; - ret = - octnet_mdio45_access(lio, 1, - LIO68XX_LED_CTRL_ADDR, - &value); + ret = octnet_mdio45_access(lio, 1, + LIO68XX_LED_CTRL_ADDR, + &value); if (ret) return ret; } else { @@ -469,7 +552,7 @@ lio_ethtool_get_ringparam(struct net_device *netdev, tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx); } - if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) { + if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) { ering->rx_pending = 0; ering->rx_max_pending = 0; ering->rx_mini_pending = 0; @@ -503,10 +586,10 @@ static void lio_set_msglevel(struct net_device *netdev, u32 msglvl) if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) { if (msglvl & NETIF_MSG_HW) liquidio_set_feature(netdev, - OCTNET_CMD_VERBOSE_ENABLE); + OCTNET_CMD_VERBOSE_ENABLE, 0); else liquidio_set_feature(netdev, - OCTNET_CMD_VERBOSE_DISABLE); + OCTNET_CMD_VERBOSE_DISABLE, 0); } lio->msg_enable = msglvl; @@ -518,61 +601,279 @@ lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) /* Notes: Not supporting any auto negotiation in these * drivers. Just report pause frame support. */ - pause->tx_pause = 1; - pause->rx_pause = 1; /* TODO: Need to support RX pause frame!!. 
*/ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + pause->autoneg = 0; + + pause->tx_pause = oct->tx_pause; + pause->rx_pause = oct->rx_pause; } static void lio_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) + struct ethtool_stats *stats __attribute__((unused)), + u64 *data) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct_dev = lio->oct_dev; + struct net_device_stats *netstats = &netdev->stats; int i = 0, j; - for (j = 0; j < MAX_OCTEON_INSTR_QUEUES; j++) { - if (!(oct_dev->io_qmask.iq & (1UL << j))) + netdev->netdev_ops->ndo_get_stats(netdev); + octnet_get_link_stats(netdev); + + /*sum of oct->droq[oq_no]->stats->rx_pkts_received */ + data[i++] = CVM_CAST64(netstats->rx_packets); + /*sum of oct->instr_queue[iq_no]->stats.tx_done */ + data[i++] = CVM_CAST64(netstats->tx_packets); + /*sum of oct->droq[oq_no]->stats->rx_bytes_received */ + data[i++] = CVM_CAST64(netstats->rx_bytes); + /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ + data[i++] = CVM_CAST64(netstats->tx_bytes); + data[i++] = CVM_CAST64(netstats->rx_errors); + data[i++] = CVM_CAST64(netstats->tx_errors); + /*sum of oct->droq[oq_no]->stats->rx_dropped + + *oct->droq[oq_no]->stats->dropped_nodispatch + + *oct->droq[oq_no]->stats->dropped_toomany + + *oct->droq[oq_no]->stats->dropped_nomem + */ + data[i++] = CVM_CAST64(netstats->rx_dropped); + /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */ + data[i++] = CVM_CAST64(netstats->tx_dropped); + + /*data[i++] = CVM_CAST64(stats->multicast); */ + /*data[i++] = CVM_CAST64(stats->collisions); */ + + /* firmware tx stats */ + /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx]. + *fromhost.fw_total_sent + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent); + /*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd); + /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko); + /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link); + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. + *fw_err_drop + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop); + + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso); + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. + *fw_tso_fwd + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd); + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. + *fw_err_tso + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso); + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. 
+ *fw_tx_vxlan + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan); + + /* mac tx statistics */ + /*CVMX_BGXX_CMRX_TX_STAT5 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent); + /*CVMX_BGXX_CMRX_TX_STAT4 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent); + /*CVMX_BGXX_CMRX_TX_STAT15 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent); + /*CVMX_BGXX_CMRX_TX_STAT14 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent); + /*CVMX_BGXX_CMRX_TX_STAT17 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent); + /*CVMX_BGXX_CMRX_TX_STAT0 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions); + /*CVMX_BGXX_CMRX_TX_STAT3 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent); + /*CVMX_BGXX_CMRX_TX_STAT2 */ + data[i++] = + CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent); + /*CVMX_BGXX_CMRX_TX_STAT0 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail); + /*CVMX_BGXX_CMRX_TX_STAT1 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail); + /*CVMX_BGXX_CMRX_TX_STAT16 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err); + /*CVMX_BGXX_CMRX_TX_STAT6 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts); + + /* RX firmware stats */ + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_total_rcvd + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_total_fwd + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd); + /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err); + /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err); + /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_err_pko + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko); + /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link); + /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. + *fromwire.fw_err_drop + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop); + + /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. + *fromwire.fw_rx_vxlan + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan); + /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. + *fromwire.fw_rx_vxlan_err + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err); + + /* LRO */ + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_pkts + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_octs + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs); + /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro); + /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 
+ *fw_lro_aborts_port
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_seq
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_tsval
+ */
+ data[i++] =
+ CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_timer
+ */
+ data[i++] =
+ CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
+ /* intrmod: packet forward rate */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);
+
+ /* mac: link-level stats */
+ /*CVMX_BGXX_CMRX_RX_STAT0 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
+ /*CVMX_BGXX_CMRX_RX_STAT1 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
+ /*CVMX_PKI_STATX_STAT5 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
+ /*CVMX_PKI_STATX_STAT5 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
+ /*wqe->word2.err_code or wqe->word2.err_level */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
+ /*CVMX_BGXX_CMRX_RX_STAT2 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
+ /*CVMX_BGXX_CMRX_RX_STAT6 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
+ /*CVMX_BGXX_CMRX_RX_STAT4 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
+ /*wqe->word2.err_code or wqe->word2.err_level */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
+ /*lio->link_changes*/
+ data[i++] = CVM_CAST64(lio->link_changes);
+
+ /* TX -- lio_update_stats(lio); */
+ for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
+ if (!(oct_dev->io_qmask.iq & (1ULL << j)))
 continue;
+ /*packets to network port*/
+ /*# of packets tx to network */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
+ /*# of bytes tx to network */
 data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
- data[i++] =
- CVM_CAST64(
- oct_dev->instr_queue[j]->stats.instr_processed);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
+ /*# of packets dropped */
 data[i++] =
- CVM_CAST64(
- oct_dev->instr_queue[j]->stats.instr_dropped);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
+ /*# of tx fails due to queue full */
 data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
+ /*XXX gather entries sent */
 data[i++] =
 CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
+
+ /*instruction to firmware: data and control */
+ /*# of instructions to the queue */
 data[i++] =
- readl(oct_dev->instr_queue[j]->inst_cnt_reg);
- data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
- data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
+ /*# of instructions processed */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
+ stats.instr_processed);
+ /*# of instructions could not be processed */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
+ stats.instr_dropped);
+ /*bytes sent through the queue */
 data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
+
+ /*tso request*/
+ data[i++] =
 CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
+ /*vxlan request*/
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
+ /*txq restart*/
 data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
 }
- /* for (j = 0; j < oct_dev->num_oqs; j++){ */
- for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES; j++) {
- if (!(oct_dev->io_qmask.oq & (1UL << j)))
+ /* RX */
+ /* for (j = 0; j < oct_dev->num_oqs; j++) { */
+ for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
+ if (!(oct_dev->io_qmask.oq & (1ULL << j)))
 continue;
- data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
- data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
- data[i++] =
- CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
- data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
- data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
+
+ /*packets sent to TCP/IP network stack */
+ /*# of packets to network stack */
 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
+ /*# of bytes to network stack */
 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
+ /*# of packets dropped */
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
+ oct_dev->droq[j]->stats.dropped_toomany +
+ oct_dev->droq[j]->stats.rx_dropped);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
 data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
+
+ /*control and data path*/
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
+
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
 }
 }
@@ -581,26 +882,43 @@ static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 {
 struct lio *lio = GET_LIO(netdev);
 struct octeon_device *oct_dev = lio->oct_dev;
 int num_iq_stats, num_oq_stats, i, j;
+ int num_stats;
- num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- if (!(oct_dev->io_qmask.iq & (1UL << i)))
- continue;
- for (j = 0; j < num_iq_stats; j++) {
- sprintf(data, "IQ%d %s", i, oct_iq_stats_strings[j]);
+ switch (stringset) {
+ case ETH_SS_STATS:
+ num_stats = ARRAY_SIZE(oct_stats_strings);
+ for (j = 0; j < num_stats; j++) {
+ sprintf(data, "%s", oct_stats_strings[j]);
 data += ETH_GSTRING_LEN;
 }
- }
- num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
- /* for (i = 0; i < oct_dev->num_oqs; i++) { */
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct_dev->io_qmask.oq & (1UL << i)))
- continue;
- for (j = 0; j < num_oq_stats; j++) {
- sprintf(data, "OQ%d %s", i, oct_droq_stats_strings[j]);
- data += ETH_GSTRING_LEN;
+ num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
+ if (!(oct_dev->io_qmask.iq & (1ULL << i)))
+ continue;
+ for (j = 0; j < num_iq_stats; j++) {
+ sprintf(data, "tx-%d-%s", i,
+ oct_iq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
 }
+
+ num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
+ /* for (i = 0; i < oct_dev->num_oqs; i++) { */
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
+ if (!(oct_dev->io_qmask.oq & (1ULL << i)))
+ continue;
+ for (j = 0; j < num_oq_stats; j++) {
+ sprintf(data, "rx-%d-%s", i,
+
oct_droq_stats_strings[j]); + data += ETH_GSTRING_LEN; + } + } + break; + + default: + netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n"); + break; } } @@ -609,8 +927,14 @@ static int lio_get_sset_count(struct net_device *netdev, int sset) struct lio *lio = GET_LIO(netdev); struct octeon_device *oct_dev = lio->oct_dev; - return (ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs) + - (ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); + switch (sset) { + case ETH_SS_STATS: + return (ARRAY_SIZE(oct_stats_strings) + + ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs + + ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); + default: + return -EOPNOTSUPP; + } } static int lio_get_intr_coalesce(struct net_device *netdev, @@ -618,50 +942,49 @@ static int lio_get_intr_coalesce(struct net_device *netdev, { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; - struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; struct octeon_instr_queue *iq; struct oct_intrmod_cfg *intrmod_cfg; intrmod_cfg = &oct->intrmod; switch (oct->chip_id) { - /* case OCTEON_CN73XX: Todo */ - /* break; */ case OCTEON_CN68XX: - case OCTEON_CN66XX: - if (!intrmod_cfg->intrmod_enable) { + case OCTEON_CN66XX: { + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; + + if (!intrmod_cfg->rx_enable) { intr_coal->rx_coalesce_usecs = CFG_GET_OQ_INTR_TIME(cn6xxx->conf); intr_coal->rx_max_coalesced_frames = CFG_GET_OQ_INTR_PKT(cn6xxx->conf); - } else { - intr_coal->use_adaptive_rx_coalesce = - intrmod_cfg->intrmod_enable; - intr_coal->rate_sample_interval = - intrmod_cfg->intrmod_check_intrvl; - intr_coal->pkt_rate_high = - intrmod_cfg->intrmod_maxpkt_ratethr; - intr_coal->pkt_rate_low = - intrmod_cfg->intrmod_minpkt_ratethr; - intr_coal->rx_max_coalesced_frames_high = - intrmod_cfg->intrmod_maxcnt_trigger; - intr_coal->rx_coalesce_usecs_high = - intrmod_cfg->intrmod_maxtmr_trigger; - intr_coal->rx_coalesce_usecs_low = - intrmod_cfg->intrmod_mintmr_trigger; - intr_coal->rx_max_coalesced_frames_low = - intrmod_cfg->intrmod_mincnt_trigger; } - - iq = oct->instr_queue[lio->linfo.txpciq[0]]; + iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no]; intr_coal->tx_max_coalesced_frames = iq->fill_threshold; break; - + } default: netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); return -EINVAL; } - + if (intrmod_cfg->rx_enable) { + intr_coal->use_adaptive_rx_coalesce = + intrmod_cfg->rx_enable; + intr_coal->rate_sample_interval = + intrmod_cfg->check_intrvl; + intr_coal->pkt_rate_high = + intrmod_cfg->maxpkt_ratethr; + intr_coal->pkt_rate_low = + intrmod_cfg->minpkt_ratethr; + intr_coal->rx_max_coalesced_frames_high = + intrmod_cfg->rx_maxcnt_trigger; + intr_coal->rx_coalesce_usecs_high = + intrmod_cfg->rx_maxtmr_trigger; + intr_coal->rx_coalesce_usecs_low = + intrmod_cfg->rx_mintmr_trigger; + intr_coal->rx_max_coalesced_frames_low = + intrmod_cfg->rx_mincnt_trigger; + } return 0; } @@ -681,19 +1004,20 @@ static void octnet_intrmod_callback(struct octeon_device *oct_dev, else dev_info(&oct_dev->pci_dev->dev, "Rx-Adaptive Interrupt moderation enabled:%llx\n", - oct_dev->intrmod.intrmod_enable); + oct_dev->intrmod.rx_enable); octeon_free_soft_command(oct_dev, sc); } /* Configure interrupt moderation parameters */ -static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg) +static int octnet_set_intrmod_cfg(struct lio *lio, + struct oct_intrmod_cfg *intr_cfg) { struct octeon_soft_command *sc; struct oct_intrmod_cmd *cmd; struct oct_intrmod_cfg *cfg; 
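A note on the reworked ETH_SS_STATS plumbing above: it is a three-way contract. The count returned by .get_sset_count sizes the buffers that the ethtool core hands to .get_strings and .get_ethtool_stats, so all three callbacks must enumerate the same items in the same order, which is why the string loops mirror the data[i++] loops exactly. The pairing reduced to a sketch (demo_* names are illustrative, not the driver's):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const char demo_stat_names[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "tx_restart",
};

static int demo_get_sset_count(struct net_device *dev, int sset)
{
	/* The core allocates count * ETH_GSTRING_LEN bytes of strings
	 * and count u64 value slots from this number alone.
	 */
	return sset == ETH_SS_STATS ? ARRAY_SIZE(demo_stat_names)
				    : -EOPNOTSUPP;
}

static void demo_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	if (sset == ETH_SS_STATS)
		memcpy(data, demo_stat_names, sizeof(demo_stat_names));
}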
 int retval;
- struct octeon_device *oct_dev = (struct octeon_device *)oct;
+ struct octeon_device *oct_dev = lio->oct_dev;
 /* Alloc soft command */
 sc = (struct octeon_soft_command *)
@@ -714,6 +1038,8 @@ static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
 cmd->cfg = cfg;
 cmd->oct_dev = oct_dev;
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
 octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
 OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
@@ -722,17 +1048,171 @@ static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
 sc->wait_time = 1000;
 retval = octeon_send_soft_command(oct_dev, sc);
- if (retval) {
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct_dev, sc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+octnet_nic_stats_callback(struct octeon_device *oct_dev,
+ u32 status, void *ptr)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
+ struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *)
+ sc->virtrptr;
+ struct oct_nic_stats_ctrl *ctrl = (struct oct_nic_stats_ctrl *)
+ sc->ctxptr;
+ struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
+ struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
+
+ struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
+ struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
+
+ if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
+ octeon_swap_8B_data((u64 *)&resp->stats,
+ (sizeof(struct oct_link_stats)) >> 3);
+
+ /* RX link-level stats */
+ rstats->total_rcvd = rsp_rstats->total_rcvd;
+ rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
+ rstats->total_bcst = rsp_rstats->total_bcst;
+ rstats->total_mcst = rsp_rstats->total_mcst;
+ rstats->runts = rsp_rstats->runts;
+ rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
+ /* Accounts for over/under-run of buffers */
+ rstats->fifo_err = rsp_rstats->fifo_err;
+ rstats->dmac_drop = rsp_rstats->dmac_drop;
+ rstats->fcs_err = rsp_rstats->fcs_err;
+ rstats->jabber_err = rsp_rstats->jabber_err;
+ rstats->l2_err = rsp_rstats->l2_err;
+ rstats->frame_err = rsp_rstats->frame_err;
+
+ /* RX firmware stats */
+ rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
+ rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
+ rstats->fw_err_pko = rsp_rstats->fw_err_pko;
+ rstats->fw_err_link = rsp_rstats->fw_err_link;
+ rstats->fw_err_drop = rsp_rstats->fw_err_drop;
+ rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
+ rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
+
+ /* Number of packets that are LROed */
+ rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
+ /* Number of octets that are LROed */
+ rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
+ /* Number of LRO packets formed */
+ rstats->fw_total_lro = rsp_rstats->fw_total_lro;
+ /* Number of times LRO of packet aborted */
+ rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
+ rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
+ rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
+ rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
+ rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
+ /* intrmod: packet forward rate */
+ rstats->fwd_rate = rsp_rstats->fwd_rate;
+
+ /* TX link-level stats */
+ tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
+ tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
+ tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
+ tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
+ tstats->ctl_sent = rsp_tstats->ctl_sent;
+ /* Packets sent after one collision*/
+ tstats->one_collision_sent =
 rsp_tstats->one_collision_sent;
+ /* Packets sent after multiple collisions */
+ tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
+ /* Packets not sent due to max collisions */
+ tstats->max_collision_fail = rsp_tstats->max_collision_fail;
+ /* Packets not sent due to max deferrals */
+ tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
+ /* Accounts for over/under-run of buffers */
+ tstats->fifo_err = rsp_tstats->fifo_err;
+ tstats->runts = rsp_tstats->runts;
+ /* Total number of collisions detected */
+ tstats->total_collisions = rsp_tstats->total_collisions;
+
+ /* firmware stats */
+ tstats->fw_total_sent = rsp_tstats->fw_total_sent;
+ tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
+ tstats->fw_err_pko = rsp_tstats->fw_err_pko;
+ tstats->fw_err_link = rsp_tstats->fw_err_link;
+ tstats->fw_err_drop = rsp_tstats->fw_err_drop;
+ tstats->fw_tso = rsp_tstats->fw_tso;
+ tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
+ tstats->fw_err_tso = rsp_tstats->fw_err_tso;
+ tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
+
+ resp->status = 1;
+ } else {
+ resp->status = -1;
+ }
+ complete(&ctrl->complete);
+}
+
+/* Fetch per-port link-level and firmware statistics from the NIC */
+static int octnet_get_link_stats(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ struct octeon_soft_command *sc;
+ struct oct_nic_stats_ctrl *ctrl;
+ struct oct_nic_stats_resp *resp;
+
+ int retval;
+
+ /* Alloc soft command */
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ 0,
+ sizeof(struct oct_nic_stats_resp),
+ sizeof(struct octnic_ctrl_pkt));
+
+ if (!sc)
+ return -ENOMEM;
+
+ resp = (struct oct_nic_stats_resp *)sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_stats_resp));
+
+ ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
+ memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
+ ctrl->netdev = netdev;
+ init_completion(&ctrl->complete);
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_PORT_STATS, 0, 0, 0);
+
+ sc->callback = octnet_nic_stats_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 500; /* in milliseconds */
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct_dev, sc);
+ return -EINVAL;
+ }
+
+ wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
+
+ if (resp->status != 1) {
 octeon_free_soft_command(oct_dev, sc);
+ return -EINVAL;
 }
+ octeon_free_soft_command(oct_dev, sc);
+
 return 0;
 }
 /* Enable/Disable auto interrupt Moderation */
 static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
- *intr_coal, int adaptive)
+ *intr_coal)
 {
 int ret = 0;
 struct octeon_device *oct = lio->oct_dev;
@@ -740,59 +1220,73 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
 intrmod_cfg = &oct->intrmod;
- if (adaptive) {
+ if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) {
 if (intr_coal->rate_sample_interval)
- intrmod_cfg->intrmod_check_intrvl =
+ intrmod_cfg->check_intrvl =
 intr_coal->rate_sample_interval;
 else
- intrmod_cfg->intrmod_check_intrvl =
+ intrmod_cfg->check_intrvl =
 LIO_INTRMOD_CHECK_INTERVAL;
 if (intr_coal->pkt_rate_high)
- intrmod_cfg->intrmod_maxpkt_ratethr =
+ intrmod_cfg->maxpkt_ratethr =
 intr_coal->pkt_rate_high;
 else
- intrmod_cfg->intrmod_maxpkt_ratethr =
+ intrmod_cfg->maxpkt_ratethr =
 LIO_INTRMOD_MAXPKT_RATETHR;
 if (intr_coal->pkt_rate_low)
- intrmod_cfg->intrmod_minpkt_ratethr =
+
intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low; else - intrmod_cfg->intrmod_minpkt_ratethr = + intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR; - + } + if (oct->intrmod.rx_enable) { if (intr_coal->rx_max_coalesced_frames_high) - intrmod_cfg->intrmod_maxcnt_trigger = + intrmod_cfg->rx_maxcnt_trigger = intr_coal->rx_max_coalesced_frames_high; else - intrmod_cfg->intrmod_maxcnt_trigger = - LIO_INTRMOD_MAXCNT_TRIGGER; + intrmod_cfg->rx_maxcnt_trigger = + LIO_INTRMOD_RXMAXCNT_TRIGGER; if (intr_coal->rx_coalesce_usecs_high) - intrmod_cfg->intrmod_maxtmr_trigger = + intrmod_cfg->rx_maxtmr_trigger = intr_coal->rx_coalesce_usecs_high; else - intrmod_cfg->intrmod_maxtmr_trigger = - LIO_INTRMOD_MAXTMR_TRIGGER; + intrmod_cfg->rx_maxtmr_trigger = + LIO_INTRMOD_RXMAXTMR_TRIGGER; if (intr_coal->rx_coalesce_usecs_low) - intrmod_cfg->intrmod_mintmr_trigger = + intrmod_cfg->rx_mintmr_trigger = intr_coal->rx_coalesce_usecs_low; else - intrmod_cfg->intrmod_mintmr_trigger = - LIO_INTRMOD_MINTMR_TRIGGER; + intrmod_cfg->rx_mintmr_trigger = + LIO_INTRMOD_RXMINTMR_TRIGGER; if (intr_coal->rx_max_coalesced_frames_low) - intrmod_cfg->intrmod_mincnt_trigger = + intrmod_cfg->rx_mincnt_trigger = intr_coal->rx_max_coalesced_frames_low; else - intrmod_cfg->intrmod_mincnt_trigger = - LIO_INTRMOD_MINCNT_TRIGGER; + intrmod_cfg->rx_mincnt_trigger = + LIO_INTRMOD_RXMINCNT_TRIGGER; + } + if (oct->intrmod.tx_enable) { + if (intr_coal->tx_max_coalesced_frames_high) + intrmod_cfg->tx_maxcnt_trigger = + intr_coal->tx_max_coalesced_frames_high; + else + intrmod_cfg->tx_maxcnt_trigger = + LIO_INTRMOD_TXMAXCNT_TRIGGER; + if (intr_coal->tx_max_coalesced_frames_low) + intrmod_cfg->tx_mincnt_trigger = + intr_coal->tx_max_coalesced_frames_low; + else + intrmod_cfg->tx_mincnt_trigger = + LIO_INTRMOD_TXMINCNT_TRIGGER; } - intrmod_cfg->intrmod_enable = adaptive; - ret = octnet_set_intrmod_cfg(oct, intrmod_cfg); + ret = octnet_set_intrmod_cfg(lio, intrmod_cfg); return ret; } @@ -800,54 +1294,82 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce static int oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal) { - int ret; struct octeon_device *oct = lio->oct_dev; - struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; u32 rx_max_coalesced_frames; - if (!intr_coal->rx_max_coalesced_frames) - rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT; - else - rx_max_coalesced_frames = intr_coal->rx_max_coalesced_frames; - - /* Disable adaptive interrupt modulation */ - ret = oct_cfg_adaptive_intr(lio, intr_coal, 0); - if (ret) - return ret; - /* Config Cnt based interrupt values */ - octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, - rx_max_coalesced_frames); - CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames); + switch (oct->chip_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: { + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; + + if (!intr_coal->rx_max_coalesced_frames) + rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT; + else + rx_max_coalesced_frames = + intr_coal->rx_max_coalesced_frames; + octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, + rx_max_coalesced_frames); + CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames); + break; + } + default: + return -EINVAL; + } return 0; } static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce *intr_coal) { - int ret; struct octeon_device *oct = lio->oct_dev; - struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; u32 time_threshold, rx_coalesce_usecs; - if 
(!intr_coal->rx_coalesce_usecs) - rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME; - else - rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; + /* Config Time based interrupt values */ + switch (oct->chip_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: { + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; + if (!intr_coal->rx_coalesce_usecs) + rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME; + else + rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; - /* Disable adaptive interrupt modulation */ - ret = oct_cfg_adaptive_intr(lio, intr_coal, 0); - if (ret) - return ret; + time_threshold = lio_cn6xxx_get_oq_ticks(oct, + rx_coalesce_usecs); + octeon_write_csr(oct, + CN6XXX_SLI_OQ_INT_LEVEL_TIME, + time_threshold); - /* Config Time based interrupt values */ - time_threshold = lio_cn6xxx_get_oq_ticks(oct, rx_coalesce_usecs); - octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold); - CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs); + CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs); + break; + } + default: + return -EINVAL; + } return 0; } +static int +oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal + __attribute__((unused))) +{ + struct octeon_device *oct = lio->oct_dev; + + /* Config Cnt based interrupt values */ + switch (oct->chip_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: + break; + default: + return -EINVAL; + } + return 0; +} + static int lio_set_intr_coalesce(struct net_device *netdev, struct ethtool_coalesce *intr_coal) { @@ -855,59 +1377,48 @@ static int lio_set_intr_coalesce(struct net_device *netdev, int ret; struct octeon_device *oct = lio->oct_dev; u32 j, q_no; + int db_max, db_min; - if ((intr_coal->tx_max_coalesced_frames >= CN6XXX_DB_MIN) && - (intr_coal->tx_max_coalesced_frames <= CN6XXX_DB_MAX)) { - for (j = 0; j < lio->linfo.num_txpciq; j++) { - q_no = lio->linfo.txpciq[j]; - oct->instr_queue[q_no]->fill_threshold = - intr_coal->tx_max_coalesced_frames; + switch (oct->chip_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: + db_min = CN6XXX_DB_MIN; + db_max = CN6XXX_DB_MAX; + if ((intr_coal->tx_max_coalesced_frames >= db_min) && + (intr_coal->tx_max_coalesced_frames <= db_max)) { + for (j = 0; j < lio->linfo.num_txpciq; j++) { + q_no = lio->linfo.txpciq[j].s.q_no; + oct->instr_queue[q_no]->fill_threshold = + intr_coal->tx_max_coalesced_frames; + } + } else { + dev_err(&oct->pci_dev->dev, + "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n", + intr_coal->tx_max_coalesced_frames, db_min, + db_max); + return -EINVAL; } - } else { - dev_err(&oct->pci_dev->dev, - "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n", - intr_coal->tx_max_coalesced_frames, CN6XXX_DB_MIN, - CN6XXX_DB_MAX); + break; + default: return -EINVAL; } - /* User requested adaptive-rx on */ - if (intr_coal->use_adaptive_rx_coalesce) { - ret = oct_cfg_adaptive_intr(lio, intr_coal, 1); - if (ret) - goto ret_intrmod; - } + oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0; + oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 
1 : 0; - /* User requested adaptive-rx off and rx coalesce */ - if ((intr_coal->rx_coalesce_usecs) && - (!intr_coal->use_adaptive_rx_coalesce)) { + ret = oct_cfg_adaptive_intr(lio, intr_coal); + + if (!intr_coal->use_adaptive_rx_coalesce) { ret = oct_cfg_rx_intrtime(lio, intr_coal); if (ret) goto ret_intrmod; - } - /* User requested adaptive-rx off and rx coalesce */ - if ((intr_coal->rx_max_coalesced_frames) && - (!intr_coal->use_adaptive_rx_coalesce)) { ret = oct_cfg_rx_intrcnt(lio, intr_coal); if (ret) goto ret_intrmod; } - - /* User requested adaptive-rx off, so use default coalesce params */ - if ((!intr_coal->rx_max_coalesced_frames) && - (!intr_coal->use_adaptive_rx_coalesce) && - (!intr_coal->rx_coalesce_usecs)) { - dev_info(&oct->pci_dev->dev, - "Turning off adaptive-rx interrupt moderation\n"); - dev_info(&oct->pci_dev->dev, - "Using RX Coalesce Default values rx_coalesce_usecs:%d rx_max_coalesced_frames:%d\n", - CN6XXX_OQ_INTR_TIME, CN6XXX_OQ_INTR_PKT); - ret = oct_cfg_rx_intrtime(lio, intr_coal); - if (ret) - goto ret_intrmod; - - ret = oct_cfg_rx_intrcnt(lio, intr_coal); + if (!intr_coal->use_adaptive_tx_coalesce) { + ret = oct_cfg_tx_intrcnt(lio, intr_coal); if (ret) goto ret_intrmod; } @@ -923,23 +1434,28 @@ static int lio_get_ts_info(struct net_device *netdev, struct lio *lio = GET_LIO(netdev); info->so_timestamping = +#ifdef PTP_HARDWARE_TIMESTAMPING SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | +#endif SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; + SOF_TIMESTAMPING_SOFTWARE; if (lio->ptp_clock) info->phc_index = ptp_clock_index(lio->ptp_clock); else info->phc_index = -1; +#ifdef PTP_HARDWARE_TIMESTAMPING info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); +#endif return 0; } @@ -950,7 +1466,6 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) struct octeon_device *oct = lio->oct_dev; struct oct_link_info *linfo; struct octnic_ctrl_pkt nctrl; - struct octnic_ctrl_params nparams; int ret = 0; /* get the link info */ @@ -965,12 +1480,14 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->duplex != DUPLEX_FULL))) return -EINVAL; - /* Ethtool Support is not provided for XAUI and RXAUI Interfaces + /* Ethtool Support is not provided for XAUI, RXAUI, and XFI Interfaces * as they operate at fixed Speed and Duplex settings */ - if (linfo->link.s.interface == INTERFACE_MODE_XAUI || - linfo->link.s.interface == INTERFACE_MODE_RXAUI) { - dev_info(&oct->pci_dev->dev, "XAUI IFs settings cannot be modified.\n"); + if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || + linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || + linfo->link.s.if_mode == INTERFACE_MODE_XFI) { + dev_info(&oct->pci_dev->dev, + "Autonegotiation, duplex and speed settings cannot be modified.\n"); return -EINVAL; } @@ -978,9 +1495,9 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 1000; nctrl.netpndev = (u64)netdev; - nctrl.ncmd.s.param1 = lio->linfo.ifidx; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; /* Passing the parameters sent by ethtool like 
Speed, Autoneg & Duplex @@ -990,19 +1507,17 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) /* Autoneg ON */ nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON | OCTNIC_NCMD_AUTONEG_ON; - nctrl.ncmd.s.param2 = ecmd->advertising; + nctrl.ncmd.s.param1 = ecmd->advertising; } else { /* Autoneg OFF */ nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON; - nctrl.ncmd.s.param3 = ecmd->duplex; + nctrl.ncmd.s.param2 = ecmd->duplex; - nctrl.ncmd.s.param2 = ecmd->speed; + nctrl.ncmd.s.param1 = ecmd->speed; } - nparams.resp_order = OCTEON_RESP_ORDERED; - - ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { dev_err(&oct->pci_dev->dev, "Failed to set settings\n"); return -1; @@ -1026,7 +1541,7 @@ static int lio_nway_reset(struct net_device *netdev) } /* Return register dump len. */ -static int lio_get_regs_len(struct net_device *dev) +static int lio_get_regs_len(struct net_device *dev __attribute__((unused))) { return OCT_ETHTOOL_REGDUMP_LEN; } @@ -1170,13 +1685,12 @@ static void lio_get_regs(struct net_device *dev, int len = 0; struct octeon_device *oct = lio->oct_dev; - memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN); regs->version = OCT_ETHTOOL_REGSVER; switch (oct->chip_id) { - /* case OCTEON_CN73XX: Todo */ case OCTEON_CN68XX: case OCTEON_CN66XX: + memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN); len += cn6xxx_read_csr_reg(regbuf + len, oct); len += cn6xxx_read_config_reg(regbuf + len, oct); break; @@ -1186,6 +1700,23 @@ static void lio_get_regs(struct net_device *dev, } } +static u32 lio_get_priv_flags(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + return lio->oct_dev->priv_flags; +} + +static int lio_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct lio *lio = GET_LIO(netdev); + bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES)); + + lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES, + intr_by_tx_bytes); + return 0; +} + static const struct ethtool_ops lio_ethtool_ops = { .get_settings = lio_get_settings, .get_link = ethtool_op_get_link, @@ -1207,6 +1738,8 @@ static const struct ethtool_ops lio_ethtool_ops = { .set_settings = lio_set_settings, .get_coalesce = lio_get_intr_coalesce, .set_coalesce = lio_set_intr_coalesce, + .get_priv_flags = lio_get_priv_flags, + .set_priv_flags = lio_set_priv_flags, .get_ts_info = lio_get_ts_info, }; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 7708d6b14..f00b44bf9 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -20,24 +20,12 @@ * Contact Cavium, Inc. 
for more information **********************************************************************/ #include <linux/version.h> -#include <linux/module.h> -#include <linux/crc32.h> -#include <linux/dma-mapping.h> #include <linux/pci.h> -#include <linux/pci_ids.h> -#include <linux/ip.h> -#include <net/ip.h> -#include <linux/ipv6.h> #include <linux/net_tstamp.h> #include <linux/if_vlan.h> #include <linux/firmware.h> -#include <linux/ethtool.h> #include <linux/ptp_clock_kernel.h> -#include <linux/types.h> -#include <linux/list.h> -#include <linux/workqueue.h> -#include <linux/interrupt.h> -#include "octeon_config.h" +#include <net/vxlan.h> #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" @@ -48,7 +36,6 @@ #include "octeon_network.h" #include "cn66xx_regs.h" #include "cn66xx_device.h" -#include "cn68xx_regs.h" #include "cn68xx_device.h" #include "liquidio_image.h" @@ -70,6 +57,9 @@ MODULE_PARM_DESC(console_bitmask, #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) +#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \ + (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count) + static int debug = -1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); @@ -82,6 +72,8 @@ static int conf_type; module_param(conf_type, int, 0); MODULE_PARM_DESC(conf_type, "select octeon configuration 0 default 1 ovs"); +static int ptp_enable = 1; + /* Bit mask values for lio->ifstate */ #define LIO_IFSTATE_DROQ_OPS 0x01 #define LIO_IFSTATE_REGISTERED 0x02 @@ -164,6 +156,8 @@ struct octnic_gather { * received from the IP layer. */ struct octeon_sg_entry *sg; + + u64 sg_dma_ptr; }; /** This structure is used by NIC driver to store information required @@ -218,8 +212,8 @@ static void octeon_droq_bh(unsigned long pdev) (struct octeon_device_priv *)oct->priv; /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */ - for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES; q_no++) { - if (!(oct->io_qmask.oq & (1UL << q_no))) + for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) { + if (!(oct->io_qmask.oq & (1ULL << q_no))) continue; reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], MAX_PACKET_BUDGET); @@ -239,11 +233,10 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct) do { pending_pkts = 0; - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - if (!(oct->io_qmask.oq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & (1ULL << i))) continue; - pkt_cnt += octeon_droq_check_hw_for_pkts(oct, - oct->droq[i]); + pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); } if (pkt_cnt > 0) { pending_pkts += pkt_cnt; @@ -359,7 +352,7 @@ static int wait_for_pending_requests(struct octeon_device *oct) [OCTEON_ORDERED_SC_LIST].pending_req_count); if (pcount) schedule_timeout_uninterruptible(HZ / 10); - else + else break; } @@ -390,10 +383,10 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct) dev_err(&oct->pci_dev->dev, "There were pending requests\n"); /* Force all requests waiting to be fetched by OCTEON to complete. 
*/ - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { struct octeon_instr_queue *iq; - if (!(oct->io_qmask.iq & (1UL << i))) + if (!(oct->io_qmask.iq & (1ULL << i))) continue; iq = oct->instr_queue[i]; @@ -403,7 +396,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct) iq->octeon_read_index = iq->host_write_index; iq->stats.instr_processed += atomic_read(&iq->instr_pending); - lio_process_iq_request_list(oct, iq); + lio_process_iq_request_list(oct, iq, 0); spin_unlock_bh(&iq->lock); } } @@ -498,7 +491,8 @@ static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, * \brief mmio handler * @param pdev Pointer to PCI device */ -static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev) +static pci_ers_result_t liquidio_pcie_mmio_enabled( + struct pci_dev *pdev __attribute__((unused))) { /* We should never hit this since we never ask for a reset for a Fatal * Error. We always return DISCONNECT in io_error above. @@ -514,7 +508,8 @@ static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev) * Restart the card from scratch, as if from a cold-boot. Implementation * resembles the first-half of the octeon_resume routine. */ -static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev) +static pci_ers_result_t liquidio_pcie_slot_reset( + struct pci_dev *pdev __attribute__((unused))) { /* We should never hit this since we never ask for a reset for a Fatal * Error. We always return DISCONNECT in io_error above. @@ -531,7 +526,7 @@ static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev) * its OK to resume normal operation. Implementation resembles the * second-half of the octeon_resume routine. */ -static void liquidio_pcie_resume(struct pci_dev *pdev) +static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused))) { /* Nothing to be done here. 
*/ } @@ -542,7 +537,8 @@ static void liquidio_pcie_resume(struct pci_dev *pdev) * @param pdev Pointer to PCI device * @param state state to suspend to */ -static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state) +static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)), + pm_message_t state __attribute__((unused))) { return 0; } @@ -551,7 +547,7 @@ static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state) * \brief called when resuming * @param pdev Pointer to PCI device */ -static int liquidio_resume(struct pci_dev *pdev) +static int liquidio_resume(struct pci_dev *pdev __attribute__((unused))) { return 0; } @@ -676,12 +672,24 @@ static inline void txqs_start(struct net_device *netdev) */ static inline void txqs_wake(struct net_device *netdev) { + struct lio *lio = GET_LIO(netdev); + if (netif_is_multiqueue(netdev)) { int i; - for (i = 0; i < netdev->num_tx_queues; i++) - netif_wake_subqueue(netdev, i); + for (i = 0; i < netdev->num_tx_queues; i++) { + int qno = lio->linfo.txpciq[i % + (lio->linfo.num_txpciq)].s.q_no; + + if (__netif_subqueue_stopped(netdev, i)) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, + tx_restart, 1); + netif_wake_subqueue(netdev, i); + } + } } else { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, + tx_restart, 1); netif_wake_queue(netdev); } } @@ -703,7 +711,7 @@ static void start_txq(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); - if (lio->linfo.link.s.status) { + if (lio->linfo.link.s.link_up) { txqs_start(netdev); return; } @@ -750,16 +758,23 @@ static inline int check_txq_status(struct lio *lio) /* check each sub-queue state */ for (q = 0; q < numqs; q++) { - iq = lio->linfo.txpciq[q & (lio->linfo.num_txpciq - 1)]; + iq = lio->linfo.txpciq[q % + (lio->linfo.num_txpciq)].s.q_no; if (octnet_iq_is_full(lio->oct_dev, iq)) continue; - wake_q(lio->netdev, q); - ret_val++; + if (__netif_subqueue_stopped(lio->netdev, q)) { + wake_q(lio->netdev, q); + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, + tx_restart, 1); + ret_val++; + } } } else { if (octnet_iq_is_full(lio->oct_dev, lio->txq)) return 0; wake_q(lio->netdev, lio->txq); + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, + tx_restart, 1); ret_val = 1; } return ret_val; @@ -785,64 +800,116 @@ static inline struct list_head *list_delete_head(struct list_head *root) } /** - * \brief Delete gather list + * \brief Delete gather lists * @param lio per-network private data */ -static void delete_glist(struct lio *lio) +static void delete_glists(struct lio *lio) { struct octnic_gather *g; + int i; - do { - g = (struct octnic_gather *) - list_delete_head(&lio->glist); - if (g) { - if (g->sg) - kfree((void *)((unsigned long)g->sg - - g->adjust)); - kfree(g); - } - } while (g); + if (!lio->glist) + return; + + for (i = 0; i < lio->linfo.num_txpciq; i++) { + do { + g = (struct octnic_gather *) + list_delete_head(&lio->glist[i]); + if (g) { + if (g->sg) { + dma_unmap_single(&lio->oct_dev-> + pci_dev->dev, + g->sg_dma_ptr, + g->sg_size, + DMA_TO_DEVICE); + kfree((void *)((unsigned long)g->sg - + g->adjust)); + } + kfree(g); + } + } while (g); + } + + kfree((void *)lio->glist); } /** - * \brief Setup gather list + * \brief Setup gather lists * @param lio per-network private data */ -static int setup_glist(struct lio *lio) +static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) { - int i; + int i, j; struct octnic_gather *g; - INIT_LIST_HEAD(&lio->glist); + lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock), + GFP_KERNEL); + if 
(!lio->glist_lock) + return 1; - for (i = 0; i < lio->tx_qsize; i++) { - g = kzalloc(sizeof(*g), GFP_KERNEL); - if (!g) - break; + lio->glist = kcalloc(num_iqs, sizeof(*lio->glist), + GFP_KERNEL); + if (!lio->glist) { + kfree((void *)lio->glist_lock); + return 1; + } - g->sg_size = - ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); + for (i = 0; i < num_iqs; i++) { + int numa_node = cpu_to_node(i % num_online_cpus()); - g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL); - if (!g->sg) { - kfree(g); - break; + spin_lock_init(&lio->glist_lock[i]); + + INIT_LIST_HEAD(&lio->glist[i]); + + for (j = 0; j < lio->tx_qsize; j++) { + g = kzalloc_node(sizeof(*g), GFP_KERNEL, + numa_node); + if (!g) + g = kzalloc(sizeof(*g), GFP_KERNEL); + if (!g) + break; + + g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * + OCT_SG_ENTRY_SIZE); + + g->sg = kmalloc_node(g->sg_size + 8, + GFP_KERNEL, numa_node); + if (!g->sg) + g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL); + if (!g->sg) { + kfree(g); + break; + } + + /* The gather component should be aligned on 64-bit + * boundary + */ + if (((unsigned long)g->sg) & 7) { + g->adjust = 8 - (((unsigned long)g->sg) & 7); + g->sg = (struct octeon_sg_entry *) + ((unsigned long)g->sg + g->adjust); + } + g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev, + g->sg, g->sg_size, + DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, + g->sg_dma_ptr)) { + kfree((void *)((unsigned long)g->sg - + g->adjust)); + kfree(g); + break; + } + + list_add_tail(&g->list, &lio->glist[i]); } - /* The gather component should be aligned on 64-bit boundary */ - if (((unsigned long)g->sg) & 7) { - g->adjust = 8 - (((unsigned long)g->sg) & 7); - g->sg = (struct octeon_sg_entry *) - ((unsigned long)g->sg + g->adjust); + if (j != lio->tx_qsize) { + delete_glists(lio); + return 1; } - list_add_tail(&g->list, &lio->glist); } - if (i == lio->tx_qsize) - return 0; - - delete_glist(lio); - return 1; + return 0; } /** @@ -856,7 +923,7 @@ static void print_link_info(struct net_device *netdev) if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) { struct oct_link_info *linfo = &lio->linfo; - if (linfo->link.s.status) { + if (linfo->link.s.link_up) { netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", linfo->link.s.speed, (linfo->link.s.duplex) ? "Full" : "Half"); @@ -878,13 +945,15 @@ static inline void update_link_status(struct net_device *netdev, union oct_link_status *ls) { struct lio *lio = GET_LIO(netdev); + int changed = (lio->linfo.link.u64 != ls->u64); - if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { - lio->linfo.link.u64 = ls->u64; + lio->linfo.link.u64 = ls->u64; + if ((lio->intf_open) && (changed)) { print_link_info(netdev); + lio->link_changes++; - if (lio->linfo.link.s.status) { + if (lio->linfo.link.s.link_up) { netif_carrier_on(netdev); /* start_txq(netdev); */ txqs_wake(netdev); @@ -895,6 +964,42 @@ static inline void update_link_status(struct net_device *netdev, } } +/* Runs in interrupt context. */ +static void update_txq_status(struct octeon_device *oct, int iq_num) +{ + struct net_device *netdev; + struct lio *lio; + struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; + + /*octeon_update_iq_read_idx(oct, iq);*/ + + netdev = oct->props[iq->ifidx].netdev; + + /* This is needed because the first IQ does not have + * a netdev associated with it. 
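The setup_glists()/delete_glists() rework above replaces the old single gather list and its one lock with an array of freelists, one per TX queue, so completion paths on different queues no longer contend on a shared spinlock, and each scatter list is DMA-mapped once at setup rather than per packet. The per-queue freelist skeleton, with illustrative names and the DMA mapping and error unwinding left out:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_pool {
	spinlock_t lock;
	struct list_head free;	/* recycled gather descriptors */
};

static struct demo_pool *demo_pools_alloc(int num_queues)
{
	struct demo_pool *p = kcalloc(num_queues, sizeof(*p), GFP_KERNEL);
	int i;

	if (!p)
		return NULL;
	for (i = 0; i < num_queues; i++) {
		spin_lock_init(&p[i].lock);
		INIT_LIST_HEAD(&p[i].free);	/* one freelist per queue */
	}
	return p;
}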
+ */ + if (!netdev) + return; + + lio = GET_LIO(netdev); + if (netif_is_multiqueue(netdev)) { + if (__netif_subqueue_stopped(netdev, iq->q_index) && + lio->linfo.link.s.link_up && + (!octnet_iq_is_full(oct, iq_num))) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, + tx_restart, 1); + netif_wake_subqueue(netdev, iq->q_index); + } else { + if (!octnet_iq_is_full(oct, lio->txq)) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, + lio->txq, + tx_restart, 1); + wake_q(netdev, lio->txq); + } + } + } +} + /** * \brief Droq packet processor sceduler * @param oct octeon device @@ -908,8 +1013,9 @@ void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct) struct octeon_droq *droq; if (oct->int_status & OCT_DEV_INTR_PKT_DATA) { - for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) { - if (!(oct->droq_intr & (1 << oq_no))) + for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); + oq_no++) { + if (!(oct->droq_intr & (1ULL << oq_no))) continue; droq = oct->droq[oq_no]; @@ -985,7 +1091,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct) * @param pdev PCI device structure * @param ent unused */ -static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +static int +liquidio_probe(struct pci_dev *pdev, + const struct pci_device_id *ent __attribute__((unused))) { struct octeon_device *oct_dev = NULL; struct handshake *hs; @@ -1020,6 +1128,9 @@ static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENOMEM; } + oct_dev->rx_pause = 1; + oct_dev->tx_pause = 1; + dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); return 0; @@ -1085,19 +1196,13 @@ static void octeon_destroy_resources(struct octeon_device *oct) if (oct->flags & LIO_FLAG_MSI_ENABLED) pci_disable_msi(oct->pci_dev); - /* Soft reset the octeon device before exiting */ - oct->fn_list.soft_reset(oct); - - /* Disable the device, releasing the PCI INT */ - pci_disable_device(oct->pci_dev); - /* fallthrough */ case OCT_DEV_IN_RESET: case OCT_DEV_DROQ_INIT_DONE: /*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/ mdelay(100); - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - if (!(oct->io_qmask.oq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & (1ULL << i))) continue; octeon_delete_droq(oct, i); } @@ -1124,8 +1229,8 @@ static void octeon_destroy_resources(struct octeon_device *oct) /* fallthrough */ case OCT_DEV_INSTR_QUEUE_INIT_DONE: - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { - if (!(oct->io_qmask.iq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & (1ULL << i))) continue; octeon_delete_instr_queue(oct, i); } @@ -1137,14 +1242,21 @@ static void octeon_destroy_resources(struct octeon_device *oct) /* fallthrough */ case OCT_DEV_PCI_MAP_DONE: + + /* Soft reset the octeon device before exiting */ + oct->fn_list.soft_reset(oct); + octeon_unmap_pci_barx(oct, 0); octeon_unmap_pci_barx(oct, 1); /* fallthrough */ case OCT_DEV_BEGIN_STATE: + /* Disable the device, releasing the PCI INT */ + pci_disable_device(oct->pci_dev); + /* Nothing to be done here either */ break; - } /* end switch(oct->status) */ + } /* end switch (oct->status) */ tasklet_kill(&oct_priv->droq_tasklet); } @@ -1157,18 +1269,15 @@ static void octeon_destroy_resources(struct octeon_device *oct) static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) { struct octnic_ctrl_pkt nctrl; - struct octnic_ctrl_params nparams; memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); nctrl.ncmd.s.cmd = 
OCTNET_CMD_RX_CTL; - nctrl.ncmd.s.param1 = lio->linfo.ifidx; - nctrl.ncmd.s.param2 = start_stop; + nctrl.ncmd.s.param1 = start_stop; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.netpndev = (u64)lio->netdev; - nparams.resp_order = OCTEON_RESP_NORESPONSE; - - if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams) < 0) + if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl) < 0) netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); } @@ -1184,6 +1293,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) { struct net_device *netdev = oct->props[ifidx].netdev; struct lio *lio; + struct napi_struct *napi, *n; if (!netdev) { dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", @@ -1200,13 +1310,22 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) txqs_stop(netdev); + if (oct->props[lio->ifidx].napi_enabled == 1) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + } + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) unregister_netdev(netdev); - delete_glist(lio); + delete_glists(lio); free_netdev(netdev); + oct->props[ifidx].gmxport = -1; + oct->props[ifidx].netdev = NULL; } @@ -1225,10 +1344,15 @@ static int liquidio_stop_nic_module(struct octeon_device *oct) return 1; } + spin_lock_bh(&oct->cmd_resp_wqlock); + oct->cmd_resp_state = OCT_DRV_OFFLINE; + spin_unlock_bh(&oct->cmd_resp_wqlock); + for (i = 0; i < oct->ifcount; i++) { lio = GET_LIO(oct->props[i].netdev); for (j = 0; j < lio->linfo.num_rxpciq; j++) - octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j]); + octeon_unregister_droq_ops(oct, + lio->linfo.rxpciq[j].s.q_no); } for (i = 0; i < oct->ifcount; i++) @@ -1272,6 +1396,7 @@ static int octeon_chip_specific_setup(struct octeon_device *oct) { u32 dev_id, rev_id; int ret = 1; + char *s; pci_read_config_dword(oct->pci_dev, 0, &dev_id); pci_read_config_dword(oct->pci_dev, 8, &rev_id); @@ -1281,22 +1406,27 @@ static int octeon_chip_specific_setup(struct octeon_device *oct) case OCTEON_CN68XX_PCIID: oct->chip_id = OCTEON_CN68XX; ret = lio_setup_cn68xx_octeon_device(oct); + s = "CN68XX"; break; case OCTEON_CN66XX_PCIID: oct->chip_id = OCTEON_CN66XX; ret = lio_setup_cn66xx_octeon_device(oct); + s = "CN66XX"; break; + default: + s = "?"; dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n", dev_id); } if (!ret) - dev_info(&oct->pci_dev->dev, "CN68XX PASS%d.%d %s\n", + dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s, OCTEON_MAJOR_REV(oct), OCTEON_MINOR_REV(oct), - octeon_get_conf(oct)->card_name); + octeon_get_conf(oct)->card_name, + LIQUIDIO_VERSION); return ret; } @@ -1324,6 +1454,16 @@ static int octeon_pci_os_setup(struct octeon_device *oct) return 0; } +static inline int skb_iq(struct lio *lio, struct sk_buff *skb) +{ + int q = 0; + + if (netif_is_multiqueue(lio->netdev)) + q = skb->queue_mapping % lio->linfo.num_txpciq; + + return q; +} + /** * \brief Check Tx queue state for a given network buffer * @param lio per-network private data @@ -1335,14 +1475,19 @@ static inline int check_txq_state(struct lio *lio, struct sk_buff *skb) if (netif_is_multiqueue(lio->netdev)) { q = skb->queue_mapping; - iq = lio->linfo.txpciq[(q & (lio->linfo.num_txpciq - 1))]; + iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no; } else { iq = lio->txq; + q = iq; } if (octnet_iq_is_full(lio->oct_dev, iq)) return 0; - 
wake_q(lio->netdev, q); + + if (__netif_subqueue_stopped(lio->netdev, q)) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1); + wake_q(lio->netdev, q); + } return 1; } @@ -1365,7 +1510,7 @@ static void free_netbuf(void *buf) check_txq_state(lio, skb); - recv_buffer_free((struct sk_buff *)skb); + tx_buffer_free(skb); } /** @@ -1378,7 +1523,7 @@ static void free_netsgbuf(void *buf) struct sk_buff *skb; struct lio *lio; struct octnic_gather *g; - int i, frags; + int i, frags, iq; finfo = (struct octnet_buf_free_info *)buf; skb = finfo->skb; @@ -1400,17 +1545,17 @@ static void free_netsgbuf(void *buf) i++; } - dma_unmap_single(&lio->oct_dev->pci_dev->dev, - finfo->dptr, g->sg_size, - DMA_TO_DEVICE); + dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev, + g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE); - spin_lock(&lio->lock); - list_add_tail(&g->list, &lio->glist); - spin_unlock(&lio->lock); + iq = skb_iq(lio, skb); + spin_lock(&lio->glist_lock[iq]); + list_add_tail(&g->list, &lio->glist[iq]); + spin_unlock(&lio->glist_lock[iq]); check_txq_state(lio, skb); /* mq support: sub-queue state check */ - recv_buffer_free((struct sk_buff *)skb); + tx_buffer_free(skb); } /** @@ -1424,7 +1569,7 @@ static void free_netsgbuf_with_resp(void *buf) struct sk_buff *skb; struct lio *lio; struct octnic_gather *g; - int i, frags; + int i, frags, iq; sc = (struct octeon_soft_command *)buf; skb = (struct sk_buff *)sc->callback_arg; @@ -1448,13 +1593,14 @@ static void free_netsgbuf_with_resp(void *buf) i++; } - dma_unmap_single(&lio->oct_dev->pci_dev->dev, - finfo->dptr, g->sg_size, - DMA_TO_DEVICE); + dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev, + g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE); + + iq = skb_iq(lio, skb); - spin_lock(&lio->lock); - list_add_tail(&g->list, &lio->glist); - spin_unlock(&lio->lock); + spin_lock(&lio->glist_lock[iq]); + list_add_tail(&g->list, &lio->glist[iq]); + spin_unlock(&lio->glist_lock[iq]); /* Don't free the skb yet */ @@ -1567,8 +1713,10 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp, * @param rq request * @param on is it on */ -static int liquidio_ptp_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) +static int +liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)), + struct ptp_clock_request *rq __attribute__((unused)), + int on __attribute__((unused))) { return -EOPNOTSUPP; } @@ -1655,6 +1803,7 @@ static int load_firmware(struct octeon_device *oct) if (ret) { dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.", fw_name); + release_firmware(fw); return ret; } @@ -1708,7 +1857,7 @@ static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, * @param buf pointer to resp structure */ static void if_cfg_callback(struct octeon_device *oct, - u32 status, + u32 status __attribute__((unused)), void *buf) { struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; @@ -1722,7 +1871,10 @@ static void if_cfg_callback(struct octeon_device *oct, if (resp->status) dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. 
Status: %llx\n", CVM_CAST64(resp->status)); - ACCESS_ONCE(ctx->cond) = 1; + WRITE_ONCE(ctx->cond, 1); + + snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", + resp->cfg_info.liquidio_firmware_version); /* This barrier is required to be sure that the response has been * written fully before waking up the handler @@ -1739,16 +1891,16 @@ static void if_cfg_callback(struct octeon_device *oct, * @returns selected queue number */ static u16 select_q(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) + void *accel_priv __attribute__((unused)), + select_queue_fallback_t fallback __attribute__((unused))) { - int qindex; + u32 qindex = 0; struct lio *lio; lio = GET_LIO(dev); - /* select queue on chosen queue_mapping or core */ - qindex = skb_rx_queue_recorded(skb) ? - skb_get_rx_queue(skb) : smp_processor_id(); - return (u16)(qindex & (lio->linfo.num_txpciq - 1)); + qindex = skb_tx_hash(dev, skb); + + return (u16)(qindex % (lio->linfo.num_txpciq)); } /** Routine to push packets arriving on Octeon interface upto network layer. @@ -1757,26 +1909,28 @@ static u16 select_q(struct net_device *dev, struct sk_buff *skb, * @param len - size of total data received. * @param rh - Control header associated with the packet * @param param - additional control data with the packet + * @param arg - farg registered in droq_ops */ static void -liquidio_push_packet(u32 octeon_id, +liquidio_push_packet(u32 octeon_id __attribute__((unused)), void *skbuff, u32 len, union octeon_rh *rh, - void *param) + void *param, + void *arg) { struct napi_struct *napi = param; - struct octeon_device *oct = lio_get_device(octeon_id); struct sk_buff *skb = (struct sk_buff *)skbuff; struct skb_shared_hwtstamps *shhwtstamps; u64 ns; - struct net_device *netdev = - (struct net_device *)oct->props[rh->r_dh.link].netdev; + u16 vtag = 0; + struct net_device *netdev = (struct net_device *)arg; struct octeon_droq *droq = container_of(param, struct octeon_droq, napi); if (netdev) { int packet_was_received; struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; /* Do not proceed if the interface is not in RUNNING state. */ if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { @@ -1787,32 +1941,86 @@ liquidio_push_packet(u32 octeon_id, skb->dev = netdev; - if (rh->r_dh.has_hwtstamp) { - /* timestamp is included from the hardware at the - * beginning of the packet. - */ - if (ifstate_check(lio, - LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) { - /* Nanoseconds are in the first 64-bits - * of the packet. + skb_record_rx_queue(skb, droq->q_no); + if (likely(len > MIN_SKB_SIZE)) { + struct octeon_skb_page_info *pg_info; + unsigned char *va; + + pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + if (pg_info->page) { + /* For Paged allocation use the frags */ + va = page_address(pg_info->page) + + pg_info->page_offset; + memcpy(skb->data, va, MIN_SKB_SIZE); + skb_put(skb, MIN_SKB_SIZE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + pg_info->page, + pg_info->page_offset + + MIN_SKB_SIZE, + len - MIN_SKB_SIZE, + LIO_RXBUFFER_SZ); + } + } else { + struct octeon_skb_page_info *pg_info = + ((struct octeon_skb_page_info *)(skb->cb)); + skb_copy_to_linear_data(skb, page_address(pg_info->page) + + pg_info->page_offset, len); + skb_put(skb, len); + put_page(pg_info->page); + } + + if (((oct->chip_id == OCTEON_CN66XX) || + (oct->chip_id == OCTEON_CN68XX)) && + ptp_enable) { + if (rh->r_dh.has_hwtstamp) { + /* timestamp is included from the hardware at + * the beginning of the packet. 
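For reference, the timestamp logic being restructured in this hunk is entirely in-band: with RX timestamping enabled, the wire format carries eight bytes of nanoseconds ahead of the frame, and those must be lifted into the skb's shared timestamp area and stripped before eth_type_trans() parses the packet. The same three steps in isolation (a sketch; the driver keeps the clock offset in lio->ptp_adjust):

#include <linux/skbuff.h>
#include <linux/ktime.h>

static void demo_pull_rx_tstamp(struct sk_buff *skb, u64 ptp_adjust)
{
	u64 ns;

	memcpy(&ns, skb->data, sizeof(ns));	/* stamp precedes payload */
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns + ptp_adjust);
	skb_pull(skb, sizeof(ns));	/* hide it from the stack */
}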
*/ - memcpy(&ns, (skb->data), sizeof(ns)); - shhwtstamps = skb_hwtstamps(skb); - shhwtstamps->hwtstamp = - ns_to_ktime(ns + lio->ptp_adjust); + if (ifstate_check + (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) { + /* Nanoseconds are in the first 64-bits + * of the packet. + */ + memcpy(&ns, (skb->data), sizeof(ns)); + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = + ns_to_ktime(ns + + lio->ptp_adjust); + } + skb_pull(skb, sizeof(ns)); } - skb_pull(skb, sizeof(ns)); } skb->protocol = eth_type_trans(skb, skb->dev); - if ((netdev->features & NETIF_F_RXCSUM) && - (rh->r_dh.csum_verified == CNNIC_CSUM_VERIFIED)) + (((rh->r_dh.encap_on) && + (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) || + (!(rh->r_dh.encap_on) && + (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED)))) /* checksum has already been verified */ skb->ip_summed = CHECKSUM_UNNECESSARY; else skb->ip_summed = CHECKSUM_NONE; + /* Setting Encapsulation field on basis of status received + * from the firmware + */ + if (rh->r_dh.encap_on) { + skb->encapsulation = 1; + skb->csum_level = 1; + droq->stats.rx_vxlan++; + } + + /* inbound VLAN tag */ + if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (rh->r_dh.vlan != 0)) { + u16 vid = rh->r_dh.vlan; + u16 priority = rh->r_dh.priority; + + vtag = priority << 13 | vid; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); + } + packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP; if (packet_was_received) { @@ -1867,39 +2075,6 @@ static void liquidio_napi_drv_callback(void *arg) } /** - * \brief Main NAPI poll function - * @param droq octeon output queue - * @param budget maximum number of items to process - */ -static int liquidio_napi_do_rx(struct octeon_droq *droq, int budget) -{ - int work_done; - struct lio *lio = GET_LIO(droq->napi.dev); - struct octeon_device *oct = lio->oct_dev; - - work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, - POLL_EVENT_PROCESS_PKTS, - budget); - if (work_done < 0) { - netif_info(lio, rx_err, lio->netdev, - "Receive work_done < 0, rxq:%d\n", droq->q_no); - goto octnet_napi_finish; - } - - if (work_done > budget) - dev_err(&oct->pci_dev->dev, ">>>> %s work_done: %d budget: %d\n", - __func__, work_done, budget); - - return work_done; - -octnet_napi_finish: - napi_complete(&droq->napi); - octeon_process_droq_poll_cmd(oct, droq->q_no, POLL_EVENT_ENABLE_INTR, - 0); - return 0; -} - -/** * \brief Entry point for NAPI polling * @param napi NAPI structure * @param budget maximum number of items to process @@ -1908,35 +2083,57 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) { struct octeon_droq *droq; int work_done; + int tx_done = 0, iq_no; + struct octeon_instr_queue *iq; + struct octeon_device *oct; droq = container_of(napi, struct octeon_droq, napi); + oct = droq->oct_dev; + iq_no = droq->q_no; + /* Handle Droq descriptors */ + work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, + POLL_EVENT_PROCESS_PKTS, + budget); - work_done = liquidio_napi_do_rx(droq, budget); + /* Flush the instruction queue */ + iq = oct->instr_queue[iq_no]; + if (iq) { + /* Process iq buffers with in the budget limits */ + tx_done = octeon_flush_iq(oct, iq, 1, budget); + /* Update iq read-index rather than waiting for next interrupt. + * Return back if tx_done is false. 
+ */ + update_txq_status(oct, iq_no); + /*tx_done = (iq->flush_index == iq->octeon_read_index);*/ + } else { + dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", + __func__, iq_no); + } - if (work_done < budget) { + if ((work_done < budget) && (tx_done)) { napi_complete(napi); octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, POLL_EVENT_ENABLE_INTR, 0); return 0; } - return work_done; + return (!tx_done) ? (budget) : (work_done); } /** * \brief Setup input and output queues * @param octeon_dev octeon device - * @param net_device Net device + * @param ifidx Interface Index * * Note: Queues are with respect to the octeon device. Thus * an input queue is for egress packets, and output queues * are for ingress packets. */ static inline int setup_io_queues(struct octeon_device *octeon_dev, - struct net_device *net_device) + int ifidx) { - static int first_time = 1; - static struct octeon_droq_ops droq_ops; + struct octeon_droq_ops droq_ops; + struct net_device *netdev; static int cpu_id; static int cpu_id_modulus; struct octeon_droq *droq; @@ -1945,23 +2142,26 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev, struct lio *lio; int num_tx_descs; - lio = GET_LIO(net_device); - if (first_time) { - first_time = 0; - memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); + netdev = octeon_dev->props[ifidx].netdev; - droq_ops.fptr = liquidio_push_packet; + lio = GET_LIO(netdev); - droq_ops.poll_mode = 1; - droq_ops.napi_fn = liquidio_napi_drv_callback; - cpu_id = 0; - cpu_id_modulus = num_present_cpus(); - } + memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); + + droq_ops.fptr = liquidio_push_packet; + droq_ops.farg = (void *)netdev; + + droq_ops.poll_mode = 1; + droq_ops.napi_fn = liquidio_napi_drv_callback; + cpu_id = 0; + cpu_id_modulus = num_present_cpus(); /* set up DROQs. 
*/ for (q = 0; q < lio->linfo.num_rxpciq; q++) { - q_no = lio->linfo.rxpciq[q]; - + q_no = lio->linfo.rxpciq[q].s.q_no; + dev_dbg(&octeon_dev->pci_dev->dev, + "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n", + q, q_no); retval = octeon_setup_droq(octeon_dev, q_no, CFG_GET_NUM_RX_DESCS_NIC_IF (octeon_get_conf(octeon_dev), @@ -1978,7 +2178,11 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev, droq = octeon_dev->droq[q_no]; napi = &droq->napi; - netif_napi_add(net_device, napi, liquidio_napi_poll, 64); + dev_dbg(&octeon_dev->pci_dev->dev, + "netif_napi_add netdev:%llx oct:%llx\n", + (u64)netdev, + (u64)octeon_dev); + netif_napi_add(netdev, napi, liquidio_napi_poll, 64); /* designate a CPU for this droq */ droq->cpu_id = cpu_id; @@ -1994,9 +2198,9 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev, num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf (octeon_dev), lio->ifidx); - retval = octeon_setup_iq(octeon_dev, lio->linfo.txpciq[q], - num_tx_descs, - netdev_get_tx_queue(net_device, q)); + retval = octeon_setup_iq(octeon_dev, ifidx, q, + lio->linfo.txpciq[q], num_tx_descs, + netdev_get_tx_queue(netdev, q)); if (retval) { dev_err(&octeon_dev->pci_dev->dev, " %s : Runtime IQ(TxQ) creation failed.\n", @@ -2034,7 +2238,8 @@ static inline void setup_tx_poll_fn(struct net_device *netdev) struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; - lio->txq_status_wq.wq = create_workqueue("txq-status"); + lio->txq_status_wq.wq = alloc_workqueue("txq-status", + WQ_MEM_RECLAIM, 0); if (!lio->txq_status_wq.wq) { dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n"); return; @@ -2046,6 +2251,14 @@ static inline void setup_tx_poll_fn(struct net_device *netdev) &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); } +static inline void cleanup_tx_poll_fn(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); + destroy_workqueue(lio->txq_status_wq.wq); +} + /** * \brief Net device open for LiquidIO * @param netdev network device @@ -2056,17 +2269,22 @@ static int liquidio_open(struct net_device *netdev) struct octeon_device *oct = lio->oct_dev; struct napi_struct *napi, *n; - list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) - napi_enable(napi); + if (oct->props[lio->ifidx].napi_enabled == 0) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_enable(napi); + + oct->props[lio->ifidx].napi_enabled = 1; + } oct_ptp_open(netdev); ifstate_set(lio, LIO_IFSTATE_RUNNING); + setup_tx_poll_fn(netdev); + start_txq(netdev); netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); - try_module_get(THIS_MODULE); /* tell Octeon to start forwarding packets to host */ send_rx_ctrl_cmd(lio, 1); @@ -2086,41 +2304,36 @@ static int liquidio_open(struct net_device *netdev) */ static int liquidio_stop(struct net_device *netdev) { - struct napi_struct *napi, *n; struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; - netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); + ifstate_reset(lio, LIO_IFSTATE_RUNNING); + + netif_tx_disable(netdev); + /* Inform that netif carrier is down */ + netif_carrier_off(netdev); lio->intf_open = 0; - lio->linfo.link.s.status = 0; + lio->linfo.link.s.link_up = 0; + lio->link_changes++; - netif_carrier_off(netdev); + /* Pause for a moment and wait for Octeon to flush out (to the wire) any + * egress packets that are in-flight. 
+ */ + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(100)); - /* tell Octeon to stop forwarding packets to host */ + /* Now it should be safe to tell Octeon that nic interface is down. */ send_rx_ctrl_cmd(lio, 0); - cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); - flush_workqueue(lio->txq_status_wq.wq); - destroy_workqueue(lio->txq_status_wq.wq); + cleanup_tx_poll_fn(netdev); if (lio->ptp_clock) { ptp_clock_unregister(lio->ptp_clock); lio->ptp_clock = NULL; } - ifstate_reset(lio, LIO_IFSTATE_RUNNING); - - /* This is a hack that allows DHCP to continue working. */ - set_bit(__LINK_STATE_START, &lio->netdev->state); - - list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) - napi_disable(napi); - - txqs_stop(netdev); - dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); - module_put(THIS_MODULE); return 0; } @@ -2131,6 +2344,7 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) struct net_device *netdev = (struct net_device *)nctrl->netpndev; struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + u8 *mac; switch (nctrl->ncmd.s.cmd) { case OCTNET_CMD_CHANGE_DEVFLAGS: @@ -2138,22 +2352,24 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) break; case OCTNET_CMD_CHANGE_MACADDR: - /* If command is successful, change the MACADDR. */ - netif_info(lio, probe, lio->netdev, " MACAddr changed to 0x%llx\n", - CVM_CAST64(nctrl->udd[0])); - dev_info(&oct->pci_dev->dev, "%s MACAddr changed to 0x%llx\n", - netdev->name, CVM_CAST64(nctrl->udd[0])); - memcpy(netdev->dev_addr, ((u8 *)&nctrl->udd[0]) + 2, ETH_ALEN); + mac = ((u8 *)&nctrl->udd[0]) + 2; + netif_info(lio, probe, lio->netdev, + "%s %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", + "MACAddr changed to", mac[0], mac[1], + mac[2], mac[3], mac[4], mac[5]); break; case OCTNET_CMD_CHANGE_MTU: /* If command is successful, change the MTU. 
*/ netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n", - netdev->mtu, nctrl->ncmd.s.param2); + netdev->mtu, nctrl->ncmd.s.param1); dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n", netdev->name, netdev->mtu, - nctrl->ncmd.s.param2); - netdev->mtu = nctrl->ncmd.s.param2; + nctrl->ncmd.s.param1); + rtnl_lock(); + netdev->mtu = nctrl->ncmd.s.param1; + call_netdevice_notifiers(NETDEV_CHANGEMTU, netdev); + rtnl_unlock(); break; case OCTNET_CMD_GPIO_ACCESS: @@ -2179,11 +2395,79 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) netdev->name); break; + case OCTNET_CMD_ENABLE_VLAN_FILTER: + dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n", + netdev->name); + break; + + case OCTNET_CMD_ADD_VLAN_FILTER: + dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n", + netdev->name, nctrl->ncmd.s.param1); + break; + + case OCTNET_CMD_DEL_VLAN_FILTER: + dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n", + netdev->name, nctrl->ncmd.s.param1); + break; + case OCTNET_CMD_SET_SETTINGS: dev_info(&oct->pci_dev->dev, "%s settings changed\n", netdev->name); break; + /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL" + * Command passed by NIC driver + */ + case OCTNET_CMD_TNL_RX_CSUM_CTL: + if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) { + netif_info(lio, probe, lio->netdev, + "%s RX Checksum Offload Enabled\n", + netdev->name); + } else if (nctrl->ncmd.s.param1 == + OCTNET_CMD_RXCSUM_DISABLE) { + netif_info(lio, probe, lio->netdev, + "%s RX Checksum Offload Disabled\n", + netdev->name); + } + break; + + /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL" + * Command passed by NIC driver + */ + case OCTNET_CMD_TNL_TX_CSUM_CTL: + if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) { + netif_info(lio, probe, lio->netdev, + "%s TX Checksum Offload Enabled\n", + netdev->name); + } else if (nctrl->ncmd.s.param1 == + OCTNET_CMD_TXCSUM_DISABLE) { + netif_info(lio, probe, lio->netdev, + "%s TX Checksum Offload Disabled\n", + netdev->name); + } + break; + + /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG" + * Command passed by NIC driver + */ + case OCTNET_CMD_VXLAN_PORT_CONFIG: + if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) { + netif_info(lio, probe, lio->netdev, + "%s VxLAN Destination UDP PORT:%d ADDED\n", + netdev->name, + nctrl->ncmd.s.param1); + } else if (nctrl->ncmd.s.more == + OCTNET_CMD_VXLAN_PORT_DEL) { + netif_info(lio, probe, lio->netdev, + "%s VxLAN Destination UDP PORT:%d DELETED\n", + netdev->name, + nctrl->ncmd.s.param1); + } + break; + + case OCTNET_CMD_SET_FLOW_CTL: + netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n"); + break; default: dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__, @@ -2233,10 +2517,9 @@ static void liquidio_set_mcast_list(struct net_device *netdev) struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; - struct octnic_ctrl_params nparams; struct netdev_hw_addr *ha; u64 *mc; - int ret, i; + int ret; int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); @@ -2244,15 +2527,14 @@ static void liquidio_set_mcast_list(struct net_device *netdev) /* Create a ctrl pkt command to be sent to core app. 
*/ nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; - nctrl.ncmd.s.param1 = lio->linfo.ifidx; - nctrl.ncmd.s.param2 = get_new_flags(netdev); - nctrl.ncmd.s.param3 = mc_count; + nctrl.ncmd.s.param1 = get_new_flags(netdev); + nctrl.ncmd.s.param2 = mc_count; nctrl.ncmd.s.more = mc_count; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; /* copy all the addresses into the udd */ - i = 0; mc = &nctrl.udd[0]; netdev_for_each_mc_addr(ha, netdev) { *mc = 0; @@ -2268,9 +2550,7 @@ static void liquidio_set_mcast_list(struct net_device *netdev) */ nctrl.wait_time = 0; - nparams.resp_order = OCTEON_RESP_NORESPONSE; - - ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", ret); @@ -2288,19 +2568,17 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) struct octeon_device *oct = lio->oct_dev; struct sockaddr *addr = (struct sockaddr *)p; struct octnic_ctrl_pkt nctrl; - struct octnic_ctrl_params nparams; - if ((!is_valid_ether_addr(addr->sa_data)) || - (ifstate_check(lio, LIO_IFSTATE_RUNNING))) + if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; - nctrl.ncmd.s.param1 = lio->linfo.ifidx; - nctrl.ncmd.s.param2 = 0; + nctrl.ncmd.s.param1 = 0; nctrl.ncmd.s.more = 1; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; nctrl.wait_time = 100; @@ -2309,9 +2587,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) /* The MAC Address is presented in network byte order. 
*/ memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN); - nparams.resp_order = OCTEON_RESP_ORDERED; - - ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); return -ENOMEM; @@ -2339,7 +2615,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) oct = lio->oct_dev; for (i = 0; i < lio->linfo.num_txpciq; i++) { - iq_no = lio->linfo.txpciq[i]; + iq_no = lio->linfo.txpciq[i].s.q_no; iq_stats = &oct->instr_queue[iq_no]->stats; pkts += iq_stats->tx_done; drop += iq_stats->tx_dropped; @@ -2355,7 +2631,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) bytes = 0; for (i = 0; i < lio->linfo.num_rxpciq; i++) { - oq_no = lio->linfo.rxpciq[i]; + oq_no = lio->linfo.rxpciq[i].s.q_no; oq_stats = &oct->droq[oq_no]->stats; pkts += oq_stats->rx_pkts_received; drop += (oq_stats->rx_dropped + @@ -2381,19 +2657,16 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; - struct octnic_ctrl_params nparams; - int max_frm_size = new_mtu + OCTNET_FRM_HEADER_SIZE; int ret = 0; - /* Limit the MTU to make sure the ethernet packets are between 64 bytes - * and 65535 bytes + /* Limit the MTU to make sure the ethernet packets are between 68 bytes + * and 16000 bytes */ - if ((max_frm_size < OCTNET_MIN_FRM_SIZE) || - (max_frm_size > OCTNET_MAX_FRM_SIZE)) { + if ((new_mtu < LIO_MIN_MTU_SIZE) || + (new_mtu > LIO_MAX_MTU_SIZE)) { dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu); dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n", - (OCTNET_MIN_FRM_SIZE - OCTNET_FRM_HEADER_SIZE), - (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)); + LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE); return -EINVAL; } @@ -2401,15 +2674,13 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU; - nctrl.ncmd.s.param1 = lio->linfo.ifidx; - nctrl.ncmd.s.param2 = new_mtu; + nctrl.ncmd.s.param1 = new_mtu; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - nparams.resp_order = OCTEON_RESP_ORDERED; - - ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { dev_err(&oct->pci_dev->dev, "Failed to set MTU\n"); return -1; @@ -2426,7 +2697,7 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) * @param ifr interface request * @param cmd command */ -static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) { struct hwtstamp_config conf; struct lio *lio = GET_LIO(netdev); @@ -2487,7 +2758,7 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { case SIOCSHWTSTAMP: - return hwtstamp_ioctl(netdev, ifr, cmd); + return hwtstamp_ioctl(netdev, ifr); default: return -EOPNOTSUPP; } @@ -2534,7 +2805,7 @@ static void handle_timestamp(struct octeon_device *oct, } octeon_free_soft_command(oct, sc); - recv_buffer_free(skb); + tx_buffer_free(skb); } /* \brief Send a data packet that will be timestamped @@ -2549,10 +2820,9 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct, { int retval; struct octeon_soft_command *sc; 
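The timestamped-TX path in this hunk follows the driver's soft-command pattern: the skb rides along as sc->callback_arg, the payload length is now read from the cmd2.ih2 instruction header instead of the removed octeon_instr_ih/octeon_instr_rdp pointers, and only IQ_SEND_FAILED is treated as fatal. A minimal sketch of that send-side pattern, using only the calls visible in this patch (octeon_send_command(), octeon_free_soft_command()); the u32 type of reqtype is an assumption, and this is an illustration rather than the driver's exact code:

/* Sketch only: submit a prepared soft command on its instruction
 * queue and free it if the queue rejects it outright. Mirrors the
 * retval handling this patch introduces: any status other than
 * IQ_SEND_FAILED still counts as queued. Assumes sc->iq_no and
 * sc->cmd were set up as in send_nic_timestamp_pkt(). */
static int lio_submit_sc(struct octeon_device *oct,
			 struct octeon_soft_command *sc,
			 u32 reqtype)
{
	u32 len = (u32)((struct octeon_instr_ih2 *)
			(&sc->cmd.cmd2.ih2))->dlengsz;
	int retval;

	/* ring_doorbell = 1: no xmit_more batching in this sketch */
	retval = octeon_send_command(oct, sc->iq_no, 1, &sc->cmd,
				     sc, len, reqtype);
	if (retval == IQ_SEND_FAILED)
		octeon_free_soft_command(oct, sc);

	return retval;
}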
- struct octeon_instr_ih *ih; - struct octeon_instr_rdp *rdp; struct lio *lio; int ring_doorbell; + u32 len; lio = finfo->lio; @@ -2574,14 +2844,13 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct, sc->callback_arg = finfo->skb; sc->iq_no = ndata->q_no; - ih = (struct octeon_instr_ih *)&sc->cmd.ih; - rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; + len = (u32)((struct octeon_instr_ih2 *)(&sc->cmd.cmd2.ih2))->dlengsz; ring_doorbell = !xmit_more; retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, - sc, ih->dlengsz, ndata->reqtype); + sc, len, ndata->reqtype); - if (retval) { + if (retval == IQ_SEND_FAILED) { dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", retval); octeon_free_soft_command(oct, sc); @@ -2592,68 +2861,6 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct, return retval; } -static inline int is_ipv4(struct sk_buff *skb) -{ - return (skb->protocol == htons(ETH_P_IP)) && - (ip_hdr(skb)->version == 4); -} - -static inline int is_vlan(struct sk_buff *skb) -{ - return skb->protocol == htons(ETH_P_8021Q); -} - -static inline int is_ip_fragmented(struct sk_buff *skb) -{ - /* The Don't fragment and Reserved flag fields are ignored. - * IP is fragmented if - * - the More fragments bit is set (indicating this IP is a fragment - * with more to follow; the current offset could be 0 ). - * - ths offset field is non-zero. - */ - return (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ? 1 : 0; -} - -static inline int is_ipv6(struct sk_buff *skb) -{ - return (skb->protocol == htons(ETH_P_IPV6)) && - (ipv6_hdr(skb)->version == 6); -} - -static inline int is_with_extn_hdr(struct sk_buff *skb) -{ - return (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP) && - (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP); -} - -static inline int is_tcpudp(struct sk_buff *skb) -{ - return (ip_hdr(skb)->protocol == IPPROTO_TCP) || - (ip_hdr(skb)->protocol == IPPROTO_UDP); -} - -static inline u32 get_ipv4_5tuple_tag(struct sk_buff *skb) -{ - u32 tag; - struct iphdr *iphdr = ip_hdr(skb); - - tag = crc32(0, &iphdr->protocol, 1); - tag = crc32(tag, (u8 *)&iphdr->saddr, 8); - tag = crc32(tag, skb_transport_header(skb), 4); - return tag; -} - -static inline u32 get_ipv6_5tuple_tag(struct sk_buff *skb) -{ - u32 tag; - struct ipv6hdr *ipv6hdr = ipv6_hdr(skb); - - tag = crc32(0, &ipv6hdr->nexthdr, 1); - tag = crc32(tag, (u8 *)&ipv6hdr->saddr, 32); - tag = crc32(tag, skb_transport_header(skb), 4); - return tag; -} - /** \brief Transmit networks packets to the Octeon interface * @param skbuff skbuff struct to be passed to network layer. * @param netdev pointer to network device @@ -2668,18 +2875,22 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) struct octnic_data_pkt ndata; struct octeon_device *oct; struct oct_iq_stats *stats; - int cpu = 0, status = 0; + struct octeon_instr_irh *irh; + union tx_info *tx_info; + int status = 0; int q_idx = 0, iq_no = 0; - int xmit_more; + int xmit_more, j; + u64 dptr = 0; u32 tag = 0; lio = GET_LIO(netdev); oct = lio->oct_dev; if (netif_is_multiqueue(netdev)) { - cpu = skb->queue_mapping; - q_idx = (cpu & (lio->linfo.num_txpciq - 1)); - iq_no = lio->linfo.txpciq[q_idx]; + q_idx = skb->queue_mapping; + q_idx = (q_idx % (lio->linfo.num_txpciq)); + tag = q_idx; + iq_no = lio->linfo.txpciq[q_idx].s.q_no; } else { iq_no = lio->txq; } @@ -2690,11 +2901,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) * transmitted. 
*/ if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || - (!lio->linfo.link.s.status) || + (!lio->linfo.link.s.link_up) || (skb->len <= 0)) { netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n", - lio->linfo.link.s.status); + lio->linfo.link.s.link_up); goto lio_xmit_failed; } @@ -2726,62 +2937,25 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) /* defer sending if queue is full */ stats->tx_iq_busy++; netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", - ndata.q_no); + lio->txq); return NETDEV_TX_BUSY; } } /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", - * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no ); + * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no); */ ndata.datasize = skb->len; cmdsetup.u64 = 0; - cmdsetup.s.ifidx = lio->linfo.ifidx; + cmdsetup.s.iq_no = iq_no; if (skb->ip_summed == CHECKSUM_PARTIAL) { - if (is_ipv4(skb) && !is_ip_fragmented(skb) && is_tcpudp(skb)) { - tag = get_ipv4_5tuple_tag(skb); - - cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1; - - if (ip_hdr(skb)->ihl > 5) - cmdsetup.s.ipv4opts_ipv6exthdr = - OCT_PKT_PARAM_IPV4OPTS; - - } else if (is_ipv6(skb)) { - tag = get_ipv6_5tuple_tag(skb); - - cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1; - - if (is_with_extn_hdr(skb)) - cmdsetup.s.ipv4opts_ipv6exthdr = - OCT_PKT_PARAM_IPV6EXTHDR; - - } else if (is_vlan(skb)) { - if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto - == htons(ETH_P_IP) && - !is_ip_fragmented(skb) && is_tcpudp(skb)) { - tag = get_ipv4_5tuple_tag(skb); - - cmdsetup.s.cksum_offset = - sizeof(struct vlan_ethhdr) + 1; - - if (ip_hdr(skb)->ihl > 5) - cmdsetup.s.ipv4opts_ipv6exthdr = - OCT_PKT_PARAM_IPV4OPTS; - - } else if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto - == htons(ETH_P_IPV6)) { - tag = get_ipv6_5tuple_tag(skb); - - cmdsetup.s.cksum_offset = - sizeof(struct vlan_ethhdr) + 1; - - if (is_with_extn_hdr(skb)) - cmdsetup.s.ipv4opts_ipv6exthdr = - OCT_PKT_PARAM_IPV6EXTHDR; - } + if (skb->encapsulation) { + cmdsetup.s.tnl_csum = 1; + stats->tx_vxlan++; + } else { + cmdsetup.s.transport_csum = 1; } } if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { @@ -2791,20 +2965,21 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) if (skb_shinfo(skb)->nr_frags == 0) { cmdsetup.s.u.datasize = skb->len; - octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag); + octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); + /* Offload checksum calculation for TCP/UDP packets */ - ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev, - skb->data, - skb->len, - DMA_TO_DEVICE); - if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) { + dptr = dma_map_single(&oct->pci_dev->dev, + skb->data, + skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", __func__); return NETDEV_TX_BUSY; } - finfo->dptr = ndata.cmd.dptr; - + ndata.cmd.cmd2.dptr = dptr; + finfo->dptr = dptr; ndata.reqtype = REQTYPE_NORESP_NET; } else { @@ -2812,19 +2987,20 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) struct skb_frag_struct *frag; struct octnic_gather *g; - spin_lock(&lio->lock); - g = (struct octnic_gather *)list_delete_head(&lio->glist); - spin_unlock(&lio->lock); + spin_lock(&lio->glist_lock[q_idx]); + g = (struct octnic_gather *) + list_delete_head(&lio->glist[q_idx]); + spin_unlock(&lio->glist_lock[q_idx]); if (!g) { netif_info(lio, tx_err, lio->netdev, "Transmit scatter gather: glist 
null!\n"); - goto lio_xmit_dma_failed; + goto lio_xmit_failed; } cmdsetup.s.gather = 1; cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); - octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag); + octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); memset(g->sg, 0, g->sg_size); @@ -2851,36 +3027,52 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) frag->size, DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, + g->sg[i >> 2].ptr[i & 3])) { + dma_unmap_single(&oct->pci_dev->dev, + g->sg[0].ptr[0], + skb->len - skb->data_len, + DMA_TO_DEVICE); + for (j = 1; j < i; j++) { + frag = &skb_shinfo(skb)->frags[j - 1]; + dma_unmap_page(&oct->pci_dev->dev, + g->sg[j >> 2].ptr[j & 3], + frag->size, + DMA_TO_DEVICE); + } + dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", + __func__); + return NETDEV_TX_BUSY; + } + add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); i++; } - ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev, - g->sg, g->sg_size, - DMA_TO_DEVICE); - if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) { - dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", - __func__); - dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0], - skb->len - skb->data_len, - DMA_TO_DEVICE); - return NETDEV_TX_BUSY; - } + dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr, + g->sg_size, DMA_TO_DEVICE); + dptr = g->sg_dma_ptr; - finfo->dptr = ndata.cmd.dptr; + ndata.cmd.cmd2.dptr = dptr; + finfo->dptr = dptr; finfo->g = g; ndata.reqtype = REQTYPE_NORESP_NET_SG; } - if (skb_shinfo(skb)->gso_size) { - struct octeon_instr_irh *irh = - (struct octeon_instr_irh *)&ndata.cmd.irh; - union tx_info *tx_info = (union tx_info *)&ndata.cmd.ossp[0]; + irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh; + tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0]; - irh->len = 1; /* to indicate that ossp[0] contains tx_info */ + if (skb_shinfo(skb)->gso_size) { tx_info->s.gso_size = skb_shinfo(skb)->gso_size; tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; + stats->tx_gso++; + } + + /* HW insert VLAN tag */ + if (skb_vlan_tag_present(skb)) { + irh->priority = skb_vlan_tag_get(skb) >> 13; + irh->vlan = skb_vlan_tag_get(skb) & 0xfff; } xmit_more = skb->xmit_more; @@ -2890,7 +3082,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) else status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); if (status == IQ_SEND_FAILED) - goto lio_xmit_dma_failed; + goto lio_xmit_failed; netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); @@ -2899,19 +3091,22 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) netif_trans_update(netdev); - stats->tx_done++; + if (skb_shinfo(skb)->gso_size) + stats->tx_done += skb_shinfo(skb)->gso_segs; + else + stats->tx_done++; stats->tx_tot_bytes += skb->len; return NETDEV_TX_OK; -lio_xmit_dma_failed: - dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr, - ndata.datasize, DMA_TO_DEVICE); lio_xmit_failed: stats->tx_dropped++; netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", iq_no, stats->tx_dropped); - recv_buffer_free(skb); + if (dptr) + dma_unmap_single(&oct->pci_dev->dev, dptr, + ndata.datasize, DMA_TO_DEVICE); + tx_buffer_free(skb); return NETDEV_TX_OK; } @@ -2931,27 +3126,145 @@ static void liquidio_tx_timeout(struct net_device *netdev) txqs_wake(netdev); } -int liquidio_set_feature(struct net_device *netdev, int cmd) +static int liquidio_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto __attribute__((unused)), + u16 vid) { struct 
lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; - struct octnic_ctrl_params nparams; int ret = 0; memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); nctrl.ncmd.u64 = 0; - nctrl.ncmd.s.cmd = cmd; - nctrl.ncmd.s.param1 = lio->linfo.ifidx; - nctrl.ncmd.s.param2 = OCTNIC_LROIPV4 | OCTNIC_LROIPV6; + nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; + nctrl.ncmd.s.param1 = vid; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.wait_time = 100; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", + ret); + } + + return ret; +} + +static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto __attribute__((unused)), + u16 vid) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; + nctrl.ncmd.s.param1 = vid; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - nparams.resp_order = OCTEON_RESP_NORESPONSE; + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", + ret); + } + return ret; +} + +/** Sending command to enable/disable RX checksum offload + * @param netdev pointer to network device + * @param command OCTNET_CMD_TNL_RX_CSUM_CTL + * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/ + * OCTNET_CMD_RXCSUM_DISABLE + * @returns SUCCESS or FAILURE + */ +int liquidio_set_rxcsum_command(struct net_device *netdev, int command, + u8 rx_cmd) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; - ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = command; + nctrl.ncmd.s.param1 = rx_cmd; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.wait_time = 100; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, + "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n", + ret); + } + return ret; +} + +/** Sending command to add/delete VxLAN UDP port to firmware + * @param netdev pointer to network device + * @param command OCTNET_CMD_VXLAN_PORT_CONFIG + * @param vxlan_port VxLAN port to be added or deleted + * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD, + * OCTNET_CMD_VXLAN_PORT_DEL + * @returns SUCCESS or FAILURE + */ +static int liquidio_vxlan_port_command(struct net_device *netdev, int command, + u16 vxlan_port, u8 vxlan_cmd_bit) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = command; + nctrl.ncmd.s.more = vxlan_cmd_bit; + nctrl.ncmd.s.param1 = vxlan_port; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.wait_time = 100; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, + "VxLAN port add/delete failed in core (ret:0x%x)\n", + ret); + } + return 
ret; +} + +int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = cmd; + nctrl.ncmd.s.param1 = param1; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.wait_time = 100; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n", ret); @@ -3007,14 +3320,55 @@ static int liquidio_set_features(struct net_device *netdev, return 0; if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) - liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE); + liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, + OCTNIC_LROIPV4 | OCTNIC_LROIPV6); else if (!(features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) - liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE); + liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, + OCTNIC_LROIPV4 | OCTNIC_LROIPV6); + + /* Sending command to firmware to enable/disable RX checksum + * offload settings using ethtool + */ + if (!(netdev->features & NETIF_F_RXCSUM) && + (lio->enc_dev_capability & NETIF_F_RXCSUM) && + (features & NETIF_F_RXCSUM)) + liquidio_set_rxcsum_command(netdev, + OCTNET_CMD_TNL_RX_CSUM_CTL, + OCTNET_CMD_RXCSUM_ENABLE); + else if ((netdev->features & NETIF_F_RXCSUM) && + (lio->enc_dev_capability & NETIF_F_RXCSUM) && + !(features & NETIF_F_RXCSUM)) + liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, + OCTNET_CMD_RXCSUM_DISABLE); return 0; } +static void liquidio_add_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + liquidio_vxlan_port_command(netdev, + OCTNET_CMD_VXLAN_PORT_CONFIG, + htons(ti->port), + OCTNET_CMD_VXLAN_PORT_ADD); +} + +static void liquidio_del_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + liquidio_vxlan_port_command(netdev, + OCTNET_CMD_VXLAN_PORT_CONFIG, + htons(ti->port), + OCTNET_CMD_VXLAN_PORT_DEL); +} + static struct net_device_ops lionetdevops = { .ndo_open = liquidio_open, .ndo_stop = liquidio_stop, @@ -3023,10 +3377,15 @@ static struct net_device_ops lionetdevops = { .ndo_set_mac_address = liquidio_set_mac, .ndo_set_rx_mode = liquidio_set_mcast_list, .ndo_tx_timeout = liquidio_tx_timeout, + + .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, .ndo_change_mtu = liquidio_change_mtu, .ndo_do_ioctl = liquidio_ioctl, .ndo_fix_features = liquidio_fix_features, .ndo_set_features = liquidio_set_features, + .ndo_udp_tunnel_add = liquidio_add_vxlan_port, + .ndo_udp_tunnel_del = liquidio_del_vxlan_port, }; /** \brief Entry point for the liquidio module @@ -3081,24 +3440,27 @@ static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) { struct octeon_device *oct = (struct octeon_device *)buf; struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; - int ifidx = 0; + int gmxport = 0; union oct_link_status *ls; int i; - if ((recv_pkt->buffer_size[0] != sizeof(*ls)) || - (recv_pkt->rh.r_nic_info.ifidx > oct->ifcount)) { + if (recv_pkt->buffer_size[0] != sizeof(*ls)) { dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", recv_pkt->buffer_size[0], - 
recv_pkt->rh.r_nic_info.ifidx); + recv_pkt->rh.r_nic_info.gmxport); goto nic_info_err; } - ifidx = recv_pkt->rh.r_nic_info.ifidx; + gmxport = recv_pkt->rh.r_nic_info.gmxport; ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]); octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); - - update_link_status(oct->props[ifidx].netdev, ls); + for (i = 0; i < oct->ifcount; i++) { + if (oct->props[i].gmxport == gmxport) { + update_link_status(oct->props[i].netdev, ls); + break; + } + } nic_info_err: for (i = 0; i < recv_pkt->buffer_count; i++) @@ -3124,13 +3486,12 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) struct liquidio_if_cfg_context *ctx; struct liquidio_if_cfg_resp *resp; struct octdev_props *props; - int retval, num_iqueues, num_oqueues, q_no; - u64 q_mask; - int num_cpus = num_online_cpus(); + int retval, num_iqueues, num_oqueues; union oct_nic_if_cfg if_cfg; unsigned int base_queue; unsigned int gmx_port_id; u32 resp_size, ctx_size; + u32 ifidx_or_pfnum; /* This is to handle link status changes */ octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, @@ -3166,14 +3527,12 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i); gmx_port_id = CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i); - if (num_iqueues > num_cpus) - num_iqueues = num_cpus; - if (num_oqueues > num_cpus) - num_oqueues = num_cpus; + ifidx_or_pfnum = i; + dev_dbg(&octeon_dev->pci_dev->dev, "requesting config for interface %d, iqs %d, oqs %d\n", - i, num_iqueues, num_oqueues); - ACCESS_ONCE(ctx->cond) = 0; + ifidx_or_pfnum, num_iqueues, num_oqueues); + WRITE_ONCE(ctx->cond, 0); ctx->octeon_id = lio_get_device_id(octeon_dev); init_waitqueue_head(&ctx->wc); @@ -3182,16 +3541,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if_cfg.s.num_oqueues = num_oqueues; if_cfg.s.base_queue = base_queue; if_cfg.s.gmx_port_id = gmx_port_id; + + sc->iq_no = 0; + octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, - OPCODE_NIC_IF_CFG, i, + OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 0); sc->callback = if_cfg_callback; sc->callback_arg = sc; - sc->wait_time = 1000; + sc->wait_time = 3000; retval = octeon_send_soft_command(octeon_dev, sc); - if (retval) { + if (retval == IQ_SEND_FAILED) { dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed status: %x\n", retval); @@ -3233,8 +3595,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) goto setup_nic_dev_fail; } - props = &octeon_dev->props[i]; - props->netdev = netdev; + SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); if (num_iqueues > 1) lionetdevops.ndo_select_queue = select_q; @@ -3248,23 +3609,21 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) memset(lio, 0, sizeof(struct lio)); - lio->linfo.ifidx = resp->cfg_info.ifidx; - lio->ifidx = resp->cfg_info.ifidx; + lio->ifidx = ifidx_or_pfnum; + + props = &octeon_dev->props[i]; + props->gmxport = resp->cfg_info.linfo.gmxport; + props->netdev = netdev; lio->linfo.num_rxpciq = num_oqueues; lio->linfo.num_txpciq = num_iqueues; - q_mask = resp->cfg_info.oqmask; - /* q_mask is 0-based and already verified mask is nonzero */ for (j = 0; j < num_oqueues; j++) { - q_no = __ffs64(q_mask); - q_mask &= (~(1UL << q_no)); - lio->linfo.rxpciq[j] = q_no; + lio->linfo.rxpciq[j].u64 = + resp->cfg_info.linfo.rxpciq[j].u64; } - q_mask = resp->cfg_info.iqmask; for (j = 0; j < num_iqueues; j++) { - q_no = __ffs64(q_mask); - q_mask &= (~(1UL << q_no)); - lio->linfo.txpciq[j] = q_no; + 
lio->linfo.txpciq[j].u64 = + resp->cfg_info.linfo.txpciq[j].u64; } lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; @@ -3273,16 +3632,41 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); lio->dev_capability = NETIF_F_HIGHDMA - | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM - | NETIF_F_SG | NETIF_F_RXCSUM - | NETIF_F_TSO | NETIF_F_TSO6 - | NETIF_F_LRO; + | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_SG | NETIF_F_RXCSUM + | NETIF_F_GRO + | NETIF_F_TSO | NETIF_F_TSO6 + | NETIF_F_LRO; netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); - netdev->features = lio->dev_capability; + /* Copy of transmit encapsulation capabilities: + * TSO, TSO6, Checksums for this device + */ + lio->enc_dev_capability = NETIF_F_IP_CSUM + | NETIF_F_IPV6_CSUM + | NETIF_F_GSO_UDP_TUNNEL + | NETIF_F_HW_CSUM | NETIF_F_SG + | NETIF_F_RXCSUM + | NETIF_F_TSO | NETIF_F_TSO6 + | NETIF_F_LRO; + + netdev->hw_enc_features = (lio->enc_dev_capability & + ~NETIF_F_LRO); + + lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL; + netdev->vlan_features = lio->dev_capability; + /* Add any unchangeable hw features */ + lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + + netdev->features = (lio->dev_capability & ~NETIF_F_LRO); netdev->hw_features = lio->dev_capability; + /*HW_VLAN_RX and HW_VLAN_FILTER is always on*/ + netdev->hw_features = netdev->hw_features & + ~NETIF_F_HW_VLAN_CTAG_RX; /* Point to the properties for octeon device to which this * interface belongs. @@ -3290,7 +3674,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->oct_dev = octeon_dev; lio->octprops = props; lio->netdev = netdev; - spin_lock_init(&lio->lock); dev_dbg(&octeon_dev->pci_dev->dev, "if%d gmx: %d hw_addr: 0x%llx\n", i, @@ -3305,23 +3688,22 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) ether_addr_copy(netdev->dev_addr, mac); - if (setup_io_queues(octeon_dev, netdev)) { + /* By default all interfaces on a single Octeon uses the same + * tx and rx queues + */ + lio->txq = lio->linfo.txpciq[0].s.q_no; + lio->rxq = lio->linfo.rxpciq[0].s.q_no; + if (setup_io_queues(octeon_dev, i)) { dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); goto setup_nic_dev_fail; } ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); - /* By default all interfaces on a single Octeon uses the same - * tx and rx queues - */ - lio->txq = lio->linfo.txpciq[0]; - lio->rxq = lio->linfo.rxpciq[0]; - lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); - if (setup_glist(lio)) { + if (setup_glists(octeon_dev, lio, num_iqueues)) { dev_err(&octeon_dev->pci_dev->dev, "Gather list allocation failed\n"); goto setup_nic_dev_fail; @@ -3329,11 +3711,17 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) /* Register ethtool support */ liquidio_set_ethtool_ops(netdev); + octeon_dev->priv_flags = 0x0; - liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE); + if (netdev->features & NETIF_F_LRO) + liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, + OCTNIC_LROIPV4 | OCTNIC_LROIPV6); + + liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0); if ((debug != -1) && (debug & NETIF_MSG_HW)) - liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE); + liquidio_set_feature(netdev, + OCTNET_CMD_VERBOSE_ENABLE, 0); /* Register the network device with the OS */ if (register_netdev(netdev)) { @@ -3345,16 
+3733,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); netif_carrier_off(netdev); - - if (lio->linfo.link.s.status) { - netif_carrier_on(netdev); - start_txq(netdev); - } else { - netif_carrier_off(netdev); - } + lio->link_changes++; ifstate_set(lio, LIO_IFSTATE_REGISTERED); + /* Sending command to firmware to enable Rx checksum offload + * by default at the time of setup of Liquidio driver for + * this device + */ + liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, + OCTNET_CMD_RXCSUM_ENABLE); + liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, + OCTNET_CMD_TXCSUM_ENABLE); + dev_dbg(&octeon_dev->pci_dev->dev, "NIC ifidx:%d Setup successful\n", i); @@ -3385,7 +3776,7 @@ setup_nic_dev_fail: static int liquidio_init_nic_module(struct octeon_device *oct) { struct oct_intrmod_cfg *intrmod_cfg; - int retval = 0; + int i, retval = 0; int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct)); dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); @@ -3399,6 +3790,9 @@ static int liquidio_init_nic_module(struct octeon_device *oct) memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports); + for (i = 0; i < MAX_OCTEON_LINKS; i++) + oct->props[i].gmxport = -1; + retval = setup_nic_devices(oct); if (retval) { dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); @@ -3409,15 +3803,19 @@ static int liquidio_init_nic_module(struct octeon_device *oct) /* Initialize interrupt moderation params */ intrmod_cfg = &((struct octeon_device *)oct)->intrmod; - intrmod_cfg->intrmod_enable = 1; - intrmod_cfg->intrmod_check_intrvl = LIO_INTRMOD_CHECK_INTERVAL; - intrmod_cfg->intrmod_maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR; - intrmod_cfg->intrmod_minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR; - intrmod_cfg->intrmod_maxcnt_trigger = LIO_INTRMOD_MAXCNT_TRIGGER; - intrmod_cfg->intrmod_maxtmr_trigger = LIO_INTRMOD_MAXTMR_TRIGGER; - intrmod_cfg->intrmod_mintmr_trigger = LIO_INTRMOD_MINTMR_TRIGGER; - intrmod_cfg->intrmod_mincnt_trigger = LIO_INTRMOD_MINCNT_TRIGGER; - + intrmod_cfg->rx_enable = 1; + intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL; + intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR; + intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR; + intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER; + intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER; + intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER; + intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER; + intrmod_cfg->tx_enable = 1; + intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER; + intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER; + intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); + intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); return retval; @@ -3480,6 +3878,7 @@ static void nic_starter(struct work_struct *work) static int octeon_device_init(struct octeon_device *octeon_dev) { int j, ret; + char bootcmd[] = "\n"; struct octeon_device_priv *oct_priv = (struct octeon_device_priv *)octeon_dev->priv; atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE); @@ -3557,6 +3956,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev) /* Release any previously allocated queues */ for (j = 0; j < octeon_dev->num_oqs; j++) octeon_delete_droq(octeon_dev, j); + return 1; } 
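octeon_device_init() advances an atomic status word phase by phase (OCT_DEV_BEGIN_STATE at entry, OCT_DEV_DROQ_INIT_DONE just below), so a failing phase frees only its own partial work and returns 1, leaving the caller to unwind earlier phases by inspecting how far the status got. A minimal sketch of that staged-init idiom; the helper names and OCT_DEV_IQ_INIT_DONE are hypothetical stand-ins, while the atomic status updates and the return-1-on-error convention come from this code:

/* Sketch only: staged init recording progress in oct->status.
 * oct_setup_iqs()/oct_setup_droqs() are hypothetical stand-ins
 * for the real setup phases in octeon_device_init(). */
static int oct_dev_init_sketch(struct octeon_device *oct)
{
	atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);

	if (oct_setup_iqs(oct))
		return 1;	/* nothing completed yet */
	atomic_set(&oct->status, OCT_DEV_IQ_INIT_DONE); /* hypothetical name */

	if (oct_setup_droqs(oct)) {
		/* free this phase's partial allocations, as the DROQ
		 * error path above deletes the queues it created, then
		 * let the caller unwind earlier phases by status */
		return 1;
	}
	atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);

	return 0;	/* later phases continue the same pattern */
}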
atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE); @@ -3579,7 +3979,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev) /* Setup the interrupt handler and record the INT SUM register address */ - octeon_setup_interrupt(octeon_dev); + if (octeon_setup_interrupt(octeon_dev)) + return 1; /* Enable Octeon device interrupts */ octeon_dev->fn_list.enable_interrupt(octeon_dev->chip); @@ -3591,14 +3992,19 @@ static int octeon_device_init(struct octeon_device *octeon_dev) dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n"); - if (ddr_timeout == 0) { - dev_info(&octeon_dev->pci_dev->dev, - "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n"); - } + if (ddr_timeout == 0) + dev_info(&octeon_dev->pci_dev->dev, "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n"); schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS); /* Wait for the octeon to initialize DDR after the soft-reset. */ + while (ddr_timeout == 0) { + set_current_state(TASK_INTERRUPTIBLE); + if (schedule_timeout(HZ / 10)) { + /* user probably pressed Control-C */ + return 1; + } + } ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout); if (ret) { dev_err(&octeon_dev->pci_dev->dev, @@ -3612,6 +4018,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev) return 1; } + /* Divert uboot to take commands from host instead. */ + ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50); + dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n"); ret = octeon_init_consoles(octeon_dev); if (ret) { diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 0ac347ccc..199a8b9c7 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -30,10 +30,10 @@ #include "octeon_config.h" -#define LIQUIDIO_VERSION "1.1.9" -#define LIQUIDIO_MAJOR_VERSION 1 -#define LIQUIDIO_MINOR_VERSION 1 -#define LIQUIDIO_MICRO_VERSION 9 +#define LIQUIDIO_BASE_VERSION "1.4" +#define LIQUIDIO_MICRO_VERSION ".1" +#define LIQUIDIO_PACKAGE "" +#define LIQUIDIO_VERSION "1.4.1" #define CONTROL_IQ 0 /** Tag types used by Octeon cores in its work. 
*/ @@ -174,9 +174,11 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, /*------------------------- End Scatter/Gather ---------------------------*/ #define OCTNET_FRM_PTP_HEADER_SIZE 8 -#define OCTNET_FRM_HEADER_SIZE 30 /* PTP timestamp + VLAN + Ethernet */ -#define OCTNET_MIN_FRM_SIZE (64 + OCTNET_FRM_PTP_HEADER_SIZE) +#define OCTNET_FRM_HEADER_SIZE 22 /* VLAN + Ethernet */ + +#define OCTNET_MIN_FRM_SIZE 64 + #define OCTNET_MAX_FRM_SIZE (16000 + OCTNET_FRM_HEADER_SIZE) #define OCTNET_DEFAULT_FRM_SIZE (1500 + OCTNET_FRM_HEADER_SIZE) @@ -212,6 +214,17 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_VERBOSE_ENABLE 0x14 #define OCTNET_CMD_VERBOSE_DISABLE 0x15 +#define OCTNET_CMD_ENABLE_VLAN_FILTER 0x16 +#define OCTNET_CMD_ADD_VLAN_FILTER 0x17 +#define OCTNET_CMD_DEL_VLAN_FILTER 0x18 +#define OCTNET_CMD_VXLAN_PORT_CONFIG 0x19 +#define OCTNET_CMD_VXLAN_PORT_ADD 0x0 +#define OCTNET_CMD_VXLAN_PORT_DEL 0x1 +#define OCTNET_CMD_RXCSUM_ENABLE 0x0 +#define OCTNET_CMD_RXCSUM_DISABLE 0x1 +#define OCTNET_CMD_TXCSUM_ENABLE 0x0 +#define OCTNET_CMD_TXCSUM_DISABLE 0x1 + /* RX(packets coming from wire) Checksum verification flags */ /* TCP/UDP csum */ #define CNNIC_L4SUM_VERIFIED 0x1 @@ -258,19 +271,19 @@ union octnet_cmd { u64 more:6; /* How many udd words follow the command */ - u64 param1:29; + u64 reserved:29; - u64 param2:16; + u64 param1:16; - u64 param3:8; + u64 param2:8; #else - u64 param3:8; + u64 param2:8; - u64 param2:16; + u64 param1:16; - u64 param1:29; + u64 reserved:29; u64 more:6; @@ -283,8 +296,140 @@ union octnet_cmd { #define OCTNET_CMD_SIZE (sizeof(union octnet_cmd)) +/* Instruction Header(DPI) - for OCTEON-III models */ +struct octeon_instr_ih3 { +#ifdef __BIG_ENDIAN_BITFIELD + + /** Reserved3 */ + u64 reserved3:1; + + /** Gather indicator 1=gather*/ + u64 gather:1; + + /** Data length OR no. of entries in gather list */ + u64 dlengsz:14; + + /** Front Data size */ + u64 fsz:6; + + /** Reserved2 */ + u64 reserved2:4; + + /** PKI port kind - PKIND */ + u64 pkind:6; + + /** Reserved1 */ + u64 reserved1:32; + +#else + /** Reserved1 */ + u64 reserved1:32; + + /** PKI port kind - PKIND */ + u64 pkind:6; + + /** Reserved2 */ + u64 reserved2:4; + + /** Front Data size */ + u64 fsz:6; + + /** Data length OR no. of entries in gather list */ + u64 dlengsz:14; + + /** Gather indicator 1=gather*/ + u64 gather:1; + + /** Reserved3 */ + u64 reserved3:1; + +#endif +}; + +/* Optional PKI Instruction Header(PKI IH) - for OCTEON-III models */ +/** BIG ENDIAN format. 
*/ +struct octeon_instr_pki_ih3 { +#ifdef __BIG_ENDIAN_BITFIELD + + /** Wider bit */ + u64 w:1; + + /** Raw mode indicator 1 = RAW */ + u64 raw:1; + + /** Use Tag */ + u64 utag:1; + + /** Use QPG */ + u64 uqpg:1; + + /** Reserved2 */ + u64 reserved2:1; + + /** Parse Mode */ + u64 pm:3; + + /** Skip Length */ + u64 sl:8; + + /** Use Tag Type */ + u64 utt:1; + + /** Tag type */ + u64 tagtype:2; + + /** Reserved1 */ + u64 reserved1:2; + + /** QPG Value */ + u64 qpg:11; + + /** Tag Value */ + u64 tag:32; + +#else + + /** Tag Value */ + u64 tag:32; + + /** QPG Value */ + u64 qpg:11; + + /** Reserved1 */ + u64 reserved1:2; + + /** Tag type */ + u64 tagtype:2; + + /** Use Tag Type */ + u64 utt:1; + + /** Skip Length */ + u64 sl:8; + + /** Parse Mode */ + u64 pm:3; + + /** Reserved2 */ + u64 reserved2:1; + + /** Use QPG */ + u64 uqpg:1; + + /** Use Tag */ + u64 utag:1; + + /** Raw mode indicator 1 = RAW */ + u64 raw:1; + + /** Wider bit */ + u64 w:1; +#endif + +}; + /** Instruction Header */ -struct octeon_instr_ih { +struct octeon_instr_ih2 { #ifdef __BIG_ENDIAN_BITFIELD /** Raw mode indicator 1 = RAW */ u64 raw:1; @@ -348,15 +493,15 @@ struct octeon_instr_irh { u64 opcode:4; u64 rflag:1; u64 subcode:7; - u64 len:3; - u64 rid:13; - u64 reserved:4; + u64 vlan:12; + u64 priority:3; + u64 reserved:5; u64 ossp:32; /* opcode/subcode specific parameters */ #else u64 ossp:32; /* opcode/subcode specific parameters */ - u64 reserved:4; - u64 rid:13; - u64 len:3; + u64 reserved:5; + u64 priority:3; + u64 vlan:12; u64 subcode:7; u64 rflag:1; u64 opcode:4; @@ -383,75 +528,77 @@ union octeon_rh { struct { u64 opcode:4; u64 subcode:8; - u64 len:3; /** additional 64-bit words */ - u64 rid:13; /** request id in response to pkt sent by host */ - u64 reserved:4; - u64 ossp:32; /** opcode/subcode specific parameters */ + u64 len:3; /** additional 64-bit words */ + u64 reserved:17; + u64 ossp:32; /** opcode/subcode specific parameters */ } r; struct { u64 opcode:4; u64 subcode:8; - u64 len:3; /** additional 64-bit words */ - u64 rid:13; /** request id in response to pkt sent by host */ - u64 extra:24; - u64 link:8; + u64 len:3; /** additional 64-bit words */ + u64 extra:28; + u64 vlan:12; + u64 priority:3; u64 csum_verified:3; /** checksum verified. */ u64 has_hwtstamp:1; /** Has hardware timestamp. 1 = yes. */ + u64 encap_on:1; + u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */ } r_dh; struct { u64 opcode:4; u64 subcode:8; - u64 len:3; /** additional 64-bit words */ - u64 rid:13; /** request id in response to pkt sent by host */ + u64 len:3; /** additional 64-bit words */ + u64 reserved:11; u64 num_gmx_ports:8; - u64 max_nic_ports:8; + u64 max_nic_ports:10; u64 app_cap_flags:4; - u64 app_mode:16; + u64 app_mode:8; + u64 pkind:8; } r_core_drv_init; struct { u64 opcode:4; u64 subcode:8; u64 len:3; /** additional 64-bit words */ - u64 rid:13; - u64 reserved:4; + u64 reserved:8; u64 extra:25; - u64 ifidx:7; + u64 gmxport:16; } r_nic_info; #else u64 u64; struct { u64 ossp:32; /** opcode/subcode specific parameters */ - u64 reserved:4; - u64 rid:13; /** req id in response to pkt sent by host */ + u64 reserved:17; u64 len:3; /** additional 64-bit words */ u64 subcode:8; u64 opcode:4; } r; struct { + u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */ + u64 encap_on:1; u64 has_hwtstamp:1; /** 1 = has hwtstamp */ u64 csum_verified:3; /** checksum verified. 
*/ - u64 link:8; - u64 extra:24; - u64 rid:13; /** req id in response to pkt sent by host */ + u64 priority:3; + u64 vlan:12; + u64 extra:28; u64 len:3; /** additional 64-bit words */ u64 subcode:8; u64 opcode:4; } r_dh; struct { - u64 app_mode:16; + u64 pkind:8; + u64 app_mode:8; u64 app_cap_flags:4; - u64 max_nic_ports:8; + u64 max_nic_ports:10; u64 num_gmx_ports:8; - u64 rid:13; + u64 reserved:11; u64 len:3; /** additional 64-bit words */ u64 subcode:8; u64 opcode:4; } r_core_drv_init; struct { - u64 ifidx:7; + u64 gmxport:16; u64 extra:25; - u64 reserved:4; - u64 rid:13; + u64 reserved:8; u64 len:3; /** additional 64-bit words */ u64 subcode:8; u64 opcode:4; @@ -461,30 +608,25 @@ union octeon_rh { #define OCT_RH_SIZE (sizeof(union octeon_rh)) -#define OCT_PKT_PARAM_IPV4OPTS 1 -#define OCT_PKT_PARAM_IPV6EXTHDR 2 - union octnic_packet_params { u32 u32; struct { #ifdef __BIG_ENDIAN_BITFIELD - u32 reserved:6; + u32 reserved:24; + u32 ip_csum:1; /* Perform IP header checksum(s) */ + /* Perform Outer transport header checksum */ + u32 transport_csum:1; + /* Find tunnel, and perform transport csum. */ u32 tnl_csum:1; - u32 ip_csum:1; - u32 ipv4opts_ipv6exthdr:2; - u32 ipsec_ops:4; - u32 tsflag:1; - u32 csoffset:9; - u32 ifidx:8; + u32 tsflag:1; /* Timestamp this packet */ + u32 ipsec_ops:4; /* IPsec operation */ #else - u32 ifidx:8; - u32 csoffset:9; - u32 tsflag:1; u32 ipsec_ops:4; - u32 ipv4opts_ipv6exthdr:2; - u32 ip_csum:1; + u32 tsflag:1; u32 tnl_csum:1; - u32 reserved:6; + u32 transport_csum:1; + u32 ip_csum:1; + u32 reserved:24; #endif } s; }; @@ -496,56 +638,96 @@ union oct_link_status { struct { #ifdef __BIG_ENDIAN_BITFIELD u64 duplex:8; - u64 status:8; u64 mtu:16; u64 speed:16; + u64 link_up:1; u64 autoneg:1; - u64 interface:4; + u64 if_mode:5; u64 pause:1; - u64 reserved:10; + u64 flashing:1; + u64 reserved:15; #else - u64 reserved:10; + u64 reserved:15; + u64 flashing:1; u64 pause:1; - u64 interface:4; + u64 if_mode:5; u64 autoneg:1; + u64 link_up:1; u64 speed:16; u64 mtu:16; - u64 status:8; u64 duplex:8; #endif } s; }; +/** The txpciq info passed to host from the firmware */ + +union oct_txpciq { + u64 u64; + + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u64 q_no:8; + u64 port:8; + u64 pkind:6; + u64 use_qpg:1; + u64 qpg:11; + u64 reserved:30; +#else + u64 reserved:30; + u64 qpg:11; + u64 use_qpg:1; + u64 pkind:6; + u64 port:8; + u64 q_no:8; +#endif + } s; +}; + +/** The rxpciq info passed to host from the firmware */ + +union oct_rxpciq { + u64 u64; + + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u64 q_no:8; + u64 reserved:56; +#else + u64 reserved:56; + u64 q_no:8; +#endif + } s; +}; + /** Information for a OCTEON ethernet interface shared between core & host. 
*/ struct oct_link_info { union oct_link_status link; u64 hw_addr; #ifdef __BIG_ENDIAN_BITFIELD - u16 gmxport; - u8 rsvd[3]; - u8 num_txpciq; - u8 num_rxpciq; - u8 ifidx; + u64 gmxport:16; + u64 rsvd:32; + u64 num_txpciq:8; + u64 num_rxpciq:8; #else - u8 ifidx; - u8 num_rxpciq; - u8 num_txpciq; - u8 rsvd[3]; - u16 gmxport; + u64 num_rxpciq:8; + u64 num_txpciq:8; + u64 rsvd:32; + u64 gmxport:16; #endif - u8 txpciq[MAX_IOQS_PER_NICIF]; - u8 rxpciq[MAX_IOQS_PER_NICIF]; + union oct_txpciq txpciq[MAX_IOQS_PER_NICIF]; + union oct_rxpciq rxpciq[MAX_IOQS_PER_NICIF]; }; #define OCT_LINK_INFO_SIZE (sizeof(struct oct_link_info)) struct liquidio_if_cfg_info { - u64 ifidx; u64 iqmask; /** mask for IQs enabled for the port */ u64 oqmask; /** mask for OQs enabled for the port */ struct oct_link_info linfo; /** initial link information */ + char liquidio_firmware_version[32]; }; /** Stats for each NIC port in RX direction. */ @@ -570,10 +752,18 @@ struct nic_rx_stats { u64 fw_err_pko; u64 fw_err_link; u64 fw_err_drop; + u64 fw_rx_vxlan; + u64 fw_rx_vxlan_err; + + /* LRO */ u64 fw_lro_pkts; /* Number of packets that are LROed */ u64 fw_lro_octs; /* Number of octets that are LROed */ u64 fw_total_lro; /* Number of LRO packets formed */ u64 fw_lro_aborts; /* Number of times lRO of packet aborted */ + u64 fw_lro_aborts_port; + u64 fw_lro_aborts_seq; + u64 fw_lro_aborts_tsval; + u64 fw_lro_aborts_timer; /* intrmod: packet forward rate */ u64 fwd_rate; }; @@ -597,9 +787,14 @@ struct nic_tx_stats { /* firmware stats */ u64 fw_total_sent; u64 fw_total_fwd; + u64 fw_total_fwd_bytes; u64 fw_err_pko; u64 fw_err_link; u64 fw_err_drop; + u64 fw_err_tso; + u64 fw_tso; /* number of tso requests */ + u64 fw_tso_fwd; /* number of packets segmented in tso */ + u64 fw_tx_vxlan; }; struct oct_link_stats { @@ -630,23 +825,44 @@ struct oct_mdio_cmd { #define OCT_LINK_STATS_SIZE (sizeof(struct oct_link_stats)) +/* intrmod: max. packet rate threshold */ +#define LIO_INTRMOD_MAXPKT_RATETHR 196608 +/* intrmod: min. packet rate threshold */ +#define LIO_INTRMOD_MINPKT_RATETHR 9216 +/* intrmod: max. packets to trigger interrupt */ +#define LIO_INTRMOD_RXMAXCNT_TRIGGER 384 +/* intrmod: min. packets to trigger interrupt */ +#define LIO_INTRMOD_RXMINCNT_TRIGGER 1 +/* intrmod: max. time to trigger interrupt */ +#define LIO_INTRMOD_RXMAXTMR_TRIGGER 128 +/* 66xx:intrmod: min. time to trigger interrupt + * (value of 1 is optimum for TCP_RR) + */ +#define LIO_INTRMOD_RXMINTMR_TRIGGER 1 + +/* intrmod: max. packets to trigger interrupt */ +#define LIO_INTRMOD_TXMAXCNT_TRIGGER 64 +/* intrmod: min. 
packets to trigger interrupt */ +#define LIO_INTRMOD_TXMINCNT_TRIGGER 0 + +/* intrmod: poll interval in seconds */ #define LIO_INTRMOD_CHECK_INTERVAL 1 -#define LIO_INTRMOD_MAXPKT_RATETHR 196608 /* max pkt rate threshold */ -#define LIO_INTRMOD_MINPKT_RATETHR 9216 /* min pkt rate threshold */ -#define LIO_INTRMOD_MAXCNT_TRIGGER 384 /* max pkts to trigger interrupt */ -#define LIO_INTRMOD_MINCNT_TRIGGER 1 /* min pkts to trigger interrupt */ -#define LIO_INTRMOD_MAXTMR_TRIGGER 128 /* max time to trigger interrupt */ -#define LIO_INTRMOD_MINTMR_TRIGGER 32 /* min time to trigger interrupt */ struct oct_intrmod_cfg { - u64 intrmod_enable; - u64 intrmod_check_intrvl; - u64 intrmod_maxpkt_ratethr; - u64 intrmod_minpkt_ratethr; - u64 intrmod_maxcnt_trigger; - u64 intrmod_maxtmr_trigger; - u64 intrmod_mincnt_trigger; - u64 intrmod_mintmr_trigger; + u64 rx_enable; + u64 tx_enable; + u64 check_intrvl; + u64 maxpkt_ratethr; + u64 minpkt_ratethr; + u64 rx_maxcnt_trigger; + u64 rx_mincnt_trigger; + u64 rx_maxtmr_trigger; + u64 rx_mintmr_trigger; + u64 tx_mincnt_trigger; + u64 tx_maxcnt_trigger; + u64 rx_frames; + u64 tx_frames; + u64 rx_usecs; }; #define BASE_QUEUE_NOT_REQUESTED 65535 @@ -659,9 +875,9 @@ union oct_nic_if_cfg { u64 num_iqueues:16; u64 num_oqueues:16; u64 gmx_port_id:8; - u64 reserved:8; + u64 vf_id:8; #else - u64 reserved:8; + u64 vf_id:8; u64 gmx_port_id:8; u64 num_oqueues:16; u64 num_iqueues:16; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h index 62a8dd5cd..b3396e3a8 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h @@ -37,7 +37,7 @@ /* Maximum octeon devices defined as MAX_OCTEON_NICIF to support * multiple(<= MAX_OCTEON_NICIF) Miniports */ -#define MAX_OCTEON_NICIF 32 +#define MAX_OCTEON_NICIF 128 #define MAX_OCTEON_DEVICES MAX_OCTEON_NICIF #define MAX_OCTEON_LINKS MAX_OCTEON_NICIF #define MAX_OCTEON_MULTICAST_ADDR 32 @@ -135,7 +135,7 @@ #define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp) /* Max IOQs per OCTEON Link */ -#define MAX_IOQS_PER_NICIF 32 +#define MAX_IOQS_PER_NICIF 64 enum lio_card_type { LIO_210SV = 0, /* Two port, 66xx */ @@ -226,7 +226,7 @@ struct octeon_oq_config { */ u64 refill_threshold:16; - /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */ + /** If set, the Output queue uses info-pointer mode. (Default: 1) */ u64 info_ptr:32; /* Max number of OQs available */ @@ -236,7 +236,7 @@ struct octeon_oq_config { /* Max number of OQs available */ u64 max_oqs:8; - /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */ + /** If set, the Output queue uses info-pointer mode. 
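The single set of interrupt-moderation trigger macros is split here into RX- and TX-specific values, and oct_intrmod_cfg grows matching per-direction fields plus rx_frames/tx_frames/rx_usecs, which line up with the ethtool coalescing parameters of the same names. A hypothetical helper seeding the new structure from these defaults:

static void demo_intrmod_defaults(struct oct_intrmod_cfg *cfg)
{
	cfg->rx_enable = 1;
	cfg->tx_enable = 1;
	cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
	cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
	cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
	cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
	cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
	cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
	cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
	cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
	cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
}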
(Default: 1) */ u64 info_ptr:32; /** The number of buffers that were consumed during packet processing by @@ -416,9 +416,11 @@ struct octeon_config { #define DISPATCH_LIST_SIZE BIT(OPCODE_MASK_BITS) /* Maximum number of Octeon Instruction (command) queues */ -#define MAX_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES +#define MAX_OCTEON_INSTR_QUEUES(oct) CN6XXX_MAX_INPUT_QUEUES +/* Maximum number of Octeon Output queues */ +#define MAX_OCTEON_OUTPUT_QUEUES(oct) CN6XXX_MAX_OUTPUT_QUEUES -/* Maximum number of Octeon Instruction (command) queues */ -#define MAX_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES +#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES +#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES #endif /* __OCTEON_CONFIG_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index 466147e40..bbb50ea66 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -23,27 +23,14 @@ /** * @file octeon_console.c */ -#include <linux/version.h> -#include <linux/types.h> -#include <linux/list.h> -#include <linux/interrupt.h> #include <linux/pci.h> -#include <linux/kthread.h> #include <linux/netdevice.h> -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" #include "response_manager.h" #include "octeon_device.h" -#include "octeon_nic.h" #include "octeon_main.h" -#include "octeon_network.h" -#include "cn66xx_regs.h" -#include "cn66xx_device.h" -#include "cn68xx_regs.h" -#include "cn68xx_device.h" -#include "liquidio_image.h" #include "octeon_mem_ops.h" static void octeon_remote_lock(void); @@ -51,6 +38,8 @@ static void octeon_remote_unlock(void); static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct, const char *name, u32 flags); +static int octeon_console_read(struct octeon_device *oct, u32 console_num, + char *buffer, u32 buf_size); #define MIN(a, b) min((a), (b)) #define CAST_ULL(v) ((u64)(v)) @@ -170,8 +159,8 @@ struct octeon_pci_console_desc { offsetof(struct cvmx_bootmem_desc, field), \ SIZEOF_FIELD(struct cvmx_bootmem_desc, field)) -#define __cvmx_bootmem_lock(flags) -#define __cvmx_bootmem_unlock(flags) +#define __cvmx_bootmem_lock(flags) (flags = flags) +#define __cvmx_bootmem_unlock(flags) (flags = flags) /** * This macro returns a member of the @@ -234,7 +223,7 @@ static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct, u32 len) { addr += offsetof(struct cvmx_bootmem_named_block_desc, name); - octeon_pci_read_core_mem(oct, addr, str, len); + octeon_pci_read_core_mem(oct, addr, (u8 *)str, len); str[len] = 0; } @@ -323,6 +312,9 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct, if (name && named_size) { char *name_tmp = kmalloc(name_length + 1, GFP_KERNEL); + if (!name_tmp) + break; + CVMX_BOOTMEM_NAMED_GET_NAME(oct, named_addr, name_tmp, name_length); @@ -383,7 +375,7 @@ static void octeon_remote_unlock(void) int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str, u32 wait_hundredths) { - u32 len = strlen(cmd_str); + u32 len = (u32)strlen(cmd_str); dev_dbg(&oct->pci_dev->dev, "sending \"%s\" to bootloader\n", cmd_str); @@ -440,8 +432,7 @@ int octeon_wait_for_bootloader(struct octeon_device *oct, } static void octeon_console_handle_result(struct octeon_device *oct, - size_t console_num, - char *buffer, s32 bytes_read) + size_t console_num) { struct octeon_console *console; @@ -492,7 +483,7 @@ static 
void check_console(struct work_struct *work) struct octeon_console *console; struct cavium_wk *wk = (struct cavium_wk *)work; struct octeon_device *oct = (struct octeon_device *)wk->ctxptr; - size_t console_num = wk->ctxul; + u32 console_num = (u32)wk->ctxul; u32 delay; console = &oct->console[console_num]; @@ -505,20 +496,17 @@ static void check_console(struct work_struct *work) */ bytes_read = octeon_console_read(oct, console_num, console_buffer, - sizeof(console_buffer) - 1, 0); + sizeof(console_buffer) - 1); if (bytes_read > 0) { total_read += bytes_read; - if (console->waiting) { - octeon_console_handle_result(oct, console_num, - console_buffer, - bytes_read); - } + if (console->waiting) + octeon_console_handle_result(oct, console_num); if (octeon_console_debug_enabled(console_num)) { output_console_line(oct, console, console_num, console_buffer, bytes_read); } } else if (bytes_read < 0) { - dev_err(&oct->pci_dev->dev, "Error reading console %lu, ret=%d\n", + dev_err(&oct->pci_dev->dev, "Error reading console %u, ret=%d\n", console_num, bytes_read); } @@ -530,7 +518,7 @@ static void check_console(struct work_struct *work) */ if (octeon_console_debug_enabled(console_num) && (total_read == 0) && (console->leftover[0])) { - dev_info(&oct->pci_dev->dev, "%lu: %s\n", + dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, console->leftover); console->leftover[0] = '\0'; } @@ -675,8 +663,8 @@ static inline int octeon_console_avail_bytes(u32 buffer_size, octeon_console_free_bytes(buffer_size, wr_idx, rd_idx); } -int octeon_console_read(struct octeon_device *oct, u32 console_num, - char *buffer, u32 buf_size, u32 flags) +static int octeon_console_read(struct octeon_device *oct, u32 console_num, + char *buffer, u32 buf_size) { int bytes_to_read; u32 rd_idx, wr_idx; @@ -712,7 +700,7 @@ int octeon_console_read(struct octeon_device *oct, u32 console_num, bytes_to_read = console->buffer_size - rd_idx; octeon_pci_read_core_mem(oct, console->output_base_addr + rd_idx, - buffer, bytes_to_read); + (u8 *)buffer, bytes_to_read); octeon_write_device_mem32(oct, console->addr + offsetof(struct octeon_pci_console, output_read_index), diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index 8e23e3fad..0eb504a43 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -19,28 +19,19 @@ * This file may also be available under a different license from Cavium. * Contact Cavium, Inc. 
for more information **********************************************************************/ -#include <linux/version.h> -#include <linux/types.h> -#include <linux/list.h> -#include <linux/interrupt.h> #include <linux/pci.h> #include <linux/crc32.h> -#include <linux/kthread.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" #include "response_manager.h" #include "octeon_device.h" -#include "octeon_nic.h" #include "octeon_main.h" #include "octeon_network.h" #include "cn66xx_regs.h" #include "cn66xx_device.h" -#include "cn68xx_regs.h" -#include "cn68xx_device.h" #include "liquidio_image.h" #include "octeon_mem_ops.h" @@ -449,10 +440,10 @@ static struct octeon_config_ptr { }; static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = { - "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE", + "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE", "IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE", "DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE", - "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET", + "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET", "INVALID" }; @@ -550,17 +541,19 @@ static char *get_oct_app_string(u32 app_mode) return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START]; } +u8 fbuf[4 * 1024 * 1024]; + int octeon_download_firmware(struct octeon_device *oct, const u8 *data, size_t size) { int ret = 0; - u8 *p; - u8 *buffer; + u8 *p = fbuf; u32 crc32_result; u64 load_addr; u32 image_len; struct octeon_firmware_file_header *h; - u32 i; + u32 i, rem, base_len = strlen(LIQUIDIO_BASE_VERSION); + char *base; if (size < sizeof(struct octeon_firmware_file_header)) { dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n", @@ -576,19 +569,26 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data, return -EINVAL; } - crc32_result = - crc32(~0, data, - sizeof(struct octeon_firmware_file_header) - - sizeof(u32)) ^ ~0U; + crc32_result = crc32((unsigned int)~0, data, + sizeof(struct octeon_firmware_file_header) - + sizeof(u32)) ^ ~0U; if (crc32_result != be32_to_cpu(h->crc32)) { dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n", crc32_result, be32_to_cpu(h->crc32)); return -EINVAL; } - if (memcmp(LIQUIDIO_VERSION, h->version, strlen(LIQUIDIO_VERSION))) { - dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s, got %s.\n", - LIQUIDIO_VERSION, h->version); + if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) { + dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n", + LIQUIDIO_PACKAGE, h->version); + return -EINVAL; + } + + base = h->version + strlen(LIQUIDIO_PACKAGE); + ret = memcmp(LIQUIDIO_BASE_VERSION, base, base_len); + if (ret) { + dev_err(&oct->pci_dev->dev, "Unmatched firmware version. 
Expected %s.x, got %s.\n", + LIQUIDIO_BASE_VERSION, base); return -EINVAL; } @@ -602,58 +602,58 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data, snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s", h->version); - buffer = kmemdup(data, size, GFP_KERNEL); - if (!buffer) - return -ENOMEM; - - p = buffer + sizeof(struct octeon_firmware_file_header); + data += sizeof(struct octeon_firmware_file_header); + dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__, + be32_to_cpu(h->num_images)); /* load all images */ for (i = 0; i < be32_to_cpu(h->num_images); i++) { load_addr = be64_to_cpu(h->desc[i].addr); image_len = be32_to_cpu(h->desc[i].len); - /* validate the image */ - crc32_result = crc32(~0, p, image_len) ^ ~0U; - if (crc32_result != be32_to_cpu(h->desc[i].crc32)) { - dev_err(&oct->pci_dev->dev, - "Firmware CRC mismatch in image %d (0x%08x != 0x%08x).\n", - i, crc32_result, - be32_to_cpu(h->desc[i].crc32)); - ret = -EINVAL; - goto done_downloading; - } + dev_info(&oct->pci_dev->dev, "Loading firmware %d at %llx\n", + image_len, load_addr); - /* download the image */ - octeon_pci_write_core_mem(oct, load_addr, p, image_len); + /* Write in 4MB chunks*/ + rem = image_len; - p += image_len; - dev_dbg(&oct->pci_dev->dev, - "Downloaded image %d (%d bytes) to address 0x%016llx\n", - i, image_len, load_addr); + while (rem) { + if (rem < (4 * 1024 * 1024)) + size = rem; + else + size = 4 * 1024 * 1024; + + memcpy(p, data, size); + + /* download the image */ + octeon_pci_write_core_mem(oct, load_addr, p, (u32)size); + + data += size; + rem -= (u32)size; + load_addr += size; + } } + dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n", + h->bootcmd); /* Invoke the bootcmd */ ret = octeon_console_send_cmd(oct, h->bootcmd, 50); -done_downloading: - kfree(buffer); - - return ret; + return 0; } void octeon_free_device_mem(struct octeon_device *oct) { - u32 i; + int i; - for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { - /* could check mask as well */ - vfree(oct->droq[i]); + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (oct->io_qmask.oq & (1ULL << i)) + vfree(oct->droq[i]); } - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { - /* could check mask as well */ - vfree(oct->instr_queue[i]); + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (oct->io_qmask.iq & (1ULL << i)) + vfree(oct->instr_queue[i]); } i = oct->octeon_id; @@ -735,55 +735,61 @@ struct octeon_device *octeon_allocate_device(u32 pci_id, octeon_device[oct_idx] = oct; oct->octeon_id = oct_idx; - snprintf((oct->device_name), sizeof(oct->device_name), + snprintf(oct->device_name, sizeof(oct->device_name), "LiquidIO%d", (oct->octeon_id)); return oct; } +/* this function is only for setting up the first queue */ int octeon_setup_instr_queues(struct octeon_device *oct) { - u32 i, num_iqs = 0; u32 num_descs = 0; + u32 iq_no = 0; + union oct_txpciq txpciq; + int numa_node = cpu_to_node(iq_no % num_online_cpus()); /* this causes queue 0 to be default queue */ - if (OCTEON_CN6XXX(oct)) { - num_iqs = 1; + if (OCTEON_CN6XXX(oct)) num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf)); - } oct->num_iqs = 0; - for (i = 0; i < num_iqs; i++) { - oct->instr_queue[i] = + oct->instr_queue[0] = vmalloc_node(sizeof(*oct->instr_queue[0]), + numa_node); + if (!oct->instr_queue[0]) + oct->instr_queue[0] = vmalloc(sizeof(struct octeon_instr_queue)); - if (!oct->instr_queue[i]) - return 1; - - memset(oct->instr_queue[i], 0, - sizeof(struct octeon_instr_queue)); - - 
oct->instr_queue[i]->app_ctx = (void *)(size_t)i; - if (octeon_init_instr_queue(oct, i, num_descs)) - return 1; - - oct->num_iqs++; + if (!oct->instr_queue[0]) + return 1; + memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue)); + oct->instr_queue[0]->q_index = 0; + oct->instr_queue[0]->app_ctx = (void *)(size_t)0; + oct->instr_queue[0]->ifidx = 0; + txpciq.u64 = 0; + txpciq.s.q_no = iq_no; + txpciq.s.use_qpg = 0; + txpciq.s.qpg = 0; + if (octeon_init_instr_queue(oct, txpciq, num_descs)) { + /* prevent memory leak */ + vfree(oct->instr_queue[0]); + return 1; } + oct->num_iqs++; return 0; } int octeon_setup_output_queues(struct octeon_device *oct) { - u32 i, num_oqs = 0; u32 num_descs = 0; u32 desc_size = 0; + u32 oq_no = 0; + int numa_node = cpu_to_node(oq_no % num_online_cpus()); /* this causes queue 0 to be default queue */ if (OCTEON_CN6XXX(oct)) { - /* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */ - num_oqs = 1; num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf)); desc_size = @@ -791,19 +797,15 @@ int octeon_setup_output_queues(struct octeon_device *oct) } oct->num_oqs = 0; + oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node); + if (!oct->droq[0]) + oct->droq[0] = vmalloc(sizeof(*oct->droq[0])); + if (!oct->droq[0]) + return 1; - for (i = 0; i < num_oqs; i++) { - oct->droq[i] = vmalloc(sizeof(*oct->droq[i])); - if (!oct->droq[i]) - return 1; - - memset(oct->droq[i], 0, sizeof(struct octeon_droq)); - - if (octeon_init_droq(oct, i, num_descs, desc_size, NULL)) - return 1; - - oct->num_oqs++; - } + if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL)) + return 1; + oct->num_oqs++; return 0; } @@ -1005,79 +1007,6 @@ octeon_register_dispatch_fn(struct octeon_device *oct, return 0; } -/* octeon_unregister_dispatch_fn - * Parameters: - * oct - octeon device - * opcode - driver should unregister the function for this opcode - * subcode - driver should unregister the function for this subcode - * Description: - * Unregister the function set for this opcode+subcode. - * Returns: - * Success: 0 - * Failure: 1 - * Locks: - * No locks are held. - */ -int -octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode, - u16 subcode) -{ - int retval = 0; - u32 idx; - struct list_head *dispatch, *dfree = NULL, *tmp2; - u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode); - - idx = combined_opcode & OCTEON_OPCODE_MASK; - - spin_lock_bh(&oct->dispatch.lock); - - if (oct->dispatch.count == 0) { - spin_unlock_bh(&oct->dispatch.lock); - dev_err(&oct->pci_dev->dev, - "No dispatch functions registered for this device\n"); - return 1; - } - - if (oct->dispatch.dlist[idx].opcode == combined_opcode) { - dispatch = &oct->dispatch.dlist[idx].list; - if (dispatch->next != dispatch) { - dispatch = dispatch->next; - oct->dispatch.dlist[idx].opcode = - ((struct octeon_dispatch *)dispatch)->opcode; - oct->dispatch.dlist[idx].dispatch_fn = - ((struct octeon_dispatch *) - dispatch)->dispatch_fn; - oct->dispatch.dlist[idx].arg = - ((struct octeon_dispatch *)dispatch)->arg; - list_del(dispatch); - dfree = dispatch; - } else { - oct->dispatch.dlist[idx].opcode = 0; - oct->dispatch.dlist[idx].dispatch_fn = NULL; - oct->dispatch.dlist[idx].arg = NULL; - } - } else { - retval = 1; - list_for_each_safe(dispatch, tmp2, - &(oct->dispatch.dlist[idx]. 
- list)) { - if (((struct octeon_dispatch *)dispatch)->opcode == - combined_opcode) { - list_del(dispatch); - dfree = dispatch; - retval = 0; - } - } - } - - if (!retval) - oct->dispatch.count--; - - spin_unlock_bh(&oct->dispatch.lock); - vfree(dfree); - return retval; -} - int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf) { u32 i; @@ -1152,8 +1081,8 @@ core_drv_init_err: int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no) { - if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) && - (oct->io_qmask.iq & (1UL << q_no))) + if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) && + (oct->io_qmask.iq & (1ULL << q_no))) return oct->instr_queue[q_no]->max_count; return -1; @@ -1161,8 +1090,8 @@ int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no) int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no) { - if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) && - (oct->io_qmask.oq & (1UL << q_no))) + if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) && + (oct->io_qmask.oq & (1ULL << q_no))) return oct->droq[q_no]->max_count; return -1; } @@ -1253,10 +1182,10 @@ void lio_pci_writeq(struct octeon_device *oct, int octeon_mem_access_ok(struct octeon_device *oct) { u64 access_okay = 0; + u64 lmc0_reset_ctl; /* Check to make sure a DDR interface is enabled */ - u64 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL); - + lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL); access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK); return access_okay ? 0 : 1; @@ -1270,9 +1199,6 @@ int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout) if (!timeout) return ret; - while (*timeout == 0) - schedule_timeout_uninterruptible(HZ / 10); - for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout)); ms += HZ / 10) { ret = octeon_mem_access_ok(oct); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 36e1f85df..01edfb404 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -152,9 +152,9 @@ struct octeon_mmio { #define MAX_OCTEON_MAPS 32 struct octeon_io_enable { - u32 iq; - u32 oq; - u32 iq64B; + u64 iq; + u64 oq; + u64 iq64B; }; struct octeon_reg_list { @@ -204,8 +204,7 @@ struct octeon_fn_list { void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int); void (*bar1_idx_write)(struct octeon_device *, u32, u32); u32 (*bar1_idx_read)(struct octeon_device *, u32); - u32 (*update_iq_read_idx)(struct octeon_device *, - struct octeon_instr_queue *); + u32 (*update_iq_read_idx)(struct octeon_instr_queue *); void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32); void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32); @@ -222,7 +221,7 @@ struct octeon_fn_list { /* Structure for named memory blocks * Number of descriptors - * available can be changed without affecting compatiblity, + * available can be changed without affecting compatibility, * but name length changes require a bump in the bootmem * descriptor version * Note: This structure must be naturally 64 bit aligned, as a single @@ -255,7 +254,7 @@ struct oct_fw_info { struct cavium_wk { struct delayed_work work; void *ctxptr; - size_t ctxul; + u64 ctxul; }; struct cavium_wq { @@ -267,6 +266,8 @@ struct octdev_props { /* Each interface in the Octeon device has a network * device pointer (used for OS specific calls). 
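The qsize helpers above switch from (1UL << q_no) to (1ULL << q_no), and octeon_io_enable is widened from u32 to u64 masks. Both changes go together: once a device can expose more than 32 queues, shifting a 32-bit unsigned long by q_no >= 32 is undefined behaviour on 32-bit kernels. Standalone illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t qmask = 0;
	unsigned int q_no = 40;		/* legal once queue counts exceed 32 */

	qmask |= (1ULL << q_no);	/* well-defined on any arch */
	/* qmask |= (1UL << q_no);	 undefined where long is 32 bits */

	printf("queue %u enabled: %d\n", q_no,
	       (qmask & (1ULL << q_no)) != 0);
	return 0;
}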
*/ + int napi_enabled; + int gmxport; struct net_device *netdev; };
@@ -324,7 +325,8 @@ struct octeon_device { struct octeon_sc_buffer_pool sc_buf_pool; /** The input instruction queues */ - struct octeon_instr_queue *instr_queue[MAX_OCTEON_INSTR_QUEUES]; + struct octeon_instr_queue *instr_queue + [MAX_POSSIBLE_OCTEON_INSTR_QUEUES]; /** The doubly-linked list of instruction response */ struct octeon_response_list response_list[MAX_RESPONSE_LISTS];
@@ -332,7 +334,7 @@ struct octeon_device { u32 num_oqs; /** The DROQ output queues */ - struct octeon_droq *droq[MAX_OCTEON_OUTPUT_QUEUES]; + struct octeon_droq *droq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES]; struct octeon_io_enable io_qmask;
@@ -381,15 +383,29 @@ struct octeon_device { struct cavium_wq dma_comp_wq; - struct cavium_wq check_db_wq[MAX_OCTEON_INSTR_QUEUES]; + /** Lock for dma response list */ + spinlock_t cmd_resp_wqlock; + u32 cmd_resp_state; + + struct cavium_wq check_db_wq[MAX_POSSIBLE_OCTEON_INSTR_QUEUES]; struct cavium_wk nic_poll_work; struct cavium_wk console_poll_work[MAX_OCTEON_MAPS]; void *priv; + + int rx_pause; + int tx_pause; + + struct oct_link_stats link_stats; /*statistics from firmware*/ + + /* private flags to control driver-specific features through ethtool */ + u32 priv_flags; }; +#define OCT_DRV_ONLINE 1 +#define OCT_DRV_OFFLINE 2 #define OCTEON_CN6XXX(oct) ((oct->chip_id == OCTEON_CN66XX) || \ (oct->chip_id == OCTEON_CN68XX)) #define CHIP_FIELD(oct, TYPE, field) \
@@ -569,8 +585,7 @@ int octeon_add_console(struct octeon_device *oct, u32 console_num); int octeon_console_write(struct octeon_device *oct, u32 console_num, char *buffer, u32 write_request_size, u32 flags); int octeon_console_write_avail(struct octeon_device *oct, u32 console_num); -int octeon_console_read(struct octeon_device *oct, u32 console_num, - char *buffer, u32 buf_size, u32 flags); + int octeon_console_read_avail(struct octeon_device *oct, u32 console_num); /** Removes all attached consoles. */
@@ -646,4 +661,17 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type); */ struct octeon_config *octeon_get_conf(struct octeon_device *oct); +/* LiquidIO driver private flags */ +enum { + OCT_PRIV_FLAG_TX_BYTES = 0, /* Tx interrupts by pending byte count */ +}; + +static inline void lio_set_priv_flag(struct octeon_device *octdev, u32 flag, + u32 val) +{ + if (val) + octdev->priv_flags |= (0x1 << flag); + else + octdev->priv_flags &= ~(0x1 << flag); +} #endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index 174072b37..e0afe4c1f 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -19,30 +19,18 @@ * This file may also be available under a different license from Cavium. * Contact Cavium, Inc.
for more information **********************************************************************/ -#include <linux/version.h> -#include <linux/types.h> -#include <linux/list.h> #include <linux/pci.h> -#include <linux/kthread.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" #include "response_manager.h" #include "octeon_device.h" -#include "octeon_nic.h" #include "octeon_main.h" #include "octeon_network.h" #include "cn66xx_regs.h" #include "cn66xx_device.h" -#include "cn68xx_regs.h" -#include "cn68xx_device.h" -#include "liquidio_image.h" -#include "octeon_mem_ops.h" - -/* #define CAVIUM_ONLY_PERF_MODE */ #define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2)) #define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2)) @@ -104,8 +92,12 @@ static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev, return fn_arg; } -u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct, - struct octeon_droq *droq) +/** Check for packets on Droq. This function should be called with + * lock held. + * @param droq - Droq on which count is checked. + * @return Returns packet count. + */ +u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq) { u32 pkt_count = 0; @@ -151,22 +143,26 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct, struct octeon_droq *droq) { u32 i; + struct octeon_skb_page_info *pg_info; for (i = 0; i < droq->max_count; i++) { - if (droq->recv_buf_list[i].buffer) { - if (droq->desc_ring) { - lio_unmap_ring_info(oct->pci_dev, - (u64)droq-> - desc_ring[i].info_ptr, - OCT_DROQ_INFO_SIZE); - lio_unmap_ring(oct->pci_dev, - (u64)droq->desc_ring[i]. - buffer_ptr, - droq->buffer_size); - } - recv_buffer_free(droq->recv_buf_list[i].buffer); - droq->recv_buf_list[i].buffer = NULL; - } + pg_info = &droq->recv_buf_list[i].pg_info; + + if (pg_info->dma) + lio_unmap_ring(oct->pci_dev, + (u64)pg_info->dma); + pg_info->dma = 0; + + if (pg_info->page) + recv_buffer_destroy(droq->recv_buf_list[i].buffer, + pg_info); + + if (droq->desc_ring && droq->desc_ring[i].info_ptr) + lio_unmap_ring_info(oct->pci_dev, + (u64)droq-> + desc_ring[i].info_ptr, + OCT_DROQ_INFO_SIZE); + droq->recv_buf_list[i].buffer = NULL; } octeon_droq_reset_indices(droq); @@ -181,25 +177,23 @@ octeon_droq_setup_ring_buffers(struct octeon_device *oct, struct octeon_droq_desc *desc_ring = droq->desc_ring; for (i = 0; i < droq->max_count; i++) { - buf = recv_buffer_alloc(oct, droq->q_no, droq->buffer_size); + buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info); if (!buf) { dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n", __func__); + droq->stats.rx_alloc_failure++; return -ENOMEM; } droq->recv_buf_list[i].buffer = buf; droq->recv_buf_list[i].data = get_rbd(buf); - droq->info_list[i].length = 0; /* map ring buffers into memory */ desc_ring[i].info_ptr = lio_map_ring_info(droq, i); desc_ring[i].buffer_ptr = - lio_map_ring(oct->pci_dev, - droq->recv_buf_list[i].buffer, - droq->buffer_size); + lio_map_ring(droq->recv_buf_list[i].buffer); } octeon_droq_reset_indices(droq); @@ -242,6 +236,8 @@ int octeon_init_droq(struct octeon_device *oct, struct octeon_droq *droq; u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0; u32 c_pkts_per_intr = 0, c_refill_threshold = 0; + int orig_node = dev_to_node(&oct->pci_dev->dev); + int numa_node = cpu_to_node(q_no % num_online_cpus()); dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); @@ -261,15 +257,23 @@ int octeon_init_droq(struct octeon_device 
*oct, struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf); c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x); - c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x); + c_refill_threshold = + (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x); + } else { + return 1; } droq->max_count = c_num_descs; droq->buffer_size = c_buf_size; desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE; + set_dev_node(&oct->pci_dev->dev, numa_node); droq->desc_ring = lio_dma_alloc(oct, desc_ring_size, (dma_addr_t *)&droq->desc_ring_dma); + set_dev_node(&oct->pci_dev->dev, orig_node); + if (!droq->desc_ring) + droq->desc_ring = lio_dma_alloc(oct, desc_ring_size, + (dma_addr_t *)&droq->desc_ring_dma); if (!droq->desc_ring) { dev_err(&oct->pci_dev->dev, @@ -283,12 +287,11 @@ int octeon_init_droq(struct octeon_device *oct, droq->max_count); droq->info_list = - cnnic_alloc_aligned_dma(oct->pci_dev, - (droq->max_count * OCT_DROQ_INFO_SIZE), - &droq->info_alloc_size, - &droq->info_base_addr, - &droq->info_list_dma); - + cnnic_numa_alloc_aligned_dma((droq->max_count * + OCT_DROQ_INFO_SIZE), + &droq->info_alloc_size, + &droq->info_base_addr, + numa_node); if (!droq->info_list) { dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n"); lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE), @@ -297,7 +300,12 @@ int octeon_init_droq(struct octeon_device *oct, } droq->recv_buf_list = (struct octeon_recv_buffer *) - vmalloc(droq->max_count * + vmalloc_node(droq->max_count * + OCT_DROQ_RECVBUF_SIZE, + numa_node); + if (!droq->recv_buf_list) + droq->recv_buf_list = (struct octeon_recv_buffer *) + vmalloc(droq->max_count * OCT_DROQ_RECVBUF_SIZE); if (!droq->recv_buf_list) { dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n"); @@ -320,7 +328,7 @@ int octeon_init_droq(struct octeon_device *oct, /* For 56xx Pass1, this function won't be called, so no checks. */ oct->fn_list.setup_oq_regs(oct, q_no); - oct->io_qmask.oq |= (1 << q_no); + oct->io_qmask.oq |= (1ULL << q_no); return 0; @@ -358,6 +366,7 @@ static inline struct octeon_recv_info *octeon_create_recv_info( struct octeon_recv_pkt *recv_pkt; struct octeon_recv_info *recv_info; u32 i, bytes_left; + struct octeon_skb_page_info *pg_info; info = &droq->info_list[idx]; @@ -375,9 +384,14 @@ static inline struct octeon_recv_info *octeon_create_recv_info( bytes_left = (u32)info->length; while (buf_cnt) { - lio_unmap_ring(octeon_dev->pci_dev, - (u64)droq->desc_ring[idx].buffer_ptr, - droq->buffer_size); + { + pg_info = &droq->recv_buf_list[idx].pg_info; + + lio_unmap_ring(octeon_dev->pci_dev, + (u64)pg_info->dma); + pg_info->page = NULL; + pg_info->dma = 0; + } recv_pkt->buffer_size[i] = (bytes_left >= @@ -449,6 +463,7 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) void *buf = NULL; u8 *data; u32 desc_refilled = 0; + struct octeon_skb_page_info *pg_info; desc_ring = droq->desc_ring; @@ -458,13 +473,22 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) * the buffer, else allocate. 
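octeon_init_droq now allocates the descriptor ring, info list and receive-buffer list NUMA-locally first and only then falls back to any node, keying the node off the queue number; for the coherent descriptor ring the same effect is achieved by temporarily pointing the PCI device at the wanted node with set_dev_node() around lio_dma_alloc(). The recurring try-local-then-fall-back pattern, reduced to a minimal kernel-style sketch (demo_alloc_ring is hypothetical):

#include <linux/vmalloc.h>
#include <linux/topology.h>
#include <linux/cpumask.h>

static void *demo_alloc_ring(size_t size, u32 q_no)
{
	int node = cpu_to_node(q_no % num_online_cpus());
	void *ring;

	ring = vmalloc_node(size, node);	/* prefer the queue's node */
	if (!ring)
		ring = vmalloc(size);		/* any node beats -ENOMEM */
	return ring;
}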
*/ if (!droq->recv_buf_list[droq->refill_idx].buffer) { - buf = recv_buffer_alloc(octeon_dev, droq->q_no, - droq->buffer_size); + pg_info = + &droq->recv_buf_list[droq->refill_idx].pg_info; + /* Either recycle the existing pages or go for + * new page alloc + */ + if (pg_info->page) + buf = recv_buffer_reuse(octeon_dev, pg_info); + else + buf = recv_buffer_alloc(octeon_dev, pg_info); /* If a buffer could not be allocated, no point in * continuing */ - if (!buf) + if (!buf) { + droq->stats.rx_alloc_failure++; break; + } droq->recv_buf_list[droq->refill_idx].buffer = buf; data = get_rbd(buf); @@ -476,11 +500,8 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) droq->recv_buf_list[droq->refill_idx].data = data; desc_ring[droq->refill_idx].buffer_ptr = - lio_map_ring(octeon_dev->pci_dev, - droq->recv_buf_list[droq-> - refill_idx].buffer, - droq->buffer_size); - + lio_map_ring(droq->recv_buf_list[droq-> + refill_idx].buffer); /* Reset any previous values in the length field. */ droq->info_list[droq->refill_idx].length = 0; @@ -539,7 +560,9 @@ octeon_droq_dispatch_pkt(struct octeon_device *oct, droq->stats.dropped_nomem++; } } else { - dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function\n"); + dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n", + (unsigned int)rh->r.opcode, + (unsigned int)rh->r.subcode); droq->stats.dropped_nodispatch++; } /* else (dispatch_fn ... */ @@ -586,6 +609,8 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, for (pkt = 0; pkt < pkt_count; pkt++) { u32 pkt_len = 0; struct sk_buff *nicbuf = NULL; + struct octeon_skb_page_info *pg_info; + void *buf; info = &droq->info_list[droq->read_idx]; octeon_swap_8B_data((u64 *)info, 2); @@ -605,7 +630,6 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, rh = &info->rh; total_len += (u32)info->length; - if (OPCODE_SLOW_PATH(rh)) { u32 buf_cnt; @@ -614,50 +638,45 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, droq->refill_count += buf_cnt; } else { if (info->length <= droq->buffer_size) { - lio_unmap_ring(oct->pci_dev, - (u64)droq->desc_ring[ - droq->read_idx].buffer_ptr, - droq->buffer_size); pkt_len = (u32)info->length; nicbuf = droq->recv_buf_list[ droq->read_idx].buffer; + pg_info = &droq->recv_buf_list[ + droq->read_idx].pg_info; + if (recv_buffer_recycle(oct, pg_info)) + pg_info->page = NULL; droq->recv_buf_list[droq->read_idx].buffer = NULL; + INCR_INDEX_BY1(droq->read_idx, droq->max_count); - skb_put(nicbuf, pkt_len); droq->refill_count++; } else { - nicbuf = octeon_fast_packet_alloc(oct, droq, - droq->q_no, - (u32) + nicbuf = octeon_fast_packet_alloc((u32) info->length); pkt_len = 0; /* nicbuf allocation can fail. We'll handle it * inside the loop. */ while (pkt_len < info->length) { - int cpy_len; + int cpy_len, idx = droq->read_idx; - cpy_len = ((pkt_len + - droq->buffer_size) > - info->length) ? + cpy_len = ((pkt_len + droq->buffer_size) + > info->length) ? ((u32)info->length - pkt_len) : droq->buffer_size; if (nicbuf) { - lio_unmap_ring(oct->pci_dev, - (u64) - droq->desc_ring - [droq->read_idx]. - buffer_ptr, - droq-> - buffer_size); octeon_fast_packet_next(droq, nicbuf, cpy_len, - droq-> - read_idx - ); + idx); + buf = droq->recv_buf_list[idx]. 
+ buffer; + recv_buffer_fast_free(buf); + droq->recv_buf_list[idx].buffer + = NULL; + } else { + droq->stats.rx_alloc_failure++; } pkt_len += cpy_len; @@ -668,12 +687,14 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, } if (nicbuf) { - if (droq->ops.fptr) + if (droq->ops.fptr) { droq->ops.fptr(oct->octeon_id, - nicbuf, pkt_len, - rh, &droq->napi); - else + nicbuf, pkt_len, + rh, &droq->napi, + droq->ops.farg); + } else { recv_buffer_free(nicbuf); + } } } @@ -681,16 +702,16 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, int desc_refilled = octeon_droq_refill(oct, droq); /* Flush the droq descriptor data to memory to be sure - * that when we update the credits the data in memory - * is accurate. - */ + * that when we update the credits the data in memory + * is accurate. + */ wmb(); writel((desc_refilled), droq->pkts_credit_reg); /* make sure mmio write completes */ mmiowb(); } - } /* for ( each packet )... */ + } /* for (each packet)... */ /* Increment refill_count by the number of buffers processed. */ droq->stats.pkts_received += pkt; @@ -721,7 +742,7 @@ octeon_droq_process_packets(struct octeon_device *oct, if (pkt_count > budget) pkt_count = budget; - /* Grab the lock */ + /* Grab the droq lock */ spin_lock(&droq->lock); pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count); @@ -783,7 +804,7 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct, total_pkts_processed += pkts_processed; - octeon_droq_check_hw_for_pkts(oct, droq); + octeon_droq_check_hw_for_pkts(droq); } spin_unlock(&droq->lock); @@ -807,18 +828,6 @@ octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd, u32 arg) { struct octeon_droq *droq; - struct octeon_config *oct_cfg = NULL; - - oct_cfg = octeon_get_conf(oct); - - if (!oct_cfg) - return -EINVAL; - - if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) { - dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n", - __func__, q_no, (oct->num_oqs - 1)); - return -EINVAL; - } droq = oct->droq[q_no]; @@ -937,6 +946,7 @@ int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no) spin_lock_irqsave(&droq->lock, flags); droq->ops.fptr = NULL; + droq->ops.farg = NULL; droq->ops.drop_on_max = 0; spin_unlock_irqrestore(&droq->lock, flags); @@ -949,6 +959,7 @@ int octeon_create_droq(struct octeon_device *oct, u32 desc_size, void *app_ctx) { struct octeon_droq *droq; + int numa_node = cpu_to_node(q_no % num_online_cpus()); if (oct->droq[q_no]) { dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n", @@ -957,7 +968,9 @@ int octeon_create_droq(struct octeon_device *oct, } /* Allocate the DS for the new droq. */ - droq = vmalloc(sizeof(*droq)); + droq = vmalloc_node(sizeof(*droq), numa_node); + if (!droq) + droq = vmalloc(sizeof(*droq)); if (!droq) goto create_droq_fail; memset(droq, 0, sizeof(struct octeon_droq)); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h index 7940ccee1..5a6fb9113 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h @@ -65,6 +65,17 @@ struct octeon_droq_info { #define OCT_DROQ_INFO_SIZE (sizeof(struct octeon_droq_info)) +struct octeon_skb_page_info { + /* DMA address for the page */ + dma_addr_t dma; + + /* Page for the rx dma **/ + struct page *page; + + /** which offset into page */ + unsigned int page_offset; +}; + /** Pointer to data buffer. 
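The rewritten fast path above hands a page half to the stack and then calls recv_buffer_recycle() (defined later in octeon_network.h) instead of unmapping and reallocating per packet: each mapped page is carved into two LIO_RXBUFFER_SZ halves, and the driver flips between them while it still owns the page. The heart of that decision, as a simplified sketch; it uses the octeon_skb_page_info introduced just above and LIO_RXBUFFER_SZ from octeon_network.h, and the real helper also unmaps and forgets the page when it refuses:

#include <linux/mm.h>
#include <linux/topology.h>

static bool demo_can_recycle(struct octeon_skb_page_info *pg_info)
{
	/* Recycle only while the page is exclusively ours and local. */
	if (page_count(pg_info->page) != 1 ||
	    page_to_nid(pg_info->page) != numa_node_id())
		return false;

	pg_info->page_offset ^= LIO_RXBUFFER_SZ;	/* flip halves */
	page_ref_inc(pg_info->page);			/* one ref per half */
	return true;
}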
* Driver keeps a pointer to the data buffer that it made available to * the Octeon device. Since the descriptor ring keeps physical (bus)
@@ -77,6 +88,9 @@ struct octeon_recv_buffer { /** Data in the packet buffer. */ u8 *data; + + /** pg_info */ + struct octeon_skb_page_info pg_info; }; #define OCT_DROQ_RECVBUF_SIZE (sizeof(struct octeon_recv_buffer))
@@ -106,6 +120,13 @@ struct oct_droq_stats { /** Num of Packets dropped due to receive path failures. */ u64 rx_dropped; + + /** Num of vxlan packets received. */ + u64 rx_vxlan; + + /** Num of failures of recv_buffer_alloc() */ + u64 rx_alloc_failure; + }; #define POLL_EVENT_INTR_ARRIVED 1
@@ -213,7 +234,8 @@ struct octeon_droq_ops { * data in the buffer. The receive header gives the port * number to the caller. Function pointer is set by caller. */ - void (*fptr)(u32, void *, u32, union octeon_rh *, void *); + void (*fptr)(u32, void *, u32, union octeon_rh *, void *, void *); + void *farg; /* This function will be called by the driver for all NAPI related * events. The first param is the octeon id. The second param is the
@@ -394,24 +416,9 @@ int octeon_register_dispatch_fn(struct octeon_device *oct, u16 subcode, octeon_dispatch_fn_t fn, void *fn_arg); -/** Remove registration for an opcode/subcode. This will delete the mapping for - * an opcode/subcode. The dispatch function will be unregistered and will no - * longer be called if a packet with the opcode/subcode arrives in the driver - * output queues. - * @param oct - the octeon device to unregister from. - * @param opcode - the opcode to be unregistered. - * @param subcode - the subcode to be unregistered. - * - * @return Success: 0; Failure: 1 - */ -int octeon_unregister_dispatch_fn(struct octeon_device *oct, - u16 opcode, - u16 subcode); - void octeon_droq_print_stats(void); -u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct, - struct octeon_droq *droq); +u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq); int octeon_create_droq(struct octeon_device *oct, u32 q_no, u32 num_descs, u32 desc_size, void *app_ctx);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h index 592fe49b5..ff4b1d6f0 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h @@ -65,6 +65,11 @@ struct oct_iq_stats { u64 tx_iq_busy;/**< Num of times this iq was found to be full. */ u64 tx_dropped;/**< Num of pkts dropped due to xmit path errors. */ u64 tx_tot_bytes;/**< Total count of bytes sent to network. */ + u64 tx_gso; /* count of tso */ + u64 tx_vxlan; /* tunnel */ + u64 tx_dmamap_fail; + u64 tx_restart; + /*u64 tx_timeout_count;*/ }; #define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats))
@@ -75,18 +80,26 @@ struct oct_iq_stats { * an Octeon device has one such structure to represent it. */ struct octeon_instr_queue { + struct octeon_device *oct_dev; + /** A spinlock to protect access to the input ring. */ spinlock_t lock; + /** A spinlock to protect while posting on the ring. */ + spinlock_t post_lock; + + /** A spinlock to protect access to the input ring.*/ + spinlock_t iq_flush_running_lock; + /** Flag that indicates if the queue uses 64 byte commands. */ u32 iqcmd_64B:1; - /** Queue Number. */ - u32 iq_no:5; + /** Queue info.
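The droq dispatch callback gains a trailing void * that is registered once as ops.farg, so per-interface context (typically the net_device) arrives with every packet instead of being looked up from the octeon id in the hot path. A hypothetical registration against this API, assuming octeon_register_droq_ops() as the driver's existing registration entry point:

#include <linux/netdevice.h>

static void demo_rx_cb(u32 octeon_id, void *buf, u32 len,
		       union octeon_rh *rh, void *napi, void *arg)
{
	struct net_device *netdev = arg;	/* context arrives directly */

	netdev_dbg(netdev, "rx %u bytes on octeon %u\n", len, octeon_id);
	/* ... hand buf to the stack on behalf of netdev ... */
}

static int demo_register(struct octeon_device *oct, u32 q_no,
			 struct net_device *netdev)
{
	struct octeon_droq_ops ops = {
		.fptr = demo_rx_cb,
		.farg = netdev,
		.drop_on_max = 1,
	};

	return octeon_register_droq_ops(oct, q_no, &ops);
}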
*/ + union oct_txpciq txpciq; u32 rsvd:17; - /* Controls the periodic flushing of iq */ + /* Controls whether extra flushing of IQ is done on Tx */ u32 do_auto_flush:1; u32 status:8; @@ -147,6 +160,13 @@ struct octeon_instr_queue { /** Application context */ void *app_ctx; + + /* network stack queue index */ + int q_index; + + /*os ifidx associated with this queue */ + int ifidx; + }; /*---------------------- INSTRUCTION FORMAT ----------------------------*/ @@ -176,12 +196,12 @@ struct octeon_instr_32B { /** 64-byte instruction format. * Format of instruction for a 64-byte mode input queue. */ -struct octeon_instr_64B { +struct octeon_instr2_64B { /** Pointer where the input data is available. */ u64 dptr; /** Instruction Header. */ - u64 ih; + u64 ih2; /** Input Request Header. */ u64 irh; @@ -198,14 +218,44 @@ struct octeon_instr_64B { u64 rptr; u64 reserved; +}; + +struct octeon_instr3_64B { + /** Pointer where the input data is available. */ + u64 dptr; + + /** Instruction Header. */ + u64 ih3; + + /** Instruction Header. */ + u64 pki_ih3; + + /** Input Request Header. */ + u64 irh; + /** opcode/subcode specific parameters */ + u64 ossp[2]; + + /** Return Data Parameters */ + u64 rdp; + + /** Pointer where the response for a RAW mode packet will be written + * by Octeon. + */ + u64 rptr; + +}; + +union octeon_instr_64B { + struct octeon_instr2_64B cmd2; + struct octeon_instr3_64B cmd3; }; -#define OCT_64B_INSTR_SIZE (sizeof(struct octeon_instr_64B)) +#define OCT_64B_INSTR_SIZE (sizeof(union octeon_instr_64B)) /** The size of each buffer in soft command buffer pool */ -#define SOFT_COMMAND_BUFFER_SIZE 1024 +#define SOFT_COMMAND_BUFFER_SIZE 1536 struct octeon_soft_command { /** Soft command buffer info. */ @@ -214,7 +264,8 @@ struct octeon_soft_command { u32 size; /** Command and return status */ - struct octeon_instr_64B cmd; + union octeon_instr_64B cmd; + #define COMPLETION_WORD_INIT 0xffffffffffffffffULL u64 *status_word; @@ -242,7 +293,7 @@ struct octeon_soft_command { /** Maximum number of buffers to allocate into soft command buffer pool */ -#define MAX_SOFT_COMMAND_BUFFERS 16 +#define MAX_SOFT_COMMAND_BUFFERS 256 /** Head of a soft command buffer pool. */ @@ -268,14 +319,15 @@ void octeon_free_soft_command(struct octeon_device *oct, /** * octeon_init_instr_queue() * @param octeon_dev - pointer to the octeon device structure. - * @param iq_no - queue to be initialized (0 <= q_no <= 3). + * @param txpciq - queue to be initialized (0 <= q_no <= 3). * * Called at driver init time for each input queue. iq_conf has the * configuration parameters for the queue. 
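octeon_instr_64B becomes a union of the existing 64-byte command (cmd2, built around ih2) and a second layout that adds the PKI instruction header (cmd3, ih3 plus pki_ih3), so queue code can allocate and copy one fixed-size command while chip-specific code picks the view. Illustrative selection logic; only the cmd2 path is actually wired up by this patch, and the else branch is an assumption about how a later chip family would use cmd3:

static void demo_fill_dptr(struct octeon_device *oct,
			   union octeon_instr_64B *cmd, u64 dptr)
{
	if (OCTEON_CN6XXX(oct))
		cmd->cmd2.dptr = dptr;	/* ih2-style instruction */
	else
		cmd->cmd3.dptr = dptr;	/* ih3 + pki_ih3 variant */
}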
* * @return Success: 0 Failure: 1 */ -int octeon_init_instr_queue(struct octeon_device *octeon_dev, u32 iq_no, +int octeon_init_instr_queue(struct octeon_device *octeon_dev, + union oct_txpciq txpciq, u32 num_descs); /** @@ -298,7 +350,7 @@ octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype, int lio_process_iq_request_list(struct octeon_device *oct, - struct octeon_instr_queue *iq); + struct octeon_instr_queue *iq, u32 napi_budget); int octeon_send_command(struct octeon_device *oct, u32 iq_no, u32 force_db, void *cmd, void *buf, @@ -313,7 +365,10 @@ void octeon_prepare_soft_command(struct octeon_device *oct, int octeon_send_soft_command(struct octeon_device *oct, struct octeon_soft_command *sc); -int octeon_setup_iq(struct octeon_device *oct, u32 iq_no, - u32 num_descs, void *app_ctx); - +int octeon_setup_iq(struct octeon_device *oct, int ifidx, + int q_index, union oct_txpciq iq_no, u32 num_descs, + void *app_ctx); +int +octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, + u32 pending_thresh, u32 napi_budget); #endif /* __OCTEON_IQ_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h index cbd081981..bc14e4c27 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h @@ -126,22 +126,27 @@ static inline int octeon_map_pci_barx(struct octeon_device *oct, } static inline void * -cnnic_alloc_aligned_dma(struct pci_dev *pci_dev, - u32 size, - u32 *alloc_size, - size_t *orig_ptr, - size_t *dma_addr __attribute__((unused))) +cnnic_numa_alloc_aligned_dma(u32 size, + u32 *alloc_size, + size_t *orig_ptr, + int numa_node) { int retries = 0; void *ptr = NULL; #define OCTEON_MAX_ALLOC_RETRIES 1 do { - ptr = - (void *)__get_free_pages(GFP_KERNEL, - get_order(size)); + struct page *page = NULL; + + page = alloc_pages_node(numa_node, + GFP_KERNEL, + get_order(size)); + if (!page) + page = alloc_pages(GFP_KERNEL, + get_order(size)); + ptr = (void *)page_address(page); if ((unsigned long)ptr & 0x07) { - free_pages((unsigned long)ptr, get_order(size)); + __free_pages(page, get_order(size)); ptr = NULL; /* Increment the size required if the first * attempt failed. @@ -169,7 +174,7 @@ sleep_cond(wait_queue_head_t *wait_queue, int *condition) init_waitqueue_entry(&we, current); add_wait_queue(wait_queue, &we); - while (!(ACCESS_ONCE(*condition))) { + while (!(READ_ONCE(*condition))) { set_current_state(TASK_INTERRUPTIBLE); if (signal_pending(current)) goto out; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c index 5aecef870..95a4bbedf 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c @@ -19,43 +19,29 @@ * This file may also be available under a different license from Cavium. * Contact Cavium, Inc. 
for more information **********************************************************************/ -#include <linux/version.h> -#include <linux/types.h> -#include <linux/list.h> -#include <linux/interrupt.h> #include <linux/pci.h> -#include <linux/kthread.h> #include <linux/netdevice.h> -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" #include "response_manager.h" #include "octeon_device.h" -#include "octeon_nic.h" -#include "octeon_main.h" -#include "octeon_network.h" -#include "cn66xx_regs.h" -#include "cn66xx_device.h" -#include "cn68xx_regs.h" -#include "cn68xx_device.h" -#include "liquidio_image.h" -#include "octeon_mem_ops.h" #define MEMOPS_IDX MAX_BAR1_MAP_INDEX +#ifdef __BIG_ENDIAN_BITFIELD static inline void -octeon_toggle_bar1_swapmode(struct octeon_device *oct __attribute__((unused)), - u32 idx __attribute__((unused))) +octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx) { -#ifdef __BIG_ENDIAN_BITFIELD u32 mask; mask = oct->fn_list.bar1_idx_read(oct, idx); mask = (mask & 0x2) ? (mask & ~2) : (mask | 2); oct->fn_list.bar1_idx_write(oct, idx, mask); -#endif } +#else +#define octeon_toggle_bar1_swapmode(oct, idx) (oct = oct) +#endif static void octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr, diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index b3abe5818..fb820dc7f 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -30,6 +30,20 @@ #include <linux/dma-mapping.h> #include <linux/ptp_clock_kernel.h> +#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) +#define LIO_MIN_MTU_SIZE 68 + +struct oct_nic_stats_resp { + u64 rh; + struct oct_link_stats stats; + u64 status; +}; + +struct oct_nic_stats_ctrl { + struct completion complete; + struct net_device *netdev; +}; + /** LiquidIO per-interface network private data */ struct lio { /** State of the interface. Rx/Tx happens only in the RUNNING state. */ @@ -48,11 +62,11 @@ struct lio { */ int rxq; - /** Guards the glist */ - spinlock_t lock; + /** Guards each glist */ + spinlock_t *glist_lock; - /** Linked list of gather components */ - struct list_head glist; + /** Array of gather component linked lists */ + struct list_head *glist; /** Pointer to the NIC properties for the Octeon device this network * interface is associated with. @@ -67,6 +81,9 @@ struct lio { /** Link information sent by the core application for this interface. */ struct oct_link_info linfo; + /** counter of link changes */ + u64 link_changes; + /** Size of Tx queue for this octeon device. */ u32 tx_qsize; @@ -82,6 +99,12 @@ struct lio { /** Copy of Interface capabilities: TSO, TSO6, LRO, Chescksums . 
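struct lio trades its single gather list and lock for per-queue arrays, so transmit paths on different queues no longer serialize on one spinlock. A hypothetical setup helper for the new arrays (the real allocation site is not part of this hunk):

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>

static int demo_setup_glists(struct lio *lio, int num_iqs)
{
	int i;

	lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
				  GFP_KERNEL);
	lio->glist = kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
	if (!lio->glist_lock || !lio->glist) {
		kfree(lio->glist_lock);
		kfree(lio->glist);
		return -ENOMEM;
	}

	for (i = 0; i < num_iqs; i++) {
		spin_lock_init(&lio->glist_lock[i]);
		INIT_LIST_HEAD(&lio->glist[i]);
	}
	return 0;
}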
*/ u64 dev_capability; + /* Copy of transmit encapsulation capabilities: + * TSO, TSO6, Checksums for this device for Kernel + * 3.10.0 onwards + */ + u64 enc_dev_capability; + /** Copy of beacaon reg in phy */ u32 phy_beacon_val; @@ -101,7 +124,6 @@ struct lio { /* work queue for txq status */ struct cavium_wq txq_status_wq; - }; #define LIO_SIZE (sizeof(struct lio)) @@ -111,8 +133,9 @@ struct lio { * \brief Enable or disable feature * @param netdev pointer to network device * @param cmd Command that just requires acknowledgment + * @param param1 Parameter to command */ -int liquidio_set_feature(struct net_device *netdev, int cmd); +int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1); /** * \brief Link control command completion callback @@ -131,14 +154,30 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr); */ void liquidio_set_ethtool_ops(struct net_device *netdev); -static inline void -*recv_buffer_alloc(struct octeon_device *oct __attribute__((unused)), - u32 q_no __attribute__((unused)), u32 size) -{ #define SKB_ADJ_MASK 0x3F #define SKB_ADJ (SKB_ADJ_MASK + 1) - struct sk_buff *skb = dev_alloc_skb(size + SKB_ADJ); +#define MIN_SKB_SIZE 256 /* 8 bytes and more - 8 bytes for PTP */ +#define LIO_RXBUFFER_SZ 2048 + +static inline void +*recv_buffer_alloc(struct octeon_device *oct, + struct octeon_skb_page_info *pg_info) +{ + struct page *page; + struct sk_buff *skb; + struct octeon_skb_page_info *skb_pg_info; + + page = alloc_page(GFP_ATOMIC | __GFP_COLD); + if (unlikely(!page)) + return NULL; + + skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ); + if (unlikely(!skb)) { + __free_page(page); + pg_info->page = NULL; + return NULL; + } if ((unsigned long)skb->data & SKB_ADJ_MASK) { u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK); @@ -146,11 +185,151 @@ static inline void skb_reserve(skb, r); } + skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + /* Get DMA info */ + pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + + /* Mapping failed!! 
*/ + if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) { + __free_page(page); + dev_kfree_skb_any((struct sk_buff *)skb); + pg_info->page = NULL; + return NULL; + } + + pg_info->page = page; + pg_info->page_offset = 0; + skb_pg_info->page = page; + skb_pg_info->page_offset = 0; + skb_pg_info->dma = pg_info->dma; + return (void *)skb; } +static inline void +*recv_buffer_fast_alloc(u32 size) +{ + struct sk_buff *skb; + struct octeon_skb_page_info *skb_pg_info; + + skb = dev_alloc_skb(size + SKB_ADJ); + if (unlikely(!skb)) + return NULL; + + if ((unsigned long)skb->data & SKB_ADJ_MASK) { + u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK); + + skb_reserve(skb, r); + } + + skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + skb_pg_info->page = NULL; + skb_pg_info->page_offset = 0; + skb_pg_info->dma = 0; + + return skb; +} + +static inline int +recv_buffer_recycle(struct octeon_device *oct, void *buf) +{ + struct octeon_skb_page_info *pg_info = buf; + + if (!pg_info->page) { + dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n", + __func__); + return -ENOMEM; + } + + if (unlikely(page_count(pg_info->page) != 1) || + unlikely(page_to_nid(pg_info->page) != numa_node_id())) { + dma_unmap_page(&oct->pci_dev->dev, + pg_info->dma, (PAGE_SIZE << 0), + DMA_FROM_DEVICE); + pg_info->dma = 0; + pg_info->page = NULL; + pg_info->page_offset = 0; + return -ENOMEM; + } + + /* Flip to other half of the buffer */ + if (pg_info->page_offset == 0) + pg_info->page_offset = LIO_RXBUFFER_SZ; + else + pg_info->page_offset = 0; + page_ref_inc(pg_info->page); + + return 0; +} + +static inline void +*recv_buffer_reuse(struct octeon_device *oct, void *buf) +{ + struct octeon_skb_page_info *pg_info = buf, *skb_pg_info; + struct sk_buff *skb; + + skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ); + if (unlikely(!skb)) { + dma_unmap_page(&oct->pci_dev->dev, + pg_info->dma, (PAGE_SIZE << 0), + DMA_FROM_DEVICE); + return NULL; + } + + if ((unsigned long)skb->data & SKB_ADJ_MASK) { + u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK); + + skb_reserve(skb, r); + } + + skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + skb_pg_info->page = pg_info->page; + skb_pg_info->page_offset = pg_info->page_offset; + skb_pg_info->dma = pg_info->dma; + + return skb; +} + +static inline void +recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info) +{ + struct sk_buff *skb = (struct sk_buff *)buffer; + + put_page(pg_info->page); + pg_info->dma = 0; + pg_info->page = NULL; + pg_info->page_offset = 0; + + if (skb) + dev_kfree_skb_any(skb); +} + static inline void recv_buffer_free(void *buffer) { + struct sk_buff *skb = (struct sk_buff *)buffer; + struct octeon_skb_page_info *pg_info; + + pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + + if (pg_info->page) { + put_page(pg_info->page); + pg_info->dma = 0; + pg_info->page = NULL; + pg_info->page_offset = 0; + } + + dev_kfree_skb_any((struct sk_buff *)buffer); +} + +static inline void +recv_buffer_fast_free(void *buffer) +{ + dev_kfree_skb_any((struct sk_buff *)buffer); +} + +static inline void tx_buffer_free(void *buffer) +{ dev_kfree_skb_any((struct sk_buff *)buffer); } @@ -159,7 +338,17 @@ static inline void recv_buffer_free(void *buffer) #define lio_dma_free(oct, size, virt_addr, dma_addr) \ dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr) -#define get_rbd(ptr) (((struct sk_buff *)(ptr))->data) +static inline +void *get_rbd(struct sk_buff *skb) +{ + struct octeon_skb_page_info *pg_info; + unsigned char *va; + + 
pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + va = page_address(pg_info->page) + pg_info->page_offset; + + return va; +} static inline u64 lio_map_ring_info(struct octeon_droq *droq, u32 i) @@ -170,7 +359,7 @@ lio_map_ring_info(struct octeon_droq *droq, u32 i) dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i], OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE); - BUG_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr)); + WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr)); return (u64)dma_addr; } @@ -183,33 +372,44 @@ lio_unmap_ring_info(struct pci_dev *pci_dev, } static inline u64 -lio_map_ring(struct pci_dev *pci_dev, - void *buf, u32 size) +lio_map_ring(void *buf) { dma_addr_t dma_addr; - dma_addr = dma_map_single(&pci_dev->dev, get_rbd(buf), size, - DMA_FROM_DEVICE); + struct sk_buff *skb = (struct sk_buff *)buf; + struct octeon_skb_page_info *pg_info; - BUG_ON(dma_mapping_error(&pci_dev->dev, dma_addr)); + pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + if (!pg_info->page) { + pr_err("%s: pg_info->page NULL\n", __func__); + WARN_ON(1); + } + + /* Get DMA info */ + dma_addr = pg_info->dma; + if (!pg_info->dma) { + pr_err("%s: ERROR it should already be available\n", + __func__); + WARN_ON(1); + } + dma_addr += pg_info->page_offset; return (u64)dma_addr; } static inline void lio_unmap_ring(struct pci_dev *pci_dev, - u64 buf_ptr, u32 size) + u64 buf_ptr) + { - dma_unmap_single(&pci_dev->dev, - buf_ptr, size, - DMA_FROM_DEVICE); + dma_unmap_page(&pci_dev->dev, + buf_ptr, (PAGE_SIZE << 0), + DMA_FROM_DEVICE); } -static inline void *octeon_fast_packet_alloc(struct octeon_device *oct, - struct octeon_droq *droq, - u32 q_no, u32 size) +static inline void *octeon_fast_packet_alloc(u32 size) { - return recv_buffer_alloc(oct, q_no, size); + return recv_buffer_fast_alloc(size); } static inline void octeon_fast_packet_next(struct octeon_droq *droq, diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c index 1a0191549..166727be9 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c @@ -19,14 +19,9 @@ * This file may also be available under a different license from Cavium. * Contact Cavium, Inc. 
for more information **********************************************************************/ -#include <linux/version.h> -#include <linux/types.h> -#include <linux/list.h> #include <linux/interrupt.h> #include <linux/pci.h> -#include <linux/kthread.h> #include <linux/netdevice.h> -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" @@ -34,21 +29,14 @@ #include "octeon_device.h" #include "octeon_nic.h" #include "octeon_main.h" -#include "octeon_network.h" -#include "cn66xx_regs.h" -#include "cn66xx_device.h" -#include "cn68xx_regs.h" -#include "cn68xx_device.h" -#include "liquidio_image.h" -#include "octeon_mem_ops.h" void * octeon_alloc_soft_command_resp(struct octeon_device *oct, - struct octeon_instr_64B *cmd, - size_t rdatasize) + union octeon_instr_64B *cmd, + u32 rdatasize) { struct octeon_soft_command *sc; - struct octeon_instr_ih *ih; + struct octeon_instr_ih2 *ih2; struct octeon_instr_irh *irh; struct octeon_instr_rdp *rdp; @@ -59,24 +47,25 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct, return NULL; /* Copy existing command structure into the soft command */ - memcpy(&sc->cmd, cmd, sizeof(struct octeon_instr_64B)); + memcpy(&sc->cmd, cmd, sizeof(union octeon_instr_64B)); /* Add in the response related fields. Opcode and Param are already * there. */ - ih = (struct octeon_instr_ih *)&sc->cmd.ih; - ih->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */ + ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2; + rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp; + irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh; + ih2->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */ - irh = (struct octeon_instr_irh *)&sc->cmd.irh; irh->rflag = 1; /* a response is required */ - irh->len = 4; /* means four 64-bit words immediately follow irh */ - rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; rdp->pcie_port = oct->pcie_port; rdp->rlen = rdatasize; *sc->status_word = COMPLETION_WORD_INIT; + sc->cmd.cmd2.rptr = sc->dmarptr; + sc->wait_time = 1000; sc->timeout = jiffies + sc->wait_time; @@ -119,12 +108,11 @@ static void octnet_link_ctrl_callback(struct octeon_device *oct, static inline struct octeon_soft_command *octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct, - struct octnic_ctrl_pkt *nctrl, - struct octnic_ctrl_params nparams) + struct octnic_ctrl_pkt *nctrl) { struct octeon_soft_command *sc = NULL; u8 *data; - size_t rdatasize; + u32 rdatasize; u32 uddsize = 0, datasize = 0; uddsize = (u32)(nctrl->ncmd.s.more * 8); @@ -143,7 +131,7 @@ static inline struct octeon_soft_command data = (u8 *)sc->virtdptr; - memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE); + memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE); octeon_swap_8B_data((u64 *)data, (OCTNET_CMD_SIZE >> 3)); @@ -152,6 +140,8 @@ static inline struct octeon_soft_command memcpy(data + OCTNET_CMD_SIZE, nctrl->udd, uddsize); } + sc->iq_no = (u32)nctrl->iq_no; + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, 0, 0, 0); @@ -164,26 +154,41 @@ static inline struct octeon_soft_command int octnet_send_nic_ctrl_pkt(struct octeon_device *oct, - struct octnic_ctrl_pkt *nctrl, - struct octnic_ctrl_params nparams) + struct octnic_ctrl_pkt *nctrl) { int retval; struct octeon_soft_command *sc = NULL; - sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl, nparams); + spin_lock_bh(&oct->cmd_resp_wqlock); + /* Allow only rx ctrl command to stop traffic on the chip + * during offline operations + */ + if ((oct->cmd_resp_state == OCT_DRV_OFFLINE) && + (nctrl->ncmd.s.cmd != 
OCTNET_CMD_RX_CTL)) { + spin_unlock_bh(&oct->cmd_resp_wqlock); + dev_err(&oct->pci_dev->dev, + "%s cmd:%d not processed since driver offline\n", + __func__, nctrl->ncmd.s.cmd); + return -1; + } + + sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl); if (!sc) { dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n", __func__); + spin_unlock_bh(&oct->cmd_resp_wqlock); return -1; } retval = octeon_send_soft_command(oct, sc); - if (retval) { + if (retval == IQ_SEND_FAILED) { octeon_free_soft_command(oct, sc); - dev_err(&oct->pci_dev->dev, "%s soft command send failed status: %x\n", - __func__, retval); + dev_err(&oct->pci_dev->dev, "%s soft command:%d send failed status: %x\n", + __func__, nctrl->ncmd.s.cmd, retval); + spin_unlock_bh(&oct->cmd_resp_wqlock); return -1; } + spin_unlock_bh(&oct->cmd_resp_wqlock); return retval; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h index 0238857c8..b71a2bbe4 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h @@ -52,6 +52,9 @@ struct octnic_ctrl_pkt { /** Additional data that may be needed by some commands. */ u64 udd[MAX_NCTRL_UDD]; + /** Input queue to use to send this command. */ + u64 iq_no; + /** Time to wait for Octeon software to respond to this control command. * If wait_time is 0, OSI assumes no response is expected. */ @@ -82,7 +85,7 @@ struct octnic_data_pkt { u32 datasize; /** Command to be passed to the Octeon device software. */ - struct octeon_instr_64B cmd; + union octeon_instr_64B cmd; /** Input queue to use to send this command. */ u32 q_no; @@ -94,15 +97,14 @@ struct octnic_data_pkt { */ union octnic_cmd_setup { struct { - u32 ifidx:8; - u32 cksum_offset:7; + u32 iq_no:8; u32 gather:1; u32 timestamp:1; - u32 ipv4opts_ipv6exthdr:2; u32 ip_csum:1; + u32 transport_csum:1; u32 tnl_csum:1; + u32 rsvd:19; - u32 rsvd:11; union { u32 datasize; u32 gatherptrs; @@ -113,79 +115,146 @@ union octnic_cmd_setup { }; -struct octnic_ctrl_params { - u32 resp_order; -}; - static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no) { return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending) >= (oct->instr_queue[q_no]->max_count - 2)); } -/** Utility function to prepare a 64B NIC instruction based on a setup command - * @param cmd - pointer to instruction to be filled in. - * @param setup - pointer to the setup structure - * @param q_no - which queue for back pressure - * - * Assumes the cmd instruction is pre-allocated, but no fields are filled in. 
- */ static inline void -octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd, - union octnic_cmd_setup *setup, u32 tag) +octnet_prepare_pci_cmd_o2(struct octeon_device *oct, + union octeon_instr_64B *cmd, + union octnic_cmd_setup *setup, u32 tag) { - struct octeon_instr_ih *ih; + struct octeon_instr_ih2 *ih2; struct octeon_instr_irh *irh; union octnic_packet_params packet_params; + int port; - memset(cmd, 0, sizeof(struct octeon_instr_64B)); + memset(cmd, 0, sizeof(union octeon_instr_64B)); - ih = (struct octeon_instr_ih *)&cmd->ih; + ih2 = (struct octeon_instr_ih2 *)&cmd->cmd2.ih2; /* assume that rflag is cleared so therefore front data will only have - * irh and ossp[1] and ossp[2] for a total of 24 bytes + * irh and ossp[0], ossp[1] for a total of 24 bytes */ - ih->fsz = 24; + ih2->fsz = 24; + + ih2->tagtype = ORDERED_TAG; + ih2->grp = DEFAULT_POW_GRP; - ih->tagtype = ORDERED_TAG; - ih->grp = DEFAULT_POW_GRP; + port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port; if (tag) - ih->tag = tag; + ih2->tag = tag; else - ih->tag = LIO_DATA(setup->s.ifidx); + ih2->tag = LIO_DATA(port); - ih->raw = 1; - ih->qos = (setup->s.ifidx & 3) + 4; /* map qos based on interface */ + ih2->raw = 1; + ih2->qos = (port & 3) + 4; /* map qos based on interface */ if (!setup->s.gather) { - ih->dlengsz = setup->s.u.datasize; + ih2->dlengsz = setup->s.u.datasize; } else { - ih->gather = 1; - ih->dlengsz = setup->s.u.gatherptrs; + ih2->gather = 1; + ih2->dlengsz = setup->s.u.gatherptrs; } - irh = (struct octeon_instr_irh *)&cmd->irh; + irh = (struct octeon_instr_irh *)&cmd->cmd2.irh; irh->opcode = OPCODE_NIC; irh->subcode = OPCODE_NIC_NW_DATA; packet_params.u32 = 0; - if (setup->s.cksum_offset) { - packet_params.s.csoffset = setup->s.cksum_offset; - packet_params.s.ipv4opts_ipv6exthdr = - setup->s.ipv4opts_ipv6exthdr; + packet_params.s.ip_csum = setup->s.ip_csum; + packet_params.s.transport_csum = setup->s.transport_csum; + packet_params.s.tnl_csum = setup->s.tnl_csum; + packet_params.s.tsflag = setup->s.timestamp; + + irh->ossp = packet_params.u32; +} + +static inline void +octnet_prepare_pci_cmd_o3(struct octeon_device *oct, + union octeon_instr_64B *cmd, + union octnic_cmd_setup *setup, u32 tag) +{ + struct octeon_instr_irh *irh; + struct octeon_instr_ih3 *ih3; + struct octeon_instr_pki_ih3 *pki_ih3; + union octnic_packet_params packet_params; + int port; + + memset(cmd, 0, sizeof(union octeon_instr_64B)); + + ih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3; + pki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3; + + /* assume that rflag is cleared so therefore front data will only have + * irh and ossp[0], ossp[1] for 24 bytes, plus 8 bytes for the PKI IH + */ + ih3->pkind = oct->instr_queue[setup->s.iq_no]->txpciq.s.pkind; + /*PKI IH*/ + ih3->fsz = 24 + 8; + + if (!setup->s.gather) { + ih3->dlengsz = setup->s.u.datasize; + } else { + ih3->gather = 1; + ih3->dlengsz = setup->s.u.gatherptrs; } + pki_ih3->w = 1; + pki_ih3->raw = 1; + pki_ih3->utag = 1; + pki_ih3->utt = 1; + pki_ih3->uqpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg; + + port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port; + + if (tag) + pki_ih3->tag = tag; + else + pki_ih3->tag = LIO_DATA(port); + + pki_ih3->tagtype = ORDERED_TAG; + pki_ih3->qpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.qpg; + pki_ih3->pm = 0x7; /*0x7 - meant for Parse nothing, uninterpreted*/ + pki_ih3->sl = 8; /* sl will be sizeof(pki_ih3)*/ + + irh = (struct octeon_instr_irh *)&cmd->cmd3.irh; + + irh->opcode = OPCODE_NIC; + irh->subcode = OPCODE_NIC_NW_DATA; 
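+ /* As in the O2 path above, the per-packet checksum and timestamp flags are packed into irh->ossp for the firmware to interpret. */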
+ + packet_params.u32 = 0; + packet_params.s.ip_csum = setup->s.ip_csum; + packet_params.s.transport_csum = setup->s.transport_csum; packet_params.s.tnl_csum = setup->s.tnl_csum; - packet_params.s.ifidx = setup->s.ifidx; packet_params.s.tsflag = setup->s.timestamp; irh->ossp = packet_params.u32; } +/** Utility function to prepare a 64B NIC instruction based on a setup command + * @param oct - octeon device pointer + * @param cmd - pointer to instruction to be filled in. + * @param setup - pointer to the setup structure + * @param tag - tag for this command; if 0, one is derived from the queue's port + * + * Assumes the cmd instruction is pre-allocated, but no fields are filled in. + */ +static inline void +octnet_prepare_pci_cmd(struct octeon_device *oct, union octeon_instr_64B *cmd, + union octnic_cmd_setup *setup, u32 tag) +{ + if (OCTEON_CN6XXX(oct)) + octnet_prepare_pci_cmd_o2(oct, cmd, setup, tag); + else + octnet_prepare_pci_cmd_o3(oct, cmd, setup, tag); +} + /** Allocate a soft command with space for a response immediately following * the command. * @param oct - octeon device pointer @@ -198,8 +267,8 @@ octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd, */ void * octeon_alloc_soft_command_resp(struct octeon_device *oct, - struct octeon_instr_64B *cmd, - size_t rdatasize); + union octeon_instr_64B *cmd, + u32 rdatasize); /** Send a NIC data packet to the device * @param oct - octeon device pointer @@ -214,14 +283,11 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct, /** Send a NIC control packet to the device * @param oct - octeon device pointer * @param nctrl - control structure with command, timeout, and callback info - * @param nparams - response control structure - * * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the * queue should be stopped, and IQ_SEND_OK if it sent okay. */ int octnet_send_nic_ctrl_pkt(struct octeon_device *oct, - struct octnic_ctrl_pkt *nctrl, - struct octnic_ctrl_params nparams); + struct octnic_ctrl_pkt *nctrl); #endif diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index a2a24652c..d32492f18 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -19,28 +19,17 @@ * This file may also be available under a different license from Cavium. * Contact Cavium, Inc. 
for more information **********************************************************************/ -#include <linux/version.h> -#include <linux/types.h> -#include <linux/list.h> -#include <linux/interrupt.h> #include <linux/pci.h> -#include <linux/kthread.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" #include "response_manager.h" #include "octeon_device.h" -#include "octeon_nic.h" #include "octeon_main.h" #include "octeon_network.h" -#include "cn66xx_regs.h" #include "cn66xx_device.h" -#include "cn68xx_regs.h" -#include "cn68xx_device.h" -#include "liquidio_image.h" #define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \ (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count) @@ -51,7 +40,7 @@ struct iq_post_status { }; static void check_db_timeout(struct work_struct *work); -static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no); +static void __check_db_timeout(struct octeon_device *oct, u64 iq_no); static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *); @@ -69,12 +58,16 @@ static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no) /* Return 0 on success, 1 on failure */ int octeon_init_instr_queue(struct octeon_device *oct, - u32 iq_no, u32 num_descs) + union oct_txpciq txpciq, + u32 num_descs) { struct octeon_instr_queue *iq; struct octeon_iq_config *conf = NULL; + u32 iq_no = (u32)txpciq.s.q_no; u32 q_size; struct cavium_wq *db_wq; + int orig_node = dev_to_node(&oct->pci_dev->dev); + int numa_node = cpu_to_node(iq_no % num_online_cpus()); if (OCTEON_CN6XXX(oct)) conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf))); @@ -95,9 +88,15 @@ int octeon_init_instr_queue(struct octeon_device *oct, q_size = (u32)conf->instr_type * num_descs; iq = oct->instr_queue[iq_no]; + iq->oct_dev = oct; + set_dev_node(&oct->pci_dev->dev, numa_node); iq->base_addr = lio_dma_alloc(oct, q_size, (dma_addr_t *)&iq->base_addr_dma); + set_dev_node(&oct->pci_dev->dev, orig_node); + if (!iq->base_addr) + iq->base_addr = lio_dma_alloc(oct, q_size, + (dma_addr_t *)&iq->base_addr_dma); if (!iq->base_addr) { dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n", iq_no); @@ -109,7 +108,11 @@ int octeon_init_instr_queue(struct octeon_device *oct, /* Initialize a list to hold requests that have been posted to Octeon * but have yet to be fetched by octeon */ - iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs); + iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs), + numa_node); + if (!iq->request_list) + iq->request_list = vmalloc(sizeof(*iq->request_list) * + num_descs); if (!iq->request_list) { lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma); dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n", @@ -122,7 +125,7 @@ int octeon_init_instr_queue(struct octeon_device *oct, dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n", iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count); - iq->iq_no = iq_no; + iq->txpciq.u64 = txpciq.u64; iq->fill_threshold = (u32)conf->db_min; iq->fill_cnt = 0; iq->host_write_index = 0; @@ -135,8 +138,11 @@ int octeon_init_instr_queue(struct octeon_device *oct, /* Initialize the spinlock for this instruction queue */ spin_lock_init(&iq->lock); + spin_lock_init(&iq->post_lock); - oct->io_qmask.iq |= (1 << iq_no); + spin_lock_init(&iq->iq_flush_running_lock); + + oct->io_qmask.iq |= (1ULL << iq_no); /* 
Set the 32B/64B mode for each input queue */ oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no); @@ -144,7 +150,9 @@ int octeon_init_instr_queue(struct octeon_device *oct, oct->fn_list.setup_iq_regs(oct, iq_no); - oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db"); + oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db", + WQ_MEM_RECLAIM, + 0); if (!oct->check_db_wq[iq_no].wq) { lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma); dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n", @@ -168,7 +176,6 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no) struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work); - flush_workqueue(oct->check_db_wq[iq_no].wq); destroy_workqueue(oct->check_db_wq[iq_no].wq); if (OCTEON_CN6XXX(oct)) @@ -188,26 +195,38 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no) /* Return 0 on success, 1 on failure */ int octeon_setup_iq(struct octeon_device *oct, - u32 iq_no, + int ifidx, + int q_index, + union oct_txpciq txpciq, u32 num_descs, void *app_ctx) { + u32 iq_no = (u32)txpciq.s.q_no; + int numa_node = cpu_to_node(iq_no % num_online_cpus()); + if (oct->instr_queue[iq_no]) { dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n", iq_no); + oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64; oct->instr_queue[iq_no]->app_ctx = app_ctx; return 0; } oct->instr_queue[iq_no] = - vmalloc(sizeof(struct octeon_instr_queue)); + vmalloc_node(sizeof(struct octeon_instr_queue), numa_node); + if (!oct->instr_queue[iq_no]) + oct->instr_queue[iq_no] = + vmalloc(sizeof(struct octeon_instr_queue)); if (!oct->instr_queue[iq_no]) return 1; memset(oct->instr_queue[iq_no], 0, sizeof(struct octeon_instr_queue)); + oct->instr_queue[iq_no]->q_index = q_index; oct->instr_queue[iq_no]->app_ctx = app_ctx; - if (octeon_init_instr_queue(oct, iq_no, num_descs)) { + oct->instr_queue[iq_no]->ifidx = ifidx; + + if (octeon_init_instr_queue(oct, txpciq, num_descs)) { vfree(oct->instr_queue[iq_no]); oct->instr_queue[iq_no] = NULL; return 1; @@ -226,8 +245,8 @@ int lio_wait_for_instr_fetch(struct octeon_device *oct) instr_cnt = 0; /*for (i = 0; i < oct->num_iqs; i++) {*/ - for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { - if (!(oct->io_qmask.iq & (1UL << i))) + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & (1ULL << i))) continue; pending = atomic_read(&oct-> @@ -271,40 +290,8 @@ static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq, memcpy(iqptr, cmd, cmdsize); } -static inline int -__post_command(struct octeon_device *octeon_dev __attribute__((unused)), - struct octeon_instr_queue *iq, - u32 force_db __attribute__((unused)), u8 *cmd) -{ - u32 index = -1; - - /* This ensures that the read index does not wrap around to the same - * position if queue gets full before Octeon could fetch any instr. - */ - if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) - return -1; - - __copy_cmd_into_iq(iq, cmd); - - /* "index" is returned, host_write_index is modified. */ - index = iq->host_write_index; - INCR_INDEX_BY1(iq->host_write_index, iq->max_count); - iq->fill_cnt++; - - /* Flush the command into memory. We need to be sure the data is in - * memory before indicating that the instruction is pending. 
- */ - wmb(); - - atomic_inc(&iq->instr_pending); - - return index; -} - static inline struct iq_post_status -__post_command2(struct octeon_device *octeon_dev __attribute__((unused)), - struct octeon_instr_queue *iq, - u32 force_db __attribute__((unused)), u8 *cmd) +__post_command2(struct octeon_instr_queue *iq, u8 *cmd) { struct iq_post_status st; @@ -362,17 +349,19 @@ __add_to_request_list(struct octeon_instr_queue *iq, iq->request_list[idx].reqtype = reqtype; } +/* Can only run in process context */ int lio_process_iq_request_list(struct octeon_device *oct, - struct octeon_instr_queue *iq) + struct octeon_instr_queue *iq, u32 napi_budget) { int reqtype; void *buf; u32 old = iq->flush_index; u32 inst_count = 0; - unsigned pkts_compl = 0, bytes_compl = 0; + unsigned int pkts_compl = 0, bytes_compl = 0; struct octeon_soft_command *sc; struct octeon_instr_irh *irh; + unsigned long flags; while (old != iq->octeon_read_index) { reqtype = iq->request_list[old].reqtype; @@ -394,7 +383,7 @@ lio_process_iq_request_list(struct octeon_device *oct, case REQTYPE_SOFT_COMMAND: sc = buf; - irh = (struct octeon_instr_irh *)&sc->cmd.irh; + irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh; if (irh->rflag) { /* We're expecting a response from Octeon. * It's up to lio_process_ordered_list() to @@ -402,17 +391,22 @@ lio_process_iq_request_list(struct octeon_device *oct, * command response list because we expect * a response from Octeon. */ - spin_lock_bh(&oct->response_list - [OCTEON_ORDERED_SC_LIST].lock); + spin_lock_irqsave + (&oct->response_list + [OCTEON_ORDERED_SC_LIST].lock, + flags); atomic_inc(&oct->response_list [OCTEON_ORDERED_SC_LIST]. pending_req_count); list_add_tail(&sc->node, &oct->response_list [OCTEON_ORDERED_SC_LIST].head); - spin_unlock_bh(&oct->response_list - [OCTEON_ORDERED_SC_LIST].lock); + spin_unlock_irqrestore + (&oct->response_list + [OCTEON_ORDERED_SC_LIST].lock, + flags); } else { if (sc->callback) { + /* This callback must not sleep */ sc->callback(oct, OCTEON_REQUEST_DONE, sc->callback_arg); } @@ -430,6 +424,9 @@ lio_process_iq_request_list(struct octeon_device *oct, skip_this: inst_count++; INCR_INDEX_BY1(old, iq->max_count); + + if ((napi_budget) && (inst_count >= napi_budget)) + break; } if (bytes_compl) octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl, @@ -439,38 +436,63 @@ lio_process_iq_request_list(struct octeon_device *oct, return inst_count; } -static inline void -update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq) +/* Can only be called from process context */ +int +octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, + u32 pending_thresh, u32 napi_budget) { u32 inst_processed = 0; + u32 tot_inst_processed = 0; + int tx_done = 1; - /* Calculate how many commands Octeon has read and move the read index - * accordingly. - */ - iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq); + if (!spin_trylock(&iq->iq_flush_running_lock)) + return tx_done; - /* Move the NORESPONSE requests to the per-device completion list. 
*/ - if (iq->flush_index != iq->octeon_read_index) - inst_processed = lio_process_iq_request_list(oct, iq); + spin_lock_bh(&iq->lock); - if (inst_processed) { - atomic_sub(inst_processed, &iq->instr_pending); - iq->stats.instr_processed += inst_processed; - } -} + iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq); -static void -octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, - u32 pending_thresh) -{ if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) { - spin_lock_bh(&iq->lock); - update_iq_indices(oct, iq); - spin_unlock_bh(&iq->lock); + do { + /* Process any outstanding IQ packets. */ + if (iq->flush_index == iq->octeon_read_index) + break; + + if (napi_budget) + inst_processed = lio_process_iq_request_list + (oct, iq, + napi_budget - tot_inst_processed); + else + inst_processed = + lio_process_iq_request_list(oct, iq, 0); + + if (inst_processed) { + atomic_sub(inst_processed, &iq->instr_pending); + iq->stats.instr_processed += inst_processed; + } + + tot_inst_processed += inst_processed; + inst_processed = 0; + + } while (tot_inst_processed < napi_budget); + + if (napi_budget && (tot_inst_processed >= napi_budget)) + tx_done = 0; } + + iq->last_db_time = jiffies; + + spin_unlock_bh(&iq->lock); + + spin_unlock(&iq->iq_flush_running_lock); + + return tx_done; } -static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no) +/* Process instruction queue after timeout. + * This routine gets called from a workqueue or when removing the module. + */ +static void __check_db_timeout(struct octeon_device *oct, u64 iq_no) { struct octeon_instr_queue *iq; u64 next_time; @@ -481,24 +503,17 @@ static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no) if (!iq) return; + /* return immediately, if no work pending */ + if (!atomic_read(&iq->instr_pending)) + return; /* If jiffies - last_db_time < db_timeout do nothing */ next_time = iq->last_db_time + iq->db_timeout; if (!time_after(jiffies, (unsigned long)next_time)) return; iq->last_db_time = jiffies; - /* Get the lock and prevent tasklets. This routine gets called from - * the poll thread. Instructions can now be posted in tasklet context - */ - spin_lock_bh(&iq->lock); - if (iq->fill_cnt != 0) - ring_doorbell(oct, iq); - - spin_unlock_bh(&iq->lock); - /* Flush the instruction queue */ - if (iq->do_auto_flush) - octeon_flush_iq(oct, iq, 1); + octeon_flush_iq(oct, iq, 1, 0); } /* Called by the Poll thread at regular intervals to check the instruction @@ -508,11 +523,12 @@ static void check_db_timeout(struct work_struct *work) { struct cavium_wk *wk = (struct cavium_wk *)work; struct octeon_device *oct = (struct octeon_device *)wk->ctxptr; - unsigned long iq_no = wk->ctxul; + u64 iq_no = wk->ctxul; struct cavium_wq *db_wq = &oct->check_db_wq[iq_no]; + u32 delay = 10; __check_db_timeout(oct, iq_no); - queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1)); + queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay)); } int @@ -523,9 +539,12 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no, struct iq_post_status st; struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; - spin_lock_bh(&iq->lock); + /* Get the lock and prevent other tasks and tx interrupt handler from + * running. 
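+ * Note the locking split introduced here: post_lock serializes producers posting commands, while iq_flush_running_lock in octeon_flush_iq() keeps a single flusher, so posting and flushing can proceed concurrently.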
+ */ + spin_lock_bh(&iq->post_lock); - st = __post_command2(oct, iq, force_db, cmd); + st = __post_command2(iq, cmd); if (st.status != IQ_SEND_FAILED) { octeon_report_sent_bytes_to_bql(buf, reqtype); @@ -533,16 +552,19 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no, INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize); INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1); - if (iq->fill_cnt >= iq->fill_threshold || force_db) + if (force_db) ring_doorbell(oct, iq); } else { INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1); } - spin_unlock_bh(&iq->lock); + spin_unlock_bh(&iq->post_lock); - if (iq->do_auto_flush) - octeon_flush_iq(oct, iq, 2); + /* This is only done here to expedite packets being flushed + * for cases where there are no IQ completion interrupts. + */ + /*if (iq->do_auto_flush)*/ + /* octeon_flush_iq(oct, iq, 2, 0);*/ return st.status; } @@ -557,82 +579,75 @@ octeon_prepare_soft_command(struct octeon_device *oct, u64 ossp1) { struct octeon_config *oct_cfg; - struct octeon_instr_ih *ih; + struct octeon_instr_ih2 *ih2; struct octeon_instr_irh *irh; struct octeon_instr_rdp *rdp; - BUG_ON(opcode > 15); - BUG_ON(subcode > 127); + WARN_ON(opcode > 15); + WARN_ON(subcode > 127); oct_cfg = octeon_get_conf(oct); - ih = (struct octeon_instr_ih *)&sc->cmd.ih; - ih->tagtype = ATOMIC_TAG; - ih->tag = LIO_CONTROL; - ih->raw = 1; - ih->grp = CFG_GET_CTRL_Q_GRP(oct_cfg); + ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2; + ih2->tagtype = ATOMIC_TAG; + ih2->tag = LIO_CONTROL; + ih2->raw = 1; + ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg); if (sc->datasize) { - ih->dlengsz = sc->datasize; - ih->rs = 1; + ih2->dlengsz = sc->datasize; + ih2->rs = 1; } - irh = (struct octeon_instr_irh *)&sc->cmd.irh; + irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh; irh->opcode = opcode; irh->subcode = subcode; /* opcode/subcode specific parameters (ossp) */ irh->ossp = irh_ossp; - sc->cmd.ossp[0] = ossp0; - sc->cmd.ossp[1] = ossp1; + sc->cmd.cmd2.ossp[0] = ossp0; + sc->cmd.cmd2.ossp[1] = ossp1; if (sc->rdatasize) { - rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; + rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp; rdp->pcie_port = oct->pcie_port; rdp->rlen = sc->rdatasize; irh->rflag = 1; - irh->len = 4; - ih->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */ + ih2->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */ } else { irh->rflag = 0; - irh->len = 2; - ih->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */ + ih2->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */ } - - while (!(oct->io_qmask.iq & (1 << sc->iq_no))) - sc->iq_no++; } int octeon_send_soft_command(struct octeon_device *oct, struct octeon_soft_command *sc) { - struct octeon_instr_ih *ih; + struct octeon_instr_ih2 *ih2; struct octeon_instr_irh *irh; - struct octeon_instr_rdp *rdp; + u32 len; - ih = (struct octeon_instr_ih *)&sc->cmd.ih; - if (ih->dlengsz) { - BUG_ON(!sc->dmadptr); - sc->cmd.dptr = sc->dmadptr; + ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2; + if (ih2->dlengsz) { + WARN_ON(!sc->dmadptr); + sc->cmd.cmd2.dptr = sc->dmadptr; } - - irh = (struct octeon_instr_irh *)&sc->cmd.irh; + irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh; if (irh->rflag) { - BUG_ON(!sc->dmarptr); - BUG_ON(!sc->status_word); + WARN_ON(!sc->dmarptr); + WARN_ON(!sc->status_word); *sc->status_word = COMPLETION_WORD_INIT; - rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; - - sc->cmd.rptr = sc->dmarptr; + sc->cmd.cmd2.rptr = sc->dmarptr; } + len = (u32)ih2->dlengsz; if (sc->wait_time) sc->timeout = jiffies + 
sc->wait_time; - return octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc, - (u32)ih->dlengsz, REQTYPE_SOFT_COMMAND); + return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc, + len, REQTYPE_SOFT_COMMAND)); } int octeon_setup_sc_buffer_pool(struct octeon_device *oct) @@ -667,7 +682,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct) struct list_head *tmp, *tmp2; struct octeon_soft_command *sc; - spin_lock(&oct->sc_buf_pool.lock); + spin_lock_bh(&oct->sc_buf_pool.lock); list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) { list_del(tmp); @@ -679,7 +694,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct) INIT_LIST_HEAD(&oct->sc_buf_pool.head); - spin_unlock(&oct->sc_buf_pool.lock); + spin_unlock_bh(&oct->sc_buf_pool.lock); return 0; } @@ -695,13 +710,13 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct, struct octeon_soft_command *sc = NULL; struct list_head *tmp; - BUG_ON((offset + datasize + rdatasize + ctxsize) > + WARN_ON((offset + datasize + rdatasize + ctxsize) > SOFT_COMMAND_BUFFER_SIZE); - spin_lock(&oct->sc_buf_pool.lock); + spin_lock_bh(&oct->sc_buf_pool.lock); if (list_empty(&oct->sc_buf_pool.head)) { - spin_unlock(&oct->sc_buf_pool.lock); + spin_unlock_bh(&oct->sc_buf_pool.lock); return NULL; } @@ -712,7 +727,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct, atomic_inc(&oct->sc_buf_pool.alloc_buf_count); - spin_unlock(&oct->sc_buf_pool.lock); + spin_unlock_bh(&oct->sc_buf_pool.lock); sc = (struct octeon_soft_command *)tmp; @@ -742,7 +757,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct, offset = (offset + datasize + 127) & 0xffffff80; if (rdatasize) { - BUG_ON(rdatasize < 16); + WARN_ON(rdatasize < 16); sc->virtrptr = (u8 *)sc + offset; sc->dmarptr = dma_addr + offset; sc->rdatasize = rdatasize; @@ -755,11 +770,11 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct, void octeon_free_soft_command(struct octeon_device *oct, struct octeon_soft_command *sc) { - spin_lock(&oct->sc_buf_pool.lock); + spin_lock_bh(&oct->sc_buf_pool.lock); list_add_tail(&sc->node, &oct->sc_buf_pool.head); atomic_dec(&oct->sc_buf_pool.alloc_buf_count); - spin_unlock(&oct->sc_buf_pool.lock); + spin_unlock_bh(&oct->sc_buf_pool.lock); } diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c index 091f537a9..709049e36 100644 --- a/drivers/net/ethernet/cavium/liquidio/response_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c @@ -19,28 +19,14 @@ * This file may also be available under a different license from Cavium. * Contact Cavium, Inc. 
for more information **********************************************************************/ -#include <linux/version.h> -#include <linux/types.h> -#include <linux/list.h> -#include <linux/interrupt.h> -#include <linux/dma-mapping.h> #include <linux/pci.h> -#include <linux/kthread.h> #include <linux/netdevice.h> -#include "octeon_config.h" #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" #include "response_manager.h" #include "octeon_device.h" -#include "octeon_nic.h" #include "octeon_main.h" -#include "octeon_network.h" -#include "cn66xx_regs.h" -#include "cn66xx_device.h" -#include "cn68xx_regs.h" -#include "cn68xx_device.h" -#include "liquidio_image.h" static void oct_poll_req_completion(struct work_struct *work); @@ -54,8 +40,9 @@ int octeon_setup_response_list(struct octeon_device *oct) spin_lock_init(&oct->response_list[i].lock); atomic_set(&oct->response_list[i].pending_req_count, 0); } + spin_lock_init(&oct->cmd_resp_wqlock); - oct->dma_comp_wq.wq = create_workqueue("dma-comp"); + oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0); if (!oct->dma_comp_wq.wq) { dev_err(&oct->pci_dev->dev, "failed to create wq thread\n"); return -ENOMEM; @@ -64,7 +51,8 @@ int octeon_setup_response_list(struct octeon_device *oct) cwq = &oct->dma_comp_wq; INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion); cwq->wk.ctxptr = oct; - queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100)); + oct->cmd_resp_state = OCT_DRV_ONLINE; + queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50)); return ret; } @@ -72,7 +60,6 @@ int octeon_setup_response_list(struct octeon_device *oct) void octeon_delete_response_list(struct octeon_device *oct) { cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work); - flush_workqueue(oct->dma_comp_wq.wq); destroy_workqueue(oct->dma_comp_wq.wq); } @@ -86,6 +73,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev, u32 status; u64 status64; struct octeon_instr_rdp *rdp; + u64 rptr; ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST]; @@ -103,7 +91,8 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev, sc = (struct octeon_soft_command *)ordered_sc_list-> head.next; - rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; + rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp; + rptr = sc->cmd.cmd2.rptr; status = OCTEON_REQUEST_PENDING; @@ -111,7 +100,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev, * to where rptr is pointing to */ dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev, - sc->cmd.rptr, rdp->rlen, + rptr, rdp->rlen, DMA_FROM_DEVICE); status64 = *sc->status_word; @@ -173,6 +162,5 @@ static void oct_poll_req_completion(struct work_struct *work) struct cavium_wq *cwq = &oct->dma_comp_wq; lio_process_ordered_list(oct, 0); - - queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100)); + queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50)); } diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 388cd799d..4ab404f45 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -146,7 +146,6 @@ struct octeon_mgmt { struct device *dev; struct napi_struct napi; struct tasklet_struct tx_clean_tasklet; - struct phy_device *phydev; struct device_node *phy_np; resource_size_t mix_phys; resource_size_t mix_size; @@ -787,14 +786,12 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, static int 
octeon_mgmt_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) { - struct octeon_mgmt *p = netdev_priv(netdev); - switch (cmd) { case SIOCSHWTSTAMP: return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd); default: - if (p->phydev) - return phy_mii_ioctl(p->phydev, rq, cmd); + if (netdev->phydev) + return phy_mii_ioctl(netdev->phydev, rq, cmd); return -EINVAL; } } @@ -836,16 +833,18 @@ static void octeon_mgmt_enable_link(struct octeon_mgmt *p) static void octeon_mgmt_update_link(struct octeon_mgmt *p) { + struct net_device *ndev = p->netdev; + struct phy_device *phydev = ndev->phydev; union cvmx_agl_gmx_prtx_cfg prtx_cfg; prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); - if (!p->phydev->link) + if (!phydev->link) prtx_cfg.s.duplex = 1; else - prtx_cfg.s.duplex = p->phydev->duplex; + prtx_cfg.s.duplex = phydev->duplex; - switch (p->phydev->speed) { + switch (phydev->speed) { case 10: prtx_cfg.s.speed = 0; prtx_cfg.s.slottime = 0; @@ -871,7 +870,7 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p) prtx_cfg.s.speed_msb = 0; /* Only matters for half-duplex */ prtx_cfg.s.slottime = 1; - prtx_cfg.s.burst = p->phydev->duplex; + prtx_cfg.s.burst = phydev->duplex; } break; case 0: /* No link */ @@ -894,9 +893,9 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p) /* MII (both speeds) and RGMII 1000 speed. */ agl_clk.s.clk_cnt = 1; if (prtx_ctl.s.mode == 0) { /* RGMII mode */ - if (p->phydev->speed == 10) + if (phydev->speed == 10) agl_clk.s.clk_cnt = 50; - else if (p->phydev->speed == 100) + else if (phydev->speed == 100) agl_clk.s.clk_cnt = 5; } cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64); @@ -906,39 +905,40 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p) static void octeon_mgmt_adjust_link(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; unsigned long flags; int link_changed = 0; - if (!p->phydev) + if (!phydev) return; spin_lock_irqsave(&p->lock, flags); - if (!p->phydev->link && p->last_link) + if (!phydev->link && p->last_link) link_changed = -1; - if (p->phydev->link - && (p->last_duplex != p->phydev->duplex - || p->last_link != p->phydev->link - || p->last_speed != p->phydev->speed)) { + if (phydev->link && + (p->last_duplex != phydev->duplex || + p->last_link != phydev->link || + p->last_speed != phydev->speed)) { octeon_mgmt_disable_link(p); link_changed = 1; octeon_mgmt_update_link(p); octeon_mgmt_enable_link(p); } - p->last_link = p->phydev->link; - p->last_speed = p->phydev->speed; - p->last_duplex = p->phydev->duplex; + p->last_link = phydev->link; + p->last_speed = phydev->speed; + p->last_duplex = phydev->duplex; spin_unlock_irqrestore(&p->lock, flags); if (link_changed != 0) { if (link_changed > 0) { pr_info("%s: Link is up - %d/%s\n", netdev->name, - p->phydev->speed, - DUPLEX_FULL == p->phydev->duplex ? + phydev->speed, + phydev->duplex == DUPLEX_FULL ? "Full" : "Half"); } else { pr_info("%s: Link is down\n", netdev->name); @@ -949,6 +949,7 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev) static int octeon_mgmt_init_phy(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); + struct phy_device *phydev = NULL; if (octeon_is_simulation() || p->phy_np == NULL) { /* No PHYs in the simulator. 
*/ @@ -956,11 +957,11 @@ static int octeon_mgmt_init_phy(struct net_device *netdev) return 0; } - p->phydev = of_phy_connect(netdev, p->phy_np, - octeon_mgmt_adjust_link, 0, - PHY_INTERFACE_MODE_MII); + phydev = of_phy_connect(netdev, p->phy_np, + octeon_mgmt_adjust_link, 0, + PHY_INTERFACE_MODE_MII); - if (!p->phydev) + if (!phydev) return -ENODEV; return 0; @@ -1080,9 +1081,9 @@ static int octeon_mgmt_open(struct net_device *netdev) } /* Set the mode of the interface, RGMII/MII. */ - if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) { + if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) { union cvmx_agl_prtx_ctl agl_prtx_ctl; - int rgmii_mode = (p->phydev->supported & + int rgmii_mode = (netdev->phydev->supported & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0; agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); @@ -1205,7 +1206,7 @@ static int octeon_mgmt_open(struct net_device *netdev) /* Configure the port duplex, speed and enables */ octeon_mgmt_disable_link(p); - if (p->phydev) + if (netdev->phydev) octeon_mgmt_update_link(p); octeon_mgmt_enable_link(p); @@ -1214,9 +1215,9 @@ static int octeon_mgmt_open(struct net_device *netdev) /* PHY is not present in simulator. The carrier is enabled * while initializing the phy for simulator, leave it enabled. */ - if (p->phydev) { + if (netdev->phydev) { netif_carrier_off(netdev); - phy_start_aneg(p->phydev); + phy_start_aneg(netdev->phydev); } netif_wake_queue(netdev); @@ -1244,9 +1245,8 @@ static int octeon_mgmt_stop(struct net_device *netdev) napi_disable(&p->napi); netif_stop_queue(netdev); - if (p->phydev) - phy_disconnect(p->phydev); - p->phydev = NULL; + if (netdev->phydev) + phy_disconnect(netdev->phydev); netif_carrier_off(netdev); @@ -1346,50 +1346,23 @@ static void octeon_mgmt_get_drvinfo(struct net_device *netdev, strlcpy(info->bus_info, "N/A", sizeof(info->bus_info)); } -static int octeon_mgmt_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - - if (p->phydev) - return phy_ethtool_gset(p->phydev, cmd); - - return -EOPNOTSUPP; -} - -static int octeon_mgmt_set_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (p->phydev) - return phy_ethtool_sset(p->phydev, cmd); - - return -EOPNOTSUPP; -} - static int octeon_mgmt_nway_reset(struct net_device *dev) { - struct octeon_mgmt *p = netdev_priv(dev); - if (!capable(CAP_NET_ADMIN)) return -EPERM; - if (p->phydev) - return phy_start_aneg(p->phydev); + if (dev->phydev) + return phy_start_aneg(dev->phydev); return -EOPNOTSUPP; } static const struct ethtool_ops octeon_mgmt_ethtool_ops = { .get_drvinfo = octeon_mgmt_get_drvinfo, - .get_settings = octeon_mgmt_get_settings, - .set_settings = octeon_mgmt_set_settings, .nway_reset = octeon_mgmt_nway_reset, .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct net_device_ops octeon_mgmt_ops = { @@ -1540,6 +1513,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev) return 0; err: + of_node_put(p->phy_np); free_netdev(netdev); return result; } @@ -1547,8 +1521,10 @@ err: static int octeon_mgmt_remove(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); + struct octeon_mgmt *p = netdev_priv(netdev); unregister_netdev(netdev); + of_node_put(p->phy_np); free_netdev(netdev); return 0; } diff --git 
a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 83025bb47..e29815d9e 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -279,6 +279,7 @@ struct nicvf { u8 sqs_id; bool sqs_mode; bool hw_tso; + bool t88; /* Receive buffer alloc */ u32 rb_page_offset; diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 16ed20357..85cc782b9 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -251,9 +251,14 @@ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size) int lmac; u64 lmac_cfg; - /* Max value that can be set is 60 */ - if (size > 60) - size = 60; + /* There is an issue in HW wherein, while sending GSO sized + * pkts as part of TSO, if pkt len falls below this size the + * NIC will zero-pad the packet and also update the IP total length. + * Hence set this value to less than the min pkt size of MAC+IP+TCP + * headers; BGX will do the padding to transmit a 64 byte pkt. + */ + if (size > 52) + size = 52; for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) { lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3)); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index a19e73f11..324034961 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -513,6 +513,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, struct nicvf *nic = netdev_priv(netdev); struct snd_queue *sq; struct sq_hdr_subdesc *hdr; + struct sq_hdr_subdesc *tso_sqe; sq = &nic->qs->sq[cqe_tx->sq_idx]; @@ -527,17 +528,21 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; - /* For TSO offloaded packets only one SQE will have a valid SKB */ if (skb) { + /* Check for dummy descriptor used for HW TSO offload on 88xx */ + if (hdr->dont_send) { + /* Get actual TSO descriptors and free them */ + tso_sqe = + (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2); + nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1); + } nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); prefetch(skb); dev_consume_skb_any(skb); sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL; } else { - /* In case of HW TSO, HW sends a CQE for each segment of a TSO - * packet instead of a single CQE for the whole TSO packet - * transmitted. Each of this CQE points to the same SQE, so - * avoid freeing same SQE multiple times. + /* In case of SW TSO on 88xx, only last segment will have + * a SKB attached, so just free SQEs here. 
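+ * (For HW TSO on 88xx the cleanup happens in the skb branch above, where the dummy descriptor's rsvd2 field points back at the real TSO SQEs.)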
*/ if (!nic->hw_tso) nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); @@ -1502,6 +1507,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct net_device *netdev; struct nicvf *nic; int err, qcount; + u16 sdevid; err = pci_enable_device(pdev); if (err) { @@ -1575,6 +1581,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!pass1_silicon(nic->pdev)) nic->hw_tso = true; + pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid); + if (sdevid == 0xA134) + nic->t88 = true; + /* Check if this VF is in QS only mode */ if (nic->sqs_mode) return 0; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 0ff8e60de..dda3ea3f3 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -938,6 +938,8 @@ static int nicvf_tso_count_subdescs(struct sk_buff *skb) return num_edescs + sh->gso_segs; } +#define POST_CQE_DESC_COUNT 2 + /* Get the number of SQ descriptors needed to xmit this skb */ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) { @@ -948,6 +950,10 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb) return subdesc_cnt; } + /* Dummy descriptors to get TSO pkt completion notification */ + if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) + subdesc_cnt += POST_CQE_DESC_COUNT; + if (skb_shinfo(skb)->nr_frags) subdesc_cnt += skb_shinfo(skb)->nr_frags; @@ -965,14 +971,21 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, struct sq_hdr_subdesc *hdr; hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); - sq->skbuff[qentry] = (u64)skb; - memset(hdr, 0, SND_QUEUE_DESC_SIZE); hdr->subdesc_type = SQ_DESC_TYPE_HEADER; - /* Enable notification via CQE after processing SQE */ - hdr->post_cqe = 1; - /* No of subdescriptors following this */ - hdr->subdesc_cnt = subdesc_cnt; + + if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) { + /* post_cqe = 0, to avoid HW posting a CQE for every TSO + * segment transmitted on 88xx. + */ + hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT; + } else { + sq->skbuff[qentry] = (u64)skb; + /* Enable notification via CQE after processing SQE */ + hdr->post_cqe = 1; + /* No of subdescriptors following this */ + hdr->subdesc_cnt = subdesc_cnt; + } hdr->tot_len = len; /* Offload checksum calculation to HW */ @@ -1023,6 +1036,37 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, gather->addr = data; } +/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO + * packet so that a CQE is posted as a notification for transmission of + * TSO packet. 
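+ * POST_CQE_DESC_COUNT is 2 because this trailer is exactly one HDR subdescriptor (dont_send = 1, post_cqe = 1) plus one dummy IMMEDIATE subdescriptor; hdr->rsvd2 records the real TSO header SQE index for cleanup.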
+ */ +static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry, + int tso_sqe, struct sk_buff *skb) +{ + struct sq_imm_subdesc *imm; + struct sq_hdr_subdesc *hdr; + + sq->skbuff[qentry] = (u64)skb; + + hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); + memset(hdr, 0, SND_QUEUE_DESC_SIZE); + hdr->subdesc_type = SQ_DESC_TYPE_HEADER; + /* Enable notification via CQE after processing SQE */ + hdr->post_cqe = 1; + /* There is no packet to transmit here */ + hdr->dont_send = 1; + hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1; + hdr->tot_len = 1; + /* Actual TSO header SQE index, needed for cleanup */ + hdr->rsvd2 = tso_sqe; + + qentry = nicvf_get_nxt_sqentry(sq, qentry); + imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry); + memset(imm, 0, SND_QUEUE_DESC_SIZE); + imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE; + imm->len = 1; +} + /* Segment a TSO packet into 'gso_size' segments and append * them to SQ for transfer */ @@ -1096,7 +1140,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) { int i, size; - int subdesc_cnt; + int subdesc_cnt, tso_sqe = 0; int sq_num, qentry; struct queue_set *qs; struct snd_queue *sq; @@ -1131,6 +1175,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) /* Add SQ header subdesc */ nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1, skb, skb->len); + tso_sqe = qentry; /* Add SQ gather subdescs */ qentry = nicvf_get_nxt_sqentry(sq, qentry); @@ -1154,6 +1199,11 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb) } doorbell: + if (nic->t88 && skb_shinfo(skb)->gso_size) { + qentry = nicvf_get_nxt_sqentry(sq, qentry); + nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb); + } + /* make sure all memory stores are done before ringing doorbell */ smp_wmb(); diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index 4686a85a8..5713e83be 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -96,17 +96,6 @@ config CHELSIO_T4_DCB If unsure, say N. -config CHELSIO_T4_UWIRE - bool "Unified Wire Support for Chelsio T5 cards" - default n - depends on CHELSIO_T4 - ---help--- - Enable unified-wire offload features. - Say Y here if you want to enable unified-wire over Ethernet - in the driver. - - If unsure, say N. - config CHELSIO_T4_FCOE bool "Fibre Channel over Ethernet (FCoE) Support for Chelsio T5 cards" default n @@ -137,4 +126,9 @@ config CHELSIO_T4VF To compile this driver as a module choose M here; the module will be called cxgb4vf. +config CHELSIO_LIB + tristate + ---help--- + Common library for Chelsio drivers. 
+ endif # NET_VENDOR_CHELSIO diff --git a/drivers/net/ethernet/chelsio/Makefile b/drivers/net/ethernet/chelsio/Makefile index 390510b5e..b6a5eec6e 100644 --- a/drivers/net/ethernet/chelsio/Makefile +++ b/drivers/net/ethernet/chelsio/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_CHELSIO_T1) += cxgb/ obj-$(CONFIG_CHELSIO_T3) += cxgb3/ obj-$(CONFIG_CHELSIO_T4) += cxgb4/ obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/ +obj-$(CONFIG_CHELSIO_LIB) += libcxgb/ diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index 85c92821b..ace0ab98d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -7,5 +7,4 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o -cxgb4-$(CONFIG_CHELSIO_T4_UWIRE) += cxgb4_ppm.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index b4fceb924..edd23386b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -418,8 +418,9 @@ struct trace_params { struct link_config { unsigned short supported; /* link capabilities */ unsigned short advertising; /* advertised capabilities */ - unsigned short requested_speed; /* speed user has requested */ - unsigned short speed; /* actual link speed */ + unsigned short lp_advertising; /* peer advertised capabilities */ + unsigned int requested_speed; /* speed user has requested */ + unsigned int speed; /* actual link speed */ unsigned char requested_fc; /* flow control user has requested */ unsigned char fc; /* actual link flow control */ unsigned char autoneg; /* autonegotiating? */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 7a0b92b2f..02f80febe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -480,178 +480,293 @@ static int identify_port(struct net_device *dev, return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val); } -static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps) +/** + * from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool + * @port_type: Firmware Port Type + * @mod_type: Firmware Module Type + * + * Translate Firmware Port/Module type to Ethtool Port Type. 
+ */ +static int from_fw_port_mod_type(enum fw_port_type port_type, + enum fw_port_module_type mod_type) { - unsigned int v = 0; - - if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI || - type == FW_PORT_TYPE_BT_XAUI) { - v |= SUPPORTED_TP; - if (caps & FW_PORT_CAP_SPEED_100M) - v |= SUPPORTED_100baseT_Full; - if (caps & FW_PORT_CAP_SPEED_1G) - v |= SUPPORTED_1000baseT_Full; - if (caps & FW_PORT_CAP_SPEED_10G) - v |= SUPPORTED_10000baseT_Full; - } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) { - v |= SUPPORTED_Backplane; - if (caps & FW_PORT_CAP_SPEED_1G) - v |= SUPPORTED_1000baseKX_Full; - if (caps & FW_PORT_CAP_SPEED_10G) - v |= SUPPORTED_10000baseKX4_Full; - } else if (type == FW_PORT_TYPE_KR) { - v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; - } else if (type == FW_PORT_TYPE_BP_AP) { - v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | - SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full; - } else if (type == FW_PORT_TYPE_BP4_AP) { - v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | - SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full | - SUPPORTED_10000baseKX4_Full; - } else if (type == FW_PORT_TYPE_FIBER_XFI || - type == FW_PORT_TYPE_FIBER_XAUI || - type == FW_PORT_TYPE_SFP || - type == FW_PORT_TYPE_QSFP_10G || - type == FW_PORT_TYPE_QSA) { - v |= SUPPORTED_FIBRE; - if (caps & FW_PORT_CAP_SPEED_1G) - v |= SUPPORTED_1000baseT_Full; - if (caps & FW_PORT_CAP_SPEED_10G) - v |= SUPPORTED_10000baseT_Full; - } else if (type == FW_PORT_TYPE_BP40_BA || - type == FW_PORT_TYPE_QSFP) { - v |= SUPPORTED_40000baseSR4_Full; - v |= SUPPORTED_FIBRE; + if (port_type == FW_PORT_TYPE_BT_SGMII || + port_type == FW_PORT_TYPE_BT_XFI || + port_type == FW_PORT_TYPE_BT_XAUI) { + return PORT_TP; + } else if (port_type == FW_PORT_TYPE_FIBER_XFI || + port_type == FW_PORT_TYPE_FIBER_XAUI) { + return PORT_FIBRE; + } else if (port_type == FW_PORT_TYPE_SFP || + port_type == FW_PORT_TYPE_QSFP_10G || + port_type == FW_PORT_TYPE_QSA || + port_type == FW_PORT_TYPE_QSFP) { + if (mod_type == FW_PORT_MOD_TYPE_LR || + mod_type == FW_PORT_MOD_TYPE_SR || + mod_type == FW_PORT_MOD_TYPE_ER || + mod_type == FW_PORT_MOD_TYPE_LRM) + return PORT_FIBRE; + else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || + mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) + return PORT_DA; + else + return PORT_OTHER; } - if (caps & FW_PORT_CAP_ANEG) - v |= SUPPORTED_Autoneg; - return v; + return PORT_OTHER; } -static unsigned int to_fw_linkcaps(unsigned int caps) +/** + * speed_to_fw_caps - translate Port Speed to Firmware Port Capabilities + * @speed: speed in Mb/s + * + * Translates a specific Port Speed into a Firmware Port Capabilities + * value. 
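+ * Speeds use the ethtool SPEED_* convention (Mb/s); for example, speed_to_fw_caps(40000) returns FW_PORT_CAP_SPEED_40G, and an unrecognized speed returns 0.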
+ */ +static unsigned int speed_to_fw_caps(int speed) { - unsigned int v = 0; - - if (caps & ADVERTISED_100baseT_Full) - v |= FW_PORT_CAP_SPEED_100M; - if (caps & ADVERTISED_1000baseT_Full) - v |= FW_PORT_CAP_SPEED_1G; - if (caps & ADVERTISED_10000baseT_Full) - v |= FW_PORT_CAP_SPEED_10G; - if (caps & ADVERTISED_40000baseSR4_Full) - v |= FW_PORT_CAP_SPEED_40G; - return v; + if (speed == 100) + return FW_PORT_CAP_SPEED_100M; + if (speed == 1000) + return FW_PORT_CAP_SPEED_1G; + if (speed == 10000) + return FW_PORT_CAP_SPEED_10G; + if (speed == 25000) + return FW_PORT_CAP_SPEED_25G; + if (speed == 40000) + return FW_PORT_CAP_SPEED_40G; + if (speed == 100000) + return FW_PORT_CAP_SPEED_100G; + return 0; } -static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +/** + * fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask + * @port_type: Firmware Port Type + * @fw_caps: Firmware Port Capabilities + * @link_mode_mask: ethtool Link Mode Mask + * + * Translate a Firmware Port Capabilities specification to an ethtool + * Link Mode Mask. + */ +static void fw_caps_to_lmm(enum fw_port_type port_type, + unsigned int fw_caps, + unsigned long *link_mode_mask) { - const struct port_info *p = netdev_priv(dev); - - if (p->port_type == FW_PORT_TYPE_BT_SGMII || - p->port_type == FW_PORT_TYPE_BT_XFI || - p->port_type == FW_PORT_TYPE_BT_XAUI) { - cmd->port = PORT_TP; - } else if (p->port_type == FW_PORT_TYPE_FIBER_XFI || - p->port_type == FW_PORT_TYPE_FIBER_XAUI) { - cmd->port = PORT_FIBRE; - } else if (p->port_type == FW_PORT_TYPE_SFP || - p->port_type == FW_PORT_TYPE_QSFP_10G || - p->port_type == FW_PORT_TYPE_QSA || - p->port_type == FW_PORT_TYPE_QSFP) { - if (p->mod_type == FW_PORT_MOD_TYPE_LR || - p->mod_type == FW_PORT_MOD_TYPE_SR || - p->mod_type == FW_PORT_MOD_TYPE_ER || - p->mod_type == FW_PORT_MOD_TYPE_LRM) - cmd->port = PORT_FIBRE; - else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || - p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) - cmd->port = PORT_DA; - else - cmd->port = PORT_OTHER; + #define SET_LMM(__lmm_name) __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name \ + ## _BIT, link_mode_mask) + + #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \ + do { \ + if (fw_caps & FW_PORT_CAP_ ## __fw_name) \ + SET_LMM(__lmm_name); \ + } while (0) + + switch (port_type) { + case FW_PORT_TYPE_BT_SGMII: + case FW_PORT_TYPE_BT_XFI: + case FW_PORT_TYPE_BT_XAUI: + SET_LMM(TP); + FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); + break; + + case FW_PORT_TYPE_KX4: + case FW_PORT_TYPE_KX: + SET_LMM(Backplane); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full); + break; + + case FW_PORT_TYPE_KR: + SET_LMM(Backplane); + SET_LMM(10000baseKR_Full); + break; + + case FW_PORT_TYPE_BP_AP: + SET_LMM(Backplane); + SET_LMM(10000baseR_FEC); + SET_LMM(10000baseKR_Full); + SET_LMM(1000baseKX_Full); + break; + + case FW_PORT_TYPE_BP4_AP: + SET_LMM(Backplane); + SET_LMM(10000baseR_FEC); + SET_LMM(10000baseKR_Full); + SET_LMM(1000baseKX_Full); + SET_LMM(10000baseKX4_Full); + break; + + case FW_PORT_TYPE_FIBER_XFI: + case FW_PORT_TYPE_FIBER_XAUI: + case FW_PORT_TYPE_SFP: + case FW_PORT_TYPE_QSFP_10G: + case FW_PORT_TYPE_QSA: + SET_LMM(FIBRE); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); + break; + + case FW_PORT_TYPE_BP40_BA: + case FW_PORT_TYPE_QSFP: + SET_LMM(FIBRE); + SET_LMM(40000baseSR4_Full); + break; + + case FW_PORT_TYPE_CR_QSFP: + 
case FW_PORT_TYPE_SFP28: + SET_LMM(FIBRE); + SET_LMM(25000baseCR_Full); + break; + + case FW_PORT_TYPE_KR4_100G: + case FW_PORT_TYPE_CR4_QSFP: + SET_LMM(FIBRE); + SET_LMM(100000baseCR4_Full); + break; + + default: + break; + } + + FW_CAPS_TO_LMM(ANEG, Autoneg); + FW_CAPS_TO_LMM(802_3_PAUSE, Pause); + FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause); + + #undef FW_CAPS_TO_LMM + #undef SET_LMM +} + +/** + * lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware + * capabilities + * + * @link_mode_mask: ethtool Link Mode Mask + * + * Translate ethtool Link Mode Mask into a Firmware Port capabilities + * value. + */ +static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask) +{ + unsigned int fw_caps = 0; + + #define LMM_TO_FW_CAPS(__lmm_name, __fw_name) \ + do { \ + if (test_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \ + link_mode_mask)) \ + fw_caps |= FW_PORT_CAP_ ## __fw_name; \ + } while (0) + + LMM_TO_FW_CAPS(100baseT_Full, SPEED_100M); + LMM_TO_FW_CAPS(1000baseT_Full, SPEED_1G); + LMM_TO_FW_CAPS(10000baseT_Full, SPEED_10G); + LMM_TO_FW_CAPS(40000baseSR4_Full, SPEED_40G); + LMM_TO_FW_CAPS(25000baseCR_Full, SPEED_25G); + LMM_TO_FW_CAPS(100000baseCR4_Full, SPEED_100G); + + #undef LMM_TO_FW_CAPS + + return fw_caps; +} + +static int get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *link_ksettings) +{ + const struct port_info *pi = netdev_priv(dev); + struct ethtool_link_settings *base = &link_ksettings->base; + + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); + ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising); + + base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type); + + if (pi->mdio_addr >= 0) { + base->phy_address = pi->mdio_addr; + base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII + ? ETH_MDIO_SUPPORTS_C22 + : ETH_MDIO_SUPPORTS_C45); } else { - cmd->port = PORT_OTHER; + base->phy_address = 255; + base->mdio_support = 0; } - if (p->mdio_addr >= 0) { - cmd->phy_address = p->mdio_addr; - cmd->transceiver = XCVR_EXTERNAL; - cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ? - MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45; + fw_caps_to_lmm(pi->port_type, pi->link_cfg.supported, + link_ksettings->link_modes.supported); + fw_caps_to_lmm(pi->port_type, pi->link_cfg.advertising, + link_ksettings->link_modes.advertising); + fw_caps_to_lmm(pi->port_type, pi->link_cfg.lp_advertising, + link_ksettings->link_modes.lp_advertising); + + if (netif_carrier_ok(dev)) { + base->speed = pi->link_cfg.speed; + base->duplex = DUPLEX_FULL; } else { - cmd->phy_address = 0; /* not really, but no better option */ - cmd->transceiver = XCVR_INTERNAL; - cmd->mdio_support = 0; + base->speed = SPEED_UNKNOWN; + base->duplex = DUPLEX_UNKNOWN; } - cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported); - cmd->advertising = from_fw_linkcaps(p->port_type, - p->link_cfg.advertising); - ethtool_cmd_speed_set(cmd, - netif_carrier_ok(dev) ? 
p->link_cfg.speed : 0); - cmd->duplex = DUPLEX_FULL; - cmd->autoneg = p->link_cfg.autoneg; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; - return 0; -} + base->autoneg = pi->link_cfg.autoneg; + if (pi->link_cfg.supported & FW_PORT_CAP_ANEG) + ethtool_link_ksettings_add_link_mode(link_ksettings, + supported, Autoneg); + if (pi->link_cfg.autoneg) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, Autoneg); -static unsigned int speed_to_caps(int speed) -{ - if (speed == 100) - return FW_PORT_CAP_SPEED_100M; - if (speed == 1000) - return FW_PORT_CAP_SPEED_1G; - if (speed == 10000) - return FW_PORT_CAP_SPEED_10G; - if (speed == 40000) - return FW_PORT_CAP_SPEED_40G; return 0; } -static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings + *link_ksettings) { - unsigned int cap; - struct port_info *p = netdev_priv(dev); - struct link_config *lc = &p->link_cfg; - u32 speed = ethtool_cmd_speed(cmd); + struct port_info *pi = netdev_priv(dev); + struct link_config *lc = &pi->link_cfg; + const struct ethtool_link_settings *base = &link_ksettings->base; struct link_config old_lc; - int ret; + unsigned int fw_caps; + int ret = 0; - if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */ + /* only full-duplex supported */ + if (base->duplex != DUPLEX_FULL) return -EINVAL; if (!(lc->supported & FW_PORT_CAP_ANEG)) { /* PHY offers a single speed. See if that's what's * being requested. */ - if (cmd->autoneg == AUTONEG_DISABLE && - (lc->supported & speed_to_caps(speed))) + if (base->autoneg == AUTONEG_DISABLE && + (lc->supported & speed_to_fw_caps(base->speed))) return 0; return -EINVAL; } old_lc = *lc; - if (cmd->autoneg == AUTONEG_DISABLE) { - cap = speed_to_caps(speed); + if (base->autoneg == AUTONEG_DISABLE) { + fw_caps = speed_to_fw_caps(base->speed); - if (!(lc->supported & cap)) + if (!(lc->supported & fw_caps)) return -EINVAL; - lc->requested_speed = cap; + lc->requested_speed = fw_caps; lc->advertising = 0; } else { - cap = to_fw_linkcaps(cmd->advertising); - if (!(lc->supported & cap)) + fw_caps = + lmm_to_fw_caps(link_ksettings->link_modes.advertising); + + if (!(lc->supported & fw_caps)) return -EINVAL; lc->requested_speed = 0; - lc->advertising = cap | FW_PORT_CAP_ANEG; + lc->advertising = fw_caps | FW_PORT_CAP_ANEG; } - lc->autoneg = cmd->autoneg; + lc->autoneg = base->autoneg; /* If the firmware rejects the Link Configuration request, back out * the changes and report the error. 
*/ - ret = t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan, lc); + ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox, pi->tx_chan, lc); if (ret) *lc = old_lc; @@ -1093,8 +1208,8 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, } static const struct ethtool_ops cxgb_ethtool_ops = { - .get_settings = get_settings, - .set_settings = set_settings, + .get_link_ksettings = get_link_ksettings, + .set_link_ksettings = set_link_ksettings, .get_drvinfo = get_drvinfo, .get_msglevel = get_msglevel, .set_msglevel = set_msglevel, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 6eea06a31..6694233e2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -64,6 +64,7 @@ #include <net/bonding.h> #include <net/addrconf.h> #include <asm/uaccess.h> +#include <linux/crash_dump.h> #include "cxgb4.h" #include "t4_regs.h" @@ -204,7 +205,7 @@ static int rx_dma_offset = 2; static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV]; module_param_array(num_vf, uint, NULL, 0644); -MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3"); +MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3, deprecated parameter - please use the pci sysfs interface."); #endif /* TX Queue select used to determine what algorithm to use for selecting TX @@ -458,11 +459,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; - if (!(dev->flags & IFF_PROMISC)) { - __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync); - if (!(dev->flags & IFF_ALLMULTI)) - __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync); - } + __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync); + __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync); return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, (dev->flags & IFF_PROMISC) ? 1 : 0, @@ -3733,7 +3731,8 @@ static int adap_init0(struct adapter *adap) return ret; /* Contact FW, advertising Master capability */ - ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); + ret = t4_fw_hello(adap, adap->mbox, adap->mbox, + is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state); if (ret < 0) { dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", ret); @@ -4304,10 +4303,17 @@ static const struct pci_error_handlers cxgb4_eeh = { .resume = eeh_resume, }; +/* Return true if the Link Configuration supports "High Speeds" (those greater + * than 1Gb/s). + */ static inline bool is_x_10g_port(const struct link_config *lc) { - return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 || - (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; + unsigned int speeds, high_speeds; + + speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported)); + high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G); + + return high_speeds != 0; } static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, @@ -4334,6 +4340,11 @@ static void cfg_queues(struct adapter *adap) #endif int ciq_size; + /* Reduce memory usage in kdump environment, disable all offload. 
+ */ + if (is_kdump_kernel()) + adap->params.offload = 0; + for_each_port(adap, i) n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); #ifdef CONFIG_CHELSIO_T4_DCB @@ -4750,8 +4761,12 @@ static void print_port_info(const struct net_device *dev) bufp += sprintf(bufp, "1000/"); if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) bufp += sprintf(bufp, "10G/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G) + bufp += sprintf(bufp, "25G/"); if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) bufp += sprintf(bufp, "40G/"); + if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G) + bufp += sprintf(bufp, "100G/"); if (bufp != buf) --bufp; sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); @@ -4827,6 +4842,60 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev) return -EINVAL; } +#ifdef CONFIG_PCI_IOV +static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) +{ + int err = 0; + int current_vfs = pci_num_vf(pdev); + u32 pcie_fw; + void __iomem *regs; + + regs = pci_ioremap_bar(pdev, 0); + if (!regs) { + dev_err(&pdev->dev, "cannot map device registers\n"); + return -ENOMEM; + } + + pcie_fw = readl(regs + PCIE_FW_A); + iounmap(regs); + /* Check if cxgb4 is the MASTER and fw is initialized */ + if (!(pcie_fw & PCIE_FW_INIT_F) || + !(pcie_fw & PCIE_FW_MASTER_VLD_F) || + PCIE_FW_MASTER_G(pcie_fw) != 4) { + dev_warn(&pdev->dev, + "cxgb4 driver needs to be MASTER to support SRIOV\n"); + return -EOPNOTSUPP; + } + + /* If any of the VF's is already assigned to Guest OS, then + * SRIOV for the same cannot be modified + */ + if (current_vfs && pci_vfs_assigned(pdev)) { + dev_err(&pdev->dev, + "Cannot modify SR-IOV while VFs are assigned\n"); + num_vfs = current_vfs; + return num_vfs; + } + + /* Disable SRIOV when zero is passed. + * One needs to disable SRIOV before modifying it, else + * stack throws the below warning: + * " 'n' VFs already enabled. Disable before enabling 'm' VFs." 
+ */ + if (!num_vfs) { + pci_disable_sriov(pdev); + return num_vfs; + } + + if (num_vfs != current_vfs) { + err = pci_enable_sriov(pdev, num_vfs); + if (err) + return err; + } + return num_vfs; +} +#endif + static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int func, i, err, s_qpp, qpp, num_seg; @@ -5160,11 +5229,16 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) sriov: #ifdef CONFIG_PCI_IOV - if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) + if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) { + dev_warn(&pdev->dev, + "Enabling SR-IOV VFs using the num_vf module " + "parameter is deprecated - please use the pci sysfs " + "interface instead.\n"); if (pci_enable_sriov(pdev, num_vf[func]) == 0) dev_info(&pdev->dev, "instantiated %u virtual functions\n", num_vf[func]); + } #endif return 0; @@ -5257,6 +5331,9 @@ static struct pci_driver cxgb4_driver = { .probe = init_one, .remove = remove_one, .shutdown = remove_one, +#ifdef CONFIG_PCI_IOV + .sriov_configure = cxgb4_iov_configure, +#endif .err_handler = &cxgb4_eeh, }; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index bad253beb..ad3552df0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -1192,7 +1192,7 @@ out_free: dev_kfree_skb_any(skb); /* Discard the packet if the length is greater than mtu */ max_pkt_len = ETH_HLEN + dev->mtu; - if (skb_vlan_tag_present(skb)) + if (skb_vlan_tagged(skb)) max_pkt_len += VLAN_HLEN; if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) goto out_free; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index a63addb4e..660204bff 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3627,7 +3627,8 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) } #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ - FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ + FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \ + FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \ FW_PORT_CAP_ANEG) /** @@ -7196,8 +7197,12 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) speed = 1000; else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) speed = 10000; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) + speed = 25000; else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) speed = 40000; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) + speed = 100000; lc = &pi->link_cfg; @@ -7219,6 +7224,7 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) lc->speed = speed; lc->fc = fc; lc->supported = be16_to_cpu(p->u.info.pcap); + lc->lp_advertising = be16_to_cpu(p->u.info.lpacap); t4_os_link_changed(adap, pi->port_id, link_ok); } } @@ -7284,6 +7290,7 @@ static void get_pci_mode(struct adapter *adapter, struct pci_params *p) static void init_link_config(struct link_config *lc, unsigned int caps) { lc->supported = caps; + lc->lp_advertising = 0; lc->requested_speed = 0; lc->speed = 0; lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index 4705e2dea..e0ebe1378 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -104,6 +104,8 @@ enum { enum CPL_error { CPL_ERR_NONE = 0, + CPL_ERR_TCAM_PARITY = 1, + 
CPL_ERR_TCAM_MISS = 2, CPL_ERR_TCAM_FULL = 3, CPL_ERR_BAD_LENGTH = 15, CPL_ERR_BAD_ROUTE = 18, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 392d6644f..30507d444 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -2249,22 +2249,28 @@ struct fw_acl_vlan_cmd { enum fw_port_cap { FW_PORT_CAP_SPEED_100M = 0x0001, FW_PORT_CAP_SPEED_1G = 0x0002, - FW_PORT_CAP_SPEED_2_5G = 0x0004, + FW_PORT_CAP_SPEED_25G = 0x0004, FW_PORT_CAP_SPEED_10G = 0x0008, FW_PORT_CAP_SPEED_40G = 0x0010, FW_PORT_CAP_SPEED_100G = 0x0020, FW_PORT_CAP_FC_RX = 0x0040, FW_PORT_CAP_FC_TX = 0x0080, FW_PORT_CAP_ANEG = 0x0100, - FW_PORT_CAP_MDI_0 = 0x0200, - FW_PORT_CAP_MDI_1 = 0x0400, - FW_PORT_CAP_BEAN = 0x0800, - FW_PORT_CAP_PMA_LPBK = 0x1000, - FW_PORT_CAP_PCS_LPBK = 0x2000, - FW_PORT_CAP_PHYXS_LPBK = 0x4000, - FW_PORT_CAP_FAR_END_LPBK = 0x8000, + FW_PORT_CAP_MDIX = 0x0200, + FW_PORT_CAP_MDIAUTO = 0x0400, + FW_PORT_CAP_FEC = 0x0800, + FW_PORT_CAP_TECHKR = 0x1000, + FW_PORT_CAP_TECHKX4 = 0x2000, + FW_PORT_CAP_802_3_PAUSE = 0x4000, + FW_PORT_CAP_802_3_ASM_DIR = 0x8000, }; +#define FW_PORT_CAP_SPEED_S 0 +#define FW_PORT_CAP_SPEED_M 0x3f +#define FW_PORT_CAP_SPEED_V(x) ((x) << FW_PORT_CAP_SPEED_S) +#define FW_PORT_CAP_SPEED_G(x) \ + (((x) >> FW_PORT_CAP_SPEED_S) & FW_PORT_CAP_SPEED_M) + enum fw_port_mdi { FW_PORT_CAP_MDI_UNCHANGED, FW_PORT_CAP_MDI_AUTO, @@ -2376,7 +2382,8 @@ struct fw_port_cmd { __u8 cbllen; __u8 auxlinfo; __u8 dcbxdis_pkd; - __u8 r8_lo[3]; + __u8 r8_lo; + __be16 lpacap; __be64 r9; } info; struct fw_port_diags { @@ -2555,6 +2562,11 @@ enum fw_port_type { FW_PORT_TYPE_QSA, FW_PORT_TYPE_QSFP, FW_PORT_TYPE_BP40_BA, + FW_PORT_TYPE_KR4_100G, + FW_PORT_TYPE_CR4_QSFP, + FW_PORT_TYPE_CR_QSFP, + FW_PORT_TYPE_CR2_QSFP, + FW_PORT_TYPE_SFP28, FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M }; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 734dd776c..109bc6304 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -353,6 +353,10 @@ struct hash_mac_addr { u8 addr[ETH_ALEN]; }; +struct mbox_list { + struct list_head list; +}; + /* * Per-"adapter" (Virtual Function) information. 
*/ @@ -387,6 +391,10 @@ struct adapter { /* various locks */ spinlock_t stats_lock; + /* lock for mailbox cmd list */ + spinlock_t mbox_lock; + struct mbox_list mlist; + /* support for mailbox command/reply logging */ #define T4VF_OS_LOG_MBOX_CMDS 256 struct mbox_cmd_log *mbox_log; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 04fc6f6d1..e116bb8d1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -937,12 +937,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) { struct port_info *pi = netdev_priv(dev); - if (!(dev->flags & IFF_PROMISC)) { - __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync); - if (!(dev->flags & IFF_ALLMULTI)) - __dev_mc_sync(dev, cxgb4vf_mac_sync, - cxgb4vf_mac_unsync); - } + __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync); + __dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync); return t4vf_set_rxmode(pi->adapter, pi->viid, -1, (dev->flags & IFF_PROMISC) != 0, (dev->flags & IFF_ALLMULTI) != 0, @@ -1205,105 +1201,187 @@ static void cxgb4vf_poll_controller(struct net_device *dev) * state of the port to which we're linked. */ -static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type, - unsigned int caps) -{ - unsigned int v = 0; - - if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI || - type == FW_PORT_TYPE_BT_XAUI) { - v |= SUPPORTED_TP; - if (caps & FW_PORT_CAP_SPEED_100M) - v |= SUPPORTED_100baseT_Full; - if (caps & FW_PORT_CAP_SPEED_1G) - v |= SUPPORTED_1000baseT_Full; - if (caps & FW_PORT_CAP_SPEED_10G) - v |= SUPPORTED_10000baseT_Full; - } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) { - v |= SUPPORTED_Backplane; - if (caps & FW_PORT_CAP_SPEED_1G) - v |= SUPPORTED_1000baseKX_Full; - if (caps & FW_PORT_CAP_SPEED_10G) - v |= SUPPORTED_10000baseKX4_Full; - } else if (type == FW_PORT_TYPE_KR) - v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; - else if (type == FW_PORT_TYPE_BP_AP) - v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | - SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full; - else if (type == FW_PORT_TYPE_BP4_AP) - v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC | - SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full | - SUPPORTED_10000baseKX4_Full; - else if (type == FW_PORT_TYPE_FIBER_XFI || - type == FW_PORT_TYPE_FIBER_XAUI || - type == FW_PORT_TYPE_SFP || - type == FW_PORT_TYPE_QSFP_10G || - type == FW_PORT_TYPE_QSA) { - v |= SUPPORTED_FIBRE; - if (caps & FW_PORT_CAP_SPEED_1G) - v |= SUPPORTED_1000baseT_Full; - if (caps & FW_PORT_CAP_SPEED_10G) - v |= SUPPORTED_10000baseT_Full; - } else if (type == FW_PORT_TYPE_BP40_BA || - type == FW_PORT_TYPE_QSFP) { - v |= SUPPORTED_40000baseSR4_Full; - v |= SUPPORTED_FIBRE; - } - - if (caps & FW_PORT_CAP_ANEG) - v |= SUPPORTED_Autoneg; - return v; -} - -static int cxgb4vf_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - const struct port_info *p = netdev_priv(dev); - - if (p->port_type == FW_PORT_TYPE_BT_SGMII || - p->port_type == FW_PORT_TYPE_BT_XFI || - p->port_type == FW_PORT_TYPE_BT_XAUI) - cmd->port = PORT_TP; - else if (p->port_type == FW_PORT_TYPE_FIBER_XFI || - p->port_type == FW_PORT_TYPE_FIBER_XAUI) - cmd->port = PORT_FIBRE; - else if (p->port_type == FW_PORT_TYPE_SFP || - p->port_type == FW_PORT_TYPE_QSFP_10G || - p->port_type == FW_PORT_TYPE_QSA || - p->port_type == FW_PORT_TYPE_QSFP) { - if (p->mod_type == FW_PORT_MOD_TYPE_LR || - 
p->mod_type == FW_PORT_MOD_TYPE_SR || - p->mod_type == FW_PORT_MOD_TYPE_ER || - p->mod_type == FW_PORT_MOD_TYPE_LRM) - cmd->port = PORT_FIBRE; - else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || - p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) - cmd->port = PORT_DA; +/** + * from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool + * @port_type: Firmware Port Type + * @mod_type: Firmware Module Type + * + * Translate Firmware Port/Module type to Ethtool Port Type. + */ +static int from_fw_port_mod_type(enum fw_port_type port_type, + enum fw_port_module_type mod_type) +{ + if (port_type == FW_PORT_TYPE_BT_SGMII || + port_type == FW_PORT_TYPE_BT_XFI || + port_type == FW_PORT_TYPE_BT_XAUI) { + return PORT_TP; + } else if (port_type == FW_PORT_TYPE_FIBER_XFI || + port_type == FW_PORT_TYPE_FIBER_XAUI) { + return PORT_FIBRE; + } else if (port_type == FW_PORT_TYPE_SFP || + port_type == FW_PORT_TYPE_QSFP_10G || + port_type == FW_PORT_TYPE_QSA || + port_type == FW_PORT_TYPE_QSFP) { + if (mod_type == FW_PORT_MOD_TYPE_LR || + mod_type == FW_PORT_MOD_TYPE_SR || + mod_type == FW_PORT_MOD_TYPE_ER || + mod_type == FW_PORT_MOD_TYPE_LRM) + return PORT_FIBRE; + else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE || + mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE) + return PORT_DA; else - cmd->port = PORT_OTHER; - } else - cmd->port = PORT_OTHER; + return PORT_OTHER; + } + + return PORT_OTHER; +} + +/** + * fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask + * @port_type: Firmware Port Type + * @fw_caps: Firmware Port Capabilities + * @link_mode_mask: ethtool Link Mode Mask + * + * Translate a Firmware Port Capabilities specification to an ethtool + * Link Mode Mask. + */ +static void fw_caps_to_lmm(enum fw_port_type port_type, + unsigned int fw_caps, + unsigned long *link_mode_mask) +{ + #define SET_LMM(__lmm_name) __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name\ + ## _BIT, link_mode_mask) + + #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \ + do { \ + if (fw_caps & FW_PORT_CAP_ ## __fw_name) \ + SET_LMM(__lmm_name); \ + } while (0) + + switch (port_type) { + case FW_PORT_TYPE_BT_SGMII: + case FW_PORT_TYPE_BT_XFI: + case FW_PORT_TYPE_BT_XAUI: + SET_LMM(TP); + FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); + break; + + case FW_PORT_TYPE_KX4: + case FW_PORT_TYPE_KX: + SET_LMM(Backplane); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full); + break; + + case FW_PORT_TYPE_KR: + SET_LMM(Backplane); + SET_LMM(10000baseKR_Full); + break; + + case FW_PORT_TYPE_BP_AP: + SET_LMM(Backplane); + SET_LMM(10000baseR_FEC); + SET_LMM(10000baseKR_Full); + SET_LMM(1000baseKX_Full); + break; + + case FW_PORT_TYPE_BP4_AP: + SET_LMM(Backplane); + SET_LMM(10000baseR_FEC); + SET_LMM(10000baseKR_Full); + SET_LMM(1000baseKX_Full); + SET_LMM(10000baseKX4_Full); + break; + + case FW_PORT_TYPE_FIBER_XFI: + case FW_PORT_TYPE_FIBER_XAUI: + case FW_PORT_TYPE_SFP: + case FW_PORT_TYPE_QSFP_10G: + case FW_PORT_TYPE_QSA: + SET_LMM(FIBRE); + FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full); + FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full); + break; + + case FW_PORT_TYPE_BP40_BA: + case FW_PORT_TYPE_QSFP: + SET_LMM(FIBRE); + SET_LMM(40000baseSR4_Full); + break; + + case FW_PORT_TYPE_CR_QSFP: + case FW_PORT_TYPE_SFP28: + SET_LMM(FIBRE); + SET_LMM(25000baseCR_Full); + break; + + case FW_PORT_TYPE_KR4_100G: + case FW_PORT_TYPE_CR4_QSFP: + SET_LMM(FIBRE); + SET_LMM(100000baseCR4_Full); + break; + + default: + 
break; + } + + FW_CAPS_TO_LMM(ANEG, Autoneg); + FW_CAPS_TO_LMM(802_3_PAUSE, Pause); + FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause); + + #undef FW_CAPS_TO_LMM + #undef SET_LMM +} + +static int cxgb4vf_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings + *link_ksettings) +{ + const struct port_info *pi = netdev_priv(dev); + struct ethtool_link_settings *base = &link_ksettings->base; - if (p->mdio_addr >= 0) { - cmd->phy_address = p->mdio_addr; - cmd->transceiver = XCVR_EXTERNAL; - cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ? - MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45; + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); + ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising); + + base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type); + + if (pi->mdio_addr >= 0) { + base->phy_address = pi->mdio_addr; + base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII + ? ETH_MDIO_SUPPORTS_C22 + : ETH_MDIO_SUPPORTS_C45); } else { - cmd->phy_address = 0; /* not really, but no better option */ - cmd->transceiver = XCVR_INTERNAL; - cmd->mdio_support = 0; - } - - cmd->supported = t4vf_from_fw_linkcaps(p->port_type, - p->link_cfg.supported); - cmd->advertising = t4vf_from_fw_linkcaps(p->port_type, - p->link_cfg.advertising); - ethtool_cmd_speed_set(cmd, - netif_carrier_ok(dev) ? p->link_cfg.speed : 0); - cmd->duplex = DUPLEX_FULL; - cmd->autoneg = p->link_cfg.autoneg; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; + base->phy_address = 255; + base->mdio_support = 0; + } + + fw_caps_to_lmm(pi->port_type, pi->link_cfg.supported, + link_ksettings->link_modes.supported); + fw_caps_to_lmm(pi->port_type, pi->link_cfg.advertising, + link_ksettings->link_modes.advertising); + fw_caps_to_lmm(pi->port_type, pi->link_cfg.lp_advertising, + link_ksettings->link_modes.lp_advertising); + + if (netif_carrier_ok(dev)) { + base->speed = pi->link_cfg.speed; + base->duplex = DUPLEX_FULL; + } else { + base->speed = SPEED_UNKNOWN; + base->duplex = DUPLEX_UNKNOWN; + } + + base->autoneg = pi->link_cfg.autoneg; + if (pi->link_cfg.supported & FW_PORT_CAP_ANEG) + ethtool_link_ksettings_add_link_mode(link_ksettings, + supported, Autoneg); + if (pi->link_cfg.autoneg) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, Autoneg); + return 0; } @@ -1679,7 +1757,7 @@ static void cxgb4vf_get_wol(struct net_device *dev, #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) static const struct ethtool_ops cxgb4vf_ethtool_ops = { - .get_settings = cxgb4vf_get_settings, + .get_link_ksettings = cxgb4vf_get_link_ksettings, .get_drvinfo = cxgb4vf_get_drvinfo, .get_msglevel = cxgb4vf_get_msglevel, .set_msglevel = cxgb4vf_set_msglevel, @@ -2778,6 +2856,8 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * Initialize SMP data synchronization resources. */ spin_lock_init(&adapter->stats_lock); + spin_lock_init(&adapter->mbox_lock); + INIT_LIST_HEAD(&adapter->mlist.list); /* * Map our I/O registers in BAR0. 
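Both the PF (cxgb4) and VF (cxgb4vf) conversions above funnel through the same SET_LMM()/FW_CAPS_TO_LMM() helpers: a link mode bit is set in the ethtool mask exactly when the corresponding FW_PORT_CAP_* bit is present in the firmware capability word. A stand-alone sketch of that translation pattern, with made-up capability values and a single unsigned long standing in for the kernel's multi-word link-mode bitmap (both simplifications are assumptions for illustration, not the driver's real definitions):

#include <stdio.h>

/* Illustrative capability bits only; the real ones live in t4fw_api.h. */
#define FW_PORT_CAP_SPEED_1G	0x0002
#define FW_PORT_CAP_SPEED_10G	0x0008
#define FW_PORT_CAP_ANEG	0x0100

/* Stand-ins for the ETHTOOL_LINK_MODE_*_BIT numbers. */
enum { LMM_1000baseT_Full, LMM_10000baseT_Full, LMM_Autoneg };

#define SET_LMM(mask, bit)	(*(mask) |= 1UL << (bit))
#define FW_CAPS_TO_LMM(caps, mask, fw_bit, lmm_bit)	\
	do {						\
		if ((caps) & (fw_bit))			\
			SET_LMM(mask, lmm_bit);		\
	} while (0)

int main(void)
{
	unsigned int fw_caps = FW_PORT_CAP_SPEED_1G | FW_PORT_CAP_SPEED_10G |
			       FW_PORT_CAP_ANEG;
	unsigned long lmm = 0;

	FW_CAPS_TO_LMM(fw_caps, &lmm, FW_PORT_CAP_SPEED_1G, LMM_1000baseT_Full);
	FW_CAPS_TO_LMM(fw_caps, &lmm, FW_PORT_CAP_SPEED_10G, LMM_10000baseT_Full);
	FW_CAPS_TO_LMM(fw_caps, &lmm, FW_PORT_CAP_ANEG, LMM_Autoneg);

	printf("link mode mask: 0x%lx\n", lmm);	/* prints 0x7 */
	return 0;
}

The kernel versions differ only in mechanics: they build both macro arguments by token pasting (FW_PORT_CAP_ ## name and ETHTOOL_LINK_MODE_ ## name ## _BIT) and set bits with __set_bit() in a multi-word mask, which is what keeps the per-port-type tables above so compact.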
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 1bb57d3fb..c8fd4f8fe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1188,7 +1188,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) /* Discard the packet if the length is greater than mtu */ max_pkt_len = ETH_HLEN + dev->mtu; - if (skb_vlan_tag_present(skb)) + if (skb_vlan_tagged(skb)) max_pkt_len += VLAN_HLEN; if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) goto out_free; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 438374a05..17a2bbcf9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -107,8 +107,9 @@ struct t4vf_port_stats { struct link_config { unsigned int supported; /* link capabilities */ unsigned int advertising; /* advertised capabilities */ - unsigned short requested_speed; /* speed user has requested */ - unsigned short speed; /* actual link speed */ + unsigned short lp_advertising; /* peer advertised capabilities */ + unsigned int requested_speed; /* speed user has requested */ + unsigned int speed; /* actual link speed */ unsigned char requested_fc; /* flow control user has requested */ unsigned char fc; /* actual link flow control */ unsigned char autoneg; /* autonegotiating? */ @@ -270,10 +271,17 @@ static inline bool is_10g_port(const struct link_config *lc) return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; } +/* Return true if the Link Configuration supports "High Speeds" (those greater + * than 1Gb/s). + */ static inline bool is_x_10g_port(const struct link_config *lc) { - return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 || - (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; + unsigned int speeds, high_speeds; + + speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported)); + high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G); + + return high_speeds != 0; } static inline unsigned int core_ticks_per_usec(const struct adapter *adapter) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 955ff7c61..b5622b168 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -139,6 +139,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL; u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi)); __be64 cmd_rpl[MBOX_LEN / 8]; + struct mbox_list entry; /* In T6, mailbox size is changed to 128 bytes to avoid * invalidating the entire prefetch buffer. @@ -156,6 +157,51 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4) return -EINVAL; + /* Queue ourselves onto the mailbox access list. When our entry is at + * the front of the list, we have rights to access the mailbox. So we + * wait [for a while] till we're at the front [or bail out with an + * EBUSY] ... + */ + spin_lock(&adapter->mbox_lock); + list_add_tail(&entry.list, &adapter->mlist.list); + spin_unlock(&adapter->mbox_lock); + + delay_idx = 0; + ms = delay[0]; + + for (i = 0; ; i += ms) { + /* If we've waited too long, return a busy indication. This + * really ought to be based on our initial position in the + * mailbox access list but this is a start. 
We very rarely + * contend on access to the mailbox ... + */ + if (i > FW_CMD_MAX_TIMEOUT) { + spin_lock(&adapter->mbox_lock); + list_del(&entry.list); + spin_unlock(&adapter->mbox_lock); + ret = -EBUSY; + t4vf_record_mbox(adapter, cmd, size, access, ret); + return ret; + } + + /* If we're at the head, break out and start the mailbox + * protocol. + */ + if (list_first_entry(&adapter->mlist.list, struct mbox_list, + list) == &entry) + break; + + /* Delay for a bit before checking again ... */ + if (sleep_ok) { + ms = delay[delay_idx]; /* last element may repeat */ + if (delay_idx < ARRAY_SIZE(delay) - 1) + delay_idx++; + msleep(ms); + } else { + mdelay(ms); + } + } + /* * Loop trying to get ownership of the mailbox. Return an error * if we can't gain ownership. @@ -164,6 +210,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl)); if (v != MBOX_OWNER_DRV) { + spin_lock(&adapter->mbox_lock); + list_del(&entry.list); + spin_unlock(&adapter->mbox_lock); ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT; t4vf_record_mbox(adapter, cmd, size, access, ret); return ret; @@ -248,6 +297,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, if (cmd_op != FW_VI_STATS_CMD) t4vf_record_mbox(adapter, cmd_rpl, size, access, execute); + spin_lock(&adapter->mbox_lock); + list_del(&entry.list); + spin_unlock(&adapter->mbox_lock); return -FW_CMD_RETVAL_G(v); } } @@ -255,12 +307,16 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, /* We timed out. Return the error ... */ ret = -ETIMEDOUT; t4vf_record_mbox(adapter, cmd, size, access, ret); + spin_lock(&adapter->mbox_lock); + list_del(&entry.list); + spin_unlock(&adapter->mbox_lock); return ret; } #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ - FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ - FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG) + FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \ + FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \ + FW_PORT_CAP_ANEG) /** * init_link_config - initialize a link's SW state @@ -273,6 +329,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, static void init_link_config(struct link_config *lc, unsigned int caps) { lc->supported = caps; + lc->lp_advertising = 0; lc->requested_speed = 0; lc->speed = 0; lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; @@ -1656,8 +1713,12 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) speed = 1000; else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) speed = 10000; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) + speed = 25000; else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) speed = 40000; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) + speed = 100000; /* * Scan all of our "ports" (Virtual Interfaces) looking for @@ -1688,6 +1749,8 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) lc->fc = fc; lc->supported = be16_to_cpu(port_cmd->u.info.pcap); + lc->lp_advertising = + be16_to_cpu(port_cmd->u.info.lpacap); t4vf_os_link_changed(adapter, pidx, link_ok); } } diff --git a/drivers/net/ethernet/chelsio/libcxgb/Makefile new file mode 100644 index 000000000..2362230ef --- /dev/null +++ b/drivers/net/ethernet/chelsio/libcxgb/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_CHELSIO_LIB) += libcxgb.o + +libcxgb-y := libcxgb_ppm.o
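One note on the t4vf_wr_mbox_core() change above before moving on to libcxgb: it serializes mailbox access with a ticket-style FIFO. Each caller appends an on-stack list node under mbox_lock, only the node at the head of adapter->mlist may run the mailbox protocol, and every exit path (wait timeout, ownership failure, command completion, command timeout) must unlink its node. Condensed, the shape of the logic is as follows (a sketch of the code above, not compilable as-is; backoff() is a hypothetical stand-in for the msleep()/mdelay() ladder):

	struct mbox_list entry;

	/* Take a ticket: append our on-stack node to the FIFO. */
	spin_lock(&adapter->mbox_lock);
	list_add_tail(&entry.list, &adapter->mlist.list);
	spin_unlock(&adapter->mbox_lock);

	/* Wait until our node reaches the head, or give up with -EBUSY. */
	for (i = 0; ; i += ms) {
		if (i > FW_CMD_MAX_TIMEOUT)
			goto unlink;		/* ret = -EBUSY */
		if (list_first_entry(&adapter->mlist.list,
				     struct mbox_list, list) == &entry)
			break;			/* our turn */
		backoff(&ms);			/* msleep()/mdelay() */
	}

	/* ... own the mailbox, send the command, poll for the reply ... */

unlink:
	/* Every exit path unlinks its node so the next waiter advances. */
	spin_lock(&adapter->mbox_lock);
	list_del(&entry.list);
	spin_unlock(&adapter->mbox_lock);

Because the node lives on the caller's stack, queuing can never fail with -ENOMEM, which matters for mailbox commands issued on error-recovery paths.

diff --git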
a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c new file mode 100644 index 000000000..0ed161642 --- /dev/null +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c @@ -0,0 +1,498 @@ +/* + * libcxgb_ppm.c: Chelsio common library for T3/T4/T5 iSCSI PagePod Manager + * + * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Written by: Karen Xie (kxie@chelsio.com) + */ + +#define DRV_NAME "libcxgb" +#define DRV_VERSION "1.0.0-ko" +#define pr_fmt(fmt) DRV_NAME ": " fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/debugfs.h> +#include <linux/export.h> +#include <linux/list.h> +#include <linux/skbuff.h> +#include <linux/pci.h> +#include <linux/scatterlist.h> + +#include "libcxgb_ppm.h" + +/* Direct Data Placement - + * Directly place the iSCSI Data-In or Data-Out PDU's payload into + * pre-posted final destination host-memory buffers based on the + * Initiator Task Tag (ITT) in Data-In or Target Task Tag (TTT) + * in Data-Out PDUs. The host memory address is programmed into + * h/w in the format of pagepod entries. The location of the + * pagepod entry is encoded into ddp tag which is used as the base + * for ITT/TTT. 
+ */ + +/* Direct-Data Placement page size adjustment + */ +int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz) +{ + struct cxgbi_tag_format *tformat = &ppm->tformat; + int i; + + for (i = 0; i < DDP_PGIDX_MAX; i++) { + if (pgsz == 1UL << (DDP_PGSZ_BASE_SHIFT + + tformat->pgsz_order[i])) { + pr_debug("%s: %s ppm, pgsz %lu -> idx %d.\n", + __func__, ppm->ndev->name, pgsz, i); + return i; + } + } + pr_info("ippm: ddp page size %lu not supported.\n", pgsz); + return DDP_PGIDX_MAX; +} + +/* DDP setup & teardown + */ +static int ppm_find_unused_entries(unsigned long *bmap, + unsigned int max_ppods, + unsigned int start, + unsigned int nr, + unsigned int align_mask) +{ + unsigned long i; + + i = bitmap_find_next_zero_area(bmap, max_ppods, start, nr, align_mask); + + if (unlikely(i >= max_ppods) && (start > nr)) + i = bitmap_find_next_zero_area(bmap, max_ppods, 0, start - 1, + align_mask); + if (unlikely(i >= max_ppods)) + return -ENOSPC; + + bitmap_set(bmap, i, nr); + return (int)i; +} + +static void ppm_mark_entries(struct cxgbi_ppm *ppm, int i, int count, + unsigned long caller_data) +{ + struct cxgbi_ppod_data *pdata = ppm->ppod_data + i; + + pdata->caller_data = caller_data; + pdata->npods = count; + + if (pdata->color == ((1 << PPOD_IDX_SHIFT) - 1)) + pdata->color = 0; + else + pdata->color++; +} + +static int ppm_get_cpu_entries(struct cxgbi_ppm *ppm, unsigned int count, + unsigned long caller_data) +{ + struct cxgbi_ppm_pool *pool; + unsigned int cpu; + int i; + + cpu = get_cpu(); + pool = per_cpu_ptr(ppm->pool, cpu); + spin_lock_bh(&pool->lock); + put_cpu(); + + i = ppm_find_unused_entries(pool->bmap, ppm->pool_index_max, + pool->next, count, 0); + if (i < 0) { + pool->next = 0; + spin_unlock_bh(&pool->lock); + return -ENOSPC; + } + + pool->next = i + count; + if (pool->next >= ppm->pool_index_max) + pool->next = 0; + + spin_unlock_bh(&pool->lock); + + pr_debug("%s: cpu %u, idx %d + %d (%d), next %u.\n", + __func__, cpu, i, count, i + cpu * ppm->pool_index_max, + pool->next); + + i += cpu * ppm->pool_index_max; + ppm_mark_entries(ppm, i, count, caller_data); + + return i; +} + +static int ppm_get_entries(struct cxgbi_ppm *ppm, unsigned int count, + unsigned long caller_data) +{ + int i; + + spin_lock_bh(&ppm->map_lock); + i = ppm_find_unused_entries(ppm->ppod_bmap, ppm->bmap_index_max, + ppm->next, count, 0); + if (i < 0) { + ppm->next = 0; + spin_unlock_bh(&ppm->map_lock); + pr_debug("ippm: NO suitable entries %u available.\n", + count); + return -ENOSPC; + } + + ppm->next = i + count; + if (ppm->next >= ppm->bmap_index_max) + ppm->next = 0; + + spin_unlock_bh(&ppm->map_lock); + + pr_debug("%s: idx %d + %d (%d), next %u, caller_data 0x%lx.\n", + __func__, i, count, i + ppm->pool_rsvd, ppm->next, + caller_data); + + i += ppm->pool_rsvd; + ppm_mark_entries(ppm, i, count, caller_data); + + return i; +} + +static void ppm_unmark_entries(struct cxgbi_ppm *ppm, int i, int count) +{ + pr_debug("%s: idx %d + %d.\n", __func__, i, count); + + if (i < ppm->pool_rsvd) { + unsigned int cpu; + struct cxgbi_ppm_pool *pool; + + cpu = i / ppm->pool_index_max; + i %= ppm->pool_index_max; + + pool = per_cpu_ptr(ppm->pool, cpu); + spin_lock_bh(&pool->lock); + bitmap_clear(pool->bmap, i, count); + + if (i < pool->next) + pool->next = i; + spin_unlock_bh(&pool->lock); + + pr_debug("%s: cpu %u, idx %d, next %u.\n", + __func__, cpu, i, pool->next); + } else { + spin_lock_bh(&ppm->map_lock); + + i -= ppm->pool_rsvd; + bitmap_clear(ppm->ppod_bmap, i, count); + + if (i < ppm->next) + 
ppm->next = i; + spin_unlock_bh(&ppm->map_lock); + + pr_debug("%s: idx %d, next %u.\n", __func__, i, ppm->next); + } +} + +void cxgbi_ppm_ppod_release(struct cxgbi_ppm *ppm, u32 idx) +{ + struct cxgbi_ppod_data *pdata; + + if (idx >= ppm->ppmax) { + pr_warn("ippm: idx too big %u > %u.\n", idx, ppm->ppmax); + return; + } + + pdata = ppm->ppod_data + idx; + if (!pdata->npods) { + pr_warn("ippm: idx %u, npods 0.\n", idx); + return; + } + + pr_debug("release idx %u, npods %u.\n", idx, pdata->npods); + ppm_unmark_entries(ppm, idx, pdata->npods); +} +EXPORT_SYMBOL(cxgbi_ppm_ppod_release); + +int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *ppm, unsigned short nr_pages, + u32 per_tag_pg_idx, u32 *ppod_idx, + u32 *ddp_tag, unsigned long caller_data) +{ + struct cxgbi_ppod_data *pdata; + unsigned int npods; + int idx = -1; + unsigned int hwidx; + u32 tag; + + npods = (nr_pages + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT; + if (!npods) { + pr_warn("%s: pages %u -> npods %u, full.\n", + __func__, nr_pages, npods); + return -EINVAL; + } + + /* grab from cpu pool first */ + idx = ppm_get_cpu_entries(ppm, npods, caller_data); + /* try the general pool */ + if (idx < 0) + idx = ppm_get_entries(ppm, npods, caller_data); + if (idx < 0) { + pr_debug("ippm: pages %u, nospc %u, nxt %u, 0x%lx.\n", + nr_pages, npods, ppm->next, caller_data); + return idx; + } + + pdata = ppm->ppod_data + idx; + hwidx = ppm->base_idx + idx; + + tag = cxgbi_ppm_make_ddp_tag(hwidx, pdata->color); + + if (per_tag_pg_idx) + tag |= (per_tag_pg_idx << 30) & 0xC0000000; + + *ppod_idx = idx; + *ddp_tag = tag; + + pr_debug("ippm: sg %u, tag 0x%x(%u,%u), data 0x%lx.\n", + nr_pages, tag, idx, npods, caller_data); + + return npods; +} +EXPORT_SYMBOL(cxgbi_ppm_ppods_reserve); + +void cxgbi_ppm_make_ppod_hdr(struct cxgbi_ppm *ppm, u32 tag, + unsigned int tid, unsigned int offset, + unsigned int length, + struct cxgbi_pagepod_hdr *hdr) +{ + /* The ddp tag in pagepod should be with bit 31:30 set to 0. 
+ * The ddp Tag on the wire should be with non-zero 31:30 to the peer + */ + tag &= 0x3FFFFFFF; + + hdr->vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid)); + + hdr->rsvd = 0; + hdr->pgsz_tag_clr = htonl(tag & ppm->tformat.idx_clr_mask); + hdr->max_offset = htonl(length); + hdr->page_offset = htonl(offset); + + pr_debug("ippm: tag 0x%x, tid 0x%x, xfer %u, off %u.\n", + tag, tid, length, offset); +} +EXPORT_SYMBOL(cxgbi_ppm_make_ppod_hdr); + +static void ppm_free(struct cxgbi_ppm *ppm) +{ + vfree(ppm); +} + +static void ppm_destroy(struct kref *kref) +{ + struct cxgbi_ppm *ppm = container_of(kref, + struct cxgbi_ppm, + refcnt); + pr_info("ippm: kref 0, destroy %s ppm 0x%p.\n", + ppm->ndev->name, ppm); + + *ppm->ppm_pp = NULL; + + free_percpu(ppm->pool); + ppm_free(ppm); +} + +int cxgbi_ppm_release(struct cxgbi_ppm *ppm) +{ + if (ppm) { + int rv; + + rv = kref_put(&ppm->refcnt, ppm_destroy); + return rv; + } + return 1; +} +EXPORT_SYMBOL(cxgbi_ppm_release); + +static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total, + unsigned int *pcpu_ppmax) +{ + struct cxgbi_ppm_pool *pools; + unsigned int ppmax = (*total) / num_possible_cpus(); + unsigned int max = (PCPU_MIN_UNIT_SIZE - sizeof(*pools)) << 3; + unsigned int bmap; + unsigned int alloc_sz; + unsigned int count = 0; + unsigned int cpu; + + /* make sure per cpu pool fits into PCPU_MIN_UNIT_SIZE */ + if (ppmax > max) + ppmax = max; + + /* pool size must be multiple of unsigned long */ + bmap = BITS_TO_LONGS(ppmax); + ppmax = (bmap * sizeof(unsigned long)) << 3; + + alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; + pools = __alloc_percpu(alloc_sz, __alignof__(struct cxgbi_ppm_pool)); + + if (!pools) + return NULL; + + for_each_possible_cpu(cpu) { + struct cxgbi_ppm_pool *ppool = per_cpu_ptr(pools, cpu); + + memset(ppool, 0, alloc_sz); + spin_lock_init(&ppool->lock); + count += ppmax; + } + + *total = count; + *pcpu_ppmax = ppmax; + + return pools; +} + +int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev, + struct pci_dev *pdev, void *lldev, + struct cxgbi_tag_format *tformat, + unsigned int ppmax, + unsigned int llimit, + unsigned int start, + unsigned int reserve_factor) +{ + struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*ppm_pp); + struct cxgbi_ppm_pool *pool = NULL; + unsigned int ppmax_pool = 0; + unsigned int pool_index_max = 0; + unsigned int alloc_sz; + unsigned int ppod_bmap_size; + + if (ppm) { + pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n", + ndev->name, ppm_pp, ppm, ppm->ppmax, ppmax); + kref_get(&ppm->refcnt); + return 1; + } + + if (reserve_factor) { + ppmax_pool = ppmax / reserve_factor; + pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max); + + pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n", + ndev->name, ppmax, ppmax_pool, pool_index_max); + } + + ppod_bmap_size = BITS_TO_LONGS(ppmax - ppmax_pool); + alloc_sz = sizeof(struct cxgbi_ppm) + + ppmax * (sizeof(struct cxgbi_ppod_data)) + + ppod_bmap_size * sizeof(unsigned long); + + ppm = vmalloc(alloc_sz); + if (!ppm) + goto release_ppm_pool; + + memset(ppm, 0, alloc_sz); + + ppm->ppod_bmap = (unsigned long *)(&ppm->ppod_data[ppmax]); + + if ((ppod_bmap_size >> 3) > (ppmax - ppmax_pool)) { + unsigned int start = ppmax - ppmax_pool; + unsigned int end = ppod_bmap_size >> 3; + + bitmap_set(ppm->ppod_bmap, ppmax, end - start); + pr_info("%s: %u - %u < %u * 8, mask extra bits %u, %u.\n", + __func__, ppmax, ppmax_pool, ppod_bmap_size, start, + end); + } + + spin_lock_init(&ppm->map_lock); + kref_init(&ppm->refcnt); + + 
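+	/* Everything below publishes the carve-up computed above: indices
+	 * [0, pool_rsvd) are handed out from the per-cpu pools (each under
+	 * its own pool->lock), indices [pool_rsvd, ppmax) from the global
+	 * ppod_bmap under map_lock.
+	 */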
memcpy(&ppm->tformat, tformat, sizeof(struct cxgbi_tag_format)); + + ppm->ppm_pp = ppm_pp; + ppm->ndev = ndev; + ppm->pdev = pdev; + ppm->lldev = lldev; + ppm->ppmax = ppmax; + ppm->next = 0; + ppm->llimit = llimit; + ppm->base_idx = start > llimit ? + (start - llimit + 1) >> PPOD_SIZE_SHIFT : 0; + ppm->bmap_index_max = ppmax - ppmax_pool; + + ppm->pool = pool; + ppm->pool_rsvd = ppmax_pool; + ppm->pool_index_max = pool_index_max; + + /* check one more time */ + if (*ppm_pp) { + ppm_free(ppm); + ppm = (struct cxgbi_ppm *)(*ppm_pp); + + pr_info("ippm: %s, ppm 0x%p,0x%p already initialized, %u/%u.\n", + ndev->name, ppm_pp, *ppm_pp, ppm->ppmax, ppmax); + + kref_get(&ppm->refcnt); + return 1; + } + *ppm_pp = ppm; + + ppm->tformat.pgsz_idx_dflt = cxgbi_ppm_find_page_index(ppm, PAGE_SIZE); + + pr_info("ippm %s: ppm 0x%p, 0x%p, base %u/%u, pg %lu,%u, rsvd %u,%u.\n", + ndev->name, ppm_pp, ppm, ppm->base_idx, ppm->ppmax, PAGE_SIZE, + ppm->tformat.pgsz_idx_dflt, ppm->pool_rsvd, + ppm->pool_index_max); + + return 0; + +release_ppm_pool: + free_percpu(pool); + return -ENOMEM; +} +EXPORT_SYMBOL(cxgbi_ppm_init); + +unsigned int cxgbi_tagmask_set(unsigned int ppmax) +{ + unsigned int bits = fls(ppmax); + + if (bits > PPOD_IDX_MAX_SIZE) + bits = PPOD_IDX_MAX_SIZE; + + pr_info("ippm: ppmax %u/0x%x -> bits %u, tagmask 0x%x.\n", + ppmax, ppmax, bits, 1 << (bits + PPOD_IDX_SHIFT)); + + return 1 << (bits + PPOD_IDX_SHIFT); +} +EXPORT_SYMBOL(cxgbi_tagmask_set); + +MODULE_AUTHOR("Chelsio Communications"); +MODULE_DESCRIPTION("Chelsio common library"); +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h new file mode 100644 index 000000000..e995a1a38 --- /dev/null +++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h @@ -0,0 +1,334 @@ +/* + * libcxgb_ppm.h: Chelsio common library for T3/T4/T5 iSCSI ddp operation + * + * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Written by: Karen Xie (kxie@chelsio.com) + */ + +#ifndef __LIBCXGB_PPM_H__ +#define __LIBCXGB_PPM_H__ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/debugfs.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/scatterlist.h> +#include <linux/skbuff.h> +#include <linux/vmalloc.h> +#include <linux/bitmap.h> + +struct cxgbi_pagepod_hdr { + u32 vld_tid; + u32 pgsz_tag_clr; + u32 max_offset; + u32 page_offset; + u64 rsvd; +}; + +#define PPOD_PAGES_MAX 4 +struct cxgbi_pagepod { + struct cxgbi_pagepod_hdr hdr; + u64 addr[PPOD_PAGES_MAX + 1]; +}; + +/* ddp tag format + * for a 32-bit tag: + * bit # + * 31 ..... ..... 0 + * X Y...Y Z...Z, where + * ^ ^^^^^ ^^^^ + * | | |____ when ddp bit = 0: color bits + * | | + * | |____ when ddp bit = 0: idx into the ddp memory region + * | + * |____ ddp bit: 0 - ddp tag, 1 - non-ddp tag + * + * [page selector:2] [sw/free bits] [0] [idx] [color:6] + */ + +#define DDP_PGIDX_MAX 4 +#define DDP_PGSZ_BASE_SHIFT 12 /* base page 4K */ + +struct cxgbi_task_tag_info { + unsigned char flags; +#define CXGBI_PPOD_INFO_FLAG_VALID 0x1 +#define CXGBI_PPOD_INFO_FLAG_MAPPED 0x2 + unsigned char cid; + unsigned short pg_shift; + unsigned int npods; + unsigned int idx; + unsigned int tag; + struct cxgbi_pagepod_hdr hdr; + int nents; + int nr_pages; + struct scatterlist *sgl; +}; + +struct cxgbi_tag_format { + unsigned char pgsz_order[DDP_PGIDX_MAX]; + unsigned char pgsz_idx_dflt; + unsigned char free_bits:4; + unsigned char color_bits:4; + unsigned char idx_bits; + unsigned char rsvd_bits; + unsigned int no_ddp_mask; + unsigned int idx_mask; + unsigned int color_mask; + unsigned int idx_clr_mask; + unsigned int rsvd_mask; +}; + +struct cxgbi_ppod_data { + unsigned char pg_idx:2; + unsigned char color:6; + unsigned char chan_id; + unsigned short npods; + unsigned long caller_data; +}; + +/* per cpu ppm pool */ +struct cxgbi_ppm_pool { + unsigned int base; /* base index */ + unsigned int next; /* next possible free index */ + spinlock_t lock; /* ppm pool lock */ + unsigned long bmap[0]; +} ____cacheline_aligned_in_smp; + +struct cxgbi_ppm { + struct kref refcnt; + struct net_device *ndev; /* net_device, 1st port */ + struct pci_dev *pdev; + void *lldev; + void **ppm_pp; + struct cxgbi_tag_format tformat; + unsigned int ppmax; + unsigned int llimit; + unsigned int base_idx; + + unsigned int pool_rsvd; + unsigned int pool_index_max; + struct cxgbi_ppm_pool __percpu *pool; + /* map lock */ + spinlock_t map_lock; /* ppm map lock */ + unsigned int bmap_index_max; + unsigned int next; + unsigned long *ppod_bmap; + struct cxgbi_ppod_data ppod_data[0]; +}; + +#define DDP_THRESHOLD 512 + +#define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */ + +#define IPPOD_SIZE sizeof(struct cxgbi_pagepod) /* 64 */ +#define PPOD_SIZE_SHIFT 6 + +/* page pods are allocated in groups of this size (must be power of 2) */ +#define PPOD_CLUSTER_SIZE 16U + +#define ULPMEM_DSGL_MAX_NPPODS 16 /* 1024/PPOD_SIZE */ +#define ULPMEM_IDATA_MAX_NPPODS 3 /* (PPOD_SIZE * 3 + ulptx hdr) < 256B */ +#define PCIE_MEMWIN_MAX_NPPODS 16 /* 1024/PPOD_SIZE */ + +#define PPOD_COLOR_SHIFT 0 +#define PPOD_COLOR(x) ((x) << PPOD_COLOR_SHIFT) + +#define PPOD_IDX_SHIFT 6 +#define PPOD_IDX_MAX_SIZE 24 + +#define PPOD_TID_SHIFT 0 +#define PPOD_TID(x) ((x) << PPOD_TID_SHIFT) + +#define PPOD_TAG_SHIFT 6 +#define PPOD_TAG(x) ((x) << PPOD_TAG_SHIFT) + +#define PPOD_VALID_SHIFT 24 +#define PPOD_VALID(x) ((x) << PPOD_VALID_SHIFT) +#define PPOD_VALID_FLAG PPOD_VALID(1U) + 
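The tag-format comment above is worth making concrete. With PPOD_IDX_SHIFT = 6, the low 6 bits of a ddp tag carry the color and the bits above them carry the pagepod index, which is exactly how cxgbi_ppm_make_ddp_tag() composes a tag (the real decode path, cxgbi_ppm_ddp_tag_get_idx(), additionally masks with tformat.idx_mask and subtracts base_idx). A small userspace sketch of the compose/decompose round trip, assuming only the PPOD_IDX_SHIFT value defined above:

#include <stdio.h>
#include <stdint.h>

#define PPOD_IDX_SHIFT	6		/* low 6 bits: color */

static uint32_t make_ddp_tag(uint32_t hw_idx, uint8_t color)
{
	/* Mirrors cxgbi_ppm_make_ddp_tag(): index above, color below. */
	return (hw_idx << PPOD_IDX_SHIFT) | color;
}

int main(void)
{
	uint32_t tag = make_ddp_tag(0x123, 0x2a);

	printf("tag 0x%x -> idx 0x%x, color 0x%x\n", tag,
	       tag >> PPOD_IDX_SHIFT,
	       tag & ((1u << PPOD_IDX_SHIFT) - 1));	/* 0x48ea -> 0x123, 0x2a */
	return 0;
}

The two page-selector bits (31:30) stay clear in stored tags: cxgbi_ppm_ppods_reserve() ORs a per-tag page-size index into them only on the wire-visible copy, and cxgbi_ppm_make_ppod_hdr() masks them back off before writing the pagepod.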
+#define PPOD_PI_EXTRACT_CTL_SHIFT 31 +#define PPOD_PI_EXTRACT_CTL(x) ((x) << PPOD_PI_EXTRACT_CTL_SHIFT) +#define PPOD_PI_EXTRACT_CTL_FLAG V_PPOD_PI_EXTRACT_CTL(1U) + +#define PPOD_PI_TYPE_SHIFT 29 +#define PPOD_PI_TYPE_MASK 0x3 +#define PPOD_PI_TYPE(x) ((x) << PPOD_PI_TYPE_SHIFT) + +#define PPOD_PI_CHECK_CTL_SHIFT 27 +#define PPOD_PI_CHECK_CTL_MASK 0x3 +#define PPOD_PI_CHECK_CTL(x) ((x) << PPOD_PI_CHECK_CTL_SHIFT) + +#define PPOD_PI_REPORT_CTL_SHIFT 25 +#define PPOD_PI_REPORT_CTL_MASK 0x3 +#define PPOD_PI_REPORT_CTL(x) ((x) << PPOD_PI_REPORT_CTL_SHIFT) + +static inline int cxgbi_ppm_is_ddp_tag(struct cxgbi_ppm *ppm, u32 tag) +{ + return !(tag & ppm->tformat.no_ddp_mask); +} + +static inline int cxgbi_ppm_sw_tag_is_usable(struct cxgbi_ppm *ppm, + u32 tag) +{ + /* the sw tag must be using <= 31 bits */ + return !(tag & 0x80000000U); +} + +static inline int cxgbi_ppm_make_non_ddp_tag(struct cxgbi_ppm *ppm, + u32 sw_tag, + u32 *final_tag) +{ + struct cxgbi_tag_format *tformat = &ppm->tformat; + + if (!cxgbi_ppm_sw_tag_is_usable(ppm, sw_tag)) { + pr_info("sw_tag 0x%x NOT usable.\n", sw_tag); + return -EINVAL; + } + + if (!sw_tag) { + *final_tag = tformat->no_ddp_mask; + } else { + unsigned int shift = tformat->idx_bits + tformat->color_bits; + u32 lower = sw_tag & tformat->idx_clr_mask; + u32 upper = (sw_tag >> shift) << (shift + 1); + + *final_tag = upper | tformat->no_ddp_mask | lower; + } + return 0; +} + +static inline u32 cxgbi_ppm_decode_non_ddp_tag(struct cxgbi_ppm *ppm, + u32 tag) +{ + struct cxgbi_tag_format *tformat = &ppm->tformat; + unsigned int shift = tformat->idx_bits + tformat->color_bits; + u32 lower = tag & tformat->idx_clr_mask; + u32 upper = (tag >> tformat->rsvd_bits) << shift; + + return upper | lower; +} + +static inline u32 cxgbi_ppm_ddp_tag_get_idx(struct cxgbi_ppm *ppm, + u32 ddp_tag) +{ + u32 hw_idx = (ddp_tag >> PPOD_IDX_SHIFT) & + ppm->tformat.idx_mask; + + return hw_idx - ppm->base_idx; +} + +static inline u32 cxgbi_ppm_make_ddp_tag(unsigned int hw_idx, + unsigned char color) +{ + return (hw_idx << PPOD_IDX_SHIFT) | ((u32)color); +} + +static inline unsigned long +cxgbi_ppm_get_tag_caller_data(struct cxgbi_ppm *ppm, + u32 ddp_tag) +{ + u32 idx = cxgbi_ppm_ddp_tag_get_idx(ppm, ddp_tag); + + return ppm->ppod_data[idx].caller_data; +} + +/* sw bits are the free bits */ +static inline int cxgbi_ppm_ddp_tag_update_sw_bits(struct cxgbi_ppm *ppm, + u32 val, u32 orig_tag, + u32 *final_tag) +{ + struct cxgbi_tag_format *tformat = &ppm->tformat; + u32 v = val >> tformat->free_bits; + + if (v) { + pr_info("sw_bits 0x%x too large, avail bits %u.\n", + val, tformat->free_bits); + return -EINVAL; + } + if (!cxgbi_ppm_is_ddp_tag(ppm, orig_tag)) + return -EINVAL; + + *final_tag = (val << tformat->rsvd_bits) | + (orig_tag & ppm->tformat.rsvd_mask); + return 0; +} + +static inline void cxgbi_ppm_ppod_clear(struct cxgbi_pagepod *ppod) +{ + ppod->hdr.vld_tid = 0U; +} + +static inline void cxgbi_tagmask_check(unsigned int tagmask, + struct cxgbi_tag_format *tformat) +{ + unsigned int bits = fls(tagmask); + + /* reserve top most 2 bits for page selector */ + tformat->free_bits = 32 - 2 - bits; + tformat->rsvd_bits = bits; + tformat->color_bits = PPOD_IDX_SHIFT; + tformat->idx_bits = bits - 1 - PPOD_IDX_SHIFT; + tformat->no_ddp_mask = 1 << (bits - 1); + tformat->idx_mask = (1 << tformat->idx_bits) - 1; + tformat->color_mask = (1 << PPOD_IDX_SHIFT) - 1; + tformat->idx_clr_mask = (1 << (bits - 1)) - 1; + tformat->rsvd_mask = (1 << bits) - 1; + + pr_info("ippm: tagmask 0x%x, rsvd 
%u=%u+%u+1, mask 0x%x,0x%x, " + "pg %u,%u,%u,%u.\n", + tagmask, tformat->rsvd_bits, tformat->idx_bits, + tformat->color_bits, tformat->no_ddp_mask, tformat->rsvd_mask, + tformat->pgsz_order[0], tformat->pgsz_order[1], + tformat->pgsz_order[2], tformat->pgsz_order[3]); +} + +int cxgbi_ppm_find_page_index(struct cxgbi_ppm *ppm, unsigned long pgsz); +void cxgbi_ppm_make_ppod_hdr(struct cxgbi_ppm *ppm, u32 tag, + unsigned int tid, unsigned int offset, + unsigned int length, + struct cxgbi_pagepod_hdr *hdr); +void cxgbi_ppm_ppod_release(struct cxgbi_ppm *, u32 idx); +int cxgbi_ppm_ppods_reserve(struct cxgbi_ppm *, unsigned short nr_pages, + u32 per_tag_pg_idx, u32 *ppod_idx, u32 *ddp_tag, + unsigned long caller_data); +int cxgbi_ppm_init(void **ppm_pp, struct net_device *, struct pci_dev *, + void *lldev, struct cxgbi_tag_format *, + unsigned int ppmax, unsigned int llimit, + unsigned int start, + unsigned int reserve_factor); +int cxgbi_ppm_release(struct cxgbi_ppm *ppm); +void cxgbi_tagmask_check(unsigned int tagmask, struct cxgbi_tag_format *); +unsigned int cxgbi_tagmask_set(unsigned int ppmax); + +#endif /*__LIBCXGB_PPM_H__*/ diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 60383040d..c363b5855 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -53,6 +53,8 @@ #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> +#include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/types.h> @@ -1895,9 +1897,17 @@ static int cs89x0_platform_remove(struct platform_device *pdev) return 0; } +static const struct __maybe_unused of_device_id cs89x0_match[] = { + { .compatible = "cirrus,cs8900", }, + { .compatible = "cirrus,cs8920", }, + { }, +}; +MODULE_DEVICE_TABLE(of, cs89x0_match); + static struct platform_driver cs89x0_driver = { .driver = { - .name = DRV_NAME, + .name = DRV_NAME, + .of_match_table = of_match_ptr(cs89x0_match), }, .remove = cs89x0_platform_remove, }; diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index f44a39c40..fd3980cc1 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -103,25 +103,29 @@ static void enic_intr_coal_set_rx(struct enic *enic, u32 timer) } } -static int enic_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +static int enic_get_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ecmd) { struct enic *enic = netdev_priv(netdev); + struct ethtool_link_settings *base = &ecmd->base; - ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); - ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); - ecmd->port = PORT_FIBRE; - ecmd->transceiver = XCVR_EXTERNAL; + ethtool_link_ksettings_add_link_mode(ecmd, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ecmd, advertising, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE); + base->port = PORT_FIBRE; if (netif_carrier_ok(netdev)) { - ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev)); - ecmd->duplex = DUPLEX_FULL; + base->speed = vnic_dev_port_speed(enic->vdev); + base->duplex = DUPLEX_FULL; } else { - ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); - ecmd->duplex = DUPLEX_UNKNOWN; + base->speed = SPEED_UNKNOWN; + base->duplex = 
DUPLEX_UNKNOWN; } - ecmd->autoneg = AUTONEG_DISABLE; + base->autoneg = AUTONEG_DISABLE; return 0; } @@ -500,7 +504,6 @@ static int enic_set_rxfh(struct net_device *netdev, const u32 *indir, } static const struct ethtool_ops enic_ethtool_ops = { - .get_settings = enic_get_settings, .get_drvinfo = enic_get_drvinfo, .get_msglevel = enic_get_msglevel, .set_msglevel = enic_set_msglevel, @@ -516,6 +519,7 @@ static const struct ethtool_ops enic_ethtool_ops = { .get_rxfh_key_size = enic_get_rxfh_key_size, .get_rxfh = enic_get_rxfh, .set_rxfh = enic_set_rxfh, + .get_link_ksettings = enic_get_ksettings, }; void enic_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index f15560a06..48f82ab6c 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1566,7 +1566,7 @@ static int enic_request_intr(struct enic *enic) intr = enic_msix_rq_intr(enic, i); snprintf(enic->msix[intr].devname, sizeof(enic->msix[intr].devname), - "%.11s-rx-%d", netdev->name, i); + "%.11s-rx-%u", netdev->name, i); enic->msix[intr].isr = enic_isr_msix; enic->msix[intr].devid = &enic->napi[i]; } @@ -1577,7 +1577,7 @@ static int enic_request_intr(struct enic *enic) intr = enic_msix_wq_intr(enic, i); snprintf(enic->msix[intr].devname, sizeof(enic->msix[intr].devname), - "%.11s-tx-%d", netdev->name, i); + "%.11s-tx-%u", netdev->name, i); enic->msix[intr].isr = enic_isr_msix; enic->msix[intr].devid = &enic->napi[wq]; } diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 1471e16ba..f45385f5c 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1299,6 +1299,7 @@ static int dm9000_open(struct net_device *dev) { struct board_info *db = netdev_priv(dev); + unsigned int irq_flags = irq_get_trigger_type(dev->irq); if (netif_msg_ifup(db)) dev_dbg(db->dev, "enabling %s\n", dev->name); @@ -1306,9 +1307,11 @@ dm9000_open(struct net_device *dev) /* If there is no IRQ type specified, tell the user that this is a * problem */ - if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE) + if (irq_flags == IRQF_TRIGGER_NONE) dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); + irq_flags |= IRQF_SHARED; + /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ mdelay(1); /* delay needs by DM9000B */ @@ -1316,8 +1319,7 @@ dm9000_open(struct net_device *dev) /* Initialize DM9000 board */ dm9000_init_dm9000(dev); - if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED, - dev->name, dev)) + if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev)) return -EAGAIN; /* Now that we have an interrupt handler hooked up we can unmask * our interrupts diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index cbe84972f..f0e9e2ef6 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -1319,7 +1319,7 @@ de4x5_open(struct net_device *dev) if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED, lp->adapter_name, dev)) { - printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq); + printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq); if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED, lp->adapter_name, dev)) { printk("\n Cannot get IRQ- reconfigure your hardware.\n"); diff --git 
a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index b69a9eacc..c3b64cdd0 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -173,7 +173,7 @@ static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, static void dnet_handle_link_change(struct net_device *dev) { struct dnet *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; + struct phy_device *phydev = dev->phydev; unsigned long flags; u32 mode_reg, ctl_reg; @@ -295,7 +295,6 @@ static int dnet_mii_probe(struct net_device *dev) bp->link = 0; bp->speed = 0; bp->duplex = -1; - bp->phy_dev = phydev; return 0; } @@ -629,16 +628,16 @@ static int dnet_open(struct net_device *dev) struct dnet *bp = netdev_priv(dev); /* if the phy is not yet register, retry later */ - if (!bp->phy_dev) + if (!dev->phydev) return -EAGAIN; napi_enable(&bp->napi); dnet_init_hw(bp); - phy_start_aneg(bp->phy_dev); + phy_start_aneg(dev->phydev); /* schedule a link state check */ - phy_start(bp->phy_dev); + phy_start(dev->phydev); netif_start_queue(dev); @@ -652,8 +651,8 @@ static int dnet_close(struct net_device *dev) netif_stop_queue(dev); napi_disable(&bp->napi); - if (bp->phy_dev) - phy_stop(bp->phy_dev); + if (dev->phydev) + phy_stop(dev->phydev); dnet_reset_hw(bp); netif_carrier_off(dev); @@ -731,32 +730,9 @@ static struct net_device_stats *dnet_get_stats(struct net_device *dev) return nstat; } -static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct dnet *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_gset(phydev, cmd); -} - -static int dnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct dnet *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_sset(phydev, cmd); -} - static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct dnet *bp = netdev_priv(dev); - struct phy_device *phydev = bp->phy_dev; + struct phy_device *phydev = dev->phydev; if (!netif_running(dev)) return -EINVAL; @@ -776,11 +752,11 @@ static void dnet_get_drvinfo(struct net_device *dev, } static const struct ethtool_ops dnet_ethtool_ops = { - .get_settings = dnet_get_settings, - .set_settings = dnet_set_settings, .get_drvinfo = dnet_get_drvinfo, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct net_device_ops dnet_netdev_ops = { @@ -875,7 +851,7 @@ static int dnet_probe(struct platform_device *pdev) (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ", (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ", (bp->capabilities & DNET_HAS_DMA) ? 
"" : "no "); - phydev = bp->phy_dev; + phydev = dev->phydev; phy_attached_info(phydev); return 0; @@ -899,8 +875,8 @@ static int dnet_remove(struct platform_device *pdev) if (dev) { bp = netdev_priv(dev); - if (bp->phy_dev) - phy_disconnect(bp->phy_dev); + if (dev->phydev) + phy_disconnect(dev->phydev); mdiobus_unregister(bp->mii_bus); mdiobus_free(bp->mii_bus); unregister_netdev(dev); diff --git a/drivers/net/ethernet/dnet.h b/drivers/net/ethernet/dnet.h index 37f5b30fa..d985080bb 100644 --- a/drivers/net/ethernet/dnet.h +++ b/drivers/net/ethernet/dnet.h @@ -216,7 +216,6 @@ struct dnet { /* PHY stuff */ struct mii_bus *mii_bus; - struct phy_device *phy_dev; unsigned int link; unsigned int speed; unsigned int duplex; diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig index 710856326..b4853ec9d 100644 --- a/drivers/net/ethernet/emulex/benet/Kconfig +++ b/drivers/net/ethernet/emulex/benet/Kconfig @@ -13,11 +13,3 @@ config BE2NET_HWMON ---help--- Say Y here if you want to expose thermal sensor data on be2net network adapter. - -config BE2NET_VXLAN - bool "VXLAN offload support on be2net driver" - default y - depends on BE2NET && VXLAN && !(BE2NET=y && VXLAN=m) - ---help--- - Say Y here if you want to enable VXLAN offload support on - be2net driver. diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index fe3763df3..4555e041e 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -97,7 +97,8 @@ * SURF/DPDK */ -#define MAX_RSS_IFACES 15 +#define MAX_PORT_RSS_TABLES 15 +#define MAX_NIC_FUNCS 16 #define MAX_RX_QS 32 #define MAX_EVT_QS 32 #define MAX_TX_QS 32 @@ -442,8 +443,20 @@ struct be_resources { u16 max_iface_count; u16 max_mcc_count; u16 max_evt_qs; + u16 max_nic_evt_qs; /* NIC's share of evt qs */ u32 if_cap_flags; u32 vf_if_cap_flags; /* VF if capability flags */ + u32 flags; + /* Calculated PF Pool's share of RSS Tables. This is not enforced by + * the FW, but is a self-imposed driver limitation. 
+ */ + u16 max_rss_tables; +}; + +/* These are port-wide values */ +struct be_port_resources { + u16 max_vfs; + u16 nic_pfs; }; #define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC) @@ -513,7 +526,8 @@ struct be_adapter { spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ spinlock_t mcc_cq_lock; - u16 cfg_num_qs; /* configured via set-channels */ + u16 cfg_num_rx_irqs; /* configured via set-channels */ + u16 cfg_num_tx_irqs; /* configured via set-channels */ u16 num_evt_qs; u16 num_msix_vec; struct be_eq_obj eq_obj[MAX_EVT_QS]; @@ -632,16 +646,42 @@ struct be_adapter { #define be_max_txqs(adapter) (adapter->res.max_tx_qs) #define be_max_prio_txqs(adapter) (adapter->res.max_prio_tx_qs) #define be_max_rxqs(adapter) (adapter->res.max_rx_qs) -#define be_max_eqs(adapter) (adapter->res.max_evt_qs) +/* Max number of EQs available for the function (NIC + RoCE (if enabled)) */ +#define be_max_func_eqs(adapter) (adapter->res.max_evt_qs) +/* Max number of EQs available only for NIC */ +#define be_max_nic_eqs(adapter) (adapter->res.max_nic_evt_qs) #define be_if_cap_flags(adapter) (adapter->res.if_cap_flags) - -static inline u16 be_max_qs(struct be_adapter *adapter) +#define be_max_pf_pool_rss_tables(adapter) \ + (adapter->pool_res.max_rss_tables) +/* Max irqs available for NIC */ +#define be_max_irqs(adapter) \ + (min_t(u16, be_max_nic_eqs(adapter), num_online_cpus())) + +/* Max irqs *needed* for RX queues */ +static inline u16 be_max_rx_irqs(struct be_adapter *adapter) { - /* If no RSS, need atleast the one def RXQ */ + /* If no RSS, need at least one irq for def-RXQ */ u16 num = max_t(u16, be_max_rss(adapter), 1); - num = min(num, be_max_eqs(adapter)); - return min_t(u16, num, num_online_cpus()); + return min_t(u16, num, be_max_irqs(adapter)); +} + +/* Max irqs *needed* for TX queues */ +static inline u16 be_max_tx_irqs(struct be_adapter *adapter) +{ + return min_t(u16, be_max_txqs(adapter), be_max_irqs(adapter)); +} + +/* Max irqs *needed* for combined queues */ +static inline u16 be_max_qp_irqs(struct be_adapter *adapter) +{ + return min(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter)); } + +/* Max irqs *needed* for RX and TX queues together */ +static inline u16 be_max_any_irqs(struct be_adapter *adapter) +{ + return max(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter)); } /* Is BE in pvid_tagging mode */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 22402db27..2cc117568 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -87,6 +87,11 @@ static struct be_cmd_priv_map cmd_priv_map[] = { CMD_SUBSYSTEM_LOWLEVEL, BE_PRIV_DEVCFG | BE_PRIV_DEVSEC }, + { + OPCODE_COMMON_SET_HSW_CONFIG, + CMD_SUBSYSTEM_COMMON, + BE_PRIV_DEVCFG | BE_PRIV_VHADM + }, }; static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem) @@ -3850,6 +3855,10 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, void *ctxt; int status; + if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_HSW_CONFIG, + CMD_SUBSYSTEM_COMMON)) + return -EPERM; + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); @@ -3871,7 +3880,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); } - if (!BEx_chip(adapter) && hsw_mode) { + if (hsw_mode) { AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, adapter->hba_port_num); AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1); @@ -4023,7 +4032,10 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va; adapter->wol_cap = resp->wol_settings; - if (adapter->wol_cap & BE_WOL_CAP) + + /* Non-zero macaddr indicates WOL is enabled */ + if (adapter->wol_cap & BE_WOL_CAP && + !is_zero_ether_addr(resp->magic_mac)) adapter->wol_en = true; } err: @@ -4360,9 +4372,35 @@ err: return status; } +/* This routine returns a list of all the NIC PF_nums in the adapter */ +u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums) +{ + struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; + struct be_pcie_res_desc *pcie = NULL; + int i; + u16 nic_pf_count = 0; + + for (i = 0; i < desc_count; i++) { + if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 || + hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) { + pcie = (struct be_pcie_res_desc *)hdr; + if (pcie->pf_state && (pcie->pf_type == MISSION_NIC || + pcie->pf_type == MISSION_RDMA)) { + nic_pf_nums[nic_pf_count++] = pcie->pf_num; + } + } + + hdr->desc_len = hdr->desc_len ? 
: RESOURCE_DESC_SIZE_V0; + hdr = (void *)hdr + hdr->desc_len; + } + return nic_pf_count; +} + /* Will use MBOX only if MCCQ has not been created */ int be_cmd_get_profile_config(struct be_adapter *adapter, - struct be_resources *res, u8 query, u8 domain) + struct be_resources *res, + struct be_port_resources *port_res, + u8 profile_type, u8 query, u8 domain) { struct be_cmd_resp_get_profile_config *resp; struct be_cmd_req_get_profile_config *req; @@ -4389,7 +4427,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, if (!lancer_chip(adapter)) req->hdr.version = 1; - req->type = ACTIVE_PROFILE_TYPE; + req->type = profile_type; req->hdr.domain = domain; /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the @@ -4406,6 +4444,28 @@ int be_cmd_get_profile_config(struct be_adapter *adapter, resp = cmd.va; desc_count = le16_to_cpu(resp->desc_count); + if (port_res) { + u16 nic_pf_cnt = 0, i; + u16 nic_pf_num_list[MAX_NIC_FUNCS]; + + nic_pf_cnt = be_get_nic_pf_num_list(resp->func_param, + desc_count, + nic_pf_num_list); + + for (i = 0; i < nic_pf_cnt; i++) { + nic = be_get_func_nic_desc(resp->func_param, desc_count, + nic_pf_num_list[i]); + if (nic->link_param == adapter->port_num) { + port_res->nic_pfs++; + pcie = be_get_pcie_desc(resp->func_param, + desc_count, + nic_pf_num_list[i]); + port_res->max_vfs += le16_to_cpu(pcie->num_vfs); + } + } + return status; + } + pcie = be_get_pcie_desc(resp->func_param, desc_count, adapter->pf_num); if (pcie) @@ -4465,7 +4525,7 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, } /* Mark all fields invalid */ -static void be_reset_nic_desc(struct be_nic_res_desc *nic) +void be_reset_nic_desc(struct be_nic_res_desc *nic) { memset(nic, 0, sizeof(*nic)); nic->unicast_mac_count = 0xFFFF; @@ -4534,73 +4594,9 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed, 1, version, domain); } -static void be_fill_vf_res_template(struct be_adapter *adapter, - struct be_resources pool_res, - u16 num_vfs, u16 num_vf_qs, - struct be_nic_res_desc *nic_vft) -{ - u32 vf_if_cap_flags = pool_res.vf_if_cap_flags; - struct be_resources res_mod = {0}; - - /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd, - * which are modifiable using SET_PROFILE_CONFIG cmd. - */ - be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0); - - /* If RSS IFACE capability flags are modifiable for a VF, set the - * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if - * more than 1 RSSQ is available for a VF. - * Otherwise, provision only 1 queue pair for VF. 
- */ - if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) { - nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT); - if (num_vf_qs > 1) { - vf_if_cap_flags |= BE_IF_FLAGS_RSS; - if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS) - vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS; - } else { - vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS | - BE_IF_FLAGS_DEFQ_RSS); - } - } else { - num_vf_qs = 1; - } - - if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) { - nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT); - vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS; - } - - nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags); - nic_vft->rq_count = cpu_to_le16(num_vf_qs); - nic_vft->txq_count = cpu_to_le16(num_vf_qs); - nic_vft->rssq_count = cpu_to_le16(num_vf_qs); - nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count / - (num_vfs + 1)); - - /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally - * among the PF and it's VFs, if the fields are changeable - */ - if (res_mod.max_uc_mac == FIELD_MODIFIABLE) - nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac / - (num_vfs + 1)); - - if (res_mod.max_vlans == FIELD_MODIFIABLE) - nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans / - (num_vfs + 1)); - - if (res_mod.max_iface_count == FIELD_MODIFIABLE) - nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count / - (num_vfs + 1)); - - if (res_mod.max_mcc_count == FIELD_MODIFIABLE) - nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count / - (num_vfs + 1)); -} - int be_cmd_set_sriov_config(struct be_adapter *adapter, struct be_resources pool_res, u16 num_vfs, - u16 num_vf_qs) + struct be_resources *vft_res) { struct { struct be_pcie_res_desc pcie; @@ -4620,12 +4616,26 @@ int be_cmd_set_sriov_config(struct be_adapter *adapter, be_reset_nic_desc(&desc.nic_vft); desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1; - desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); + desc.nic_vft.flags = vft_res->flags | BIT(VFT_SHIFT) | + BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); desc.nic_vft.pf_num = adapter->pdev->devfn; desc.nic_vft.vf_num = 0; - - be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs, - &desc.nic_vft); + desc.nic_vft.cap_flags = cpu_to_le32(vft_res->vf_if_cap_flags); + desc.nic_vft.rq_count = cpu_to_le16(vft_res->max_rx_qs); + desc.nic_vft.txq_count = cpu_to_le16(vft_res->max_tx_qs); + desc.nic_vft.rssq_count = cpu_to_le16(vft_res->max_rss_qs); + desc.nic_vft.cq_count = cpu_to_le16(vft_res->max_cq_count); + + if (vft_res->max_uc_mac) + desc.nic_vft.unicast_mac_count = + cpu_to_le16(vft_res->max_uc_mac); + if (vft_res->max_vlans) + desc.nic_vft.vlan_count = cpu_to_le16(vft_res->max_vlans); + if (vft_res->max_iface_count) + desc.nic_vft.iface_count = + cpu_to_le16(vft_res->max_iface_count); + if (vft_res->max_mcc_count) + desc.nic_vft.mcc_count = cpu_to_le16(vft_res->max_mcc_count); return be_cmd_set_profile_config(adapter, &desc, 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index d8540ae95..0d6be224a 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -1556,7 +1556,9 @@ struct be_cmd_resp_acpi_wol_magic_config_v1 { u8 rsvd0[2]; u8 wol_settings; u8 rsvd1[5]; - u32 rsvd2[295]; + u32 rsvd2[288]; + u8 magic_mac[6]; + u8 rsvd3[22]; } __packed; #define BE_GET_WOL_CAP 2 @@ -2128,6 +2130,9 @@ struct be_cmd_req_set_ext_fat_caps { #define IMM_SHIFT 6 /* Immediate */ #define NOSV_SHIFT 7 /* No save */ +#define MISSION_NIC 1 +#define MISSION_RDMA 8 + struct be_res_desc_hdr { u8 desc_type; u8 desc_len; @@ -2244,6 +2249,7 @@ struct be_cmd_req_get_profile_config { struct be_cmd_req_hdr hdr; u8 rsvd; #define ACTIVE_PROFILE_TYPE 0x2 +#define SAVED_PROFILE_TYPE 0x0 #define QUERY_MODIFIABLE_FIELDS_TYPE BIT(3) u8 type; u16 rsvd1; @@ -2449,7 +2455,9 @@ int be_cmd_query_port_name(struct be_adapter *adapter); int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res); int be_cmd_get_profile_config(struct be_adapter *adapter, - struct be_resources *res, u8 query, u8 domain); + struct be_resources *res, + struct be_port_resources *port_res, + u8 profile_type, u8 query, u8 domain); int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile); int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, int vf_num); @@ -2461,4 +2469,4 @@ int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port); int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op); int be_cmd_set_sriov_config(struct be_adapter *adapter, struct be_resources res, u16 num_vfs, - u16 num_vf_qs); + struct be_resources *vft_res); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 2ff691636..50e7be5da 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -793,6 +793,11 @@ static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct be_adapter *adapter = netdev_priv(netdev); + struct device *dev = &adapter->pdev->dev; + struct be_dma_mem cmd; + u8 mac[ETH_ALEN]; + bool enable; + int status; if (wol->wolopts & ~WAKE_MAGIC) return -EOPNOTSUPP; @@ -802,12 +807,32 @@ static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return -EOPNOTSUPP; } - if (wol->wolopts & WAKE_MAGIC) - adapter->wol_en = true; - else - adapter->wol_en = false; + cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); + cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); + if (!cmd.va) + return -ENOMEM; - return 0; + eth_zero_addr(mac); + + enable = wol->wolopts & WAKE_MAGIC; + if (enable) + ether_addr_copy(mac, adapter->netdev->dev_addr); + + status = be_cmd_enable_magic_wol(adapter, mac, &cmd); + if (status) { + dev_err(dev, "Could not set Wake-on-lan mac address\n"); + status = be_cmd_status(status); + goto err; + } + + pci_enable_wake(adapter->pdev, PCI_D3hot, enable); + pci_enable_wake(adapter->pdev, PCI_D3cold, enable); + + adapter->wol_en = enable ? 
true : false; + +err: + dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma); + return status; } static int be_test_ddr_dma(struct be_adapter *adapter) @@ -1171,9 +1196,17 @@ static void be_get_channels(struct net_device *netdev, struct ethtool_channels *ch) { struct be_adapter *adapter = netdev_priv(netdev); + u16 num_rx_irqs = max_t(u16, adapter->num_rss_qs, 1); - ch->combined_count = adapter->num_evt_qs; - ch->max_combined = be_max_qs(adapter); + /* num_tx_qs is always the same as the number of irqs used for TX */ + ch->combined_count = min(adapter->num_tx_qs, num_rx_irqs); + ch->rx_count = num_rx_irqs - ch->combined_count; + ch->tx_count = adapter->num_tx_qs - ch->combined_count; + + ch->max_combined = be_max_qp_irqs(adapter); + /* The user must create at least one combined channel */ + ch->max_rx = be_max_rx_irqs(adapter) - 1; + ch->max_tx = be_max_tx_irqs(adapter) - 1; } static int be_set_channels(struct net_device *netdev, @@ -1182,11 +1215,22 @@ static int be_set_channels(struct net_device *netdev, struct be_adapter *adapter = netdev_priv(netdev); int status; - if (ch->rx_count || ch->tx_count || ch->other_count || - !ch->combined_count || ch->combined_count > be_max_qs(adapter)) + /* we support either only combined channels or a combination of + * combined and either RX-only or TX-only channels. + */ + if (ch->other_count || !ch->combined_count || + (ch->rx_count && ch->tx_count)) + return -EINVAL; + + if (ch->combined_count > be_max_qp_irqs(adapter) || + (ch->rx_count && + (ch->rx_count + ch->combined_count) > be_max_rx_irqs(adapter)) || + (ch->tx_count && + (ch->tx_count + ch->combined_count) > be_max_tx_irqs(adapter))) return -EINVAL; - adapter->cfg_num_qs = ch->combined_count; + adapter->cfg_num_rx_irqs = ch->combined_count + ch->rx_count; + adapter->cfg_num_tx_irqs = ch->combined_count + ch->tx_count; status = be_update_queues(adapter); return be_cmd_status(status); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index ed98ef1ec..874c7539a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -2620,8 +2620,10 @@ static int be_evt_queues_create(struct be_adapter *adapter) struct be_aic_obj *aic; int i, rc; + /* need enough EQs to service both RX and TX queues */ adapter->num_evt_qs = min_t(u16, num_irqs(adapter), - adapter->cfg_num_qs); + max(adapter->cfg_num_rx_irqs, + adapter->cfg_num_tx_irqs)); for_all_evt_queues(adapter, eqo, i) { int numa_node = dev_to_node(&adapter->pdev->dev); @@ -2726,7 +2728,7 @@ static int be_tx_qs_create(struct be_adapter *adapter) struct be_eq_obj *eqo; int status, i; - adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter)); + adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs); for_all_tx_queues(adapter, txo, i) { cq = &txo->cq; @@ -2784,11 +2786,11 @@ static int be_rx_cqs_create(struct be_adapter *adapter) struct be_rx_obj *rxo; int rc, i; - /* We can create as many RSS rings as there are EQs. */ - adapter->num_rss_qs = adapter->num_evt_qs; + adapter->num_rss_qs = + min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs); /* We'll use RSS only if atleast 2 RSS rings are supported. 
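+	 * (Illustration: with cfg_num_rx_irqs = 6 and 6 EQs created,
+	 * num_rss_qs stays 6; with a single RX irq the one remaining
+	 * ring is the default RXQ, so num_rss_qs is forced to 0 below
+	 * and RSS stays off.)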
*/ - if (adapter->num_rss_qs <= 1) + if (adapter->num_rss_qs < 2) adapter->num_rss_qs = 0; adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq; @@ -3249,18 +3251,23 @@ static void be_msix_disable(struct be_adapter *adapter) static int be_msix_enable(struct be_adapter *adapter) { - int i, num_vec; + unsigned int i, max_roce_eqs; struct device *dev = &adapter->pdev->dev; + int num_vec; - /* If RoCE is supported, program the max number of NIC vectors that - * may be configured via set-channels, along with vectors needed for - * RoCe. Else, just program the number we'll use initially. + /* If RoCE is supported, program the max number of vectors that + * could be used for NIC and RoCE, else, just program the number + * we'll use initially. */ - if (be_roce_supported(adapter)) - num_vec = min_t(int, 2 * be_max_eqs(adapter), - 2 * num_online_cpus()); - else - num_vec = adapter->cfg_num_qs; + if (be_roce_supported(adapter)) { + max_roce_eqs = + be_max_func_eqs(adapter) - be_max_nic_eqs(adapter); + max_roce_eqs = min(max_roce_eqs, num_online_cpus()); + num_vec = be_max_any_irqs(adapter) + max_roce_eqs; + } else { + num_vec = max(adapter->cfg_num_rx_irqs, + adapter->cfg_num_tx_irqs); + } for (i = 0; i < num_vec; i++) adapter->msix_entries[i].entry = i; @@ -3625,10 +3632,8 @@ static int be_open(struct net_device *netdev) be_link_status_update(adapter, link_status); netif_tx_start_all_queues(netdev); -#ifdef CONFIG_BE2NET_VXLAN if (skyhawk_chip(adapter)) - vxlan_get_rx_port(netdev); -#endif + udp_tunnel_get_rx_info(netdev); return 0; err: @@ -3636,40 +3641,6 @@ err: return -EIO; } -static int be_setup_wol(struct be_adapter *adapter, bool enable) -{ - struct device *dev = &adapter->pdev->dev; - struct be_dma_mem cmd; - u8 mac[ETH_ALEN]; - int status; - - eth_zero_addr(mac); - - cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); - cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL); - if (!cmd.va) - return -ENOMEM; - - if (enable) { - status = pci_write_config_dword(adapter->pdev, - PCICFG_PM_CONTROL_OFFSET, - PCICFG_PM_CONTROL_MASK); - if (status) { - dev_err(dev, "Could not enable Wake-on-lan\n"); - goto err; - } - } else { - ether_addr_copy(mac, adapter->netdev->dev_addr); - } - - status = be_cmd_enable_magic_wol(adapter, mac, &cmd); - pci_enable_wake(adapter->pdev, PCI_D3hot, enable); - pci_enable_wake(adapter->pdev, PCI_D3cold, enable); -err: - dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma); - return status; -} - static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac) { u32 addr; @@ -3759,6 +3730,11 @@ static void be_vf_clear(struct be_adapter *adapter) be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1); } + + if (BE3_chip(adapter)) + be_cmd_set_hsw_config(adapter, 0, 0, + adapter->if_handle, + PORT_FWD_TYPE_PASSTHRU, 0); done: kfree(adapter->vf_cfg); adapter->num_vfs = 0; @@ -3789,7 +3765,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter) } } -#ifdef CONFIG_BE2NET_VXLAN static void be_disable_vxlan_offloads(struct be_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -3808,37 +3783,87 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter) netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL); netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL); } -#endif -static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs) +static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs, + struct be_resources *vft_res) { struct be_resources res = adapter->pool_res; + u32 vf_if_cap_flags = 
res.vf_if_cap_flags; + struct be_resources res_mod = {0}; u16 num_vf_qs = 1; - /* Distribute the queue resources among the PF and it's VFs - * Do not distribute queue resources in multi-channel configuration. - */ - if (num_vfs && !be_is_mc(adapter)) { - /* Divide the qpairs evenly among the VFs and the PF, capped - * at VF-EQ-count. Any remainder qpairs belong to the PF. - */ + /* Distribute the queue resources among the PF and its VFs */ + if (num_vfs) { + /* Divide the rx queues evenly among the VFs and the PF, capped + * at VF-EQ-count. Any remainder queues belong to the PF. + */ num_vf_qs = min(SH_VF_MAX_NIC_EQS, res.max_rss_qs / (num_vfs + 1)); - /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable - * interfaces per port. Provide RSS on VFs, only if number - * of VFs requested is less than MAX_RSS_IFACES limit. + /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES + * RSS Tables per port. Provide RSS on VFs, only if number of + * VFs requested is less than its PF Pool's RSS Tables limit. */ - if (num_vfs >= MAX_RSS_IFACES) + if (num_vfs >= be_max_pf_pool_rss_tables(adapter)) num_vf_qs = 1; } - return num_vf_qs; + + /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd, + * which are modifiable using SET_PROFILE_CONFIG cmd. + */ + be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE, + RESOURCE_MODIFIABLE, 0); + + /* If RSS IFACE capability flags are modifiable for a VF, set the + * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if + * more than 1 RSSQ is available for a VF. + * Otherwise, provision only 1 queue pair for VF. + */ + if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) { + vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT); + if (num_vf_qs > 1) { + vf_if_cap_flags |= BE_IF_FLAGS_RSS; + if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS) + vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS; + } else { + vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS | + BE_IF_FLAGS_DEFQ_RSS); + } + } else { + num_vf_qs = 1; + } + + if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) { + vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT); + vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS; + } + + vft_res->vf_if_cap_flags = vf_if_cap_flags; + vft_res->max_rx_qs = num_vf_qs; + vft_res->max_rss_qs = num_vf_qs; + vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1); + vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1); + + /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally + * among the PF and its VFs, if the fields are changeable + */ + if (res_mod.max_uc_mac == FIELD_MODIFIABLE) + vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1); + + if (res_mod.max_vlans == FIELD_MODIFIABLE) + vft_res->max_vlans = res.max_vlans / (num_vfs + 1); + + if (res_mod.max_iface_count == FIELD_MODIFIABLE) + vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1); + + if (res_mod.max_mcc_count == FIELD_MODIFIABLE) + vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1); } static int be_clear(struct be_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; - u16 num_vf_qs; + struct be_resources vft_res = {0}; be_cancel_worker(adapter); @@ -3850,16 +3875,15 @@ static int be_clear(struct be_adapter *adapter) */ if (skyhawk_chip(adapter) && be_physfn(adapter) && !pci_vfs_assigned(pdev)) { - num_vf_qs = be_calculate_vf_qs(adapter, - pci_sriov_get_totalvfs(pdev)); + be_calculate_vf_res(adapter, + pci_sriov_get_totalvfs(pdev), + &vft_res); be_cmd_set_sriov_config(adapter, adapter->pool_res, pci_sriov_get_totalvfs(pdev), - num_vf_qs); + &vft_res); } -#ifdef
CONFIG_BE2NET_VXLAN be_disable_vxlan_offloads(adapter); -#endif kfree(adapter->pmac_id); adapter->pmac_id = NULL; @@ -3884,7 +3908,8 @@ static int be_vfs_if_create(struct be_adapter *adapter) for_all_vfs(adapter, vf_cfg, vf) { if (!BE3_chip(adapter)) { - status = be_cmd_get_profile_config(adapter, &res, + status = be_cmd_get_profile_config(adapter, &res, NULL, + ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS, vf + 1); if (!status) { @@ -4000,6 +4025,15 @@ static int be_vf_setup(struct be_adapter *adapter) } } + if (BE3_chip(adapter)) { + /* On BE3, enable VEB only when SRIOV is enabled */ + status = be_cmd_set_hsw_config(adapter, 0, 0, + adapter->if_handle, + PORT_FWD_TYPE_VEB, 0); + if (status) + goto err; + } + adapter->flags |= BE_FLAGS_SRIOV_ENABLED; return 0; err: @@ -4069,8 +4103,9 @@ static void BEx_get_resources(struct be_adapter *adapter, /* On a SuperNIC profile, the driver needs to use the * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits */ - be_cmd_get_profile_config(adapter, &super_nic_res, - RESOURCE_LIMITS, 0); + be_cmd_get_profile_config(adapter, &super_nic_res, NULL, + ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS, + 0); /* Some old versions of BE3 FW don't report max_tx_qs value */ res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS; } else { @@ -4109,12 +4144,38 @@ static void be_setup_init(struct be_adapter *adapter) adapter->cmd_privileges = MIN_PRIVILEGES; } +/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port. + * However, this HW limitation is not exposed to the host via any SLI cmd. + * As a result, in the case of SRIOV and in particular multi-partition configs + * the driver needs to calculate a proportional share of RSS Tables per PF-pool + * for distribution between the VFs. This self-imposed limit will determine the + * number of VFs for which RSS can be enabled. + */ +void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter) +{ + struct be_port_resources port_res = {0}; + u8 rss_tables_on_port; + u16 max_vfs = be_max_vfs(adapter); + + be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE, + RESOURCE_LIMITS, 0); + + rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs; + + /* Each PF Pool's RSS Tables limit = + * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port + */ + adapter->pool_res.max_rss_tables = + max_vfs * rss_tables_on_port / port_res.max_vfs; +} + static int be_get_sriov_config(struct be_adapter *adapter) { struct be_resources res = {0}; int max_vfs, old_vfs; - be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0); + be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE, + RESOURCE_LIMITS, 0); /* Some old versions of BE3 FW don't report max_vfs value */ if (BE3_chip(adapter) && !res.max_vfs) { @@ -4138,13 +4199,19 @@ static int be_get_sriov_config(struct be_adapter *adapter) adapter->num_vfs = old_vfs; } + if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) { + be_calculate_pf_pool_rss_tables(adapter); + dev_info(&adapter->pdev->dev, + "RSS can be enabled for all VFs if num_vfs <= %d\n", + be_max_pf_pool_rss_tables(adapter)); + } return 0; } static void be_alloc_sriov_res(struct be_adapter *adapter) { int old_vfs = pci_num_vf(adapter->pdev); - u16 num_vf_qs; + struct be_resources vft_res = {0}; int status; be_get_sriov_config(adapter); @@ -4158,9 +4225,9 @@ static void be_alloc_sriov_res(struct be_adapter *adapter) * Also, this is done by FW in Lancer chip. 
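+	 * (Illustration: at driver load num_vfs is 0, so the divisions
+	 * by (num_vfs + 1) in be_calculate_vf_res() leave the entire
+	 * pool, including the tx/cq/uc-mac/vlan counts, with the PF;
+	 * enabling VFs later through sriov_configure re-runs the
+	 * carving with the real VF count.)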
*/ if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) { - num_vf_qs = be_calculate_vf_qs(adapter, 0); + be_calculate_vf_res(adapter, 0, &vft_res); status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0, - num_vf_qs); + &vft_res); if (status) dev_err(&adapter->pdev->dev, "Failed to optimize SRIOV resources\n"); @@ -4173,16 +4240,13 @@ static int be_get_resources(struct be_adapter *adapter) struct be_resources res = {0}; int status; - if (BEx_chip(adapter)) { - BEx_get_resources(adapter, &res); - adapter->res = res; - } - /* For Lancer, SH etc read per-function resource limits from FW. * GET_FUNC_CONFIG returns per function guaranteed limits. * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits */ - if (!BEx_chip(adapter)) { + if (BEx_chip(adapter)) { + BEx_get_resources(adapter, &res); + } else { status = be_cmd_get_func_config(adapter, &res); if (status) return status; @@ -4191,13 +4255,13 @@ static int be_get_resources(struct be_adapter *adapter) if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs && !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)) res.max_rss_qs -= 1; - - /* If RoCE may be enabled stash away half the EQs for RoCE */ - if (be_roce_supported(adapter)) - res.max_evt_qs /= 2; - adapter->res = res; } + /* If RoCE is supported stash away half the EQs for RoCE */ + res.max_nic_evt_qs = be_roce_supported(adapter) ? + res.max_evt_qs / 2 : res.max_evt_qs; + adapter->res = res; + /* If FW supports RSS default queue, then skip creating non-RSS * queue for non-IP traffic. */ @@ -4206,15 +4270,17 @@ static int be_get_resources(struct be_adapter *adapter) dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n", be_max_txqs(adapter), be_max_rxqs(adapter), - be_max_rss(adapter), be_max_eqs(adapter), + be_max_rss(adapter), be_max_nic_eqs(adapter), be_max_vfs(adapter)); dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n", be_max_uc(adapter), be_max_mc(adapter), be_max_vlans(adapter)); - /* Sanitize cfg_num_qs based on HW and platform limits */ - adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(), - be_max_qs(adapter)); + /* Ensure RX and TX queues are created in pairs at init time */ + adapter->cfg_num_rx_irqs = + min_t(u16, netif_get_num_default_rss_queues(), + be_max_qp_irqs(adapter)); + adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs; return 0; } @@ -4241,6 +4307,8 @@ static int be_get_config(struct be_adapter *adapter) } be_cmd_get_acpi_wol_cap(adapter); + pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en); + pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en); be_cmd_query_port_name(adapter); @@ -4251,15 +4319,6 @@ static int be_get_config(struct be_adapter *adapter) "Using profile 0x%x\n", profile_id); } - status = be_get_resources(adapter); - if (status) - return status; - - adapter->pmac_id = kcalloc(be_max_uc(adapter), - sizeof(*adapter->pmac_id), GFP_KERNEL); - if (!adapter->pmac_id) - return -ENOMEM; - return 0; } @@ -4334,7 +4393,7 @@ static int be_if_create(struct be_adapter *adapter) u32 cap_flags = be_if_cap_flags(adapter); int status; - if (adapter->cfg_num_qs == 1) + if (adapter->cfg_num_rx_irqs == 1) cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS); en_flags &= cap_flags; @@ -4460,13 +4519,22 @@ static int be_setup(struct be_adapter *adapter) return status; } + status = be_get_config(adapter); + if (status) + goto err; + if (!BE2_chip(adapter) && be_physfn(adapter)) be_alloc_sriov_res(adapter); - status = be_get_config(adapter); + status = be_get_resources(adapter); if (status) goto err; + 
adapter->pmac_id = kcalloc(be_max_uc(adapter), + sizeof(*adapter->pmac_id), GFP_KERNEL); + if (!adapter->pmac_id) + return -ENOMEM; + status = be_msix_enable(adapter); if (status) goto err; @@ -4511,6 +4579,15 @@ static int be_setup(struct be_adapter *adapter) be_cmd_set_logical_link_config(adapter, IFLA_VF_LINK_STATE_AUTO, 0); + /* BE3 EVB echoes broadcast/multicast packets back to PF's vport + * confusing a linux bridge or OVS that it might be connected to. + * Set the EVB to PASSTHRU mode which effectively disables the EVB + * when SRIOV is not enabled. + */ + if (BE3_chip(adapter)) + be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle, + PORT_FWD_TYPE_PASSTHRU, 0); + if (adapter->num_vfs) be_vf_setup(adapter); @@ -4651,7 +4728,6 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 0, 0, nlflags, filter_mask, NULL); } -#ifdef CONFIG_BE2NET_VXLAN /* VxLAN offload Notes: * * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't @@ -4666,13 +4742,17 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, * adds more than one port, disable offloads and don't re-enable them again * until after all the tunnels are removed. */ -static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, - __be16 port) +static void be_add_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct be_adapter *adapter = netdev_priv(netdev); struct device *dev = &adapter->pdev->dev; + __be16 port = ti->port; int status; + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) return; @@ -4720,10 +4800,14 @@ err: be_disable_vxlan_offloads(adapter); } -static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, - __be16 port) +static void be_del_vxlan_port(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct be_adapter *adapter = netdev_priv(netdev); + __be16 port = ti->port; + + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter)) return; @@ -4785,7 +4869,6 @@ static netdev_features_t be_features_check(struct sk_buff *skb, return features; } -#endif static int be_get_phys_port_id(struct net_device *dev, struct netdev_phys_item_id *ppid) @@ -4833,11 +4916,9 @@ static const struct net_device_ops be_netdev_ops = { #ifdef CONFIG_NET_RX_BUSY_POLL .ndo_busy_poll = be_busy_poll, #endif -#ifdef CONFIG_BE2NET_VXLAN - .ndo_add_vxlan_port = be_add_vxlan_port, - .ndo_del_vxlan_port = be_del_vxlan_port, + .ndo_udp_tunnel_add = be_add_vxlan_port, + .ndo_udp_tunnel_del = be_del_vxlan_port, .ndo_features_check = be_features_check, -#endif .ndo_get_phys_port_id = be_get_phys_port_id, }; @@ -4996,6 +5077,10 @@ static void be_worker(struct work_struct *work) struct be_rx_obj *rxo; int i; + if (be_physfn(adapter) && + MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0) + be_cmd_get_die_temperature(adapter); + /* when interrupts are not yet enabled, just reap any pending * mcc completions */ @@ -5014,10 +5099,6 @@ static void be_worker(struct work_struct *work) be_cmd_get_stats(adapter, &adapter->stats_cmd); } - if (be_physfn(adapter) && - MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0) - be_cmd_get_die_temperature(adapter); - for_all_rx_queues(adapter, rxo, i) { /* Replenish RX-queues starved due to memory * allocation failures. 
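The ndo conversion above (ndo_add/del_vxlan_port to ndo_udp_tunnel_add/del) is part of the generic UDP-tunnel offload API: the new hooks fire for every tunnel type, so each driver filters on udp_tunnel_info::type itself. A minimal sketch of that pattern, assuming a NIC that can offload a single VxLAN port (ex_priv and its vxlan_port field are illustrative, not from this patch):

	struct ex_priv {
		__be16 vxlan_port;	/* currently offloaded port, 0 if none */
	};

	static void ex_udp_tunnel_add(struct net_device *netdev,
				      struct udp_tunnel_info *ti)
	{
		struct ex_priv *priv = netdev_priv(netdev);

		/* The generic hook is called for all tunnel types; keep
		 * only the one the hardware can actually offload.
		 */
		if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
			return;

		/* Single offload slot: a second, different port would
		 * have to disable the offload, as be2net does above.
		 */
		if (priv->vxlan_port && priv->vxlan_port != ti->port)
			return;

		priv->vxlan_port = ti->port;
		/* program ti->port (big-endian) into the NIC here */
	}
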
@@ -5410,9 +5491,6 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state) { struct be_adapter *adapter = pci_get_drvdata(pdev); - if (adapter->wol_en) - be_setup_wol(adapter, true); - be_intr_set(adapter, false); be_cancel_err_detection(adapter); @@ -5441,9 +5519,6 @@ static int be_pci_resume(struct pci_dev *pdev) be_schedule_err_detection(adapter, ERR_DETECTION_DELAY); - if (adapter->wol_en) - be_setup_wol(adapter, false); - return 0; } @@ -5552,7 +5627,7 @@ err: static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct be_adapter *adapter = pci_get_drvdata(pdev); - u16 num_vf_qs; + struct be_resources vft_res = {0}; int status; if (!num_vfs) @@ -5575,9 +5650,10 @@ static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) * Also, this is done by FW in Lancer chip. */ if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) { - num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs); + be_calculate_vf_res(adapter, adapter->num_vfs, + &vft_res); status = be_cmd_set_sriov_config(adapter, adapter->pool_res, - adapter->num_vfs, num_vf_qs); + adapter->num_vfs, &vft_res); if (status) dev_err(&pdev->dev, "Failed to optimize SR-IOV resources\n"); diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index 4089156a7..2b62841c4 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h index fde609789..e51719a73 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.h +++ b/drivers/net/ethernet/emulex/benet/be_roce.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2005 - 2015 Emulex + * Copyright (C) 2005 - 2016 Broadcom * All rights reserved. 
* * This program is free software; you can redistribute it and/or diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 4466a1187..c044667a0 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -192,7 +192,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); * @napi: NAPI structure * @msg_enable: device state flags * @lock: device lock - * @phy: attached PHY * @mdio: MDIO bus for PHY access * @phy_id: address of attached PHY */ @@ -219,7 +218,6 @@ struct ethoc { spinlock_t lock; - struct phy_device *phy; struct mii_bus *mdio; struct clk *clk; s8 phy_id; @@ -694,7 +692,6 @@ static int ethoc_mdio_probe(struct net_device *dev) return err; } - priv->phy = phy; phy->advertising &= ~(ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half); phy->supported &= ~(SUPPORTED_1000baseT_Full | @@ -724,7 +721,7 @@ static int ethoc_open(struct net_device *dev) netif_start_queue(dev); } - phy_start(priv->phy); + phy_start(dev->phydev); napi_enable(&priv->napi); if (netif_msg_ifup(priv)) { @@ -741,8 +738,8 @@ static int ethoc_stop(struct net_device *dev) napi_disable(&priv->napi); - if (priv->phy) - phy_stop(priv->phy); + if (dev->phydev) + phy_stop(dev->phydev); ethoc_disable_rx_and_tx(priv); free_irq(dev->irq, dev); @@ -770,7 +767,7 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (!phy) return -ENODEV; } else { - phy = priv->phy; + phy = dev->phydev; } return phy_mii_ioctl(phy, ifr, cmd); @@ -903,28 +900,6 @@ out_no_free: return NETDEV_TX_OK; } -static int ethoc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct ethoc *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phy; - - if (!phydev) - return -EOPNOTSUPP; - - return phy_ethtool_gset(phydev, cmd); -} - -static int ethoc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct ethoc *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phy; - - if (!phydev) - return -EOPNOTSUPP; - - return phy_ethtool_sset(phydev, cmd); -} - static int ethoc_get_regs_len(struct net_device *netdev) { return ETH_END; @@ -989,14 +964,14 @@ static int ethoc_set_ringparam(struct net_device *dev, } const struct ethtool_ops ethoc_ethtool_ops = { - .get_settings = ethoc_get_settings, - .set_settings = ethoc_set_settings, .get_regs_len = ethoc_get_regs_len, .get_regs = ethoc_get_regs, .get_link = ethtool_op_get_link, .get_ringparam = ethoc_get_ringparam, .set_ringparam = ethoc_set_ringparam, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct net_device_ops ethoc_netdev_ops = { @@ -1267,8 +1242,7 @@ static int ethoc_remove(struct platform_device *pdev) if (netdev) { netif_napi_del(&priv->napi); - phy_disconnect(priv->phy); - priv->phy = NULL; + phy_disconnect(netdev->phydev); if (priv->mdio) { mdiobus_unregister(priv->mdio); diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 9b7a3f5a2..f928e6f79 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -24,6 +24,14 @@ #define DRV_NAME "nps_mgt_enet" +static inline bool nps_enet_is_tx_pending(struct nps_enet_priv *priv) +{ + u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); + u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; + + return (!tx_ctrl_ct && priv->tx_skb); +} + static void nps_enet_clean_rx_fifo(struct net_device *ndev, 
u32 frame_len) { struct nps_enet_priv *priv = netdev_priv(ndev); @@ -46,16 +54,17 @@ static void nps_enet_read_rx_fifo(struct net_device *ndev, if (dst_is_aligned) { ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, reg, len); reg += len; - } - else { /* !dst_is_aligned */ + } else { /* !dst_is_aligned */ for (i = 0; i < len; i++, reg++) { u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); + put_unaligned_be32(buf, reg); } } /* copy last bytes (if any) */ if (last) { u32 buf; + ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, &buf, 1); memcpy((u8 *)reg, &buf, last); } @@ -140,12 +149,11 @@ static void nps_enet_tx_handler(struct net_device *ndev) { struct nps_enet_priv *priv = netdev_priv(ndev); u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); - u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; u32 tx_ctrl_et = (tx_ctrl_value & TX_CTL_ET_MASK) >> TX_CTL_ET_SHIFT; u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT; /* Check if we got TX */ - if (!priv->tx_skb || tx_ctrl_ct) + if (!nps_enet_is_tx_pending(priv)) return; /* Ack Tx ctrl register */ @@ -183,9 +191,6 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) work_done = nps_enet_rx_handler(ndev); if (work_done < budget) { u32 buf_int_enable_value = 0; - u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); - u32 tx_ctrl_ct = - (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; napi_complete(napi); @@ -204,8 +209,7 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) * the two code lines below will solve this situation by * re-adding ourselves to the poll list. */ - - if (priv->tx_skb && !tx_ctrl_ct) { + if (nps_enet_is_tx_pending(priv)) { nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); napi_reschedule(napi); } @@ -230,11 +234,9 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance) struct net_device *ndev = dev_instance; struct nps_enet_priv *priv = netdev_priv(ndev); u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL); - u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); - u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT; - if ((!tx_ctrl_ct && priv->tx_skb) || rx_ctrl_cr) + if (nps_enet_is_tx_pending(priv) || rx_ctrl_cr) if (likely(napi_schedule_prep(&priv->napi))) { nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); __napi_schedule(&priv->napi); @@ -460,7 +462,6 @@ static void nps_enet_set_rx_mode(struct net_device *ndev) | NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT; ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK) | NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT; - } nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2_value); diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index e7cf313e3..36361f8bf 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -31,6 +31,7 @@ #include <linux/phy.h> #include <linux/platform_device.h> #include <net/ip.h> +#include <net/ncsi.h> #include "ftgmac100.h" @@ -68,10 +69,14 @@ struct ftgmac100 { struct net_device *netdev; struct device *dev; + struct ncsi_dev *ndev; struct napi_struct napi; struct mii_bus *mii_bus; int old_speed; + int int_mask_all; + bool use_ncsi; + bool enabled; }; static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv, @@ -80,14 +85,6 @@ static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv, 
/****************************************************************************** * internal functions (hardware register access) *****************************************************************************/ -#define INT_MASK_ALL_ENABLED (FTGMAC100_INT_RPKT_LOST | \ - FTGMAC100_INT_XPKT_ETH | \ - FTGMAC100_INT_XPKT_LOST | \ - FTGMAC100_INT_AHB_ERR | \ - FTGMAC100_INT_PHYSTS_CHG | \ - FTGMAC100_INT_RPKT_BUF | \ - FTGMAC100_INT_NO_RXBUF) - static void ftgmac100_set_rx_ring_base(struct ftgmac100 *priv, dma_addr_t addr) { iowrite32(addr, priv->base + FTGMAC100_OFFSET_RXR_BADR); @@ -141,6 +138,55 @@ static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac) iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR); } +static void ftgmac100_setup_mac(struct ftgmac100 *priv) +{ + u8 mac[ETH_ALEN]; + unsigned int m; + unsigned int l; + void *addr; + + addr = device_get_mac_address(priv->dev, mac, ETH_ALEN); + if (addr) { + ether_addr_copy(priv->netdev->dev_addr, mac); + dev_info(priv->dev, "Read MAC address %pM from device tree\n", + mac); + return; + } + + m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR); + l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR); + + mac[0] = (m >> 8) & 0xff; + mac[1] = m & 0xff; + mac[2] = (l >> 24) & 0xff; + mac[3] = (l >> 16) & 0xff; + mac[4] = (l >> 8) & 0xff; + mac[5] = l & 0xff; + + if (is_valid_ether_addr(mac)) { + ether_addr_copy(priv->netdev->dev_addr, mac); + dev_info(priv->dev, "Read MAC address %pM from chip\n", mac); + } else { + eth_hw_addr_random(priv->netdev); + dev_info(priv->dev, "Generated random MAC address %pM\n", + priv->netdev->dev_addr); + } +} + +static int ftgmac100_set_mac_addr(struct net_device *dev, void *p) +{ + int ret; + + ret = eth_prepare_mac_addr_change(dev, p); + if (ret < 0) + return ret; + + eth_commit_mac_addr_change(dev, p); + ftgmac100_set_mac(netdev_priv(dev), dev->dev_addr); + + return 0; +} + static void ftgmac100_init_hw(struct ftgmac100 *priv) { /* setup ring buffer base registers */ @@ -952,7 +998,10 @@ static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id) struct net_device *netdev = dev_id; struct ftgmac100 *priv = netdev_priv(netdev); - if (likely(netif_running(netdev))) { + /* When running in NCSI mode, the interface should be ready for + * receiving or transmitting NCSI packets before it's opened. + */ + if (likely(priv->use_ncsi || netif_running(netdev))) { /* Disable interrupts for polling */ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); napi_schedule(&priv->napi); @@ -1005,8 +1054,9 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget) ftgmac100_tx_complete(priv); } - if (status & (FTGMAC100_INT_NO_RXBUF | FTGMAC100_INT_RPKT_LOST | - FTGMAC100_INT_AHB_ERR | FTGMAC100_INT_PHYSTS_CHG)) { + if (status & priv->int_mask_all & (FTGMAC100_INT_NO_RXBUF | + FTGMAC100_INT_RPKT_LOST | FTGMAC100_INT_AHB_ERR | + FTGMAC100_INT_PHYSTS_CHG)) { if (net_ratelimit()) netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status, status & FTGMAC100_INT_NO_RXBUF ? "NO_RXBUF " : "", @@ -1029,7 +1079,8 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget) napi_complete(napi); /* enable all interrupts */ - iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER); + iowrite32(priv->int_mask_all, + priv->base + FTGMAC100_OFFSET_IER); } return rx; @@ -1065,17 +1116,33 @@ static int ftgmac100_open(struct net_device *netdev) goto err_hw; ftgmac100_init_hw(priv); - ftgmac100_start_hw(priv, 10); - - phy_start(netdev->phydev); + ftgmac100_start_hw(priv, priv->use_ncsi ? 
100 : 10); + if (netdev->phydev) + phy_start(netdev->phydev); + else if (priv->use_ncsi) + netif_carrier_on(netdev); napi_enable(&priv->napi); netif_start_queue(netdev); /* enable all interrupts */ - iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER); + iowrite32(priv->int_mask_all, priv->base + FTGMAC100_OFFSET_IER); + + /* Start the NCSI device */ + if (priv->use_ncsi) { + err = ncsi_start_dev(priv->ndev); + if (err) + goto err_ncsi; + } + + priv->enabled = true; + return 0; +err_ncsi: + napi_disable(&priv->napi); + netif_stop_queue(netdev); + iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); err_hw: free_irq(priv->irq, netdev); err_irq: @@ -1088,12 +1155,17 @@ static int ftgmac100_stop(struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); + if (!priv->enabled) + return 0; + /* disable all interrupts */ + priv->enabled = false; iowrite32(0, priv->base + FTGMAC100_OFFSET_IER); netif_stop_queue(netdev); napi_disable(&priv->napi); - phy_stop(netdev->phydev); + if (netdev->phydev) + phy_stop(netdev->phydev); ftgmac100_stop_hw(priv); free_irq(priv->irq, netdev); @@ -1134,6 +1206,9 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb, /* optional */ static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { + if (!netdev->phydev) + return -ENXIO; + return phy_mii_ioctl(netdev->phydev, ifr, cmd); } @@ -1141,11 +1216,74 @@ static const struct net_device_ops ftgmac100_netdev_ops = { .ndo_open = ftgmac100_open, .ndo_stop = ftgmac100_stop, .ndo_start_xmit = ftgmac100_hard_start_xmit, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = ftgmac100_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = ftgmac100_do_ioctl, }; +static int ftgmac100_setup_mdio(struct net_device *netdev) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + struct platform_device *pdev = to_platform_device(priv->dev); + int i, err = 0; + + /* initialize mdio bus */ + priv->mii_bus = mdiobus_alloc(); + if (!priv->mii_bus) + return -EIO; + + priv->mii_bus->name = "ftgmac100_mdio"; + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d", + pdev->name, pdev->id); + priv->mii_bus->priv = priv->netdev; + priv->mii_bus->read = ftgmac100_mdiobus_read; + priv->mii_bus->write = ftgmac100_mdiobus_write; + + for (i = 0; i < PHY_MAX_ADDR; i++) + priv->mii_bus->irq[i] = PHY_POLL; + + err = mdiobus_register(priv->mii_bus); + if (err) { + dev_err(priv->dev, "Cannot register MDIO bus!\n"); + goto err_register_mdiobus; + } + + err = ftgmac100_mii_probe(priv); + if (err) { + dev_err(priv->dev, "MII Probe failed!\n"); + goto err_mii_probe; + } + + return 0; + +err_mii_probe: + mdiobus_unregister(priv->mii_bus); +err_register_mdiobus: + mdiobus_free(priv->mii_bus); + return err; +} + +static void ftgmac100_destroy_mdio(struct net_device *netdev) +{ + struct ftgmac100 *priv = netdev_priv(netdev); + + if (!netdev->phydev) + return; + + phy_disconnect(netdev->phydev); + mdiobus_unregister(priv->mii_bus); + mdiobus_free(priv->mii_bus); +} + +static void ftgmac100_ncsi_handler(struct ncsi_dev *nd) +{ + if (unlikely(nd->state != ncsi_dev_state_functional)) + return; + + netdev_info(nd->dev, "NCSI interface %s\n", + nd->link_up ? 
"up" : "down"); +} + /****************************************************************************** * struct platform_driver functions *****************************************************************************/ @@ -1155,7 +1293,7 @@ static int ftgmac100_probe(struct platform_device *pdev) int irq; struct net_device *netdev; struct ftgmac100 *priv; - int err; + int err = 0; if (!pdev) return -ENODEV; @@ -1179,7 +1317,6 @@ static int ftgmac100_probe(struct platform_device *pdev) netdev->ethtool_ops = &ftgmac100_ethtool_ops; netdev->netdev_ops = &ftgmac100_netdev_ops; - netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO; platform_set_drvdata(pdev, netdev); @@ -1211,31 +1348,45 @@ static int ftgmac100_probe(struct platform_device *pdev) priv->irq = irq; - /* initialize mdio bus */ - priv->mii_bus = mdiobus_alloc(); - if (!priv->mii_bus) { - err = -EIO; - goto err_alloc_mdiobus; - } - - priv->mii_bus->name = "ftgmac100_mdio"; - snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "ftgmac100_mii"); - - priv->mii_bus->priv = netdev; - priv->mii_bus->read = ftgmac100_mdiobus_read; - priv->mii_bus->write = ftgmac100_mdiobus_write; + /* MAC address from chip or random one */ + ftgmac100_setup_mac(priv); + + priv->int_mask_all = (FTGMAC100_INT_RPKT_LOST | + FTGMAC100_INT_XPKT_ETH | + FTGMAC100_INT_XPKT_LOST | + FTGMAC100_INT_AHB_ERR | + FTGMAC100_INT_PHYSTS_CHG | + FTGMAC100_INT_RPKT_BUF | + FTGMAC100_INT_NO_RXBUF); + if (pdev->dev.of_node && + of_get_property(pdev->dev.of_node, "use-ncsi", NULL)) { + if (!IS_ENABLED(CONFIG_NET_NCSI)) { + dev_err(&pdev->dev, "NCSI stack not enabled\n"); + goto err_ncsi_dev; + } - err = mdiobus_register(priv->mii_bus); - if (err) { - dev_err(&pdev->dev, "Cannot register MDIO bus!\n"); - goto err_register_mdiobus; + dev_info(&pdev->dev, "Using NCSI interface\n"); + priv->use_ncsi = true; + priv->int_mask_all &= ~FTGMAC100_INT_PHYSTS_CHG; + priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler); + if (!priv->ndev) + goto err_ncsi_dev; + } else { + priv->use_ncsi = false; + err = ftgmac100_setup_mdio(netdev); + if (err) + goto err_setup_mdio; } - err = ftgmac100_mii_probe(priv); - if (err) { - dev_err(&pdev->dev, "MII Probe failed!\n"); - goto err_mii_probe; - } + /* We have to disable on-chip IP checksum functionality + * when NCSI is enabled on the interface. It doesn't work + * in that case. 
+ */ + netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO; + if (priv->use_ncsi && + of_get_property(pdev->dev.of_node, "no-hw-checksum", NULL)) + netdev->features &= ~NETIF_F_IP_CSUM; + /* register network device */ err = register_netdev(netdev); @@ -1246,21 +1397,12 @@ static int ftgmac100_probe(struct platform_device *pdev) netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base); - if (!is_valid_ether_addr(netdev->dev_addr)) { - eth_hw_addr_random(netdev); - netdev_info(netdev, "generated random MAC address %pM\n", - netdev->dev_addr); - } - return 0; +err_ncsi_dev: err_register_netdev: - phy_disconnect(netdev->phydev); -err_mii_probe: - mdiobus_unregister(priv->mii_bus); -err_register_mdiobus: - mdiobus_free(priv->mii_bus); -err_alloc_mdiobus: + ftgmac100_destroy_mdio(netdev); +err_setup_mdio: iounmap(priv->base); err_ioremap: release_resource(priv->res); @@ -1280,10 +1422,7 @@ static int __exit ftgmac100_remove(struct platform_device *pdev) priv = netdev_priv(netdev); unregister_netdev(netdev); - - phy_disconnect(netdev->phydev); - mdiobus_unregister(priv->mii_bus); - mdiobus_free(priv->mii_bus); + ftgmac100_destroy_mdio(netdev); iounmap(priv->base); release_resource(priv->res); @@ -1293,14 +1432,20 @@ static int __exit ftgmac100_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id ftgmac100_of_match[] = { + { .compatible = "faraday,ftgmac100" }, + { } +}; +MODULE_DEVICE_TABLE(of, ftgmac100_of_match); + static struct platform_driver ftgmac100_driver = { - .probe = ftgmac100_probe, - .remove = __exit_p(ftgmac100_remove), - .driver = { - .name = DRV_NAME, + .probe = ftgmac100_probe, + .remove = __exit_p(ftgmac100_remove), + .driver = { + .name = DRV_NAME, + .of_match_table = ftgmac100_of_match, }, }; - module_platform_driver(ftgmac100_driver); MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index f58f9ea51..c865135f3 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -442,6 +442,10 @@ struct bufdesc_ex { #define FEC_QUIRK_SINGLE_MDIO (1 << 11) /* Controller supports RACC register */ #define FEC_QUIRK_HAS_RACC (1 << 12) +/* Controller supports interrupt coalesc */ +#define FEC_QUIRK_HAS_COALESCE (1 << 13) +/* Interrupt doesn't wake CPU from deep idle */ +#define FEC_QUIRK_ERR006687 (1 << 14) struct bufdesc_prop { int qid; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index fea0f330d..692ee248e 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -60,6 +60,7 @@ #include <linux/if_vlan.h> #include <linux/pinctrl/consumer.h> #include <linux/prefetch.h> +#include <soc/imx/cpuidle.h> #include <asm/cacheflush.h> @@ -88,10 +89,10 @@ static struct platform_device_id fec_devtype[] = { .driver_data = 0, }, { .name = "imx25-fec", - .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC, + .driver_data = FEC_QUIRK_USE_GASKET, }, { .name = "imx27-fec", - .driver_data = FEC_QUIRK_HAS_RACC, + .driver_data = 0, }, { .name = "imx28-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | @@ -111,7 +112,13 @@ static struct platform_device_id fec_devtype[] = { FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | - FEC_QUIRK_HAS_RACC, + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, + }, { + .name = "imx6ul-fec", + .driver_data = 
FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, }, { /* sentinel */ } @@ -125,6 +132,7 @@ enum imx_fec_type { IMX6Q_FEC, MVF600_FEC, IMX6SX_FEC, + IMX6UL_FEC, }; static const struct of_device_id fec_dt_ids[] = { @@ -134,6 +142,7 @@ static const struct of_device_id fec_dt_ids[] = { { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, + { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@ -171,6 +180,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); /* FEC receive acceleration */ #define FEC_RACC_IPDIS (1 << 1) #define FEC_RACC_PRODIS (1 << 2) +#define FEC_RACC_SHIFT16 BIT(7) #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS) /* @@ -936,9 +946,11 @@ fec_restart(struct net_device *ndev) #if !defined(CONFIG_M5272) if (fep->quirks & FEC_QUIRK_HAS_RACC) { - /* set RX checksum */ val = readl(fep->hwp + FEC_RACC); + /* align IP header */ + val |= FEC_RACC_SHIFT16; if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) + /* set RX checksum */ val |= FEC_RACC_OPTIONS; else val &= ~FEC_RACC_OPTIONS; @@ -1419,6 +1431,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) prefetch(skb->data - NET_IP_ALIGN); skb_put(skb, pkt_len - 4); data = skb->data; + +#if !defined(CONFIG_M5272) + if (fep->quirks & FEC_QUIRK_HAS_RACC) + data = skb_pull_inline(skb, 2); +#endif + if (!is_copybreak && need_swap) swap_buffer(data, pkt_len); @@ -2358,9 +2376,6 @@ static void fec_enet_itr_coal_set(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); int rx_itr, tx_itr; - if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) - return; - /* Must be greater than zero to avoid unpredictable behavior */ if (!fep->rx_time_itr || !fep->rx_pkts_itr || !fep->tx_time_itr || !fep->tx_pkts_itr) @@ -2383,10 +2398,12 @@ static void fec_enet_itr_coal_set(struct net_device *ndev) writel(tx_itr, fep->hwp + FEC_TXIC0); writel(rx_itr, fep->hwp + FEC_RXIC0); - writel(tx_itr, fep->hwp + FEC_TXIC1); - writel(rx_itr, fep->hwp + FEC_RXIC1); - writel(tx_itr, fep->hwp + FEC_TXIC2); - writel(rx_itr, fep->hwp + FEC_RXIC2); + if (fep->quirks & FEC_QUIRK_HAS_AVB) { + writel(tx_itr, fep->hwp + FEC_TXIC1); + writel(rx_itr, fep->hwp + FEC_RXIC1); + writel(tx_itr, fep->hwp + FEC_TXIC2); + writel(rx_itr, fep->hwp + FEC_RXIC2); + } } static int @@ -2394,7 +2411,7 @@ fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) { struct fec_enet_private *fep = netdev_priv(ndev); - if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) return -EOPNOTSUPP; ec->rx_coalesce_usecs = fep->rx_time_itr; @@ -2412,7 +2429,7 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) struct fec_enet_private *fep = netdev_priv(ndev); unsigned int cycle; - if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) + if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) return -EOPNOTSUPP; if (ec->rx_max_coalesced_frames > 255) { @@ -2818,6 +2835,9 @@ fec_enet_open(struct net_device *ndev) if (ret) goto err_enet_mii_probe; + if (fep->quirks & FEC_QUIRK_ERR006687) + imx6q_cpuidle_fec_irqs_used(); + napi_enable(&fep->napi); phy_start(ndev->phydev); netif_tx_start_all_queues(ndev); @@ -2853,6 +2873,9 @@ fec_enet_close(struct net_device 
*ndev) phy_disconnect(ndev->phydev); + if (fep->quirks & FEC_QUIRK_ERR006687) + imx6q_cpuidle_fec_irqs_unused(); + fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&fep->pdev->dev); pm_runtime_mark_last_busy(&fep->pdev->dev); @@ -3191,7 +3214,12 @@ static void fec_reset_phy(struct platform_device *pdev) dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); return; } - msleep(msec); + + if (msec > 20) + msleep(msec); + else + usleep_range(msec * 1000, msec * 1000 + 1000); + gpio_set_value_cansleep(phy_reset, !active_high); } #else /* CONFIG_OF */ @@ -3292,6 +3320,11 @@ fec_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); + if ((of_machine_is_compatible("fsl,imx6q") || + of_machine_is_compatible("fsl,imx6dl")) && + !of_property_read_bool(np, "fsl,err006687-workaround-present")) + fep->quirks |= FEC_QUIRK_ERR006687; + if (of_get_property(np, "fsl,magic-packet", NULL)) fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 2e6785b6e..4b4f5bc0e 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2275,7 +2275,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb, fcb->flags = flags; } -void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) +static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb) { fcb->flags |= TXFCB_VLN; fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb)); @@ -2922,17 +2922,25 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus, { unsigned int size = lstatus & BD_LENGTH_MASK; struct page *page = rxb->page; + bool last = !!(lstatus & BD_LFLAG(RXBD_LAST)); /* Remove the FCS from the packet length */ - if (likely(lstatus & BD_LFLAG(RXBD_LAST))) + if (last) size -= ETH_FCS_LEN; - if (likely(first)) + if (likely(first)) { skb_put(skb, size); - else - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, - rxb->page_offset + RXBUF_ALIGNMENT, - size, GFAR_RXB_TRUESIZE); + } else { + /* the last fragments' length contains the full frame length */ + if (last) + size -= skb->len; + + /* in case the last fragment consisted only of the FCS */ + if (size > 0) + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rxb->page_offset + RXBUF_ALIGNMENT, + size, GFAR_RXB_TRUESIZE); + } /* try reuse page */ if (unlikely(page_count(page) != 1)) diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 373fd094f..6e8a9c846 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -100,7 +100,8 @@ extern const char gfar_driver_version[]; #define DEFAULT_RX_LFC_THR 16 #define DEFAULT_LFC_PTVVAL 4 -#define GFAR_RXB_SIZE 1536 +/* prevent fragmenation by HW in DSA environments */ +#define GFAR_RXB_SIZE roundup(1536 + 8, 64) #define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define GFAR_RXB_TRUESIZE 2048 diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 4ccc03263..d11287e11 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_HISILICON bool "Hisilicon devices" default y - depends on OF && HAS_DMA + depends on (OF || ACPI) && HAS_DMA depends on ARM || ARM64 || COMPILE_TEST ---help--- If you have a network (Ethernet) card belonging to this class, say Y. 
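The fec_reset_phy() hunk above replaces an unconditional msleep() with the usual kernel delay policy: jiffy-based msleep() only for waits longer than about 20 ms, and hrtimer-based usleep_range() (with some slack so wakeups can be coalesced) for anything shorter. The hisi_femac driver added below applies the same rule in hisi_femac_sleep_us(). A minimal self-contained sketch of the pattern; the helper name is illustrative and appears in neither driver:

#include <linux/delay.h>

/* Sleep for 'msec' milliseconds, choosing the timer API the way the
 * hunks in this section do: msleep() is jiffy-granular and imprecise
 * for short waits, so those go through usleep_range() with a 1 ms
 * slack window instead.
 */
static void example_reset_sleep(unsigned int msec)
{
        if (msec > 20)
                msleep(msec);
        else
                usleep_range(msec * 1000, msec * 1000 + 1000);
}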
@@ -23,6 +23,18 @@ config HIX5HD2_GMAC help This selects the hix5hd2 mac family network device. +config HISI_FEMAC + tristate "Hisilicon Fast Ethernet MAC device support" + depends on HAS_IOMEM + select PHYLIB + select RESET_CONTROLLER + help + This selects the Hisilicon Fast Ethernet MAC device (FEMAC). + The FEMAC receives and transmits data over Ethernet + ports at 10/100 Mbps in full-duplex or half-duplex mode. + The FEMAC exchanges data with the CPU, and supports + energy-efficient Ethernet (EEE). + config HIP04_ETH tristate "HISILICON P04 Ethernet support" depends on HAS_IOMEM # For MFD_SYSCON diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile index 390b71fb3..866169502 100644 --- a/drivers/net/ethernet/hisilicon/Makefile +++ b/drivers/net/ethernet/hisilicon/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o obj-$(CONFIG_HIP04_ETH) += hip04_eth.o obj-$(CONFIG_HNS_MDIO) += hns_mdio.o obj-$(CONFIG_HNS) += hns/ +obj-$(CONFIG_HISI_FEMAC) += hisi_femac.o diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c new file mode 100644 index 000000000..b5d7ad025 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -0,0 +1,1007 @@ +/* + * Hisilicon Fast Ethernet MAC Driver + * + * Copyright (c) 2016 HiSilicon Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/circ_buf.h> +#include <linux/clk.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +/* MAC control register list */ +#define MAC_PORTSEL 0x0200 +#define MAC_PORTSEL_STAT_CPU BIT(0) +#define MAC_PORTSEL_RMII BIT(1) +#define MAC_PORTSET 0x0208 +#define MAC_PORTSET_DUPLEX_FULL BIT(0) +#define MAC_PORTSET_LINKED BIT(1) +#define MAC_PORTSET_SPEED_100M BIT(2) +#define MAC_SET 0x0210 +#define MAX_FRAME_SIZE 1600 +#define MAX_FRAME_SIZE_MASK GENMASK(10, 0) +#define BIT_PAUSE_EN BIT(18) +#define RX_COALESCE_SET 0x0340 +#define RX_COALESCED_FRAME_OFFSET 24 +#define RX_COALESCED_FRAMES 8 +#define RX_COALESCED_TIMER 0x74 +#define QLEN_SET 0x0344 +#define RX_DEPTH_OFFSET 8 +#define MAX_HW_FIFO_DEPTH 64 +#define HW_TX_FIFO_DEPTH 12 +#define HW_RX_FIFO_DEPTH (MAX_HW_FIFO_DEPTH - HW_TX_FIFO_DEPTH) +#define IQFRM_DES 0x0354 +#define RX_FRAME_LEN_MASK GENMASK(11, 0) +#define IQ_ADDR 0x0358 +#define EQ_ADDR 0x0360 +#define EQFRM_LEN 0x0364 +#define ADDRQ_STAT 0x036C +#define TX_CNT_INUSE_MASK GENMASK(5, 0) +#define BIT_TX_READY BIT(24) +#define BIT_RX_READY BIT(25) +/* global control register list */ +#define GLB_HOSTMAC_L32 0x0000 +#define GLB_HOSTMAC_H16 0x0004 +#define GLB_SOFT_RESET 0x0008 +#define SOFT_RESET_ALL BIT(0) +#define GLB_FWCTRL 0x0010 +#define FWCTRL_VLAN_ENABLE BIT(0) +#define FWCTRL_FW2CPU_ENA BIT(5) +#define FWCTRL_FWALL2CPU BIT(7) +#define GLB_MACTCTRL 0x0014 +#define MACTCTRL_UNI2CPU BIT(1) +#define MACTCTRL_MULTI2CPU BIT(3) +#define MACTCTRL_BROAD2CPU BIT(5) +#define MACTCTRL_MACT_ENA BIT(7) +#define GLB_IRQ_STAT 0x0030 +#define GLB_IRQ_ENA 0x0034 +#define IRQ_ENA_PORT0_MASK GENMASK(7, 0) +#define IRQ_ENA_PORT0 BIT(18) +#define IRQ_ENA_ALL BIT(19) +#define GLB_IRQ_RAW 0x0038 +#define IRQ_INT_RX_RDY BIT(0) +#define IRQ_INT_TX_PER_PACKET BIT(1) +#define IRQ_INT_TX_FIFO_EMPTY BIT(6) +#define IRQ_INT_MULTI_RXRDY BIT(7) +#define DEF_INT_MASK (IRQ_INT_MULTI_RXRDY | \ + IRQ_INT_TX_PER_PACKET | \ + IRQ_INT_TX_FIFO_EMPTY) +#define GLB_MAC_L32_BASE 0x0100 +#define GLB_MAC_H16_BASE 0x0104 +#define MACFLT_HI16_MASK GENMASK(15, 0) +#define BIT_MACFLT_ENA BIT(17) +#define BIT_MACFLT_FW2CPU BIT(21) +#define GLB_MAC_H16(reg) (GLB_MAC_H16_BASE + ((reg) * 0x8)) +#define GLB_MAC_L32(reg) (GLB_MAC_L32_BASE + ((reg) * 0x8)) +#define MAX_MAC_FILTER_NUM 8 +#define MAX_UNICAST_ADDRESSES 2 +#define MAX_MULTICAST_ADDRESSES (MAX_MAC_FILTER_NUM - \ + MAX_UNICAST_ADDRESSES) +/* software tx and rx queue number, should be power of 2 */ +#define TXQ_NUM 64 +#define RXQ_NUM 128 +#define FEMAC_POLL_WEIGHT 16 + +#define PHY_RESET_DELAYS_PROPERTY "hisilicon,phy-reset-delays-us" + +enum phy_reset_delays { + PRE_DELAY, + PULSE, + POST_DELAY, + DELAYS_NUM, +}; + +struct hisi_femac_queue { + struct sk_buff **skb; + dma_addr_t *dma_phys; + int num; + unsigned int head; + unsigned int tail; +}; + +struct hisi_femac_priv { + void __iomem *port_base; + void __iomem *glb_base; + struct clk *clk; + struct reset_control *mac_rst; + struct reset_control *phy_rst; + u32 phy_reset_delays[DELAYS_NUM]; + u32 link_status; + + struct device *dev; + struct net_device *ndev; + + struct hisi_femac_queue txq; + struct hisi_femac_queue rxq; + u32 tx_fifo_used_cnt; + struct napi_struct napi; +}; + +static void hisi_femac_irq_enable(struct hisi_femac_priv *priv, int irqs) +{ + u32 val; + + val = readl(priv->glb_base + GLB_IRQ_ENA); + writel(val | irqs, 
priv->glb_base + GLB_IRQ_ENA); +} + +static void hisi_femac_irq_disable(struct hisi_femac_priv *priv, int irqs) +{ + u32 val; + + val = readl(priv->glb_base + GLB_IRQ_ENA); + writel(val & (~irqs), priv->glb_base + GLB_IRQ_ENA); +} + +static void hisi_femac_tx_dma_unmap(struct hisi_femac_priv *priv, + struct sk_buff *skb, unsigned int pos) +{ + dma_addr_t dma_addr; + + dma_addr = priv->txq.dma_phys[pos]; + dma_unmap_single(priv->dev, dma_addr, skb->len, DMA_TO_DEVICE); +} + +static void hisi_femac_xmit_reclaim(struct net_device *dev) +{ + struct sk_buff *skb; + struct hisi_femac_priv *priv = netdev_priv(dev); + struct hisi_femac_queue *txq = &priv->txq; + unsigned int bytes_compl = 0, pkts_compl = 0; + u32 val; + + netif_tx_lock(dev); + + val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK; + while (val < priv->tx_fifo_used_cnt) { + skb = txq->skb[txq->tail]; + if (unlikely(!skb)) { + netdev_err(dev, "xmitq_cnt_inuse=%d, tx_fifo_used=%d\n", + val, priv->tx_fifo_used_cnt); + break; + } + hisi_femac_tx_dma_unmap(priv, skb, txq->tail); + pkts_compl++; + bytes_compl += skb->len; + dev_kfree_skb_any(skb); + + priv->tx_fifo_used_cnt--; + + val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK; + txq->skb[txq->tail] = NULL; + txq->tail = (txq->tail + 1) % txq->num; + } + + netdev_completed_queue(dev, pkts_compl, bytes_compl); + + if (unlikely(netif_queue_stopped(dev)) && pkts_compl) + netif_wake_queue(dev); + + netif_tx_unlock(dev); +} + +static void hisi_femac_adjust_link(struct net_device *dev) +{ + struct hisi_femac_priv *priv = netdev_priv(dev); + struct phy_device *phy = dev->phydev; + u32 status = 0; + + if (phy->link) + status |= MAC_PORTSET_LINKED; + if (phy->duplex == DUPLEX_FULL) + status |= MAC_PORTSET_DUPLEX_FULL; + if (phy->speed == SPEED_100) + status |= MAC_PORTSET_SPEED_100M; + + if ((status != priv->link_status) && + ((status | priv->link_status) & MAC_PORTSET_LINKED)) { + writel(status, priv->port_base + MAC_PORTSET); + priv->link_status = status; + phy_print_status(phy); + } +} + +static void hisi_femac_rx_refill(struct hisi_femac_priv *priv) +{ + struct hisi_femac_queue *rxq = &priv->rxq; + struct sk_buff *skb; + u32 pos; + u32 len = MAX_FRAME_SIZE; + dma_addr_t addr; + + pos = rxq->head; + while (readl(priv->port_base + ADDRQ_STAT) & BIT_RX_READY) { + if (!CIRC_SPACE(pos, rxq->tail, rxq->num)) + break; + if (unlikely(rxq->skb[pos])) { + netdev_err(priv->ndev, "err skb[%d]=%p\n", + pos, rxq->skb[pos]); + break; + } + skb = netdev_alloc_skb_ip_align(priv->ndev, len); + if (unlikely(!skb)) + break; + + addr = dma_map_single(priv->dev, skb->data, len, + DMA_FROM_DEVICE); + if (dma_mapping_error(priv->dev, addr)) { + dev_kfree_skb_any(skb); + break; + } + rxq->dma_phys[pos] = addr; + rxq->skb[pos] = skb; + writel(addr, priv->port_base + IQ_ADDR); + pos = (pos + 1) % rxq->num; + } + rxq->head = pos; +} + +static int hisi_femac_rx(struct net_device *dev, int limit) +{ + struct hisi_femac_priv *priv = netdev_priv(dev); + struct hisi_femac_queue *rxq = &priv->rxq; + struct sk_buff *skb; + dma_addr_t addr; + u32 rx_pkt_info, pos, len, rx_pkts_num = 0; + + pos = rxq->tail; + while (readl(priv->glb_base + GLB_IRQ_RAW) & IRQ_INT_RX_RDY) { + rx_pkt_info = readl(priv->port_base + IQFRM_DES); + len = rx_pkt_info & RX_FRAME_LEN_MASK; + len -= ETH_FCS_LEN; + + /* tell hardware we will deal with this packet */ + writel(IRQ_INT_RX_RDY, priv->glb_base + GLB_IRQ_RAW); + + rx_pkts_num++; + + skb = rxq->skb[pos]; + if (unlikely(!skb)) { + netdev_err(dev, "rx skb NULL. 
pos=%d\n", pos); + break; + } + rxq->skb[pos] = NULL; + + addr = rxq->dma_phys[pos]; + dma_unmap_single(priv->dev, addr, MAX_FRAME_SIZE, + DMA_FROM_DEVICE); + skb_put(skb, len); + if (unlikely(skb->len > MAX_FRAME_SIZE)) { + netdev_err(dev, "rcv len err, len = %d\n", skb->len); + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + dev_kfree_skb_any(skb); + goto next; + } + + skb->protocol = eth_type_trans(skb, dev); + napi_gro_receive(&priv->napi, skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; +next: + pos = (pos + 1) % rxq->num; + if (rx_pkts_num >= limit) + break; + } + rxq->tail = pos; + + hisi_femac_rx_refill(priv); + + return rx_pkts_num; +} + +static int hisi_femac_poll(struct napi_struct *napi, int budget) +{ + struct hisi_femac_priv *priv = container_of(napi, + struct hisi_femac_priv, napi); + struct net_device *dev = priv->ndev; + int work_done = 0, task = budget; + int ints, num; + + do { + hisi_femac_xmit_reclaim(dev); + num = hisi_femac_rx(dev, task); + work_done += num; + task -= num; + if (work_done >= budget) + break; + + ints = readl(priv->glb_base + GLB_IRQ_RAW); + writel(ints & DEF_INT_MASK, + priv->glb_base + GLB_IRQ_RAW); + } while (ints & DEF_INT_MASK); + + if (work_done < budget) { + napi_complete(napi); + hisi_femac_irq_enable(priv, DEF_INT_MASK & + (~IRQ_INT_TX_PER_PACKET)); + } + + return work_done; +} + +static irqreturn_t hisi_femac_interrupt(int irq, void *dev_id) +{ + int ints; + struct net_device *dev = (struct net_device *)dev_id; + struct hisi_femac_priv *priv = netdev_priv(dev); + + ints = readl(priv->glb_base + GLB_IRQ_RAW); + + if (likely(ints & DEF_INT_MASK)) { + writel(ints & DEF_INT_MASK, + priv->glb_base + GLB_IRQ_RAW); + hisi_femac_irq_disable(priv, DEF_INT_MASK); + napi_schedule(&priv->napi); + } + + return IRQ_HANDLED; +} + +static int hisi_femac_init_queue(struct device *dev, + struct hisi_femac_queue *queue, + unsigned int num) +{ + queue->skb = devm_kcalloc(dev, num, sizeof(struct sk_buff *), + GFP_KERNEL); + if (!queue->skb) + return -ENOMEM; + + queue->dma_phys = devm_kcalloc(dev, num, sizeof(dma_addr_t), + GFP_KERNEL); + if (!queue->dma_phys) + return -ENOMEM; + + queue->num = num; + queue->head = 0; + queue->tail = 0; + + return 0; +} + +static int hisi_femac_init_tx_and_rx_queues(struct hisi_femac_priv *priv) +{ + int ret; + + ret = hisi_femac_init_queue(priv->dev, &priv->txq, TXQ_NUM); + if (ret) + return ret; + + ret = hisi_femac_init_queue(priv->dev, &priv->rxq, RXQ_NUM); + if (ret) + return ret; + + priv->tx_fifo_used_cnt = 0; + + return 0; +} + +static void hisi_femac_free_skb_rings(struct hisi_femac_priv *priv) +{ + struct hisi_femac_queue *txq = &priv->txq; + struct hisi_femac_queue *rxq = &priv->rxq; + struct sk_buff *skb; + dma_addr_t dma_addr; + u32 pos; + + pos = rxq->tail; + while (pos != rxq->head) { + skb = rxq->skb[pos]; + if (unlikely(!skb)) { + netdev_err(priv->ndev, "NULL rx skb. pos=%d, head=%d\n", + pos, rxq->head); + continue; + } + + dma_addr = rxq->dma_phys[pos]; + dma_unmap_single(priv->dev, dma_addr, MAX_FRAME_SIZE, + DMA_FROM_DEVICE); + + dev_kfree_skb_any(skb); + rxq->skb[pos] = NULL; + pos = (pos + 1) % rxq->num; + } + rxq->tail = pos; + + pos = txq->tail; + while (pos != txq->head) { + skb = txq->skb[pos]; + if (unlikely(!skb)) { + netdev_err(priv->ndev, "NULL tx skb. 
pos=%d, head=%d\n", + pos, txq->head); + continue; + } + hisi_femac_tx_dma_unmap(priv, skb, pos); + dev_kfree_skb_any(skb); + txq->skb[pos] = NULL; + pos = (pos + 1) % txq->num; + } + txq->tail = pos; + priv->tx_fifo_used_cnt = 0; +} + +static int hisi_femac_set_hw_mac_addr(struct hisi_femac_priv *priv, + unsigned char *mac) +{ + u32 reg; + + reg = mac[1] | (mac[0] << 8); + writel(reg, priv->glb_base + GLB_HOSTMAC_H16); + + reg = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24); + writel(reg, priv->glb_base + GLB_HOSTMAC_L32); + + return 0; +} + +static int hisi_femac_port_reset(struct hisi_femac_priv *priv) +{ + u32 val; + + val = readl(priv->glb_base + GLB_SOFT_RESET); + val |= SOFT_RESET_ALL; + writel(val, priv->glb_base + GLB_SOFT_RESET); + + usleep_range(500, 800); + + val &= ~SOFT_RESET_ALL; + writel(val, priv->glb_base + GLB_SOFT_RESET); + + return 0; +} + +static int hisi_femac_net_open(struct net_device *dev) +{ + struct hisi_femac_priv *priv = netdev_priv(dev); + + hisi_femac_port_reset(priv); + hisi_femac_set_hw_mac_addr(priv, dev->dev_addr); + hisi_femac_rx_refill(priv); + + netif_carrier_off(dev); + netdev_reset_queue(dev); + netif_start_queue(dev); + napi_enable(&priv->napi); + + priv->link_status = 0; + if (dev->phydev) + phy_start(dev->phydev); + + writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW); + hisi_femac_irq_enable(priv, IRQ_ENA_ALL | IRQ_ENA_PORT0 | DEF_INT_MASK); + + return 0; +} + +static int hisi_femac_net_close(struct net_device *dev) +{ + struct hisi_femac_priv *priv = netdev_priv(dev); + + hisi_femac_irq_disable(priv, IRQ_ENA_PORT0); + + if (dev->phydev) + phy_stop(dev->phydev); + + netif_stop_queue(dev); + napi_disable(&priv->napi); + + hisi_femac_free_skb_rings(priv); + + return 0; +} + +static netdev_tx_t hisi_femac_net_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct hisi_femac_priv *priv = netdev_priv(dev); + struct hisi_femac_queue *txq = &priv->txq; + dma_addr_t addr; + u32 val; + + val = readl(priv->port_base + ADDRQ_STAT); + val &= BIT_TX_READY; + if (!val) { + hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET); + dev->stats.tx_dropped++; + dev->stats.tx_fifo_errors++; + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + + if (unlikely(!CIRC_SPACE(txq->head, txq->tail, + txq->num))) { + hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET); + dev->stats.tx_dropped++; + dev->stats.tx_fifo_errors++; + netif_stop_queue(dev); + return NETDEV_TX_BUSY; + } + + addr = dma_map_single(priv->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, addr))) { + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + txq->dma_phys[txq->head] = addr; + + txq->skb[txq->head] = skb; + txq->head = (txq->head + 1) % txq->num; + + writel(addr, priv->port_base + EQ_ADDR); + writel(skb->len + ETH_FCS_LEN, priv->port_base + EQFRM_LEN); + + priv->tx_fifo_used_cnt++; + + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + netdev_sent_queue(dev, skb->len); + + return NETDEV_TX_OK; +} + +static int hisi_femac_set_mac_address(struct net_device *dev, void *p) +{ + struct hisi_femac_priv *priv = netdev_priv(dev); + struct sockaddr *skaddr = p; + + if (!is_valid_ether_addr(skaddr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, skaddr->sa_data, dev->addr_len); + dev->addr_assign_type &= ~NET_ADDR_RANDOM; + + hisi_femac_set_hw_mac_addr(priv, dev->dev_addr); + + return 0; +} + +static void hisi_femac_enable_hw_addr_filter(struct hisi_femac_priv *priv, + unsigned int reg_n, 
bool enable) +{ + u32 val; + + val = readl(priv->glb_base + GLB_MAC_H16(reg_n)); + if (enable) + val |= BIT_MACFLT_ENA; + else + val &= ~BIT_MACFLT_ENA; + writel(val, priv->glb_base + GLB_MAC_H16(reg_n)); +} + +static void hisi_femac_set_hw_addr_filter(struct hisi_femac_priv *priv, + unsigned char *addr, + unsigned int reg_n) +{ + unsigned int high, low; + u32 val; + + high = GLB_MAC_H16(reg_n); + low = GLB_MAC_L32(reg_n); + + val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5]; + writel(val, priv->glb_base + low); + + val = readl(priv->glb_base + high); + val &= ~MACFLT_HI16_MASK; + val |= ((addr[0] << 8) | addr[1]); + val |= (BIT_MACFLT_ENA | BIT_MACFLT_FW2CPU); + writel(val, priv->glb_base + high); +} + +static void hisi_femac_set_promisc_mode(struct hisi_femac_priv *priv, + bool promisc_mode) +{ + u32 val; + + val = readl(priv->glb_base + GLB_FWCTRL); + if (promisc_mode) + val |= FWCTRL_FWALL2CPU; + else + val &= ~FWCTRL_FWALL2CPU; + writel(val, priv->glb_base + GLB_FWCTRL); +} + +/* Handle multiple multicast addresses (perfect filtering)*/ +static void hisi_femac_set_mc_addr_filter(struct hisi_femac_priv *priv) +{ + struct net_device *dev = priv->ndev; + u32 val; + + val = readl(priv->glb_base + GLB_MACTCTRL); + if ((netdev_mc_count(dev) > MAX_MULTICAST_ADDRESSES) || + (dev->flags & IFF_ALLMULTI)) { + val |= MACTCTRL_MULTI2CPU; + } else { + int reg = MAX_UNICAST_ADDRESSES; + int i; + struct netdev_hw_addr *ha; + + for (i = reg; i < MAX_MAC_FILTER_NUM; i++) + hisi_femac_enable_hw_addr_filter(priv, i, false); + + netdev_for_each_mc_addr(ha, dev) { + hisi_femac_set_hw_addr_filter(priv, ha->addr, reg); + reg++; + } + val &= ~MACTCTRL_MULTI2CPU; + } + writel(val, priv->glb_base + GLB_MACTCTRL); +} + +/* Handle multiple unicast addresses (perfect filtering)*/ +static void hisi_femac_set_uc_addr_filter(struct hisi_femac_priv *priv) +{ + struct net_device *dev = priv->ndev; + u32 val; + + val = readl(priv->glb_base + GLB_MACTCTRL); + if (netdev_uc_count(dev) > MAX_UNICAST_ADDRESSES) { + val |= MACTCTRL_UNI2CPU; + } else { + int reg = 0; + int i; + struct netdev_hw_addr *ha; + + for (i = reg; i < MAX_UNICAST_ADDRESSES; i++) + hisi_femac_enable_hw_addr_filter(priv, i, false); + + netdev_for_each_uc_addr(ha, dev) { + hisi_femac_set_hw_addr_filter(priv, ha->addr, reg); + reg++; + } + val &= ~MACTCTRL_UNI2CPU; + } + writel(val, priv->glb_base + GLB_MACTCTRL); +} + +static void hisi_femac_net_set_rx_mode(struct net_device *dev) +{ + struct hisi_femac_priv *priv = netdev_priv(dev); + + if (dev->flags & IFF_PROMISC) { + hisi_femac_set_promisc_mode(priv, true); + } else { + hisi_femac_set_promisc_mode(priv, false); + hisi_femac_set_mc_addr_filter(priv); + hisi_femac_set_uc_addr_filter(priv); + } +} + +static int hisi_femac_net_ioctl(struct net_device *dev, + struct ifreq *ifreq, int cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -EINVAL; + + return phy_mii_ioctl(dev->phydev, ifreq, cmd); +} + +static struct ethtool_ops hisi_femac_ethtools_ops = { + .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + +static const struct net_device_ops hisi_femac_netdev_ops = { + .ndo_open = hisi_femac_net_open, + .ndo_stop = hisi_femac_net_close, + .ndo_start_xmit = hisi_femac_net_xmit, + .ndo_do_ioctl = hisi_femac_net_ioctl, + .ndo_set_mac_address = hisi_femac_set_mac_address, + .ndo_set_rx_mode = hisi_femac_net_set_rx_mode, + .ndo_change_mtu = eth_change_mtu, +}; + 
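Both the perfect-filter helpers above and hisi_femac_set_hw_mac_addr() earlier in this file use the same layout: the six address bytes are split big-endian across a pair of registers, two bytes in the low half of the "high" register and four bytes in the "low" register; the ftgmac100 MADR/LADR code near the top of this section packs addresses identically. A self-contained sketch of the transform and its inverse; the function names here are illustrative only:

#include <linux/types.h>

/* Pack a MAC address as hisi_femac_set_hw_mac_addr() does: bytes 0-1
 * into the low 16 bits of the high register, bytes 2-5 big-endian
 * into the low register.
 */
static void example_mac_to_regs(const u8 *mac, u32 *high, u32 *low)
{
        *high = (mac[0] << 8) | mac[1];
        *low = (mac[2] << 24) | (mac[3] << 16) |
               (mac[4] << 8) | mac[5];
}

/* Inverse transform, as ftgmac100_setup_mac() does when recovering
 * the address from the chip.
 */
static void example_regs_to_mac(u32 high, u32 low, u8 *mac)
{
        mac[0] = (high >> 8) & 0xff;
        mac[1] = high & 0xff;
        mac[2] = (low >> 24) & 0xff;
        mac[3] = (low >> 16) & 0xff;
        mac[4] = (low >> 8) & 0xff;
        mac[5] = low & 0xff;
}

For example, 00:11:22:33:44:55 packs to high = 0x0011, low = 0x22334455. The per-entry filter registers written by hisi_femac_set_hw_addr_filter() keep the same byte order and additionally set BIT_MACFLT_ENA and BIT_MACFLT_FW2CPU above the 16-bit address field.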
+static void hisi_femac_core_reset(struct hisi_femac_priv *priv) +{ + reset_control_assert(priv->mac_rst); + reset_control_deassert(priv->mac_rst); +} + +static void hisi_femac_sleep_us(u32 time_us) +{ + u32 time_ms; + + if (!time_us) + return; + + time_ms = DIV_ROUND_UP(time_us, 1000); + if (time_ms < 20) + usleep_range(time_us, time_us + 500); + else + msleep(time_ms); +} + +static void hisi_femac_phy_reset(struct hisi_femac_priv *priv) +{ + /* To make sure PHY hardware reset success, + * we must keep PHY in deassert state first and + * then complete the hardware reset operation + */ + reset_control_deassert(priv->phy_rst); + hisi_femac_sleep_us(priv->phy_reset_delays[PRE_DELAY]); + + reset_control_assert(priv->phy_rst); + /* delay some time to ensure reset ok, + * this depends on PHY hardware feature + */ + hisi_femac_sleep_us(priv->phy_reset_delays[PULSE]); + reset_control_deassert(priv->phy_rst); + /* delay some time to ensure later MDIO access */ + hisi_femac_sleep_us(priv->phy_reset_delays[POST_DELAY]); +} + +static void hisi_femac_port_init(struct hisi_femac_priv *priv) +{ + u32 val; + + /* MAC gets link status info and phy mode by software config */ + val = MAC_PORTSEL_STAT_CPU; + if (priv->ndev->phydev->interface == PHY_INTERFACE_MODE_RMII) + val |= MAC_PORTSEL_RMII; + writel(val, priv->port_base + MAC_PORTSEL); + + /*clear all interrupt status */ + writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW); + hisi_femac_irq_disable(priv, IRQ_ENA_PORT0_MASK | IRQ_ENA_PORT0); + + val = readl(priv->glb_base + GLB_FWCTRL); + val &= ~(FWCTRL_VLAN_ENABLE | FWCTRL_FWALL2CPU); + val |= FWCTRL_FW2CPU_ENA; + writel(val, priv->glb_base + GLB_FWCTRL); + + val = readl(priv->glb_base + GLB_MACTCTRL); + val |= (MACTCTRL_BROAD2CPU | MACTCTRL_MACT_ENA); + writel(val, priv->glb_base + GLB_MACTCTRL); + + val = readl(priv->port_base + MAC_SET); + val &= ~MAX_FRAME_SIZE_MASK; + val |= MAX_FRAME_SIZE; + writel(val, priv->port_base + MAC_SET); + + val = RX_COALESCED_TIMER | + (RX_COALESCED_FRAMES << RX_COALESCED_FRAME_OFFSET); + writel(val, priv->port_base + RX_COALESCE_SET); + + val = (HW_RX_FIFO_DEPTH << RX_DEPTH_OFFSET) | HW_TX_FIFO_DEPTH; + writel(val, priv->port_base + QLEN_SET); +} + +static int hisi_femac_drv_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct resource *res; + struct net_device *ndev; + struct hisi_femac_priv *priv; + struct phy_device *phy; + const char *mac_addr; + int ret; + + ndev = alloc_etherdev(sizeof(*priv)); + if (!ndev) + return -ENOMEM; + + platform_set_drvdata(pdev, ndev); + + priv = netdev_priv(ndev); + priv->dev = dev; + priv->ndev = ndev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->port_base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->port_base)) { + ret = PTR_ERR(priv->port_base); + goto out_free_netdev; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->glb_base = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->glb_base)) { + ret = PTR_ERR(priv->glb_base); + goto out_free_netdev; + } + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(dev, "failed to get clk\n"); + ret = -ENODEV; + goto out_free_netdev; + } + + ret = clk_prepare_enable(priv->clk); + if (ret) { + dev_err(dev, "failed to enable clk %d\n", ret); + goto out_free_netdev; + } + + priv->mac_rst = devm_reset_control_get(dev, "mac"); + if (IS_ERR(priv->mac_rst)) { + ret = PTR_ERR(priv->mac_rst); + goto out_disable_clk; + } + 
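/*
 * Illustrative note, not code from the patch: at this point in probe
 * the clock is enabled and the "mac" reset line has been acquired, so
 * the call below can bounce the MAC block through
 * reset_control_assert()/reset_control_deassert() before any register
 * setup. As a worked example of the later register writes, assuming
 * the semantics implied by the #defines at the top of the file,
 * hisi_femac_port_init() programs:
 *
 *      RX_COALESCE_SET = (8 << 24) | 0x74 = 0x08000074
 *      QLEN_SET = ((64 - 12) << 8) | 12 = 0x340c
 *
 * i.e. up to 8 coalesced RX frames per interrupt (bounded by the 0x74
 * timer setting, whose units the driver does not spell out) and the
 * 64-slot hardware FIFO split as 52 RX / 12 TX entries.
 */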
hisi_femac_core_reset(priv); + + priv->phy_rst = devm_reset_control_get(dev, "phy"); + if (IS_ERR(priv->phy_rst)) { + priv->phy_rst = NULL; + } else { + ret = of_property_read_u32_array(node, + PHY_RESET_DELAYS_PROPERTY, + priv->phy_reset_delays, + DELAYS_NUM); + if (ret) + goto out_disable_clk; + hisi_femac_phy_reset(priv); + } + + phy = of_phy_get_and_connect(ndev, node, hisi_femac_adjust_link); + if (!phy) { + dev_err(dev, "connect to PHY failed!\n"); + ret = -ENODEV; + goto out_disable_clk; + } + + phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n", + (unsigned long)phy->phy_id, + phy_modes(phy->interface)); + + mac_addr = of_get_mac_address(node); + if (mac_addr) + ether_addr_copy(ndev->dev_addr, mac_addr); + if (!is_valid_ether_addr(ndev->dev_addr)) { + eth_hw_addr_random(ndev); + dev_warn(dev, "using random MAC address %pM\n", + ndev->dev_addr); + } + + ndev->watchdog_timeo = 6 * HZ; + ndev->priv_flags |= IFF_UNICAST_FLT; + ndev->netdev_ops = &hisi_femac_netdev_ops; + ndev->ethtool_ops = &hisi_femac_ethtools_ops; + netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT); + SET_NETDEV_DEV(ndev, &pdev->dev); + + hisi_femac_port_init(priv); + + ret = hisi_femac_init_tx_and_rx_queues(priv); + if (ret) + goto out_disconnect_phy; + + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq <= 0) { + dev_err(dev, "No irq resource\n"); + ret = -ENODEV; + goto out_disconnect_phy; + } + + ret = devm_request_irq(dev, ndev->irq, hisi_femac_interrupt, + IRQF_SHARED, pdev->name, ndev); + if (ret) { + dev_err(dev, "devm_request_irq %d failed!\n", ndev->irq); + goto out_disconnect_phy; + } + + ret = register_netdev(ndev); + if (ret) { + dev_err(dev, "register_netdev failed!\n"); + goto out_disconnect_phy; + } + + return ret; + +out_disconnect_phy: + netif_napi_del(&priv->napi); + phy_disconnect(phy); +out_disable_clk: + clk_disable_unprepare(priv->clk); +out_free_netdev: + free_netdev(ndev); + + return ret; +} + +static int hisi_femac_drv_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct hisi_femac_priv *priv = netdev_priv(ndev); + + netif_napi_del(&priv->napi); + unregister_netdev(ndev); + + phy_disconnect(ndev->phydev); + clk_disable_unprepare(priv->clk); + free_netdev(ndev); + + return 0; +} + +#ifdef CONFIG_PM +int hisi_femac_drv_suspend(struct platform_device *pdev, + pm_message_t state) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct hisi_femac_priv *priv = netdev_priv(ndev); + + disable_irq(ndev->irq); + if (netif_running(ndev)) { + hisi_femac_net_close(ndev); + netif_device_detach(ndev); + } + + clk_disable_unprepare(priv->clk); + + return 0; +} + +int hisi_femac_drv_resume(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct hisi_femac_priv *priv = netdev_priv(ndev); + + clk_prepare_enable(priv->clk); + if (priv->phy_rst) + hisi_femac_phy_reset(priv); + + if (netif_running(ndev)) { + hisi_femac_port_init(priv); + hisi_femac_net_open(ndev); + netif_device_attach(ndev); + } + enable_irq(ndev->irq); + + return 0; +} +#endif + +static const struct of_device_id hisi_femac_match[] = { + {.compatible = "hisilicon,hisi-femac-v1",}, + {.compatible = "hisilicon,hisi-femac-v2",}, + {.compatible = "hisilicon,hi3516cv300-femac",}, + {}, +}; + +MODULE_DEVICE_TABLE(of, hisi_femac_match); + +static struct platform_driver hisi_femac_driver = { + .driver = { + .name = "hisi-femac", + .of_match_table = hisi_femac_match, + }, + .probe = hisi_femac_drv_probe, + .remove = 
hisi_femac_drv_remove, +#ifdef CONFIG_PM + .suspend = hisi_femac_drv_suspend, + .resume = hisi_femac_drv_resume, +#endif +}; + +module_platform_driver(hisi_femac_driver); + +MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC driver"); +MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:hisi-femac"); diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index b9f2ea593..275618bb4 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -218,7 +218,6 @@ struct hix5hd2_priv { struct device *dev; struct net_device *netdev; - struct phy_device *phy; struct device_node *phy_node; phy_interface_t phy_mode; @@ -402,7 +401,7 @@ static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p) static void hix5hd2_adjust_link(struct net_device *dev) { struct hix5hd2_priv *priv = netdev_priv(dev); - struct phy_device *phy = priv->phy; + struct phy_device *phy = dev->phydev; if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) { hix5hd2_config_port(dev, phy->speed, phy->duplex); @@ -679,6 +678,7 @@ static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv *priv) static int hix5hd2_net_open(struct net_device *dev) { struct hix5hd2_priv *priv = netdev_priv(dev); + struct phy_device *phy; int ret; ret = clk_prepare_enable(priv->clk); @@ -687,12 +687,12 @@ static int hix5hd2_net_open(struct net_device *dev) return ret; } - priv->phy = of_phy_connect(dev, priv->phy_node, - &hix5hd2_adjust_link, 0, priv->phy_mode); - if (!priv->phy) + phy = of_phy_connect(dev, priv->phy_node, + &hix5hd2_adjust_link, 0, priv->phy_mode); + if (!phy) return -ENODEV; - phy_start(priv->phy); + phy_start(phy); hix5hd2_hw_init(priv); hix5hd2_rx_refill(priv); @@ -716,9 +716,9 @@ static int hix5hd2_net_close(struct net_device *dev) netif_stop_queue(dev); hix5hd2_free_dma_desc_rings(priv); - if (priv->phy) { - phy_stop(priv->phy); - phy_disconnect(priv->phy); + if (dev->phydev) { + phy_stop(dev->phydev); + phy_disconnect(dev->phydev); } clk_disable_unprepare(priv->clk); @@ -750,32 +750,10 @@ static const struct net_device_ops hix5hd2_netdev_ops = { .ndo_set_mac_address = hix5hd2_net_set_mac_address, }; -static int hix5hd2_get_settings(struct net_device *net_dev, - struct ethtool_cmd *cmd) -{ - struct hix5hd2_priv *priv = netdev_priv(net_dev); - - if (!priv->phy) - return -ENODEV; - - return phy_ethtool_gset(priv->phy, cmd); -} - -static int hix5hd2_set_settings(struct net_device *net_dev, - struct ethtool_cmd *cmd) -{ - struct hix5hd2_priv *priv = netdev_priv(net_dev); - - if (!priv->phy) - return -ENODEV; - - return phy_ethtool_sset(priv->phy, cmd); -} - static struct ethtool_ops hix5hd2_ethtools_ops = { .get_link = ethtool_op_get_link, - .get_settings = hix5hd2_get_settings, - .set_settings = hix5hd2_set_settings, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int hix5hd2_mdio_wait_ready(struct mii_bus *bus) diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 3bfe36f94..c54c6fac0 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -96,16 +96,22 @@ static int __ae_match(struct device *dev, const void *data) { struct hnae_ae_dev *hdev = cls_to_ae_dev(dev); - return hdev->dev->of_node == data; + if (dev_of_node(hdev->dev)) + return (data == &hdev->dev->of_node->fwnode); + else if 
(is_acpi_node(hdev->dev->fwnode)) + return (data == hdev->dev->fwnode); + + dev_err(dev, "__ae_match cannot read cfg data from OF or acpi\n"); + return 0; } -static struct hnae_ae_dev *find_ae(const struct device_node *ae_node) +static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode) { struct device *dev; - WARN_ON(!ae_node); + WARN_ON(!fwnode); - dev = class_find_device(hnae_class, NULL, ae_node, __ae_match); + dev = class_find_device(hnae_class, NULL, fwnode, __ae_match); return dev ? cls_to_ae_dev(dev) : NULL; } @@ -312,7 +318,7 @@ EXPORT_SYMBOL(hnae_reinit_handle); * return handle ptr or ERR_PTR */ struct hnae_handle *hnae_get_handle(struct device *owner_dev, - const struct device_node *ae_node, + const struct fwnode_handle *fwnode, u32 port_id, struct hnae_buf_ops *bops) { @@ -321,7 +327,7 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev, int i, j; int ret; - dev = find_ae(ae_node); + dev = find_ae(fwnode); if (!dev) return ERR_PTR(-ENODEV); @@ -394,7 +400,6 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner) if (!hdev->ops || !hdev->ops->get_handle || !hdev->ops->toggle_ring_irq || - !hdev->ops->toggle_queue_status || !hdev->ops->get_status || !hdev->ops->adjust_link) return -EINVAL; diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index e8d36aaea..e093cbf26 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -27,6 +27,7 @@ * "cb" means control block */ +#include <linux/acpi.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/module.h> @@ -362,6 +363,14 @@ enum hnae_port_type { HNAE_PORT_DEBUG }; +/* mac media type */ +enum hnae_media_type { + HNAE_MEDIA_TYPE_UNKNOWN = 0, + HNAE_MEDIA_TYPE_FIBER, + HNAE_MEDIA_TYPE_COPPER, + HNAE_MEDIA_TYPE_BACKPLANE, +}; + /* This struct defines the operation on the handle. 
* * get_handle(): (mandatory) @@ -453,7 +462,6 @@ struct hnae_ae_ops { int (*get_info)(struct hnae_handle *handle, u8 *auto_neg, u16 *speed, u8 *duplex); void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val); - void (*toggle_queue_status)(struct hnae_queue *queue, u32 val); void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex); int (*set_loopback)(struct hnae_handle *handle, enum hnae_loop loop_mode, int en); @@ -472,6 +480,11 @@ struct hnae_ae_ops { int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout); int (*set_coalesce_frames)(struct hnae_handle *handle, u32 coalesce_frames); + void (*get_coalesce_range)(struct hnae_handle *handle, + u32 *tx_frames_low, u32 *rx_frames_low, + u32 *tx_frames_high, u32 *rx_frames_high, + u32 *tx_usecs_low, u32 *rx_usecs_low, + u32 *tx_usecs_high, u32 *rx_usecs_high); void (*set_promisc_mode)(struct hnae_handle *handle, u32 en); int (*get_mac_addr)(struct hnae_handle *handle, void **p); int (*set_mac_addr)(struct hnae_handle *handle, void *p); @@ -512,7 +525,7 @@ struct hnae_ae_dev { struct hnae_handle { struct device *owner_dev; /* the device which make use of this handle */ struct hnae_ae_dev *dev; /* the device who provides this handle */ - struct device_node *phy_node; + struct phy_device *phy_dev; phy_interface_t phy_if; u32 if_support; int q_num; @@ -520,6 +533,7 @@ struct hnae_handle { u32 eport_id; u32 dport_id; /* v2 tx bd should fill the dport_id */ enum hnae_port_type port_type; + enum hnae_media_type media_type; struct list_head node; /* list to hnae_ae_dev->handle_list */ struct hnae_buf_ops *bops; /* operation for the buffer */ struct hnae_queue **qs; /* array base of all queues */ @@ -528,7 +542,7 @@ struct hnae_handle { #define ring_to_dev(ring) ((ring)->q->dev->dev) struct hnae_handle *hnae_get_handle(struct device *owner_dev, - const struct device_node *ae_node, + const struct fwnode_handle *fwnode, u32 port_id, struct hnae_buf_ops *bops); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 7a757e88c..e28d96099 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -131,9 +131,10 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, vf_cb->mac_cb = dsaf_dev->mac_cb[port_id]; ae_handle->phy_if = vf_cb->mac_cb->phy_if; - ae_handle->phy_node = vf_cb->mac_cb->phy_node; + ae_handle->phy_dev = vf_cb->mac_cb->phy_dev; ae_handle->if_support = vf_cb->mac_cb->if_support; ae_handle->port_type = vf_cb->mac_cb->mac_type; + ae_handle->media_type = vf_cb->mac_cb->media_type; ae_handle->dport_id = port_id; return ae_handle; @@ -247,12 +248,21 @@ static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable) static int hns_ae_start(struct hnae_handle *handle) { int ret; + int k; struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); ret = hns_mac_vm_config_bc_en(mac_cb, 0, true); if (ret) return ret; + for (k = 0; k < handle->q_num; k++) { + if (AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) + hns_rcb_int_clr_hw(handle->qs[k], + RCB_INT_FLAG_TX | RCB_INT_FLAG_RX); + else + hns_rcbv2_int_clr_hw(handle->qs[k], + RCB_INT_FLAG_TX | RCB_INT_FLAG_RX); + } hns_ae_ring_enable_all(handle, 1); msleep(100); @@ -313,18 +323,6 @@ static void hns_aev2_toggle_ring_irq(struct hnae_ring *ring, u32 mask) hns_rcbv2_int_ctrl_hw(ring->q, flag, mask); } -static void hns_ae_toggle_queue_status(struct hnae_queue *queue, u32 val) -{ - struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(queue->dev); - - 
if (AE_IS_VER1(dsaf_dev->dsaf_ver)) - hns_rcb_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX); - else - hns_rcbv2_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX); - - hns_rcb_start(queue, val); -} - static int hns_ae_get_link_status(struct hnae_handle *handle) { u32 link_status; @@ -465,6 +463,30 @@ static int hns_ae_set_coalesce_frames(struct hnae_handle *handle, ring_pair->port_id_in_comm, coalesce_frames); } +static void hns_ae_get_coalesce_range(struct hnae_handle *handle, + u32 *tx_frames_low, u32 *rx_frames_low, + u32 *tx_frames_high, u32 *rx_frames_high, + u32 *tx_usecs_low, u32 *rx_usecs_low, + u32 *tx_usecs_high, u32 *rx_usecs_high) +{ + struct dsaf_device *dsaf_dev; + + dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); + + *tx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES; + *rx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES; + *tx_frames_high = + (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ? + HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1; + *rx_frames_high = + (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ? + HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1; + *tx_usecs_low = 0; + *rx_usecs_low = 0; + *tx_usecs_high = HNS_RCB_MAX_COALESCED_USECS; + *rx_usecs_high = HNS_RCB_MAX_COALESCED_USECS; +} + void hns_ae_update_stats(struct hnae_handle *handle, struct net_device_stats *net_stats) { @@ -587,6 +609,7 @@ void hns_ae_get_strings(struct hnae_handle *handle, int idx; struct hns_mac_cb *mac_cb; struct hns_ppe_cb *ppe_cb; + struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); u8 *p = data; struct hnae_vf_cb *vf_cb; @@ -609,13 +632,14 @@ void hns_ae_get_strings(struct hnae_handle *handle, p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset); if (mac_cb->mac_type == HNAE_PORT_SERVICE) - hns_dsaf_get_strings(stringset, p, port); + hns_dsaf_get_strings(stringset, p, port, dsaf_dev); } int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset) { u32 sset_count = 0; struct hns_mac_cb *mac_cb; + struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev); assert(handle); @@ -626,7 +650,7 @@ int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset) sset_count += hns_mac_get_sset_count(mac_cb, stringset); if (mac_cb->mac_type == HNAE_PORT_SERVICE) - sset_count += hns_dsaf_get_sset_count(stringset); + sset_count += hns_dsaf_get_sset_count(dsaf_dev, stringset); return sset_count; } @@ -637,13 +661,15 @@ static int hns_ae_config_loopback(struct hnae_handle *handle, int ret; struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); + struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; switch (loop) { case MAC_INTERNALLOOP_PHY: ret = 0; break; case MAC_INTERNALLOOP_SERDES: - ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en); + ret = dsaf_dev->misc_op->cfg_serdes_loopback(vf_cb->mac_cb, + !!en); break; case MAC_INTERNALLOOP_MAC: ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en); @@ -780,7 +806,6 @@ static struct hnae_ae_ops hns_dsaf_ops = { .stop = hns_ae_stop, .reset = hns_ae_reset, .toggle_ring_irq = hns_ae_toggle_ring_irq, - .toggle_queue_status = hns_ae_toggle_queue_status, .get_status = hns_ae_get_link_status, .get_info = hns_ae_get_mac_info, .adjust_link = hns_ae_adjust_link, @@ -794,6 +819,7 @@ static struct hnae_ae_ops hns_dsaf_ops = { .get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames, .set_coalesce_usecs = hns_ae_set_coalesce_usecs, .set_coalesce_frames = hns_ae_set_coalesce_frames, + .get_coalesce_range = hns_ae_get_coalesce_range, 
.set_promisc_mode = hns_ae_set_promisc_mode, .set_mac_addr = hns_ae_set_mac_address, .set_mc_addr = hns_ae_set_multicast_one, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 44abb08de..1e1eb9299 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -17,7 +17,7 @@ static const struct mac_stats_string g_gmac_stats_string[] = { {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)}, {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)}, {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)}, - {"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, + {"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)}, {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)}, {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)}, @@ -110,7 +110,7 @@ static void hns_gmac_free(void *mac_drv) u32 mac_id = drv->mac_id; - hns_dsaf_ge_srst_by_port(dsaf_dev, mac_id, 0); + dsaf_dev->misc_op->ge_srst(dsaf_dev, mac_id, 0); } static void hns_gmac_set_tx_auto_pause_frames(void *mac_drv, u16 newval) @@ -317,9 +317,9 @@ static void hns_gmac_init(void *mac_drv) port = drv->mac_id; - hns_dsaf_ge_srst_by_port(dsaf_dev, port, 0); + dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 0); mdelay(10); - hns_dsaf_ge_srst_by_port(dsaf_dev, port, 1); + dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 1); mdelay(10); hns_gmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX); hns_gmac_tx_loop_pkt_dis(mac_drv); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 611581fcc..5c8afe1a5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -7,6 +7,7 @@ * (at your option) any later version. 
*/ +#include <linux/acpi.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> @@ -15,7 +16,8 @@ #include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/phy_fixed.h> +#include <linux/of_mdio.h> +#include <linux/phy.h> #include <linux/platform_device.h> #include "hns_dsaf_main.h" @@ -54,20 +56,6 @@ static const enum mac_mode g_mac_mode_1000[] = { [PHY_INTERFACE_MODE_RTBI] = MAC_MODE_RTBI_1000 }; -static enum mac_mode hns_mac_dev_to_enet_if(const struct hns_mac_cb *mac_cb) -{ - switch (mac_cb->max_speed) { - case MAC_SPEED_100: - return g_mac_mode_100[mac_cb->phy_if]; - case MAC_SPEED_1000: - return g_mac_mode_1000[mac_cb->phy_if]; - case MAC_SPEED_10000: - return MAC_MODE_XGMII_10000; - default: - return MAC_MODE_MII_100; - } -} - static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb) { switch (mac_cb->max_speed) { @@ -94,7 +82,7 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status) else *link_status = 0; - ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt); + ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, &sfp_prsnt); if (!ret) *link_status = *link_status && sfp_prsnt; @@ -132,7 +120,6 @@ void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex) mac_cb->speed = speed; mac_cb->half_duplex = !duplex; - mac_ctrl_drv->mac_mode = hns_mac_dev_to_enet_if(mac_cb); if (mac_ctrl_drv->adjust_link) { ret = mac_ctrl_drv->adjust_link(mac_ctrl_drv, @@ -511,7 +498,7 @@ void hns_mac_stop(struct hns_mac_cb *mac_cb) mac_ctrl_drv->mac_en_flg = 0; mac_cb->link = 0; - cpld_led_reset(mac_cb); + mac_cb->dsaf_dev->misc_op->cpld_reset_led(mac_cb); } /** @@ -637,6 +624,127 @@ free_mac_drv: return ret; } +static int +hns_mac_phy_parse_addr(struct device *dev, struct fwnode_handle *fwnode) +{ + u32 addr; + int ret; + + ret = fwnode_property_read_u32(fwnode, "phy-addr", &addr); + if (ret) { + dev_err(dev, "has invalid PHY address ret:%d\n", ret); + return ret; + } + + if (addr >= PHY_MAX_ADDR) { + dev_err(dev, "PHY address %i is too large\n", addr); + return -EINVAL; + } + + return addr; +} + +static int hns_mac_phydev_match(struct device *dev, void *fwnode) +{ + return dev->fwnode == fwnode; +} + +static struct +platform_device *hns_mac_find_platform_device(struct fwnode_handle *fwnode) +{ + struct device *dev; + + dev = bus_find_device(&platform_bus_type, NULL, + fwnode, hns_mac_phydev_match); + return dev ? 
to_platform_device(dev) : NULL; +} + +static int +hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb, + u32 addr) +{ + struct phy_device *phy; + const char *phy_type; + bool is_c45; + int rc; + + rc = fwnode_property_read_string(mac_cb->fw_port, + "phy-mode", &phy_type); + if (rc < 0) + return rc; + + if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_XGMII))) + is_c45 = 1; + else if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_SGMII))) + is_c45 = 0; + else + return -ENODATA; + + phy = get_phy_device(mdio, addr, is_c45); + if (!phy || IS_ERR(phy)) + return -EIO; + + if (mdio->irq) + phy->irq = mdio->irq[addr]; + + /* All data is now stored in the phy struct; + * register it + */ + rc = phy_device_register(phy); + if (rc) { + phy_device_free(phy); + return -ENODEV; + } + + mac_cb->phy_dev = phy; + + dev_dbg(&mdio->dev, "registered phy at address %i\n", addr); + + return 0; +} + +static void hns_mac_register_phy(struct hns_mac_cb *mac_cb) +{ + struct acpi_reference_args args; + struct platform_device *pdev; + struct mii_bus *mii_bus; + int rc; + int addr; + + /* Loop over the child nodes and register a phy_device for each one */ + if (!to_acpi_device_node(mac_cb->fw_port)) + return; + + rc = acpi_node_get_property_reference( + mac_cb->fw_port, "mdio-node", 0, &args); + if (rc) + return; + + addr = hns_mac_phy_parse_addr(mac_cb->dev, mac_cb->fw_port); + if (addr < 0) + return; + + /* dev address in adev */ + pdev = hns_mac_find_platform_device(acpi_fwnode_handle(args.adev)); + mii_bus = platform_get_drvdata(pdev); + rc = hns_mac_register_phydev(mii_bus, mac_cb, addr); + if (!rc) + dev_dbg(mac_cb->dev, "mac%d register phy addr:%d\n", + mac_cb->mac_id, addr); +} + +#define MAC_MEDIA_TYPE_MAX_LEN 16 + +static const struct { + enum hnae_media_type value; + const char *name; +} media_type_defs[] = { + {HNAE_MEDIA_TYPE_UNKNOWN, "unknown" }, + {HNAE_MEDIA_TYPE_FIBER, "fiber" }, + {HNAE_MEDIA_TYPE_COPPER, "copper" }, + {HNAE_MEDIA_TYPE_BACKPLANE, "backplane" }, +}; + /** *hns_mac_get_info - get mac information from device node *@mac_cb: mac device @@ -645,13 +753,16 @@ free_mac_drv: */ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) { - struct device_node *np = mac_cb->dev->of_node; + struct device_node *np; struct regmap *syscon; struct of_phandle_args cpld_args; + const char *media_type; + u32 i; u32 ret; mac_cb->link = false; mac_cb->half_duplex = false; + mac_cb->media_type = HNAE_MEDIA_TYPE_UNKNOWN; mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if]; mac_cb->max_speed = mac_cb->speed; @@ -672,62 +783,98 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) * from dsaf node */ if (!mac_cb->fw_port) { - mac_cb->phy_node = of_parse_phandle(np, "phy-handle", - mac_cb->mac_id); - if (mac_cb->phy_node) + np = of_parse_phandle(mac_cb->dev->of_node, "phy-handle", + mac_cb->mac_id); + mac_cb->phy_dev = of_phy_find_device(np); + if (mac_cb->phy_dev) { + /* refcount is held by of_phy_find_device() + * if the phy_dev is found + */ + put_device(&mac_cb->phy_dev->mdio.dev); + dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", - mac_cb->mac_id, mac_cb->phy_node->name); - return 0; - } - if (!is_of_node(mac_cb->fw_port)) - return -EINVAL; - /* parse property from port subnode in dsaf */ - mac_cb->phy_node = of_parse_phandle(to_of_node(mac_cb->fw_port), - "phy-handle", 0); - if (mac_cb->phy_node) - dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", - mac_cb->mac_id, mac_cb->phy_node->name); - syscon = syscon_node_to_regmap( - of_parse_phandle(to_of_node(mac_cb->fw_port), - "serdes-syscon", 
0)); - if (IS_ERR_OR_NULL(syscon)) { - dev_err(mac_cb->dev, "serdes-syscon is needed!\n"); - return -EINVAL; - } - mac_cb->serdes_ctrl = syscon; + mac_cb->mac_id, np->name); + } + of_node_put(np); - ret = fwnode_property_read_u32(mac_cb->fw_port, - "port-rst-offset", - &mac_cb->port_rst_off); - if (ret) { - dev_dbg(mac_cb->dev, - "mac%d port-rst-offset not found, use default value.\n", - mac_cb->mac_id); + return 0; } - ret = fwnode_property_read_u32(mac_cb->fw_port, - "port-mode-offset", - &mac_cb->port_mode_off); - if (ret) { - dev_dbg(mac_cb->dev, - "mac%d port-mode-offset not found, use default value.\n", - mac_cb->mac_id); - } + if (is_of_node(mac_cb->fw_port)) { + /* parse property from port subnode in dsaf */ + np = of_parse_phandle(to_of_node(mac_cb->fw_port), + "phy-handle", 0); + mac_cb->phy_dev = of_phy_find_device(np); + if (mac_cb->phy_dev) { + /* refcount is held by of_phy_find_device() + * if the phy_dev is found + */ + put_device(&mac_cb->phy_dev->mdio.dev); + dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", + mac_cb->mac_id, np->name); + } + of_node_put(np); - ret = of_parse_phandle_with_fixed_args(to_of_node(mac_cb->fw_port), - "cpld-syscon", 1, 0, &cpld_args); - if (ret) { - dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n", - mac_cb->mac_id); - mac_cb->cpld_ctrl = NULL; - } else { - syscon = syscon_node_to_regmap(cpld_args.np); + np = of_parse_phandle(to_of_node(mac_cb->fw_port), + "serdes-syscon", 0); + syscon = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR_OR_NULL(syscon)) { - dev_dbg(mac_cb->dev, "no cpld-syscon found!\n"); + dev_err(mac_cb->dev, "serdes-syscon is needed!\n"); + return -EINVAL; + } + mac_cb->serdes_ctrl = syscon; + + ret = fwnode_property_read_u32(mac_cb->fw_port, + "port-rst-offset", + &mac_cb->port_rst_off); + if (ret) { + dev_dbg(mac_cb->dev, + "mac%d port-rst-offset not found, use default value.\n", + mac_cb->mac_id); + } + + ret = fwnode_property_read_u32(mac_cb->fw_port, + "port-mode-offset", + &mac_cb->port_mode_off); + if (ret) { + dev_dbg(mac_cb->dev, + "mac%d port-mode-offset not found, use default value.\n", + mac_cb->mac_id); + } + + ret = of_parse_phandle_with_fixed_args( + to_of_node(mac_cb->fw_port), "cpld-syscon", 1, 0, + &cpld_args); + if (ret) { + dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n", + mac_cb->mac_id); mac_cb->cpld_ctrl = NULL; } else { - mac_cb->cpld_ctrl = syscon; - mac_cb->cpld_ctrl_reg = cpld_args.args[0]; + syscon = syscon_node_to_regmap(cpld_args.np); + if (IS_ERR_OR_NULL(syscon)) { + dev_dbg(mac_cb->dev, "no cpld-syscon found!\n"); + mac_cb->cpld_ctrl = NULL; + } else { + mac_cb->cpld_ctrl = syscon; + mac_cb->cpld_ctrl_reg = cpld_args.args[0]; + } + } + } else if (is_acpi_node(mac_cb->fw_port)) { + hns_mac_register_phy(mac_cb); + } else { + dev_err(mac_cb->dev, "mac%d cannot find phy node\n", + mac_cb->mac_id); + } + + if (!fwnode_property_read_string(mac_cb->fw_port, "media-type", + &media_type)) { + for (i = 0; i < ARRAY_SIZE(media_type_defs); i++) { + if (!strncmp(media_type_defs[i].name, media_type, + MAC_MEDIA_TYPE_MAX_LEN)) { + mac_cb->media_type = media_type_defs[i].value; + break; + } } } @@ -790,7 +937,7 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb) else mac_cb->mac_type = HNAE_PORT_DEBUG; - mac_cb->phy_if = hns_mac_get_phy_if(mac_cb); + mac_cb->phy_if = dsaf_dev->misc_op->get_phy_if(mac_cb); ret = hns_mac_get_mode(mac_cb->phy_if); if (ret < 0) { @@ -805,7 +952,7 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb) if (ret) return 
ret; - cpld_led_reset(mac_cb); + mac_cb->dsaf_dev->misc_op->cpld_reset_led(mac_cb); mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx); return 0; @@ -892,7 +1039,7 @@ void hns_mac_uninit(struct dsaf_device *dsaf_dev) int max_port_num = hns_mac_get_max_port_num(dsaf_dev); for (i = 0; i < max_port_num; i++) { - cpld_led_reset(dsaf_dev->mac_cb[i]); + dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]); dsaf_dev->mac_cb[i] = NULL; } } @@ -975,7 +1122,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb) nic_data = 0; mac_cb->txpkt_for_led = mac_cb->hw_stats.tx_good_pkts; mac_cb->rxpkt_for_led = mac_cb->hw_stats.rx_good_pkts; - hns_cpld_set_led(mac_cb, (int)mac_cb->link, + mac_cb->dsaf_dev->misc_op->cpld_set_led(mac_cb, (int)mac_cb->link, mac_cb->speed, nic_data); } @@ -985,5 +1132,5 @@ int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb, if (!mac_cb || !mac_cb->cpld_ctrl) return 0; - return cpld_set_led_id(mac_cb, status); + return mac_cb->dsaf_dev->misc_op->cpld_set_led_id(mac_cb, status); } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index 97ce9a750..4cbdf14f5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -335,10 +335,11 @@ struct hns_mac_cb { u64 txpkt_for_led; u64 rxpkt_for_led; enum hnae_port_type mac_type; + enum hnae_media_type media_type; phy_interface_t phy_if; enum hnae_loop loop_mode; - struct device_node *phy_node; + struct phy_device *phy_dev; struct mac_hw_stats hw_stats; }; @@ -448,8 +449,6 @@ int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en); int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu); int hns_mac_get_port_info(struct hns_mac_cb *mac_cb, u8 *auto_neg, u16 *speed, u8 *duplex); -phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb); -int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en); int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb, enum hnae_loop loop, int en); void hns_mac_update_stats(struct hns_mac_cb *mac_cb); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 1c2ddb25e..afb5daa37 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -7,6 +7,7 @@ * (at your option) any later version. 
*/ +#include <linux/acpi.h> #include <linux/device.h> #include <linux/init.h> #include <linux/interrupt.h> @@ -24,6 +25,7 @@ #include "hns_dsaf_main.h" #include "hns_dsaf_ppe.h" #include "hns_dsaf_rcb.h" +#include "hns_dsaf_misc.h" const char *g_dsaf_mode_match[DSAF_MODE_MAX] = { [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf", @@ -32,6 +34,13 @@ const char *g_dsaf_mode_match[DSAF_MODE_MAX] = { [DSAF_MODE_DISABLE_SP] = "single-port", }; +static const struct acpi_device_id hns_dsaf_acpi_match[] = { + { "HISI00B1", 0 }, + { "HISI00B2", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, hns_dsaf_acpi_match); + int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) { int ret, i; @@ -42,15 +51,27 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) const char *mode_str; struct regmap *syscon; struct resource *res; - struct device_node *np = dsaf_dev->dev->of_node; + struct device_node *np = dsaf_dev->dev->of_node, *np_temp; struct platform_device *pdev = to_platform_device(dsaf_dev->dev); - if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1")) - dsaf_dev->dsaf_ver = AE_VERSION_1; - else - dsaf_dev->dsaf_ver = AE_VERSION_2; + if (dev_of_node(dsaf_dev->dev)) { + if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1")) + dsaf_dev->dsaf_ver = AE_VERSION_1; + else + dsaf_dev->dsaf_ver = AE_VERSION_2; + } else if (is_acpi_node(dsaf_dev->dev->fwnode)) { + if (acpi_dev_found(hns_dsaf_acpi_match[0].id)) + dsaf_dev->dsaf_ver = AE_VERSION_1; + else if (acpi_dev_found(hns_dsaf_acpi_match[1].id)) + dsaf_dev->dsaf_ver = AE_VERSION_2; + else + return -ENXIO; + } else { + dev_err(dsaf_dev->dev, "cannot get cfg data from of or acpi\n"); + return -ENXIO; + } - ret = of_property_read_string(np, "mode", &mode_str); + ret = device_property_read_string(dsaf_dev->dev, "mode", &mode_str); if (ret) { dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret); return ret; @@ -80,32 +101,41 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) else dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE; - syscon = syscon_node_to_regmap( - of_parse_phandle(np, "subctrl-syscon", 0)); - if (IS_ERR_OR_NULL(syscon)) { - res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); - if (!res) { - dev_err(dsaf_dev->dev, "subctrl info is needed!\n"); - return -ENOMEM; - } - dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, res); - if (!dsaf_dev->sc_base) { - dev_err(dsaf_dev->dev, "subctrl can not map!\n"); - return -ENOMEM; - } + if (dev_of_node(dsaf_dev->dev)) { + np_temp = of_parse_phandle(np, "subctrl-syscon", 0); + syscon = syscon_node_to_regmap(np_temp); + of_node_put(np_temp); + if (IS_ERR_OR_NULL(syscon)) { + res = platform_get_resource(pdev, IORESOURCE_MEM, + res_idx++); + if (!res) { + dev_err(dsaf_dev->dev, "subctrl info is needed!\n"); + return -ENOMEM; + } - res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); - if (!res) { - dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n"); - return -ENOMEM; - } - dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev, res); - if (!dsaf_dev->sds_base) { - dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n"); - return -ENOMEM; + dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, + res); + if (IS_ERR(dsaf_dev->sc_base)) { + dev_err(dsaf_dev->dev, "subctrl can not map!\n"); + return PTR_ERR(dsaf_dev->sc_base); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, + res_idx++); + if (!res) { + dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n"); + return -ENOMEM; + } + + dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev, + res); + if (IS_ERR(dsaf_dev->sds_base)) { + 
dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n"); + return PTR_ERR(dsaf_dev->sds_base); + } + } else { + dsaf_dev->sub_ctrl = syscon; } - } else { - dsaf_dev->sub_ctrl = syscon; } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe-base"); @@ -117,9 +147,9 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) } } dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res); - if (!dsaf_dev->ppe_base) { + if (IS_ERR(dsaf_dev->ppe_base)) { dev_err(dsaf_dev->dev, "ppe-base resource can not map!\n"); - return -ENOMEM; + return PTR_ERR(dsaf_dev->ppe_base); } dsaf_dev->ppe_paddr = res->start; @@ -136,33 +166,34 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) } } dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res); - if (!dsaf_dev->io_base) { + if (IS_ERR(dsaf_dev->io_base)) { dev_err(dsaf_dev->dev, "dsaf-base resource can not map!\n"); - return -ENOMEM; + return PTR_ERR(dsaf_dev->io_base); } } - ret = of_property_read_u32(np, "desc-num", &desc_num); + ret = device_property_read_u32(dsaf_dev->dev, "desc-num", &desc_num); if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT || desc_num > HNS_DSAF_MAX_DESC_CNT) { dev_err(dsaf_dev->dev, "get desc-num(%d) fail, ret=%d!\n", desc_num, ret); - goto unmap_base_addr; + return -EINVAL; } dsaf_dev->desc_num = desc_num; - ret = of_property_read_u32(np, "reset-field-offset", &reset_offset); + ret = device_property_read_u32(dsaf_dev->dev, "reset-field-offset", + &reset_offset); if (ret < 0) { dev_dbg(dsaf_dev->dev, "get reset-field-offset fail, ret=%d!\r\n", ret); } dsaf_dev->reset_offset = reset_offset; - ret = of_property_read_u32(np, "buf-size", &buf_size); + ret = device_property_read_u32(dsaf_dev->dev, "buf-size", &buf_size); if (ret < 0) { dev_err(dsaf_dev->dev, "get buf-size fail, ret=%d!\r\n", ret); - goto unmap_base_addr; + return ret; } dsaf_dev->buf_size = buf_size; @@ -170,41 +201,19 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) if (dsaf_dev->buf_size_type < 0) { dev_err(dsaf_dev->dev, "buf_size(%d) is wrong!\n", buf_size); - goto unmap_base_addr; + return -EINVAL; } + dsaf_dev->misc_op = hns_misc_op_get(dsaf_dev); + if (!dsaf_dev->misc_op) + return -ENOMEM; + if (!dma_set_mask_and_coherent(dsaf_dev->dev, DMA_BIT_MASK(64ULL))) dev_dbg(dsaf_dev->dev, "set mask to 64bit\n"); else dev_err(dsaf_dev->dev, "set mask to 64bit fail!\n"); return 0; - -unmap_base_addr: - if (dsaf_dev->io_base) - iounmap(dsaf_dev->io_base); - if (dsaf_dev->ppe_base) - iounmap(dsaf_dev->ppe_base); - if (dsaf_dev->sds_base) - iounmap(dsaf_dev->sds_base); - if (dsaf_dev->sc_base) - iounmap(dsaf_dev->sc_base); - return ret; -} - -static void hns_dsaf_free_cfg(struct dsaf_device *dsaf_dev) -{ - if (dsaf_dev->io_base) - iounmap(dsaf_dev->io_base); - - if (dsaf_dev->ppe_base) - iounmap(dsaf_dev->ppe_base); - - if (dsaf_dev->sds_base) - iounmap(dsaf_dev->sds_base); - - if (dsaf_dev->sc_base) - iounmap(dsaf_dev->sc_base); } /** @@ -508,10 +517,10 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev) o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, - DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110); + DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 48); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, - DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160); + DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 80); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); /* for no enable pfc mode */ @@ -519,29 +528,39 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev) o_sbm_bp_cfg = 
dsaf_read_dev(dsaf_dev, reg); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M, - DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128); + DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 192); dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M, - DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 192); + DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 240); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } /* PPE */ - reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M, - DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 10); - dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M, - DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 12); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + for (i = 0; i < DSAFV2_SBM_PPE_CHN; i++) { + reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_M, + DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_S, 2); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_M, + DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_S, 3); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M, + DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_S, 52); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + } + /* RoCEE */ for (i = 0; i < DASFV2_ROCEE_CRD_NUM; i++) { reg = DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i; o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M, - DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 2); - dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M, - DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 4); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M, + DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S, 2); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_M, + DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S, 4); dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } } @@ -852,6 +871,8 @@ static void hns_dsaf_single_line_tbl_cfg( struct dsaf_device *dsaf_dev, u32 address, struct dsaf_tbl_line_cfg *ptbl_line) { + spin_lock_bh(&dsaf_dev->tcam_lock); + /*Write Addr*/ hns_dsaf_tbl_line_addr_cfg(dsaf_dev, address); @@ -860,6 +881,8 @@ static void hns_dsaf_single_line_tbl_cfg( /*Write Plus*/ hns_dsaf_tbl_line_pul(dsaf_dev); + + spin_unlock_bh(&dsaf_dev->tcam_lock); } /** @@ -873,6 +896,8 @@ static void hns_dsaf_tcam_uc_cfg( struct dsaf_tbl_tcam_data *ptbl_tcam_data, struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast) { + spin_lock_bh(&dsaf_dev->tcam_lock); + /*Write Addr*/ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); /*Write Tcam Data*/ @@ -881,6 +906,8 @@ static void hns_dsaf_tcam_uc_cfg( hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, ptbl_tcam_ucast); /*Write Plus*/ hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev); + + spin_unlock_bh(&dsaf_dev->tcam_lock); } /** @@ -895,6 +922,8 @@ static void hns_dsaf_tcam_mc_cfg( struct dsaf_tbl_tcam_data *ptbl_tcam_data, struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast) { + spin_lock_bh(&dsaf_dev->tcam_lock); + /*Write Addr*/ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); /*Write Tcam Data*/ @@ -903,6 +932,8 @@ static void hns_dsaf_tcam_mc_cfg( hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, ptbl_tcam_mcast); /*Write Plus*/ hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev); + + spin_unlock_bh(&dsaf_dev->tcam_lock); } /** @@ -912,6 +943,8 @@ static void hns_dsaf_tcam_mc_cfg( */ static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address) { + spin_lock_bh(&dsaf_dev->tcam_lock); + /*Write Addr*/ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); @@ 
-924,6 +957,8 @@ static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address) /*Write Plus*/ hns_dsaf_tbl_tcam_mcast_pul(dsaf_dev); + + spin_unlock_bh(&dsaf_dev->tcam_lock); } /** @@ -941,6 +976,8 @@ static void hns_dsaf_tcam_uc_get( u32 tcam_read_data0; u32 tcam_read_data4; + spin_lock_bh(&dsaf_dev->tcam_lock); + /*Write Addr*/ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); @@ -949,9 +986,9 @@ static void hns_dsaf_tcam_uc_get( /*read tcam data*/ ptbl_tcam_data->tbl_tcam_data_high - = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG); - ptbl_tcam_data->tbl_tcam_data_low = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG); + ptbl_tcam_data->tbl_tcam_data_low + = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG); /*read tcam mcast*/ tcam_read_data0 = dsaf_read_dev(dsaf_dev, @@ -973,6 +1010,8 @@ static void hns_dsaf_tcam_uc_get( DSAF_TBL_UCAST_CFG1_OUT_PORT_S); ptbl_tcam_ucast->tbl_ucast_dvc = dsaf_get_bit(tcam_read_data0, DSAF_TBL_UCAST_CFG1_DVC_S); + + spin_unlock_bh(&dsaf_dev->tcam_lock); } /** @@ -989,6 +1028,8 @@ static void hns_dsaf_tcam_mc_get( { u32 data_tmp; + spin_lock_bh(&dsaf_dev->tcam_lock); + /*Write Addr*/ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address); @@ -997,9 +1038,9 @@ static void hns_dsaf_tcam_mc_get( /*read tcam data*/ ptbl_tcam_data->tbl_tcam_data_high = - dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG); - ptbl_tcam_data->tbl_tcam_data_low = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG); + ptbl_tcam_data->tbl_tcam_data_low = + dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG); /*read tcam mcast*/ ptbl_tcam_mcast->tbl_mcast_port_msk[0] = @@ -1019,6 +1060,8 @@ static void hns_dsaf_tcam_mc_get( ptbl_tcam_mcast->tbl_mcast_port_msk[4] = dsaf_get_field(data_tmp, DSAF_TBL_MCAST_CFG4_VM128_112_M, DSAF_TBL_MCAST_CFG4_VM128_112_S); + + spin_unlock_bh(&dsaf_dev->tcam_lock); } /** @@ -1080,10 +1123,10 @@ int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id, u32 en) { if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { - if (!en) + if (!en) { dev_err(dsaf_dev->dev, "dsafv1 can't close rx_pause!\n"); - - return -EINVAL; + return -EINVAL; + } } dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4, @@ -1295,9 +1338,9 @@ static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev) dev_dbg(dsaf_dev->dev, "hns_dsaf_init_hw begin %s !\n", dsaf_dev->ae_dev.name); - hns_dsaf_rst(dsaf_dev, 0); + dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0); mdelay(10); - hns_dsaf_rst(dsaf_dev, 1); + dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 1); hns_dsaf_comm_init(dsaf_dev); @@ -1325,7 +1368,7 @@ static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev) static void hns_dsaf_remove_hw(struct dsaf_device *dsaf_dev) { /*reset*/ - hns_dsaf_rst(dsaf_dev, 0); + dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0); } /** @@ -1343,6 +1386,7 @@ static int hns_dsaf_init(struct dsaf_device *dsaf_dev) if (HNS_DSAF_IS_DEBUG(dsaf_dev)) return 0; + spin_lock_init(&dsaf_dev->tcam_lock); ret = hns_dsaf_init_hw(dsaf_dev); if (ret) return ret; @@ -2088,11 +2132,24 @@ void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb) hns_dsaf_port_work_rate_cfg(dsaf_dev, mac_id, mode); } +static u32 hns_dsaf_get_inode_prio_reg(int index) +{ + int base_index, offset; + u32 base_addr = DSAF_INODE_IN_PRIO_PAUSE_BASE_REG; + + base_index = (index + 1) / DSAF_REG_PER_ZONE; + offset = (index + 1) % DSAF_REG_PER_ZONE; + + return base_addr + DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET * base_index + + DSAF_INODE_IN_PRIO_PAUSE_OFFSET * offset; +} + void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 
node_num) { struct dsaf_hw_stats *hw_stats = &dsaf_dev->hw_stats[node_num]; bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver); + int i; u32 reg_tmp; hw_stats->pad_drop += dsaf_read_dev(dsaf_dev, @@ -2127,6 +2184,18 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num) hw_stats->stp_drop += dsaf_read_dev(dsaf_dev, DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num); + /* pfc pause frame statistics stored in dsaf inode*/ + if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) { + for (i = 0; i < DSAF_PRIO_NR; i++) { + reg_tmp = hns_dsaf_get_inode_prio_reg(i); + hw_stats->rx_pfc[i] += dsaf_read_dev(dsaf_dev, + reg_tmp + 0x4 * (u64)node_num); + hw_stats->tx_pfc[i] += dsaf_read_dev(dsaf_dev, + DSAF_XOD_XGE_PFC_PRIO_CNT_BASE_REG + + DSAF_XOD_XGE_PFC_PRIO_CNT_OFFSET * i + + 0xF0 * (u64)node_num); + } + } hw_stats->tx_pkts += dsaf_read_dev(dsaf_dev, DSAF_XOD_RCVPKT_CNT_0_REG + 0x90 * (u64)node_num); } @@ -2464,38 +2533,53 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) p[i] = 0xdddddddd; } -static char *hns_dsaf_get_node_stats_strings(char *data, int node) +static char *hns_dsaf_get_node_stats_strings(char *data, int node, + struct dsaf_device *dsaf_dev) { char *buff = data; + int i; + bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver); snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; + if (node < DSAF_SERVICE_NW_NUM && !is_ver1) { + for (i = 0; i < DSAF_PRIO_NR; i++) { + snprintf(buff + 0 * ETH_GSTRING_LEN * DSAF_PRIO_NR, + ETH_GSTRING_LEN, "inod%d_pfc_prio%d_pkts", + node, i); + snprintf(buff + 1 * ETH_GSTRING_LEN * DSAF_PRIO_NR, + ETH_GSTRING_LEN, "onod%d_pfc_prio%d_pkts", + node, i); + buff += ETH_GSTRING_LEN; + } + buff += 1 * DSAF_PRIO_NR * ETH_GSTRING_LEN; + } snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node); - buff = buff + ETH_GSTRING_LEN; + buff += ETH_GSTRING_LEN; return buff; } @@ -2504,7 +2588,9 @@ static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data, int node_num) { u64 *p = 
data; + int i; struct dsaf_hw_stats *hw_stats = &ddev->hw_stats[node_num]; + bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver); p[0] = hw_stats->pad_drop; p[1] = hw_stats->man_pkts; @@ -2519,8 +2605,16 @@ static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data, p[10] = hw_stats->local_addr_false; p[11] = hw_stats->vlan_drop; p[12] = hw_stats->stp_drop; - p[13] = hw_stats->tx_pkts; + if (node_num < DSAF_SERVICE_NW_NUM && !is_ver1) { + for (i = 0; i < DSAF_PRIO_NR; i++) { + p[13 + i + 0 * DSAF_PRIO_NR] = hw_stats->rx_pfc[i]; + p[13 + i + 1 * DSAF_PRIO_NR] = hw_stats->tx_pfc[i]; + } + p[29] = hw_stats->tx_pkts; + return &p[30]; + } + p[13] = hw_stats->tx_pkts; return &p[14]; } @@ -2548,11 +2642,16 @@ void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port) *@stringset: type of values in data *return dsaf string name count */ -int hns_dsaf_get_sset_count(int stringset) +int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset) { - if (stringset == ETH_SS_STATS) - return DSAF_STATIC_NUM; + bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver); + if (stringset == ETH_SS_STATS) { + if (is_ver1) + return DSAF_STATIC_NUM; + else + return DSAF_V2_STATIC_NUM; + } return 0; } @@ -2562,7 +2661,8 @@ int hns_dsaf_get_sset_count(int stringset) *@data:strings name value *@port:port index */ -void hns_dsaf_get_strings(int stringset, u8 *data, int port) +void hns_dsaf_get_strings(int stringset, u8 *data, int port, + struct dsaf_device *dsaf_dev) { char *buff = (char *)data; int node = port; @@ -2571,11 +2671,11 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port) return; /* for ge/xge node info */ - buff = hns_dsaf_get_node_stats_strings(buff, node); + buff = hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev); /* for ppe node info */ node = port + DSAF_PPE_INODE_BASE; - (void)hns_dsaf_get_node_stats_strings(buff, node); + (void)hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev); } /** @@ -2611,7 +2711,7 @@ static int hns_dsaf_probe(struct platform_device *pdev) ret = hns_dsaf_init(dsaf_dev); if (ret) - goto free_cfg; + goto free_dev; ret = hns_mac_init(dsaf_dev); if (ret) @@ -2636,9 +2736,6 @@ uninit_mac: uninit_dsaf: hns_dsaf_free(dsaf_dev); -free_cfg: - hns_dsaf_free_cfg(dsaf_dev); - free_dev: hns_dsaf_free_dev(dsaf_dev); @@ -2661,8 +2758,6 @@ static int hns_dsaf_remove(struct platform_device *pdev) hns_dsaf_free(dsaf_dev); - hns_dsaf_free_cfg(dsaf_dev); - hns_dsaf_free_dev(dsaf_dev); return 0; @@ -2680,6 +2775,7 @@ static struct platform_driver g_dsaf_driver = { .driver = { .name = DSAF_DRV_NAME, .of_match_table = g_dsaf_match, + .acpi_match_table = hns_dsaf_acpi_match, }, }; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index f0502ba0a..1daf018d9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -39,6 +39,9 @@ struct hns_mac_cb; #define DSAF_DUMP_REGS_NUM 504 #define DSAF_STATIC_NUM 28 +#define DSAF_V2_STATIC_NUM 44 +#define DSAF_PRIO_NR 8 +#define DSAF_REG_PER_ZONE 3 #define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) #define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP) @@ -176,6 +179,8 @@ struct dsaf_hw_stats { u64 local_addr_false; u64 vlan_drop; u64 stp_drop; + u64 rx_pfc[DSAF_PRIO_NR]; + u64 tx_pfc[DSAF_PRIO_NR]; u64 tx_pkts; }; @@ -268,6 +273,27 @@ struct dsaf_int_stat { }; +struct dsaf_misc_op { + void (*cpld_set_led)(struct hns_mac_cb *mac_cb, int link_status, + u16 speed, 
int data); + void (*cpld_reset_led)(struct hns_mac_cb *mac_cb); + int (*cpld_set_led_id)(struct hns_mac_cb *mac_cb, + enum hnae_led_state status); + /* reset series of functions; the block is held in reset while dereset is 0 */ + void (*dsaf_reset)(struct dsaf_device *dsaf_dev, bool dereset); + void (*xge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset); + void (*xge_core_srst)(struct dsaf_device *dsaf_dev, u32 port, + bool dereset); + void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset); + void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset); + void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset); + + phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb); + int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt); + + int (*cfg_serdes_loopback)(struct hns_mac_cb *mac_cb, bool en); +}; + /* Dsaf device struct define, and mac -> dsaf */ struct dsaf_device { struct device *dev; @@ -292,9 +318,12 @@ struct dsaf_device { struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM]; struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM]; struct hns_mac_cb *mac_cb[DSAF_MAX_PORT_NUM]; + struct dsaf_misc_op *misc_op; struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM]; struct dsaf_int_stat int_stat; + /* spinlock to serialize tcam table config */ + spinlock_t tcam_lock; }; static inline void *hns_dsaf_dev_priv(const struct dsaf_device *dsaf_dev) @@ -388,27 +417,17 @@ int hns_dsaf_get_mac_entry_by_index( u16 entry_index, struct dsaf_drv_mac_multi_dest_entry *mac_entry); -void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val); - -void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val); - -void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val); - void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb); int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev); void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev); -void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val); -void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val); -void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, - u32 port, u32 val); - void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num); -int hns_dsaf_get_sset_count(int stringset); +int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset); void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port); -void hns_dsaf_get_strings(int stringset, u8 *data, int port); +void hns_dsaf_get_strings(int stringset, u8 *data, int port, + struct dsaf_device *dsaf_dev); void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data); int hns_dsaf_get_regs_count(void); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index a837bb9e3..611b67b6f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -12,6 +12,27 @@ #include "hns_dsaf_ppe.h" #include "hns_dsaf_reg.h" +enum _dsm_op_index { + HNS_OP_RESET_FUNC = 0x1, + HNS_OP_SERDES_LP_FUNC = 0x2, + HNS_OP_LED_SET_FUNC = 0x3, + HNS_OP_GET_PORT_TYPE_FUNC = 0x4, + HNS_OP_GET_SFP_STAT_FUNC = 0x5, +}; + +enum _dsm_rst_type { + HNS_DSAF_RESET_FUNC = 0x1, + HNS_PPE_RESET_FUNC = 0x2, + HNS_XGE_CORE_RESET_FUNC = 0x3, + HNS_XGE_RESET_FUNC = 0x4, + HNS_GE_RESET_FUNC = 0x5, +}; + +const u8 hns_dsaf_acpi_dsm_uuid[] = { + 0x1A, 0xAA, 0x85, 0x1A, 0x93, 0xE2, 0x5E, 0x41, + 0x8E, 0x28, 0x8D, 0x69, 0x0A, 0x0F, 0x82, 0x0A +}; + static void dsaf_write_sub(struct
dsaf_device *dsaf_dev, u32 reg, u32 val) { if (dsaf_dev->sub_ctrl) @@ -32,8 +53,8 @@ static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg) return ret; } -void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, - u16 speed, int data) +static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, + u16 speed, int data) { int speed_reg = 0; u8 value; @@ -65,13 +86,14 @@ void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, mac_cb->cpld_led_value = value; } } else { - dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg, - CPLD_LED_DEFAULT_VALUE); - mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE; + value = (mac_cb->cpld_led_value) & (0x1 << DSAF_LED_ANCHOR_B); + dsaf_write_syscon(mac_cb->cpld_ctrl, + mac_cb->cpld_ctrl_reg, value); + mac_cb->cpld_led_value = value; } } -void cpld_led_reset(struct hns_mac_cb *mac_cb) +static void cpld_led_reset(struct hns_mac_cb *mac_cb) { if (!mac_cb || !mac_cb->cpld_ctrl) return; @@ -81,8 +103,8 @@ void cpld_led_reset(struct hns_mac_cb *mac_cb) mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE; } -int cpld_set_led_id(struct hns_mac_cb *mac_cb, - enum hnae_led_state status) +static int cpld_set_led_id(struct hns_mac_cb *mac_cb, + enum hnae_led_state status) { switch (status) { case HNAE_LED_ACTIVE: @@ -93,7 +115,7 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb, CPLD_LED_ON_VALUE); dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg, mac_cb->cpld_led_value); - return 2; + break; case HNAE_LED_INACTIVE: dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B, CPLD_LED_DEFAULT_VALUE); @@ -101,7 +123,8 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb, mac_cb->cpld_led_value); break; default: - break; + dev_err(mac_cb->dev, "invalid led state: %d!", status); + return -EINVAL; } return 0; @@ -109,12 +132,40 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb, #define RESET_REQ_OR_DREQ 1 -void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val) +static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type, + u32 port_type, u32 port, u32 val) +{ + union acpi_object *obj; + union acpi_object obj_args[3], argv4; + + obj_args[0].integer.type = ACPI_TYPE_INTEGER; + obj_args[0].integer.value = port_type; + obj_args[1].integer.type = ACPI_TYPE_INTEGER; + obj_args[1].integer.value = port; + obj_args[2].integer.type = ACPI_TYPE_INTEGER; + obj_args[2].integer.value = val; + + argv4.type = ACPI_TYPE_PACKAGE; + argv4.package.count = 3; + argv4.package.elements = obj_args; + + obj = acpi_evaluate_dsm(ACPI_HANDLE(dsaf_dev->dev), + hns_dsaf_acpi_dsm_uuid, 0, op_type, &argv4); + if (!obj) { + dev_warn(dsaf_dev->dev, "reset port_type%d port%d fail!", + port_type, port); + return; + } + + ACPI_FREE(obj); +} + +static void hns_dsaf_rst(struct dsaf_device *dsaf_dev, bool dereset) { u32 xbar_reg_addr; u32 nt_reg_addr; - if (!val) { + if (!dereset) { xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_REQ_REG; nt_reg_addr = DSAF_SUB_SC_NT_RESET_REQ_REG; } else { @@ -126,7 +177,15 @@ void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val) dsaf_write_sub(dsaf_dev, nt_reg_addr, RESET_REQ_OR_DREQ); } -void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) +static void hns_dsaf_rst_acpi(struct dsaf_device *dsaf_dev, bool dereset) +{ + hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC, + HNS_DSAF_RESET_FUNC, + 0, dereset); +} + +static void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, + bool dereset) { u32 reg_val = 0; u32 reg_addr; @@ -137,7 +196,7 @@ void 
hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) reg_val |= RESET_REQ_OR_DREQ; reg_val |= 0x2082082 << dsaf_dev->mac_cb[port]->port_rst_off; - if (val == 0) + if (!dereset) reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG; else reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG; @@ -145,8 +204,15 @@ void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) dsaf_write_sub(dsaf_dev, reg_addr, reg_val); } -void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, - u32 port, u32 val) +static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev, + u32 port, bool dereset) +{ + hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC, + HNS_XGE_RESET_FUNC, port, dereset); +} + +static void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, + u32 port, bool dereset) { u32 reg_val = 0; u32 reg_addr; @@ -157,7 +223,7 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, reg_val |= XGMAC_TRX_CORE_SRST_M << dsaf_dev->mac_cb[port]->port_rst_off; - if (val == 0) + if (!dereset) reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG; else reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG; @@ -165,7 +231,16 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, dsaf_write_sub(dsaf_dev, reg_addr, reg_val); } -void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) +static void +hns_dsaf_xge_core_srst_by_port_acpi(struct dsaf_device *dsaf_dev, + u32 port, bool dereset) +{ + hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC, + HNS_XGE_CORE_RESET_FUNC, port, dereset); +} + +static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, + bool dereset) { u32 reg_val_1; u32 reg_val_2; @@ -178,12 +253,11 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) reg_val_1 = 0x1 << port; port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off; /* there is a difference between V1 and V2 in this register. */ - if (AE_IS_VER1(dsaf_dev->dsaf_ver)) - reg_val_2 = 0x1041041 << port_rst_off; - else - reg_val_2 = 0x2082082 << port_rst_off; + reg_val_2 = AE_IS_VER1(dsaf_dev->dsaf_ver) ? + 0x1041041 : 0x2082082; + reg_val_2 <<= port_rst_off; - if (val == 0) { + if (!dereset) { dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG, reg_val_1); @@ -197,10 +271,13 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) reg_val_1); } } else { - reg_val_1 = 0x15540 << dsaf_dev->reset_offset; - reg_val_2 = 0x100 << dsaf_dev->reset_offset; + reg_val_1 = 0x15540; + reg_val_2 = AE_IS_VER1(dsaf_dev->dsaf_ver) ?
0x100 : 0x40; + + reg_val_1 <<= dsaf_dev->reset_offset; + reg_val_2 <<= dsaf_dev->reset_offset; - if (val == 0) { + if (!dereset) { dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG, reg_val_1); @@ -216,14 +293,22 @@ } } -void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) +static void hns_dsaf_ge_srst_by_port_acpi(struct dsaf_device *dsaf_dev, + u32 port, bool dereset) +{ + hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC, + HNS_GE_RESET_FUNC, port, dereset); +} + +static void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, + bool dereset) { u32 reg_val = 0; u32 reg_addr; reg_val |= RESET_REQ_OR_DREQ << dsaf_dev->mac_cb[port]->port_rst_off; - if (val == 0) + if (!dereset) reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG; else reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG; @@ -231,15 +316,24 @@ dsaf_write_sub(dsaf_dev, reg_addr, reg_val); } -void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val) +static void +hns_ppe_srst_by_port_acpi(struct dsaf_device *dsaf_dev, u32 port, bool dereset) +{ + hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC, + HNS_PPE_RESET_FUNC, port, dereset); +} + +static void hns_ppe_com_srst(struct dsaf_device *dsaf_dev, bool dereset) { - struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev; u32 reg_val; u32 reg_addr; + if (!(dev_of_node(dsaf_dev->dev))) + return; + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { reg_val = RESET_REQ_OR_DREQ; - if (val == 0) + if (!dereset) reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG; else reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG; @@ -247,7 +341,7 @@ } else { reg_val = 0x100 << dsaf_dev->reset_offset; - if (val == 0) + if (!dereset) reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG; else reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG; @@ -261,7 +355,7 @@ * @mac_cb: mac control block * return phy interface */ -phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) +static phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) { u32 mode; u32 reg; @@ -293,6 +387,36 @@ phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) return phy_if; } +static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb) +{ + phy_interface_t phy_if = PHY_INTERFACE_MODE_NA; + union acpi_object *obj; + union acpi_object obj_args, argv4; + + obj_args.integer.type = ACPI_TYPE_INTEGER; + obj_args.integer.value = mac_cb->mac_id; + + argv4.type = ACPI_TYPE_PACKAGE, + argv4.package.count = 1, + argv4.package.elements = &obj_args, + + obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev), + hns_dsaf_acpi_dsm_uuid, 0, + HNS_OP_GET_PORT_TYPE_FUNC, &argv4); + + if (!obj || obj->type != ACPI_TYPE_INTEGER) + return phy_if; + + phy_if = obj->integer.value ?
+ PHY_INTERFACE_MODE_XGMII : PHY_INTERFACE_MODE_SGMII; + + dev_dbg(mac_cb->dev, "mac_id=%d, phy_if=%d\n", mac_cb->mac_id, phy_if); + + ACPI_FREE(obj); + + return phy_if; +} + int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) { if (!mac_cb->cpld_ctrl) @@ -309,13 +433,8 @@ int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) * @mac_cb: mac control block * return 0 == success */ -int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en) +static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en) { - /* port 0-3 hilink4 base is serdes_vaddr + 0x00280000 - * port 4-7 hilink3 base is serdes_vaddr + 0x00200000 - */ - u8 *base_addr = (u8 *)mac_cb->serdes_vaddr + - (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000); const u8 lane_id[] = { 0, /* mac 0 -> lane 0 */ 1, /* mac 1 -> lane 1 */ @@ -332,7 +451,7 @@ int sfp_prsnt; int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt); - if (!mac_cb->phy_node) { + if (!mac_cb->phy_dev) { if (ret) pr_info("please confirm sfp is present or not\n"); else @@ -341,13 +460,110 @@ } if (mac_cb->serdes_ctrl) { - u32 origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset); + u32 origin; + + if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) { +#define HILINK_ACCESS_SEL_CFG 0x40008 + /* hilink4 & hilink3 use the same xge training and + * xge u adaptor. There is a hilink access sel cfg + * register to select which one is to be configured + */ + if ((!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev)) && + (mac_cb->mac_id <= 3)) + dsaf_write_syscon(mac_cb->serdes_ctrl, + HILINK_ACCESS_SEL_CFG, 0); + else + dsaf_write_syscon(mac_cb->serdes_ctrl, + HILINK_ACCESS_SEL_CFG, 3); + } - dsaf_set_field(origin, 1ull << 10, 10, !!en); + origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset); + + dsaf_set_field(origin, 1ull << 10, 10, en); dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); } else { - dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en); + u8 *base_addr = (u8 *)mac_cb->serdes_vaddr + + (mac_cb->mac_id <= 3 ?
0x00280000 : 0x00200000); + dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en); + } + + return 0; +} + +static int +hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en) +{ + union acpi_object *obj; + union acpi_object obj_args[3], argv4; + + obj_args[0].integer.type = ACPI_TYPE_INTEGER; + obj_args[0].integer.value = mac_cb->mac_id; + obj_args[1].integer.type = ACPI_TYPE_INTEGER; + obj_args[1].integer.value = !!en; + + argv4.type = ACPI_TYPE_PACKAGE; + argv4.package.count = 2; + argv4.package.elements = obj_args; + + obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dsaf_dev->dev), + hns_dsaf_acpi_dsm_uuid, 0, + HNS_OP_SERDES_LP_FUNC, &argv4); + if (!obj) { + dev_warn(mac_cb->dsaf_dev->dev, "set port%d serdes lp fail!", + mac_cb->mac_id); + + return -ENOTSUPP; } + ACPI_FREE(obj); + return 0; } + +struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev) +{ + struct dsaf_misc_op *misc_op; + + misc_op = devm_kzalloc(dsaf_dev->dev, sizeof(*misc_op), GFP_KERNEL); + if (!misc_op) + return NULL; + + if (dev_of_node(dsaf_dev->dev)) { + misc_op->cpld_set_led = hns_cpld_set_led; + misc_op->cpld_reset_led = cpld_led_reset; + misc_op->cpld_set_led_id = cpld_set_led_id; + + misc_op->dsaf_reset = hns_dsaf_rst; + misc_op->xge_srst = hns_dsaf_xge_srst_by_port; + misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port; + misc_op->ge_srst = hns_dsaf_ge_srst_by_port; + misc_op->ppe_srst = hns_ppe_srst_by_port; + misc_op->ppe_comm_srst = hns_ppe_com_srst; + + misc_op->get_phy_if = hns_mac_get_phy_if; + misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt; + + misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback; + } else if (is_acpi_node(dsaf_dev->dev->fwnode)) { + misc_op->cpld_set_led = hns_cpld_set_led; + misc_op->cpld_reset_led = cpld_led_reset; + misc_op->cpld_set_led_id = cpld_set_led_id; + + misc_op->dsaf_reset = hns_dsaf_rst_acpi; + misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi; + misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port_acpi; + misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi; + misc_op->ppe_srst = hns_ppe_srst_by_port_acpi; + misc_op->ppe_comm_srst = hns_ppe_com_srst; + + misc_op->get_phy_if = hns_mac_get_phy_if_acpi; + misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt; + + misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi; + } else { + devm_kfree(dsaf_dev->dev, (void *)misc_op); + misc_op = NULL; + } + + return (void *)misc_op; +} diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h index 419f07aa9..f06bb03d4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h @@ -33,11 +33,6 @@ #define DSAF_LED_DATA_B 4 #define DSAF_LED_ANCHOR_B 5 -void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, - u16 speed, int data); -void cpld_led_reset(struct hns_mac_cb *mac_cb); -int cpld_set_led_id(struct hns_mac_cb *mac_cb, - enum hnae_led_state status); -int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt); +struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 8cd151a52..6ea872287 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -112,7 +112,6 @@ void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index) static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb 
*ppe_common, int ppe_idx) { - return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET; } @@ -200,11 +199,12 @@ static void hns_ppe_set_port_mode(struct hns_ppe_cb *ppe_cb, static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common) { enum ppe_qid_mode qid_mode; - enum dsaf_mode dsaf_mode = ppe_common->dsaf_dev->dsaf_mode; + struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev; + enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode; - hns_ppe_com_srst(ppe_common, 0); + dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 0); mdelay(100); - hns_ppe_com_srst(ppe_common, 1); + dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 1); mdelay(100); if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) { @@ -288,9 +288,9 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb) /* get default RSS key */ netdev_rss_key_fill(ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE); - hns_ppe_srst_by_port(dsaf_dev, port, 0); + dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0); mdelay(10); - hns_ppe_srst_by_port(dsaf_dev, port, 1); + dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 1); /* clr and msk except irq*/ hns_ppe_exc_irq_en(ppe_cb, 0); @@ -330,8 +330,10 @@ static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb) u32 port; if (ppe_cb->ppe_common_cb) { + struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev; + port = ppe_cb->index; - hns_ppe_srst_by_port(ppe_cb->ppe_common_cb->dsaf_dev, port, 0); + dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0); } } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 4ef6d23d9..ef1107777 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -458,7 +458,6 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common) u32 i; u32 ring_num = rcb_common->ring_num; int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common); - struct device_node *np = rcb_common->dsaf_dev->dev->of_node; struct platform_device *pdev = to_platform_device(rcb_common->dsaf_dev->dev); bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver); @@ -473,10 +472,10 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common) ring_pair_cb->port_id_in_comm = hns_rcb_get_port_in_comm(rcb_common, i); ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] = - is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) : + is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2) : platform_get_irq(pdev, base_irq_idx + i * 3 + 1); ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] = - is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1) : + is_ver1 ? 
platform_get_irq(pdev, base_irq_idx + i * 2 + 1) : platform_get_irq(pdev, base_irq_idx + i * 3); ring_pair_cb->q.phy_base = RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i); @@ -541,7 +540,7 @@ int hns_rcb_set_coalesce_usecs( } if (timeout > HNS_RCB_MAX_COALESCED_USECS) { dev_err(rcb_common->dsaf_dev->dev, - "error: not support coalesce %dus!\n", timeout); + "error: coalesce_usecs setting supports 0~1023us\n"); return -EINVAL; } hns_rcb_set_port_timeout(rcb_common, port_idx, timeout); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index bd54dac82..99b4e1ba0 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h @@ -40,7 +40,7 @@ struct rcb_common_cb; #define HNS_RCB_DEF_COALESCED_FRAMES 50 #define HNS_RCB_CLK_FREQ_MHZ 350 #define HNS_RCB_MAX_COALESCED_USECS 0x3ff -#define HNS_RCB_DEF_COALESCED_USECS 3 +#define HNS_RCB_DEF_COALESCED_USECS 50 #define HNS_RCB_COMMON_ENDIAN 1 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 7c3b5103d..235f74444 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -32,7 +32,7 @@ #define DSAFV2_SBM_NUM 8 #define DSAFV2_SBM_XGE_CHN 6 #define DSAFV2_SBM_PPE_CHN 1 -#define DASFV2_ROCEE_CRD_NUM 8 +#define DASFV2_ROCEE_CRD_NUM 1 #define DSAF_VOQ_NUM DSAF_NODE_NUM #define DSAF_INODE_NUM DSAF_NODE_NUM @@ -166,6 +166,9 @@ #define DSAF_INODE_GE_FC_EN_0_REG 0x1B00 #define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50 #define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x1C00 +#define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG 0x1C00 +#define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET 0x100 +#define DSAF_INODE_IN_PRIO_PAUSE_OFFSET 0x50 #define DSAF_SBM_CFG_REG_0_REG 0x2000 #define DSAF_SBM_BP_CFG_0_XGE_REG_0_REG 0x2004 @@ -175,7 +178,7 @@ #define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C #define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C #define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C -#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C +#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C #define DSAF_SBM_FREE_CNT_0_0_REG 0x2010 #define DSAF_SBM_FREE_CNT_1_0_REG 0x2014 #define DSAF_SBM_BP_CNT_0_0_REG 0x2018 @@ -232,6 +235,8 @@ #define DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG 0x3074 #define DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG 0x3078 #define DSAF_XOD_FIFO_STATUS_0_REG 0x307C +#define DSAF_XOD_XGE_PFC_PRIO_CNT_BASE_REG 0x3A00 +#define DSAF_XOD_XGE_PFC_PRIO_CNT_OFFSET 0x4 #define DSAF_VOQ_ECC_INVERT_EN_0_REG 0x4004 #define DSAF_VOQ_SRAM_PKT_NUM_0_REG 0x4008 @@ -791,6 +796,18 @@ #define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S 9 #define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9) +#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S 0 +#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M (((1ULL << 8) - 1) << 0) +#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S 8 +#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_M (((1ULL << 8) - 1) << 8) + +#define DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_S (0) +#define DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_M (((1ULL << 6) - 1) << 0) +#define DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_S (6) +#define DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_M (((1ULL << 6) - 1) << 6) +#define DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_S (12) +#define DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M (((1ULL << 6) - 1) << 12) + #define DSAF_TBL_TCAM_ADDR_S 0 #define DSAF_TBL_TCAM_ADDR_M ((1ULL << 9) - 1) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c 
b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index fd90f3737..8f4f0e8da 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -119,7 +119,7 @@ static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode) = (struct dsaf_device *)dev_get_drvdata(drv->dev); u32 port = drv->mac_id; - hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 1); + dsaf_dev->misc_op->xge_core_srst(dsaf_dev, port, 1); mdelay(10); /*enable XGE rX/tX */ @@ -157,7 +157,7 @@ static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode) } mdelay(10); - hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 0); + dsaf_dev->misc_op->xge_core_srst(dsaf_dev, port, 0); } /** @@ -198,9 +198,9 @@ static void hns_xgmac_init(void *mac_drv) = (struct dsaf_device *)dev_get_drvdata(drv->dev); u32 port = drv->mac_id; - hns_dsaf_xge_srst_by_port(dsaf_dev, port, 0); + dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0); mdelay(100); - hns_dsaf_xge_srst_by_port(dsaf_dev, port, 1); + dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1); mdelay(100); hns_xgmac_exc_irq_en(drv, 0); @@ -425,7 +425,7 @@ static void hns_xgmac_free(void *mac_drv) u32 mac_id = drv->mac_id; - hns_dsaf_xge_srst_by_port(dsaf_dev, mac_id, 0); + dsaf_dev->misc_op->xge_srst(dsaf_dev, mac_id, 0); } /** diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index e621636e6..d7e1f8c7a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -132,6 +132,13 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv, ring_ptr_move_fw(ring, next_to_use); } +static const struct acpi_device_id hns_enet_acpi_match[] = { + { "HISI00C1", 0 }, + { "HISI00C2", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match); + static void fill_desc(struct hnae_ring *ring, void *priv, int size, dma_addr_t dma, int frag_end, int buf_num, enum hns_desc_type type, int mtu) @@ -593,6 +600,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, ring->stats.sw_err_cnt++; return -ENOMEM; } + skb_reset_mac_header(skb); prefetchw(skb->data); length = le16_to_cpu(desc->rx.pkt_len); @@ -754,16 +762,16 @@ static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data, recv_pkts = 0, recv_bds = 0, clean_count = 0; recv: while (recv_pkts < budget && recv_bds < num) { - /* reuse or realloc buffers*/ + /* reuse or realloc buffers */ if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { hns_nic_alloc_rx_buffers(ring_data, clean_count); clean_count = 0; } - /* poll one pkg*/ + /* poll one pkt */ err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum); if (unlikely(!skb)) /* this fault cannot be repaired */ - break; + goto out; recv_bds += bnum; clean_count += bnum; @@ -789,6 +797,7 @@ recv: } } +out: /* make all data has been write before submit */ if (clean_count > 0) hns_nic_alloc_rx_buffers(ring_data, clean_count); @@ -983,8 +992,26 @@ static void hns_nic_adjust_link(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); struct hnae_handle *h = priv->ae_handle; + int state = 1; + + if (priv->phy) { + h->dev->ops->adjust_link(h, ndev->phydev->speed, + ndev->phydev->duplex); + state = priv->phy->link; + } + state = state && h->dev->ops->get_status(h); - h->dev->ops->adjust_link(h, ndev->phydev->speed, ndev->phydev->duplex); + if (state != priv->link) { + if (state) { + netif_carrier_on(ndev); + netif_tx_wake_all_queues(ndev); + netdev_info(ndev, "link up\n"); + } else { + 
netif_carrier_off(ndev); + netdev_info(ndev, "link down\n"); + } + priv->link = state; + } } /** @@ -996,19 +1023,22 @@ static void hns_nic_adjust_link(struct net_device *ndev) int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) { struct hns_nic_priv *priv = netdev_priv(ndev); - struct phy_device *phy_dev = NULL; + struct phy_device *phy_dev = h->phy_dev; + int ret; - if (!h->phy_node) + if (!h->phy_dev) return 0; - if (h->phy_if != PHY_INTERFACE_MODE_XGMII) - phy_dev = of_phy_connect(ndev, h->phy_node, - hns_nic_adjust_link, 0, h->phy_if); - else - phy_dev = of_phy_attach(ndev, h->phy_node, 0, h->phy_if); + if (h->phy_if != PHY_INTERFACE_MODE_XGMII) { + phy_dev->dev_flags = 0; - if (unlikely(!phy_dev) || IS_ERR(phy_dev)) - return !phy_dev ? -ENODEV : PTR_ERR(phy_dev); + ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link, + h->phy_if); + } else { + ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if); + } + if (unlikely(ret)) + return -ENODEV; phy_dev->supported &= h->if_support; phy_dev->advertising = phy_dev->supported; @@ -1067,13 +1097,8 @@ void hns_nic_update_stats(struct net_device *netdev) static void hns_init_mac_addr(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); - struct device_node *node = priv->dev->of_node; - const void *mac_addr_temp; - mac_addr_temp = of_get_mac_address(node); - if (mac_addr_temp && is_valid_ether_addr(mac_addr_temp)) { - memcpy(ndev->dev_addr, mac_addr_temp, ndev->addr_len); - } else { + if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) { eth_hw_addr_random(ndev); dev_warn(priv->dev, "No valid mac, use random mac %pM", ndev->dev_addr); @@ -1176,7 +1201,7 @@ static int hns_nic_net_up(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); struct hnae_handle *h = priv->ae_handle; - int i, j, k; + int i, j; int ret; ret = hns_nic_init_irq(priv); @@ -1191,9 +1216,6 @@ static int hns_nic_net_up(struct net_device *ndev) goto out_has_some_queues; } - for (k = 0; k < h->q_num; k++) - h->dev->ops->toggle_queue_status(h->qs[k], 1); - ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr); if (ret) goto out_set_mac_addr_err; @@ -1213,8 +1235,6 @@ static int hns_nic_net_up(struct net_device *ndev) out_start_err: netif_stop_queue(ndev); out_set_mac_addr_err: - for (k = 0; k < h->q_num; k++) - h->dev->ops->toggle_queue_status(h->qs[k], 0); out_has_some_queues: for (j = i - 1; j >= 0; j--) hns_nic_ring_close(ndev, j); @@ -1421,7 +1441,6 @@ static int hns_nic_set_features(struct net_device *netdev, netdev_features_t features) { struct hns_nic_priv *priv = netdev_priv(netdev); - struct hnae_handle *h = priv->ae_handle; switch (priv->enet_ver) { case AE_VERSION_1: @@ -1434,11 +1453,9 @@ static int hns_nic_set_features(struct net_device *netdev, priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; /* The chip only support 7*4096 */ netif_set_gso_max_size(netdev, 7 * 4096); - h->dev->ops->set_tso_stats(h, 1); } else { priv->ops.fill_desc = fill_v2_desc; priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; - h->dev->ops->set_tso_stats(h, 0); } break; } @@ -1571,27 +1588,14 @@ static void hns_nic_update_link_status(struct net_device *netdev) struct hns_nic_priv *priv = netdev_priv(netdev); struct hnae_handle *h = priv->ae_handle; - int state = 1; - if (priv->phy) { - if (!genphy_update_link(priv->phy)) - state = priv->phy->link; - else - state = 0; - } - state = state && h->dev->ops->get_status(h); + if (h->phy_dev) { + if (h->phy_if != PHY_INTERFACE_MODE_XGMII) + return; - if (state != priv->link) { - 
if (state) { - netif_carrier_on(netdev); - netif_tx_wake_all_queues(netdev); - netdev_info(netdev, "link up\n"); - } else { - netif_carrier_off(netdev); - netdev_info(netdev, "link down\n"); - } - priv->link = state; + (void)genphy_read_status(h->phy_dev); } + hns_nic_adjust_link(netdev); } /* for dumping key regs*/ @@ -1627,7 +1631,7 @@ static void hns_nic_dump(struct hns_nic_priv *priv) } } -/* for resetting suntask*/ +/* for resetting subtask */ static void hns_nic_reset_subtask(struct hns_nic_priv *priv) { enum hnae_port_type type = priv->ae_handle->port_type; @@ -1797,11 +1801,14 @@ static void hns_nic_set_priv_ops(struct net_device *netdev) priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; /* This chip only support 7*4096 */ netif_set_gso_max_size(netdev, 7 * 4096); - h->dev->ops->set_tso_stats(h, 1); } else { priv->ops.fill_desc = fill_v2_desc; priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; } + /* enable TSO at init; + * TSO on/off is then controlled per descriptor through the TSE bit in the BD + */ + h->dev->ops->set_tso_stats(h, 1); } } @@ -1812,7 +1819,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev) int ret; h = hnae_get_handle(&priv->netdev->dev, - priv->ae_node, priv->port_id, NULL); + priv->fwnode, priv->port_id, NULL); if (IS_ERR_OR_NULL(h)) { ret = -ENODEV; dev_dbg(priv->dev, "has not handle, register notifier!\n"); @@ -1872,7 +1879,6 @@ static int hns_nic_dev_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct net_device *ndev; struct hns_nic_priv *priv; - struct device_node *node = dev->of_node; u32 port_id; int ret; @@ -1886,22 +1892,49 @@ static int hns_nic_dev_probe(struct platform_device *pdev) priv->dev = dev; priv->netdev = ndev; - if (of_device_is_compatible(node, "hisilicon,hns-nic-v1")) - priv->enet_ver = AE_VERSION_1; - else - priv->enet_ver = AE_VERSION_2; + if (dev_of_node(dev)) { + struct device_node *ae_node; - priv->ae_node = (void *)of_parse_phandle(node, "ae-handle", 0); - if (IS_ERR_OR_NULL(priv->ae_node)) { - ret = PTR_ERR(priv->ae_node); - dev_err(dev, "not find ae-handle\n"); - goto out_read_prop_fail; + if (of_device_is_compatible(dev->of_node, + "hisilicon,hns-nic-v1")) + priv->enet_ver = AE_VERSION_1; + else + priv->enet_ver = AE_VERSION_2; + + ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0); + if (IS_ERR_OR_NULL(ae_node)) { + ret = PTR_ERR(ae_node); + dev_err(dev, "failed to find ae-handle\n"); + goto out_read_prop_fail; + } + priv->fwnode = &ae_node->fwnode; + } else if (is_acpi_node(dev->fwnode)) { + struct acpi_reference_args args; + + if (acpi_dev_found(hns_enet_acpi_match[0].id)) + priv->enet_ver = AE_VERSION_1; + else if (acpi_dev_found(hns_enet_acpi_match[1].id)) + priv->enet_ver = AE_VERSION_2; + else + return -ENXIO; + + /* try to find port-idx-in-ae first */ + ret = acpi_node_get_property_reference(dev->fwnode, + "ae-handle", 0, &args); + if (ret) { + dev_err(dev, "failed to find ae-handle\n"); + goto out_read_prop_fail; + } + priv->fwnode = acpi_fwnode_handle(args.adev); + } else { + dev_err(dev, "cannot read cfg data from OF or ACPI\n"); + return -ENXIO; } - /* try to find port-idx-in-ae first */ - ret = of_property_read_u32(node, "port-idx-in-ae", &port_id); + + ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id); if (ret) { /* only for old code compatible */ - ret = of_property_read_u32(node, "port-id", &port_id); + ret = device_property_read_u32(dev, "port-id", &port_id); if (ret) goto out_read_prop_fail; /* for old dts, we need to caculate the port offset */ @@ -1940,7 +1973,7 @@ static int hns_nic_dev_probe(struct
platform_device *pdev) if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) dev_dbg(dev, "set mask to 64bit\n"); else - dev_err(dev, "set mask to 32bit fail!\n"); + dev_err(dev, "set mask to 64bit fail!\n"); /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(ndev); @@ -2014,6 +2047,7 @@ static struct platform_driver hns_nic_dev_driver = { .driver = { .name = "hns-nic", .of_match_table = hns_enet_of_match, + .acpi_match_table = ACPI_PTR(hns_enet_acpi_match), }, .probe = hns_nic_dev_probe, .remove = hns_nic_dev_remove, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h index 337efa582..44bb3015e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h @@ -54,7 +54,7 @@ struct hns_nic_ops { }; struct hns_nic_priv { - const struct device_node *ae_node; + const struct fwnode_handle *fwnode; u32 enet_ver; u32 port_id; int phy_mode; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 67a648c7d..ab33487a5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -49,7 +49,7 @@ static u32 hns_nic_get_link(struct net_device *net_dev) h = priv->ae_handle; if (priv->phy) { - if (!genphy_update_link(priv->phy)) + if (!genphy_read_status(priv->phy)) link_stat = priv->phy->link; else link_stat = 0; @@ -165,13 +165,21 @@ static int hns_nic_get_settings(struct net_device *net_dev, cmd->advertising |= ADVERTISED_10000baseKR_Full; } - if (h->port_type == HNAE_PORT_SERVICE) { + switch (h->media_type) { + case HNAE_MEDIA_TYPE_FIBER: cmd->port = PORT_FIBRE; - cmd->supported |= SUPPORTED_Pause; - } else { + break; + case HNAE_MEDIA_TYPE_COPPER: cmd->port = PORT_TP; + break; + case HNAE_MEDIA_TYPE_UNKNOWN: + default: + break; } + if (!(AE_IS_VER1(priv->enet_ver) && h->port_type == HNAE_PORT_DEBUG)) + cmd->supported |= SUPPORTED_Pause; + cmd->transceiver = XCVR_EXTERNAL; cmd->mdio_support = (ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22); hns_get_mdix_mode(net_dev, cmd); @@ -242,6 +250,7 @@ static const char hns_nic_test_strs[][ETH_GSTRING_LEN] = { static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) { #define COPPER_CONTROL_REG 0 +#define PHY_POWER_DOWN BIT(11) #define PHY_LOOP_BACK BIT(14) u16 val = 0; @@ -252,33 +261,40 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) /* speed : 1000M */ phy_write(phy_dev, HNS_PHY_PAGE_REG, 2); phy_write(phy_dev, 21, 0x1046); + + phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); /* Force Master */ phy_write(phy_dev, 9, 0x1F00); + /* Soft-reset */ phy_write(phy_dev, 0, 0x9140); /* If autoneg disabled,two soft-reset operations */ phy_write(phy_dev, 0, 0x9140); - phy_write(phy_dev, 22, 0xFA); + + phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA); /* Default is 0x0400 */ phy_write(phy_dev, 1, 0x418); /* Force 1000M Link, Default is 0x0200 */ phy_write(phy_dev, 7, 0x20C); - phy_write(phy_dev, 22, 0); + phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); - /* Enable MAC loop-back */ + /* Enable PHY loop-back */ val = phy_read(phy_dev, COPPER_CONTROL_REG); val |= PHY_LOOP_BACK; + val &= ~PHY_POWER_DOWN; phy_write(phy_dev, COPPER_CONTROL_REG, val); } else { - phy_write(phy_dev, 22, 0xFA); + phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA); phy_write(phy_dev, 1, 0x400); phy_write(phy_dev, 7, 0x200); - phy_write(phy_dev, 22, 0); + phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); + phy_write(phy_dev, 9, 
0xF00); val = phy_read(phy_dev, COPPER_CONTROL_REG); val &= ~PHY_LOOP_BACK; + val |= PHY_POWER_DOWN; phy_write(phy_dev, COPPER_CONTROL_REG, val); } return 0; @@ -339,28 +355,16 @@ static int __lb_up(struct net_device *ndev, hns_nic_net_reset(ndev); - if (priv->phy) { - phy_disconnect(priv->phy); - msleep(100); - - ret = hns_nic_init_phy(ndev, h); - if (ret) - return ret; - } - ret = __lb_setup(ndev, loop_mode); if (ret) return ret; - msleep(100); + msleep(200); ret = h->dev->ops->start ? h->dev->ops->start(h) : 0; if (ret) return ret; - if (priv->phy) - phy_start(priv->phy); - /* link adjust duplex*/ if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII) speed = 1000; @@ -561,9 +565,6 @@ static int __lb_down(struct net_device *ndev) __func__, ret); - if (priv->phy) - phy_stop(priv->phy); - if (h->dev->ops->stop) h->dev->ops->stop(h); @@ -596,7 +597,7 @@ static void hns_nic_self_test(struct net_device *ndev, st_param[1][0] = MAC_INTERNALLOOP_SERDES; st_param[1][1] = 1; /*serdes must exist*/ st_param[2][0] = MAC_INTERNALLOOP_PHY; /* only supporte phy node*/ - st_param[2][1] = ((!!(priv->ae_handle->phy_node)) && + st_param[2][1] = ((!!(priv->ae_handle->phy_dev)) && (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)); if (eth_test->flags == ETH_TEST_FL_OFFLINE) { @@ -758,6 +759,16 @@ static int hns_get_coalesce(struct net_device *net_dev, &ec->tx_max_coalesced_frames, &ec->rx_max_coalesced_frames); + ops->get_coalesce_range(priv->ae_handle, + &ec->tx_max_coalesced_frames_low, + &ec->rx_max_coalesced_frames_low, + &ec->tx_max_coalesced_frames_high, + &ec->rx_max_coalesced_frames_high, + &ec->tx_coalesce_usecs_low, + &ec->rx_coalesce_usecs_low, + &ec->tx_coalesce_usecs_high, + &ec->rx_coalesce_usecs_high); + return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 765ddb3dc..33f4c483a 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -7,6 +7,7 @@ * (at your option) any later version. */ +#include <linux/acpi.h> #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/init.h> @@ -36,9 +37,19 @@ #define MDIO_TIMEOUT 1000000 +struct hns_mdio_sc_reg { + u16 mdio_clk_en; + u16 mdio_clk_dis; + u16 mdio_reset_req; + u16 mdio_reset_dreq; + u16 mdio_clk_st; + u16 mdio_reset_st; +}; + struct hns_mdio_device { void *vbase; /* mdio reg base address */ struct regmap *subctrl_vbase; + struct hns_mdio_sc_reg sc_reg; }; /* mdio reg */ @@ -92,7 +103,6 @@ enum mdio_c45_op_seq { #define MDIO_SC_CLK_DIS 0x33C #define MDIO_SC_RESET_REQ 0xA38 #define MDIO_SC_RESET_DREQ 0xA3C -#define MDIO_SC_CTRL 0x2010 #define MDIO_SC_CLK_ST 0x531C #define MDIO_SC_RESET_ST 0x5A1C @@ -352,69 +362,68 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum) static int hns_mdio_reset(struct mii_bus *bus) { struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv; + const struct hns_mdio_sc_reg *sc_reg; int ret; - if (!mdio_dev->subctrl_vbase) { - dev_err(&bus->dev, "mdio sys ctl reg has not maped\n"); - return -ENODEV; - } - - /*1. reset req, and read reset st check*/ - ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_REQ, 0x1, - MDIO_SC_RESET_ST, 0x1, - MDIO_CHECK_SET_ST); - if (ret) { - dev_err(&bus->dev, "MDIO reset fail\n"); - return ret; - } + if (dev_of_node(bus->parent)) { + if (!mdio_dev->subctrl_vbase) { + dev_err(&bus->dev, "mdio sys ctl reg has not maped\n"); + return -ENODEV; + } - /*2. 
dis clk, and read clk st check*/ - ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_DIS, - 0x1, MDIO_SC_CLK_ST, 0x1, - MDIO_CHECK_CLR_ST); - if (ret) { - dev_err(&bus->dev, "MDIO dis clk fail\n"); - return ret; - } + sc_reg = &mdio_dev->sc_reg; + /* 1. reset req, and read reset st check */ + ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_reset_req, + 0x1, sc_reg->mdio_reset_st, 0x1, + MDIO_CHECK_SET_ST); + if (ret) { + dev_err(&bus->dev, "MDIO reset fail\n"); + return ret; + } - /*3. reset dreq, and read reset st check*/ - ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_DREQ, 0x1, - MDIO_SC_RESET_ST, 0x1, - MDIO_CHECK_CLR_ST); - if (ret) { - dev_err(&bus->dev, "MDIO dis clk fail\n"); - return ret; - } + /* 2. dis clk, and read clk st check */ + ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_clk_dis, + 0x1, sc_reg->mdio_clk_st, 0x1, + MDIO_CHECK_CLR_ST); + if (ret) { + dev_err(&bus->dev, "MDIO dis clk fail\n"); + return ret; + } - /*4. en clk, and read clk st check*/ - ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_EN, - 0x1, MDIO_SC_CLK_ST, 0x1, - MDIO_CHECK_SET_ST); - if (ret) - dev_err(&bus->dev, "MDIO en clk fail\n"); + /* 3. reset dreq, and read reset st check */ + ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_reset_dreq, + 0x1, sc_reg->mdio_reset_st, 0x1, + MDIO_CHECK_CLR_ST); + if (ret) { + dev_err(&bus->dev, "MDIO reset dreq fail\n"); + return ret; + } + /* 4. en clk, and read clk st check */ + ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_clk_en, + 0x1, sc_reg->mdio_clk_st, 0x1, + MDIO_CHECK_SET_ST); + if (ret) + dev_err(&bus->dev, "MDIO en clk fail\n"); + } else if (is_acpi_node(bus->parent->fwnode)) { + acpi_status s; + + s = acpi_evaluate_object(ACPI_HANDLE(bus->parent), + "_RST", NULL, NULL); + if (ACPI_FAILURE(s)) { + dev_err(&bus->dev, "Reset failed, return:%#x\n", s); + ret = -EBUSY; + } else { + ret = 0; + } + } else { + dev_err(&bus->dev, "Can not get cfg data from DT or ACPI\n"); + ret = -ENXIO; + } return ret; } /** - * hns_mdio_bus_name - get mdio bus name - * @name: mdio bus name - * @np: mdio device node pointer - */ -static void hns_mdio_bus_name(char *name, struct device_node *np) -{ - const u32 *addr; - u64 taddr = OF_BAD_ADDR; - - addr = of_get_address(np, 0, NULL, NULL); - if (addr) - taddr = of_translate_address(np, addr); - - snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name, - (unsigned long long)taddr); -} - -/** * hns_mdio_probe - probe mdio device * @pdev: mdio platform device * @@ -422,17 +431,16 @@ static void hns_mdio_bus_name(char *name, struct device_node *np) */ static int hns_mdio_probe(struct platform_device *pdev) { - struct device_node *np; struct hns_mdio_device *mdio_dev; struct mii_bus *new_bus; struct resource *res; - int ret; + int ret = -ENODEV; if (!pdev) { dev_err(NULL, "pdev is NULL!\r\n"); return -ENODEV; } - np = pdev->dev.of_node; + mdio_dev = devm_kzalloc(&pdev->dev, sizeof(*mdio_dev), GFP_KERNEL); if (!mdio_dev) return -ENOMEM; @@ -448,7 +456,7 @@ static int hns_mdio_probe(struct platform_device *pdev) new_bus->write = hns_mdio_write; new_bus->reset = hns_mdio_reset; new_bus->priv = mdio_dev; - hns_mdio_bus_name(new_bus->id, np); + new_bus->parent = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); mdio_dev->vbase = devm_ioremap_resource(&pdev->dev, res); @@ -457,16 +465,73 @@ static int hns_mdio_probe(struct platform_device *pdev) return ret; } - mdio_dev->subctrl_vbase = - syscon_node_to_regmap(of_parse_phandle(np, "subctrl-vbase", 0)); - if (IS_ERR(mdio_dev->subctrl_vbase)) { - 
dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n"); - mdio_dev->subctrl_vbase = NULL; - } - new_bus->parent = &pdev->dev; platform_set_drvdata(pdev, new_bus); + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%s", "Mii", + dev_name(&pdev->dev)); + if (dev_of_node(&pdev->dev)) { + struct of_phandle_args reg_args; + + ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node, + "subctrl-vbase", + 4, + 0, + ®_args); + if (!ret) { + mdio_dev->subctrl_vbase = + syscon_node_to_regmap(reg_args.np); + if (IS_ERR(mdio_dev->subctrl_vbase)) { + dev_warn(&pdev->dev, "syscon_node_to_regmap error\n"); + mdio_dev->subctrl_vbase = NULL; + } else { + if (reg_args.args_count == 4) { + mdio_dev->sc_reg.mdio_clk_en = + (u16)reg_args.args[0]; + mdio_dev->sc_reg.mdio_clk_dis = + (u16)reg_args.args[0] + 4; + mdio_dev->sc_reg.mdio_reset_req = + (u16)reg_args.args[1]; + mdio_dev->sc_reg.mdio_reset_dreq = + (u16)reg_args.args[1] + 4; + mdio_dev->sc_reg.mdio_clk_st = + (u16)reg_args.args[2]; + mdio_dev->sc_reg.mdio_reset_st = + (u16)reg_args.args[3]; + } else { + /* for compatible */ + mdio_dev->sc_reg.mdio_clk_en = + MDIO_SC_CLK_EN; + mdio_dev->sc_reg.mdio_clk_dis = + MDIO_SC_CLK_DIS; + mdio_dev->sc_reg.mdio_reset_req = + MDIO_SC_RESET_REQ; + mdio_dev->sc_reg.mdio_reset_dreq = + MDIO_SC_RESET_DREQ; + mdio_dev->sc_reg.mdio_clk_st = + MDIO_SC_CLK_ST; + mdio_dev->sc_reg.mdio_reset_st = + MDIO_SC_RESET_ST; + } + } + } else { + dev_warn(&pdev->dev, "find syscon ret = %#x\n", ret); + mdio_dev->subctrl_vbase = NULL; + } + + ret = of_mdiobus_register(new_bus, pdev->dev.of_node); + } else if (is_acpi_node(pdev->dev.fwnode)) { + /* Clear all the IRQ properties */ + memset(new_bus->irq, PHY_POLL, 4 * PHY_MAX_ADDR); + + /* Mask out all PHYs from auto probing. */ + new_bus->phy_mask = ~0; + + /* Register the MDIO bus */ + ret = mdiobus_register(new_bus); + } else { + dev_err(&pdev->dev, "Can not get cfg data from DT or ACPI\n"); + ret = -ENXIO; + } - ret = of_mdiobus_register(new_bus, np); if (ret) { dev_err(&pdev->dev, "Cannot register as MDIO bus!\n"); platform_set_drvdata(pdev, NULL); @@ -499,12 +564,19 @@ static const struct of_device_id hns_mdio_match[] = { {} }; +static const struct acpi_device_id hns_mdio_acpi_match[] = { + { "HISI0141", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, hns_mdio_acpi_match); + static struct platform_driver hns_mdio_driver = { .probe = hns_mdio_probe, .remove = hns_mdio_remove, .driver = { .name = MDIO_DRV_NAME, .of_match_table = hns_mdio_match, + .acpi_match_table = ACPI_PTR(hns_mdio_acpi_match), }, }; diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 4c9771d57..7af09cbc5 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -977,7 +977,37 @@ static void emac_set_multicast_list(struct net_device *ndev) dev->mcast_pending = 1; return; } + + mutex_lock(&dev->link_lock); __emac_set_multicast_list(dev); + mutex_unlock(&dev->link_lock); +} + +static int emac_set_mac_address(struct net_device *ndev, void *sa) +{ + struct emac_instance *dev = netdev_priv(ndev); + struct sockaddr *addr = sa; + struct emac_regs __iomem *p = dev->emacp; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + mutex_lock(&dev->link_lock); + + memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); + + emac_rx_disable(dev); + emac_tx_disable(dev); + out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]); + out_be32(&p->ialr, (ndev->dev_addr[2] << 24) | + (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) | 
+ ndev->dev_addr[5]); + emac_tx_enable(dev); + emac_rx_enable(dev); + + mutex_unlock(&dev->link_lock); + + return 0; } static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu) @@ -2686,7 +2716,7 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_do_ioctl = emac_ioctl, .ndo_tx_timeout = emac_tx_timeout, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = emac_set_mac_address, .ndo_start_xmit = emac_start_xmit, .ndo_change_mtu = eth_change_mtu, }; @@ -2699,7 +2729,7 @@ static const struct net_device_ops emac_gige_netdev_ops = { .ndo_do_ioctl = emac_ioctl, .ndo_tx_timeout = emac_tx_timeout, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = emac_set_mac_address, .ndo_start_xmit = emac_start_xmit_sg, .ndo_change_mtu = emac_change_mtu, }; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 714bd1014..c0e17433f 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -167,17 +167,6 @@ config IXGBE To compile this driver as a module, choose M here. The module will be called ixgbe. -config IXGBE_VXLAN - bool "Virtual eXtensible Local Area Network Support" - default n - depends on IXGBE && VXLAN && !(IXGBE=y && VXLAN=m) - ---help--- - This allows one to create VXLAN virtual interfaces that provide - Layer 2 Networks over Layer 3 Networks. VXLAN is often used - to tunnel virtual network infrastructure in virtualized environments. - Say Y here if you want to use Virtual eXtensible Local Area Network - (VXLAN) in the driver. - config IXGBE_HWMON bool "Intel(R) 10GbE PCI Express adapters HWMON support" default y @@ -236,27 +225,6 @@ config I40E To compile this driver as a module, choose M here. The module will be called i40e. -config I40E_VXLAN - bool "Virtual eXtensible Local Area Network Support" - default n - depends on I40E && VXLAN && !(I40E=y && VXLAN=m) - ---help--- - This allows one to create VXLAN virtual interfaces that provide - Layer 2 Networks over Layer 3 Networks. VXLAN is often used - to tunnel virtual network infrastructure in virtualized environments. - Say Y here if you want to use Virtual eXtensible Local Area Network - (VXLAN) in the driver. - -config I40E_GENEVE - bool "Generic Network Virtualization Encapsulation (GENEVE) Support" - depends on I40E && GENEVE && !(I40E=y && GENEVE=m) - default n - ---help--- - This allows one to create GENEVE virtual interfaces that provide - Layer 2 Networks over Layer 3 Networks. GENEVE is often used - to tunnel virtual network infrastructure in virtualized environments. - Say Y here if you want to use GENEVE in the driver. - config I40E_DCB bool "Data Center Bridging (DCB) Support" default n @@ -307,15 +275,4 @@ config FM10K To compile this driver as a module, choose M here. The module will be called fm10k. MSI-X interrupt support is required -config FM10K_VXLAN - bool "Virtual eXtensible Local Area Network Support" - default n - depends on FM10K && VXLAN && !(FM10K=y && VXLAN=m) - ---help--- - This allows one to create VXLAN virtual interfaces that provide - Layer 2 Networks over Layer 3 Networks. VXLAN is often used - to tunnel virtual network infrastructure in virtualized environments. - Say Y here if you want to use Virtual eXtensible Local Area Network - (VXLAN) in the driver. 
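For reference, the emac_set_mac_address() addition above programs the station address by quiescing RX/TX under link_lock and packing the six MAC bytes into the IAHR/IALR register pair with out_be32(). A standalone sketch of just the byte packing (hypothetical helper names, userspace for illustration):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a MAC address the way the emac hunk above does: bytes 0-1 go to
 * the "individual address high" register, bytes 2-5 to the low register,
 * most significant byte first.
 */
static void emac_pack_mac(const uint8_t mac[6], uint32_t *iahr, uint32_t *ialr)
{
	*iahr = ((uint32_t)mac[0] << 8) | mac[1];
	*ialr = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
		((uint32_t)mac[4] << 8) | mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };
	uint32_t hi, lo;

	emac_pack_mac(mac, &hi, &lo);
	printf("IAHR=0x%04" PRIx32 " IALR=0x%08" PRIx32 "\n", hi, lo);
	return 0;
}

The driver disables and re-enables RX/TX around the two register writes so a half-updated address is never live on the wire.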
- endif # NET_VENDOR_INTEL diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 7fd4d5459..6b03c8553 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -2032,7 +2032,8 @@ const struct e1000_info e1000_82574_info = { | FLAG2_DISABLE_ASPM_L0S | FLAG2_DISABLE_ASPM_L1 | FLAG2_NO_DISABLE_RX - | FLAG2_DMA_BURST, + | FLAG2_DMA_BURST + | FLAG2_CHECK_SYSTIM_OVERFLOW, .pba = 32, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_82571, @@ -2053,7 +2054,8 @@ const struct e1000_info e1000_82583_info = { | FLAG_HAS_CTRLEXT_ON_LOAD, .flags2 = FLAG2_DISABLE_ASPM_L0S | FLAG2_DISABLE_ASPM_L1 - | FLAG2_NO_DISABLE_RX, + | FLAG2_NO_DISABLE_RX + | FLAG2_CHECK_SYSTIM_OVERFLOW, .pba = 32, .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_82571, diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index ef96cd11d..879cca47b 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -452,6 +452,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); #define FLAG2_PCIM2PCI_ARBITER_WA BIT(11) #define FLAG2_DFLT_CRC_STRIPPING BIT(12) #define FLAG2_CHECK_RX_HWTSTAMP BIT(13) +#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14) #define E1000_RX_DESC_PS(R, i) \ (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 3e11322d8..f3aaca743 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -5885,7 +5885,8 @@ const struct e1000_info e1000_pch_lpt_info = { | FLAG_HAS_JUMBO_FRAMES | FLAG_APME_IN_WUC, .flags2 = FLAG2_HAS_PHY_STATS - | FLAG2_HAS_EEE, + | FLAG2_HAS_EEE + | FLAG2_CHECK_SYSTIM_OVERFLOW, .pba = 26, .max_hw_frame_size = 9022, .get_variants = e1000_get_variants_ich8lan, diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 2b2e2f8c6..7017281ba 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4303,6 +4303,42 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter) } /** + * e1000e_sanitize_systim - sanitize raw cycle counter reads + * @hw: pointer to the HW structure + * @systim: cycle_t value read, sanitized and returned + * + * Errata for 82574/82583 possible bad bits read from SYSTIMH/L: + * check to see that the time is incrementing at a reasonable + * rate and is a multiple of incvalue. + **/ +static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim) +{ + u64 time_delta, rem, temp; + cycle_t systim_next; + u32 incvalue; + int i; + + incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; + for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { + /* latch SYSTIMH on read of SYSTIML */ + systim_next = (cycle_t)er32(SYSTIML); + systim_next |= (cycle_t)er32(SYSTIMH) << 32; + + time_delta = systim_next - systim; + temp = time_delta; + /* VMWare users have seen incvalue of zero, don't div / 0 */ + rem = incvalue ? 
do_div(temp, incvalue) : (time_delta != 0); + + systim = systim_next; + + if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0)) + break; + } + + return systim; +} + +/** * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) * @cc: cyclecounter structure **/ @@ -4312,7 +4348,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) cc); struct e1000_hw *hw = &adapter->hw; u32 systimel, systimeh; - cycle_t systim, systim_next; + cycle_t systim; /* SYSTIMH latching upon SYSTIML read does not work well. * This means that if SYSTIML overflows after we read it but before * we read SYSTIMH, the value of SYSTIMH has been incremented and we @@ -4335,32 +4371,9 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) systim = (cycle_t)systimel; systim |= (cycle_t)systimeh << 32; - if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { - u64 time_delta, rem, temp; - u32 incvalue; - int i; - - /* errata for 82574/82583 possible bad bits read from SYSTIMH/L - * check to see that the time is incrementing at a reasonable - * rate and is a multiple of incvalue - */ - incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; - for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { - /* latch SYSTIMH on read of SYSTIML */ - systim_next = (cycle_t)er32(SYSTIML); - systim_next |= (cycle_t)er32(SYSTIMH) << 32; - - time_delta = systim_next - systim; - temp = time_delta; - rem = do_div(temp, incvalue); - - systim = systim_next; + if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW) + systim = e1000e_sanitize_systim(hw, systim); - if ((time_delta < E1000_82574_SYSTIM_EPSILON) && - (rem == 0)) - break; - } - } return systim; } @@ -7329,8 +7342,7 @@ err_flashmap: err_ioremap: free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@ -7397,8 +7409,7 @@ static void e1000_remove(struct pci_dev *pdev) if ((adapter->hw.flash_address) && (adapter->hw.mac.type < e1000_pch_spt)) iounmap(adapter->hw.flash_address); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); free_netdev(netdev); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index fcf106e54..c4cf08dcf 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -362,6 +362,7 @@ enum fm10k_state_t { __FM10K_SERVICE_DISABLE, __FM10K_MBX_LOCK, __FM10K_LINK_DOWN, + __FM10K_UPDATING_STATS, }; static inline void fm10k_mbx_lock(struct fm10k_intfc *interface) @@ -406,7 +407,7 @@ static inline u16 fm10k_desc_unused(struct fm10k_ring *ring) (&(((union fm10k_rx_desc *)((R)->desc))[i])) #define FM10K_MAX_TXD_PWR 14 -#define FM10K_MAX_DATA_PER_TXD BIT(FM10K_MAX_TXD_PWR) +#define FM10K_MAX_DATA_PER_TXD (1u << FM10K_MAX_TXD_PWR) /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD) @@ -457,6 +458,7 @@ __be16 fm10k_tx_encap_offload(struct sk_buff *skb); netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb, struct fm10k_ring *tx_ring); void fm10k_tx_timeout_reset(struct fm10k_intfc *interface); +u64 fm10k_get_tx_pending(struct fm10k_ring *ring); bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring); void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c 
b/drivers/net/ethernet/intel/fm10k/fm10k_common.c index 5bbf19cfe..d6baaea8b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c @@ -519,8 +519,12 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready) goto out; /* interface cannot receive traffic without logical ports */ - if (mac->dglort_map == FM10K_DGLORTMAP_NONE) + if (mac->dglort_map == FM10K_DGLORTMAP_NONE) { + if (hw->mac.ops.request_lport_map) + ret_val = hw->mac.ops.request_lport_map(hw); + goto out; + } /* if we passed all the tests above then the switch is ready and we no * longer need to check for link diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 9c0d87503..c04cbe9c9 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -76,6 +76,8 @@ static const struct fm10k_stats fm10k_gstrings_global_stats[] = { FM10K_STAT("mac_rules_used", hw.swapi.mac.used), FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail), + FM10K_STAT("reset_while_pending", hw.mac.reset_while_pending), + FM10K_STAT("tx_hang_count", tx_timeout_count), }; @@ -983,9 +985,10 @@ void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir) /* generate a new table if we weren't given one */ for (j = 0; j < 4; j++) { if (indir) - n = indir[i + j]; + n = indir[4 * i + j]; else - n = ethtool_rxfh_indir_default(i + j, rss_i); + n = ethtool_rxfh_indir_default(4 * i + j, + rss_i); table[j] = n; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 0e166e9c9..e9767b636 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -28,7 +28,7 @@ #include "fm10k.h" -#define DRV_VERSION "0.19.3-k" +#define DRV_VERSION "0.21.2-k" #define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver" const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; @@ -56,7 +56,7 @@ static int __init fm10k_init_module(void) pr_info("%s\n", fm10k_copyright); /* create driver workqueue */ - fm10k_workqueue = create_workqueue("fm10k"); + fm10k_workqueue = alloc_workqueue("fm10k", WQ_MEM_RECLAIM, 0); fm10k_dbg_init(); @@ -77,7 +77,6 @@ static void __exit fm10k_exit_module(void) fm10k_dbg_exit(); /* destroy driver workqueue */ - flush_workqueue(fm10k_workqueue); destroy_workqueue(fm10k_workqueue); } module_exit(fm10k_exit_module); @@ -272,7 +271,7 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer, #if (PAGE_SIZE < 8192) unsigned int truesize = FM10K_RX_BUFSZ; #else - unsigned int truesize = SKB_DATA_ALIGN(size); + unsigned int truesize = ALIGN(size, 512); #endif unsigned int pull_len; @@ -1129,11 +1128,13 @@ static u64 fm10k_get_tx_completed(struct fm10k_ring *ring) return ring->stats.packets; } -static u64 fm10k_get_tx_pending(struct fm10k_ring *ring) +u64 fm10k_get_tx_pending(struct fm10k_ring *ring) { - /* use SW head and tail until we have real hardware */ - u32 head = ring->next_to_clean; - u32 tail = ring->next_to_use; + struct fm10k_intfc *interface = ring->q_vector->interface; + struct fm10k_hw *hw = &interface->hw; + + u32 head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx)); + u32 tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx)); return ((head <= tail) ? 
tail : tail + ring->count) - head; } @@ -1857,7 +1858,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface) if (v_budget < 0) { kfree(interface->msix_entries); interface->msix_entries = NULL; - return -ENOMEM; + return v_budget; } /* record the number of queues available for q_vectors */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h index b7dbc8a84..35c1dbad1 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h @@ -41,6 +41,8 @@ struct fm10k_mbx_info; #define FM10K_MBX_ACK_INTERRUPT 0x00000010 #define FM10K_MBX_INTERRUPT_ENABLE 0x00000020 #define FM10K_MBX_INTERRUPT_DISABLE 0x00000040 +#define FM10K_MBX_GLOBAL_REQ_INTERRUPT 0x00000200 +#define FM10K_MBX_GLOBAL_ACK_INTERRUPT 0x00000400 #define FM10K_MBICR(_n) ((_n) + 0x18840) #define FM10K_GMBX 0x18842 diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 2a08d3f5b..20a5bbe3f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -20,9 +20,7 @@ #include "fm10k.h" #include <linux/vmalloc.h> -#ifdef CONFIG_FM10K_VXLAN -#include <net/vxlan.h> -#endif /* CONFIG_FM10K_VXLAN */ +#include <net/udp_tunnel.h> /** * fm10k_setup_tx_resources - allocate Tx resources (Descriptors) @@ -434,8 +432,7 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface) /** * fm10k_add_vxlan_port * @netdev: network interface device structure - * @sa_family: Address family of new port - * @port: port number used for VXLAN + * @ti: Tunnel endpoint information * * This function is called when a new VXLAN interface has added a new port * number to the range that is currently in use for VXLAN. The new port @@ -444,18 +441,21 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface) * is always used as the VXLAN port number for offloads. **/ static void fm10k_add_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) { + struct udp_tunnel_info *ti) +{ struct fm10k_intfc *interface = netdev_priv(dev); struct fm10k_vxlan_port *vxlan_port; + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; /* only the PF supports configuring tunnels */ if (interface->hw.mac.type != fm10k_mac_pf) return; /* existing ports are pulled out so our new entry is always last */ fm10k_vxlan_port_for_each(vxlan_port, interface) { - if ((vxlan_port->port == port) && - (vxlan_port->sa_family == sa_family)) { + if ((vxlan_port->port == ti->port) && + (vxlan_port->sa_family == ti->sa_family)) { list_del(&vxlan_port->list); goto insert_tail; } @@ -465,8 +465,8 @@ static void fm10k_add_vxlan_port(struct net_device *dev, vxlan_port = kmalloc(sizeof(*vxlan_port), GFP_ATOMIC); if (!vxlan_port) return; - vxlan_port->port = port; - vxlan_port->sa_family = sa_family; + vxlan_port->port = ti->port; + vxlan_port->sa_family = ti->sa_family; insert_tail: /* add new port value to list */ @@ -478,8 +478,7 @@ insert_tail: /** * fm10k_del_vxlan_port * @netdev: network interface device structure - * @sa_family: Address family of freed port - * @port: port number used for VXLAN + * @ti: Tunnel endpoint information * * This function is called when a new VXLAN interface has freed a port * number from the range that is currently in use for VXLAN. The freed @@ -487,17 +486,20 @@ insert_tail: * the port number for offloads. 
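The fm10k hunks above are part of the tree-wide move away from VXLAN-specific ndo_add_vxlan_port/ndo_del_vxlan_port callbacks (and the per-driver CONFIG_*_VXLAN options removed earlier in this series) to the generic .ndo_udp_tunnel_add/.ndo_udp_tunnel_del pair, which receives a struct udp_tunnel_info describing tunnel type, address family and port. A minimal sketch of the new callback shape (driver names hypothetical):

#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

/* Sketch of a .ndo_udp_tunnel_add implementation in the new style:
 * filter on tunnel type first, since the core now reports VXLAN and
 * GENEVE endpoints through the same callback.
 */
static void example_udp_tunnel_add(struct net_device *dev,
				   struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	/* ti->port is __be16; ti->sa_family is AF_INET or AF_INET6 */
	netdev_dbg(dev, "offload VXLAN dst port %u\n", be16_to_cpu(ti->port));
	/* ... program the port into the hardware parser here ... */
}

On open, a converted driver calls udp_tunnel_get_rx_info(netdev), as fm10k_open() does above, so the stack replays every currently known tunnel port through this callback.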
**/ static void fm10k_del_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) { + struct udp_tunnel_info *ti) +{ struct fm10k_intfc *interface = netdev_priv(dev); struct fm10k_vxlan_port *vxlan_port; + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; if (interface->hw.mac.type != fm10k_mac_pf) return; /* find the port in the list and free it */ fm10k_vxlan_port_for_each(vxlan_port, interface) { - if ((vxlan_port->port == port) && - (vxlan_port->sa_family == sa_family)) { + if ((vxlan_port->port == ti->port) && + (vxlan_port->sa_family == ti->sa_family)) { list_del(&vxlan_port->list); kfree(vxlan_port); break; @@ -553,10 +555,8 @@ int fm10k_open(struct net_device *netdev) if (err) goto err_set_queues; -#ifdef CONFIG_FM10K_VXLAN /* update VXLAN port configuration */ - vxlan_get_rx_port(netdev); -#endif + udp_tunnel_get_rx_info(netdev); fm10k_up(interface); @@ -1375,8 +1375,8 @@ static const struct net_device_ops fm10k_netdev_ops = { .ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan, .ndo_set_vf_rate = fm10k_ndo_set_vf_bw, .ndo_get_vf_config = fm10k_ndo_get_vf_config, - .ndo_add_vxlan_port = fm10k_add_vxlan_port, - .ndo_del_vxlan_port = fm10k_del_vxlan_port, + .ndo_udp_tunnel_add = fm10k_add_vxlan_port, + .ndo_udp_tunnel_del = fm10k_del_vxlan_port, .ndo_dfwd_add_station = fm10k_dfwd_add_station, .ndo_dfwd_del_station = fm10k_dfwd_del_station, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index e05aca9be..774a5654b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -123,11 +123,24 @@ static void fm10k_service_timer(unsigned long data) static void fm10k_detach_subtask(struct fm10k_intfc *interface) { struct net_device *netdev = interface->netdev; + u32 __iomem *hw_addr; + u32 value; /* do nothing if device is still present or hw_addr is set */ if (netif_device_present(netdev) || interface->hw.hw_addr) return; + /* check the real address space to see if we've recovered */ + hw_addr = READ_ONCE(interface->uc_addr); + value = readl(hw_addr); + if ((~value)) { + interface->hw.hw_addr = interface->uc_addr; + netif_device_attach(netdev); + interface->flags |= FM10K_FLAG_RESET_REQUESTED; + netdev_warn(netdev, "PCIe link restored, device now attached\n"); + return; + } + rtnl_lock(); if (netif_running(netdev)) @@ -136,11 +149,9 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface) rtnl_unlock(); } -static void fm10k_reinit(struct fm10k_intfc *interface) +static void fm10k_prepare_for_reset(struct fm10k_intfc *interface) { struct net_device *netdev = interface->netdev; - struct fm10k_hw *hw = &interface->hw; - int err; WARN_ON(in_interrupt()); @@ -165,6 +176,19 @@ static void fm10k_reinit(struct fm10k_intfc *interface) /* delay any future reset requests */ interface->last_reset = jiffies + (10 * HZ); + rtnl_unlock(); +} + +static int fm10k_handle_reset(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + struct fm10k_hw *hw = &interface->hw; + int err; + + rtnl_lock(); + + pci_set_master(interface->pdev); + /* reset and initialize the hardware so it is in a known state */ err = hw->mac.ops.reset_hw(hw); if (err) { @@ -185,7 +209,7 @@ static void fm10k_reinit(struct fm10k_intfc *interface) goto reinit_err; } - /* reassociate interrupts */ + /* re-associate interrupts */ err = fm10k_mbx_request_irq(interface); if (err) goto err_mbx_irq; @@ -219,7 +243,7 @@ static void fm10k_reinit(struct 
fm10k_intfc *interface) clear_bit(__FM10K_RESETTING, &interface->state); - return; + return err; err_open: fm10k_mbx_free_irq(interface); err_mbx_irq: @@ -230,6 +254,20 @@ reinit_err: rtnl_unlock(); clear_bit(__FM10K_RESETTING, &interface->state); + + return err; +} + +static void fm10k_reinit(struct fm10k_intfc *interface) +{ + int err; + + fm10k_prepare_for_reset(interface); + + err = fm10k_handle_reset(interface); + if (err) + dev_err(&interface->pdev->dev, + "fm10k_handle_reset failed: %d\n", err); } static void fm10k_reset_subtask(struct fm10k_intfc *interface) @@ -372,12 +410,19 @@ void fm10k_update_stats(struct fm10k_intfc *interface) u64 bytes, pkts; int i; + /* ensure only one thread updates stats at a time */ + if (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state)) + return; + /* do not allow stats update via service task for next second */ interface->next_stats_update = jiffies + HZ; /* gather some stats to the interface struct that are per queue */ for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) { - struct fm10k_ring *tx_ring = interface->tx_ring[i]; + struct fm10k_ring *tx_ring = READ_ONCE(interface->tx_ring[i]); + + if (!tx_ring) + continue; restart_queue += tx_ring->tx_stats.restart_queue; tx_busy += tx_ring->tx_stats.tx_busy; @@ -396,7 +441,10 @@ void fm10k_update_stats(struct fm10k_intfc *interface) /* gather some stats to the interface struct that are per queue */ for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) { - struct fm10k_ring *rx_ring = interface->rx_ring[i]; + struct fm10k_ring *rx_ring = READ_ONCE(interface->rx_ring[i]); + + if (!rx_ring) + continue; bytes += rx_ring->stats.bytes; pkts += rx_ring->stats.packets; @@ -443,6 +491,8 @@ void fm10k_update_stats(struct fm10k_intfc *interface) /* Fill out the OS statistics structure */ net_stats->rx_errors = rx_errors; net_stats->rx_dropped = interface->stats.nodesc_drop.count; + + clear_bit(__FM10K_UPDATING_STATS, &interface->state); } /** @@ -1566,6 +1616,9 @@ void fm10k_up(struct fm10k_intfc *interface) /* configure interrupts */ hw->mac.ops.update_int_moderator(hw); + /* enable statistics capture again */ + clear_bit(__FM10K_UPDATING_STATS, &interface->state); + /* clear down bit to indicate we are ready to go */ clear_bit(__FM10K_DOWN, &interface->state); @@ -1598,10 +1651,11 @@ void fm10k_down(struct fm10k_intfc *interface) { struct net_device *netdev = interface->netdev; struct fm10k_hw *hw = &interface->hw; - int err; + int err, i = 0, count = 0; /* signal that we are down to the interrupt handler and service task */ - set_bit(__FM10K_DOWN, &interface->state); + if (test_and_set_bit(__FM10K_DOWN, &interface->state)) + return; /* call carrier off first to avoid false dev_watchdog timeouts */ netif_carrier_off(netdev); @@ -1613,18 +1667,57 @@ void fm10k_down(struct fm10k_intfc *interface) /* reset Rx filters */ fm10k_reset_rx_state(interface); - /* allow 10ms for device to quiesce */ - usleep_range(10000, 20000); - /* disable polling routines */ fm10k_napi_disable_all(interface); /* capture stats one last time before stopping interface */ fm10k_update_stats(interface); + /* prevent updating statistics while we're down */ + while (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state)) + usleep_range(1000, 2000); + + /* skip waiting for TX DMA if we lost PCIe link */ + if (FM10K_REMOVED(hw->hw_addr)) + goto skip_tx_dma_drain; + + /* In some rare circumstances it can take a while for Tx queues to + * quiesce and be fully disabled. 
Attempt to .stop_hw() first, and + * then if we get ERR_REQUESTS_PENDING, go ahead and wait in a loop + * until the Tx queues have emptied, or until a number of retries. If + * we fail to clear within the retry loop, we will issue a warning + * indicating that Tx DMA is probably hung. Note this means we call + * .stop_hw() twice but this shouldn't cause any problems. + */ + err = hw->mac.ops.stop_hw(hw); + if (err != FM10K_ERR_REQUESTS_PENDING) + goto skip_tx_dma_drain; + +#define TX_DMA_DRAIN_RETRIES 25 + for (count = 0; count < TX_DMA_DRAIN_RETRIES; count++) { + usleep_range(10000, 20000); + + /* start checking at the last ring to have pending Tx */ + for (; i < interface->num_tx_queues; i++) + if (fm10k_get_tx_pending(interface->tx_ring[i])) + break; + + /* if all the queues are drained, we can break now */ + if (i == interface->num_tx_queues) + break; + } + + if (count >= TX_DMA_DRAIN_RETRIES) + dev_err(&interface->pdev->dev, + "Tx queues failed to drain after %d tries. Tx DMA is probably hung.\n", + count); +skip_tx_dma_drain: /* Disable DMA engine for Tx/Rx */ err = hw->mac.ops.stop_hw(hw); - if (err) + if (err == FM10K_ERR_REQUESTS_PENDING) + dev_err(&interface->pdev->dev, + "due to pending requests hw was not shut down gracefully\n"); + else if (err) dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err); /* free any buffers still on the rings */ @@ -1750,6 +1843,7 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, /* Start off interface as being down */ set_bit(__FM10K_DOWN, &interface->state); + set_bit(__FM10K_UPDATING_STATS, &interface->state); return 0; } @@ -1869,10 +1963,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_dma; } - err = pci_request_selected_regions(pdev, - pci_select_bars(pdev, - IORESOURCE_MEM), - fm10k_driver_name); + err = pci_request_mem_regions(pdev, fm10k_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_selected_regions failed: %d\n", err); @@ -1976,8 +2067,7 @@ err_sw_init: err_ioremap: free_netdev(netdev); err_alloc_netdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@ -2025,14 +2115,55 @@ static void fm10k_remove(struct pci_dev *pdev) free_netdev(netdev); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); } +static void fm10k_prepare_suspend(struct fm10k_intfc *interface) +{ + /* the watchdog task reads from registers, which might appear like + * a surprise remove if the PCIe device is disabled while we're + * stopped. We stop the watchdog task until after we resume software + * activity. 
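The Tx-drain loop added to fm10k_down() above is a bounded busy-wait: sleep, poll each ring's pending count (now read back from the real TDH/TDT registers with wrap-around handling, per fm10k_get_tx_pending() above), and give up with a warning once the retry budget is spent. The control flow, reduced to a self-contained userspace sketch (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NUM_RINGS     4
#define DRAIN_RETRIES 25

/* Simulated per-ring pending counters; each poll "drains" one
 * descriptor to stand in for hardware making progress.
 */
static unsigned int pending[NUM_RINGS] = { 3, 0, 2, 0 };

static unsigned int ring_tx_pending(unsigned int ring)
{
	return pending[ring] ? pending[ring]-- : 0;
}

/* Returns true once all rings drained, false if the retry budget ran out. */
static bool drain_tx_rings(void)
{
	unsigned int i = 0, count;

	for (count = 0; count < DRAIN_RETRIES; count++) {
		usleep(10000);	/* the driver uses usleep_range(10000, 20000) */

		/* resume at the last ring seen with pending Tx */
		for (; i < NUM_RINGS; i++)
			if (ring_tx_pending(i))
				break;

		if (i == NUM_RINGS)
			return true;
	}
	return false;
}

int main(void)
{
	printf("drained: %s\n", drain_tx_rings() ? "yes" : "no");
	return 0;
}

Keeping the ring index across retries, as the hunk's comment notes, avoids rescanning rings that have already emptied.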
+ */ + set_bit(__FM10K_SERVICE_DISABLE, &interface->state); + cancel_work_sync(&interface->service_task); + + fm10k_prepare_for_reset(interface); +} + +static int fm10k_handle_resume(struct fm10k_intfc *interface) +{ + struct fm10k_hw *hw = &interface->hw; + int err; + + /* reset statistics starting values */ + hw->mac.ops.rebind_hw_stats(hw, &interface->stats); + + err = fm10k_handle_reset(interface); + if (err) + return err; + + /* assume host is not ready, to prevent race with watchdog in case we + * actually don't have connection to the switch + */ + interface->host_ready = false; + fm10k_watchdog_host_not_ready(interface); + + /* force link to stay down for a second to prevent link flutter */ + interface->link_down_event = jiffies + (HZ); + set_bit(__FM10K_LINK_DOWN, &interface->state); + + /* clear the service task disable bit to allow service task to start */ + clear_bit(__FM10K_SERVICE_DISABLE, &interface->state); + fm10k_service_event_schedule(interface); + + return err; +} + #ifdef CONFIG_PM /** * fm10k_resume - Restore device to pre-sleep state @@ -2069,60 +2200,13 @@ static int fm10k_resume(struct pci_dev *pdev) /* refresh hw_addr in case it was dropped */ hw->hw_addr = interface->uc_addr; - /* reset hardware to known state */ - err = hw->mac.ops.init_hw(&interface->hw); - if (err) { - dev_err(&pdev->dev, "init_hw failed: %d\n", err); - return err; - } - - /* reset statistics starting values */ - hw->mac.ops.rebind_hw_stats(hw, &interface->stats); - - rtnl_lock(); - - err = fm10k_init_queueing_scheme(interface); - if (err) - goto err_queueing_scheme; - - err = fm10k_mbx_request_irq(interface); - if (err) - goto err_mbx_irq; - - err = fm10k_hw_ready(interface); + err = fm10k_handle_resume(interface); if (err) - goto err_open; - - err = netif_running(netdev) ? fm10k_open(netdev) : 0; - if (err) - goto err_open; - - rtnl_unlock(); - - /* assume host is not ready, to prevent race with watchdog in case we - * actually don't have connection to the switch - */ - interface->host_ready = false; - fm10k_watchdog_host_not_ready(interface); - - /* clear the service task disable bit to allow service task to start */ - clear_bit(__FM10K_SERVICE_DISABLE, &interface->state); - fm10k_service_event_schedule(interface); - - /* restore SR-IOV interface */ - fm10k_iov_resume(pdev); + return err; netif_device_attach(netdev); return 0; -err_open: - fm10k_mbx_free_irq(interface); -err_mbx_irq: - fm10k_clear_queueing_scheme(interface); -err_queueing_scheme: - rtnl_unlock(); - - return err; } /** @@ -2142,27 +2226,7 @@ static int fm10k_suspend(struct pci_dev *pdev, netif_device_detach(netdev); - fm10k_iov_suspend(pdev); - - /* the watchdog tasks may read registers, which will appear like a - * surprise-remove event once the PCI device is disabled. This will - * cause us to close the netdevice, so we don't retain the open/closed - * state post-resume. Prevent this by disabling the service task while - * suspended, until we actually resume. 
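The __FM10K_UPDATING_STATS bit introduced above is a lock-free mutual-exclusion idiom: test_and_set_bit() atomically sets the bit and returns its previous value, so fm10k_update_stats() simply bails out if another updater already owns it, while fm10k_down() spins until it owns the bit and then keeps it set to hold statistics off during teardown. The idiom in miniature (kernel-style sketch, not fm10k's actual code):

#include <linux/bitops.h>
#include <linux/delay.h>

#define EX_UPDATING_STATS 0

static unsigned long ex_state;

static void ex_update_stats(void)
{
	/* nonzero return means the bit was already set: skip this update */
	if (test_and_set_bit(EX_UPDATING_STATS, &ex_state))
		return;

	/* ... walk the rings and accumulate counters here ... */

	clear_bit(EX_UPDATING_STATS, &ex_state);
}

static void ex_down(void)
{
	/* claim the bit and deliberately keep it set, so statistics
	 * stay blocked until the up path clears it again
	 */
	while (test_and_set_bit(EX_UPDATING_STATS, &ex_state))
		usleep_range(1000, 2000);
}

Matching the hunks above, fm10k_up() clears the bit once the rings are valid again, and fm10k_sw_init() starts with it set so nothing reads the rings before they exist.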
- */ - set_bit(__FM10K_SERVICE_DISABLE, &interface->state); - cancel_work_sync(&interface->service_task); - - rtnl_lock(); - - if (netif_running(netdev)) - fm10k_close(netdev); - - fm10k_mbx_free_irq(interface); - - fm10k_clear_queueing_scheme(interface); - - rtnl_unlock(); + fm10k_prepare_suspend(interface); err = pci_save_state(pdev); if (err) @@ -2195,17 +2259,7 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev, if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; - rtnl_lock(); - - if (netif_running(netdev)) - fm10k_close(netdev); - - fm10k_mbx_free_irq(interface); - - /* free interrupts */ - fm10k_clear_queueing_scheme(interface); - - rtnl_unlock(); + fm10k_prepare_suspend(interface); /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; @@ -2219,7 +2273,6 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev, */ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev) { - struct fm10k_intfc *interface = pci_get_drvdata(pdev); pci_ers_result_t result; if (pci_enable_device_mem(pdev)) { @@ -2237,12 +2290,6 @@ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev) pci_wake_from_d3(pdev, false); - /* refresh hw_addr in case it was dropped */ - interface->hw.hw_addr = interface->uc_addr; - - interface->flags |= FM10K_FLAG_RESET_REQUESTED; - fm10k_service_event_schedule(interface); - result = PCI_ERS_RESULT_RECOVERED; } @@ -2262,50 +2309,54 @@ static void fm10k_io_resume(struct pci_dev *pdev) { struct fm10k_intfc *interface = pci_get_drvdata(pdev); struct net_device *netdev = interface->netdev; - struct fm10k_hw *hw = &interface->hw; - int err = 0; - - /* reset hardware to known state */ - err = hw->mac.ops.init_hw(&interface->hw); - if (err) { - dev_err(&pdev->dev, "init_hw failed: %d\n", err); - return; - } - - /* reset statistics starting values */ - hw->mac.ops.rebind_hw_stats(hw, &interface->stats); - - rtnl_lock(); + int err; - err = fm10k_init_queueing_scheme(interface); - if (err) { - dev_err(&interface->pdev->dev, - "init_queueing_scheme failed: %d\n", err); - goto unlock; - } + err = fm10k_handle_resume(interface); - /* reassociate interrupts */ - fm10k_mbx_request_irq(interface); + if (err) + dev_warn(&pdev->dev, + "fm10k_io_resume failed: %d\n", err); + else + netif_device_attach(netdev); +} - rtnl_lock(); - if (netif_running(netdev)) - err = fm10k_open(netdev); - rtnl_unlock(); +/** + * fm10k_io_reset_notify - called when PCI function is reset + * @pdev: Pointer to PCI device + * + * This callback is called when the PCI function is reset such as from + * /sys/class/net/<enpX>/device/reset or similar. When prepare is true, it + * means we should prepare for a function reset. If prepare is false, it means + * the function reset just occurred. + */ +static void fm10k_io_reset_notify(struct pci_dev *pdev, bool prepare) +{ + struct fm10k_intfc *interface = pci_get_drvdata(pdev); + int err = 0; - /* final check of hardware state before registering the interface */ - err = err ? 
: fm10k_hw_ready(interface); + if (prepare) { + /* warn in case we have any active VF devices */ + if (pci_num_vf(pdev)) + dev_warn(&pdev->dev, + "PCIe FLR may cause issues for any active VF devices\n"); - if (!err) - netif_device_attach(netdev); + fm10k_prepare_suspend(interface); + } else { + err = fm10k_handle_resume(interface); + } -unlock: - rtnl_unlock(); + if (err) { + dev_warn(&pdev->dev, + "fm10k_io_reset_notify failed: %d\n", err); + netif_device_detach(interface->netdev); + } } static const struct pci_error_handlers fm10k_err_handler = { .error_detected = fm10k_io_error_detected, .slot_reset = fm10k_io_slot_reset, .resume = fm10k_io_resume, + .reset_notify = fm10k_io_reset_notify, }; static struct pci_driver fm10k_driver = { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index dc75507c9..682299dd0 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -51,34 +51,37 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw) /* shut down all rings */ err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES); - if (err) + if (err == FM10K_ERR_REQUESTS_PENDING) { + hw->mac.reset_while_pending++; + goto force_reset; + } else if (err) { return err; + } /* Verify that DMA is no longer active */ reg = fm10k_read_reg(hw, FM10K_DMA_CTRL); if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE)) return FM10K_ERR_DMA_PENDING; - /* verify the switch is ready for reset */ - reg = fm10k_read_reg(hw, FM10K_DMA_CTRL2); - if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY)) - goto out; - +force_reset: /* Inititate data path reset */ - reg |= FM10K_DMA_CTRL_DATAPATH_RESET; + reg = FM10K_DMA_CTRL_DATAPATH_RESET; fm10k_write_reg(hw, FM10K_DMA_CTRL, reg); /* Flush write and allow 100us for reset to complete */ fm10k_write_flush(hw); udelay(FM10K_RESET_TIMEOUT); + /* Reset mailbox global interrupts */ + reg = FM10K_MBX_GLOBAL_REQ_INTERRUPT | FM10K_MBX_GLOBAL_ACK_INTERRUPT; + fm10k_write_reg(hw, FM10K_GMBX, reg); + /* Verify we made it out of reset */ reg = fm10k_read_reg(hw, FM10K_IP); if (!(reg & FM10K_IP_NOTINRESET)) - err = FM10K_ERR_RESET_FAILED; + return FM10K_ERR_RESET_FAILED; -out: - return err; + return 0; } /** @@ -1619,25 +1622,15 @@ static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw) **/ static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready) { - s32 ret_val = 0; u32 dma_ctrl2; /* verify the switch is ready for interaction */ dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2); if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY)) - goto out; + return 0; /* retrieve generic host state info */ - ret_val = fm10k_get_host_state_generic(hw, switch_ready); - if (ret_val) - goto out; - - /* interface cannot receive traffic without logical ports */ - if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE) - ret_val = fm10k_request_lport_map_pf(hw); - -out: - return ret_val; + return fm10k_get_host_state_generic(hw, switch_ready); } /* This structure defines the attibutes to be parsed below */ @@ -1813,6 +1806,7 @@ static const struct fm10k_mac_ops mac_ops_pf = { .set_dma_mask = fm10k_set_dma_mask_pf, .get_fault = fm10k_get_fault_pf, .get_host_state = fm10k_get_host_state_pf, + .request_lport_map = fm10k_request_lport_map_pf, }; static const struct fm10k_iov_ops iov_ops_pf = { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index b8bc06183..f4e75c498 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ 
b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -526,6 +526,7 @@ struct fm10k_mac_ops { s32 (*stop_hw)(struct fm10k_hw *); s32 (*get_bus_info)(struct fm10k_hw *); s32 (*get_host_state)(struct fm10k_hw *, bool *); + s32 (*request_lport_map)(struct fm10k_hw *); s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool); s32 (*read_mac_addr)(struct fm10k_hw *); s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *, @@ -562,6 +563,7 @@ struct fm10k_mac_info { bool tx_ready; u32 dglort_map; u8 itr_scale; + u64 reset_while_pending; }; struct fm10k_swapi_table_info { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index 3b06685ea..337ba65a9 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -34,7 +34,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) /* we need to disable the queues before taking further steps */ err = fm10k_stop_hw_generic(hw); - if (err) + if (err && err != FM10K_ERR_REQUESTS_PENDING) return err; /* If permanent address is set then we need to restore it */ @@ -67,7 +67,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) fm10k_write_reg(hw, FM10K_TDLEN(i), tdlen); } - return 0; + return err; } /** @@ -83,7 +83,9 @@ static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw) /* shut down queues we own and reset DMA configuration */ err = fm10k_stop_hw_vf(hw); - if (err) + if (err == FM10K_ERR_REQUESTS_PENDING) + hw->mac.reset_while_pending++; + else if (err) return err; /* Inititate VF reset */ @@ -96,9 +98,9 @@ static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw) /* Clear reset bit and verify it was cleared */ fm10k_write_reg(hw, FM10K_VFCTRL, 0); if (fm10k_read_reg(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST) - err = FM10K_ERR_RESET_FAILED; + return FM10K_ERR_RESET_FAILED; - return err; + return 0; } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 9c44739da..2a882916b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -283,6 +283,7 @@ struct i40e_pf { #endif /* I40E_FCOE */ u16 num_lan_qps; /* num lan queues this PF has set up */ u16 num_lan_msix; /* num queue vectors for the base PF vsi */ + u16 num_fdsb_msix; /* num queue vectors for sideband Fdir */ u16 num_iwarp_msix; /* num of iwarp vectors for this PF */ int iwarp_base_vector; int queues_left; /* queues left unclaimed */ @@ -447,6 +448,14 @@ struct i40e_pf { u16 phy_led_val; }; +enum i40e_filter_state { + I40E_FILTER_INVALID = 0, /* Invalid state */ + I40E_FILTER_NEW, /* New, not sent to FW yet */ + I40E_FILTER_ACTIVE, /* Added to switch by FW */ + I40E_FILTER_FAILED, /* Rejected by FW */ + I40E_FILTER_REMOVE, /* To be removed */ +/* There is no 'removed' state; the filter struct is freed */ +}; struct i40e_mac_filter { struct list_head list; u8 macaddr[ETH_ALEN]; @@ -455,8 +464,7 @@ struct i40e_mac_filter { u8 counter; /* number of instances of this filter */ bool is_vf; /* filter belongs to a VF */ bool is_netdev; /* filter belongs to a netdev */ - bool changed; /* filter needs to be sync'd to the HW */ - bool is_laa; /* filter is a Locally Administered Address */ + enum i40e_filter_state state; }; struct i40e_veb { @@ -522,6 +530,9 @@ struct i40e_vsi { struct i40e_ring **rx_rings; struct i40e_ring **tx_rings; + u32 active_filters; + u32 promisc_threshold; + u16 work_limit; u16 int_rate_limit; /* value in usecs */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c 
index 0e6ac8413..618f18436 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -199,6 +199,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi) { struct i40e_client_instance *cdev; + int ret = 0; if (!vsi) return; @@ -211,7 +212,14 @@ void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi) "Cannot locate client instance open routine\n"); continue; } - cdev->client->ops->open(&cdev->lan_info, cdev->client); + if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cdev->state))) { + ret = cdev->client->ops->open(&cdev->lan_info, + cdev->client); + if (!ret) + set_bit(__I40E_CLIENT_INSTANCE_OPENED, + &cdev->state); + } } } mutex_unlock(&i40e_client_instance_mutex); @@ -407,12 +415,14 @@ struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, * i40e_client_add_instance - add a client instance struct to the instance list * @pf: pointer to the board struct * @client: pointer to a client struct in the client list. + * @existing: if there was already an existing instance * - * Returns cdev ptr on success, NULL on failure + * Returns cdev ptr on success (including when the instance already exists), + * NULL on failure **/ static struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf, - struct i40e_client *client) + struct i40e_client *client, + bool *existing) { struct i40e_client_instance *cdev; struct netdev_hw_addr *mac = NULL; @@ -421,7 +431,7 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf, mutex_lock(&i40e_client_instance_mutex); list_for_each_entry(cdev, &i40e_client_instances, list) { if ((cdev->lan_info.pf == pf) && (cdev->client == client)) { - cdev = NULL; + *existing = true; goto out; } } @@ -505,6 +515,7 @@ void i40e_client_subtask(struct i40e_pf *pf) { struct i40e_client_instance *cdev; struct i40e_client *client; + bool existing = false; int ret = 0; if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED)) @@ -528,18 +539,25 @@ void i40e_client_subtask(struct i40e_pf *pf) /* check if L2 VSI is up, if not we are not ready */ if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state)) continue; + } else { + dev_warn(&pf->pdev->dev, "This client %s is being instantiated at probe\n", + client->name); } /* Add the client instance to the instance list */ - cdev = i40e_client_add_instance(pf, client); + cdev = i40e_client_add_instance(pf, client, &existing); if (!cdev) continue; - /* Also up the ref_cnt of no. of instances of this client */ - atomic_inc(&client->ref_cnt); - dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n", - client->name, pf->hw.pf_id, - pf->hw.bus.device, pf->hw.bus.func); + if (!existing) { + /* Also up the ref_cnt for no. of instances of this + * client. + */ + atomic_inc(&client->ref_cnt); + dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n", + client->name, pf->hw.pf_id, + pf->hw.bus.device, pf->hw.bus.func); + } /* Send an Open request to the client */ atomic_inc(&cdev->ref_cnt); @@ -588,7 +606,8 @@ int i40e_lan_add_device(struct i40e_pf *pf) pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func); /* Since in some cases register may have happened before a device gets - * added, we can schedule a subtask to go initiate the clients. + * added, we can schedule a subtask to go initiate the clients if + * they can be launched at probe time.
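The guard added to i40e_notify_client_of_netdev_open() above makes the open notification idempotent: ->open() runs only for instances not yet opened, and the OPENED bit is recorded only on success so a failed open can be retried later. A minimal sketch of that shape, with hypothetical names:

#include <linux/bitops.h>

#define EX_INSTANCE_OPENED	0	/* hypothetical state bit */

struct ex_instance {
	unsigned long state;
	int (*open)(struct ex_instance *inst);
};

static int ex_open_once(struct ex_instance *inst)
{
	int err;

	/* Instances already opened successfully are skipped. */
	if (test_bit(EX_INSTANCE_OPENED, &inst->state))
		return 0;

	err = inst->open(inst);
	if (!err)
		set_bit(EX_INSTANCE_OPENED, &inst->state);
	return err;
}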
*/ pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED; i40e_service_event_schedule(pf); @@ -980,13 +999,13 @@ int i40e_unregister_client(struct i40e_client *client) * a close for each of the client instances that were opened. * client_release function is called to handle this. */ + mutex_lock(&i40e_client_mutex); if (!client || i40e_client_release(client)) { ret = -EIO; goto out; } /* TODO: check if device is in reset, or if that matters? */ - mutex_lock(&i40e_client_mutex); if (!i40e_client_is_registered(client)) { pr_info("i40e: Client %s has not been registered\n", client->name); @@ -1005,8 +1024,8 @@ int i40e_unregister_client(struct i40e_client *client) client->name); } - mutex_unlock(&i40e_client_mutex); out: + mutex_unlock(&i40e_client_mutex); return ret; } EXPORT_SYMBOL(i40e_unregister_client); diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 422b41d61..2154a34c1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -61,7 +61,6 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_SFP_I_X722: - case I40E_DEV_ID_QSFP_I_X722: hw->mac.type = I40E_MAC_X722; break; default: @@ -297,13 +296,15 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, void *buffer, u16 buf_len) { struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc; - u16 len = le16_to_cpu(aq_desc->datalen); + u16 len; u8 *buf = (u8 *)buffer; u16 i = 0; if ((!(mask & hw->debug_mask)) || (desc == NULL)) return; + len = le16_to_cpu(aq_desc->datalen); + i40e_debug(hw, mask, "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", le16_to_cpu(aq_desc->opcode), @@ -1967,6 +1968,62 @@ aq_add_vsi_exit: } /** + * i40e_aq_set_default_vsi + * @hw: pointer to the hw struct + * @seid: vsi number + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, + u16 seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *) + &desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); + cmd->seid = cpu_to_le16(seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_clear_default_vsi + * @hw: pointer to the hw struct + * @seid: vsi number + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, + u16 seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *) + &desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + cmd->promiscuous_flags = cpu_to_le16(0); + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); + cmd->seid = cpu_to_le16(seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** * i40e_aq_set_vsi_unicast_promiscuous * @hw: pointer to the hw struct * @seid: vsi number diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c 
b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index e6af8c8d7..05cf9a719 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -116,6 +116,14 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer, return len; } +static char *i40e_filter_state_string[] = { + "INVALID", + "NEW", + "ACTIVE", + "FAILED", + "REMOVE", +}; + /** * i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum * @pf: the i40e_pf created in command write @@ -160,10 +168,14 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) pf->hw.mac.port_addr); list_for_each_entry(f, &vsi->mac_filter_list, list) { dev_info(&pf->pdev->dev, - " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n", + " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d, state %s\n", f->macaddr, f->vlan, f->is_netdev, f->is_vf, - f->counter); + f->counter, i40e_filter_state_string[f->state]); } + dev_info(&pf->pdev->dev, " active_filters %d, promisc_threshold %d, overflow promisc %s\n", + vsi->active_filters, vsi->promisc_threshold, + (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) ? + "ON" : "OFF")); nstat = i40e_get_vsi_stats_struct(vsi); dev_info(&pf->pdev->dev, " net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index d701861c6..dd4457d29 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -45,7 +45,6 @@ #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 -#define I40E_DEV_ID_QSFP_I_X722 0x37D4 #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ (d) == I40E_DEV_ID_QSFP_B || \ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 5e8d84ff7..c912e041d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -272,15 +272,16 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, u32 *advertising) { enum i40e_aq_capabilities_phy_type phy_types = pf->hw.phy.phy_types; - + struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info; *supported = 0x0; *advertising = 0x0; if (phy_types & I40E_CAP_PHY_TYPE_SGMII) { *supported |= SUPPORTED_Autoneg | SUPPORTED_1000baseT_Full; - *advertising |= ADVERTISED_Autoneg | - ADVERTISED_1000baseT_Full; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + *advertising |= ADVERTISED_1000baseT_Full; if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { *supported |= SUPPORTED_100baseT_Full; *advertising |= ADVERTISED_100baseT_Full; @@ -299,8 +300,9 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) { *supported |= SUPPORTED_Autoneg | SUPPORTED_10000baseT_Full; - *advertising |= ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + *advertising |= ADVERTISED_10000baseT_Full; } if (phy_types & I40E_CAP_PHY_TYPE_XLAUI || phy_types & I40E_CAP_PHY_TYPE_XLPPI || @@ -310,15 +312,16 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) { *supported |= SUPPORTED_Autoneg | SUPPORTED_40000baseCR4_Full; - 
*advertising |= ADVERTISED_Autoneg | - ADVERTISED_40000baseCR4_Full; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB) + *advertising |= ADVERTISED_40000baseCR4_Full; } - if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) && - !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) { + if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) { *supported |= SUPPORTED_Autoneg | SUPPORTED_100baseT_Full; - *advertising |= ADVERTISED_Autoneg | - ADVERTISED_100baseT_Full; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + *advertising |= ADVERTISED_100baseT_Full; } if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T || phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX || @@ -326,8 +329,9 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) { *supported |= SUPPORTED_Autoneg | SUPPORTED_1000baseT_Full; - *advertising |= ADVERTISED_Autoneg | - ADVERTISED_1000baseT_Full; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + *advertising |= ADVERTISED_1000baseT_Full; } if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) *supported |= SUPPORTED_40000baseSR4_Full; @@ -342,26 +346,30 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) { *supported |= SUPPORTED_20000baseKR2_Full | SUPPORTED_Autoneg; - *advertising |= ADVERTISED_20000baseKR2_Full | - ADVERTISED_Autoneg; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB) + *advertising |= ADVERTISED_20000baseKR2_Full; } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) { *supported |= SUPPORTED_10000baseKR_Full | SUPPORTED_Autoneg; - *advertising |= ADVERTISED_10000baseKR_Full | - ADVERTISED_Autoneg; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + *advertising |= ADVERTISED_10000baseKR_Full; } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) { *supported |= SUPPORTED_10000baseKX4_Full | SUPPORTED_Autoneg; - *advertising |= ADVERTISED_10000baseKX4_Full | - ADVERTISED_Autoneg; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + *advertising |= ADVERTISED_10000baseKX4_Full; } if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) { *supported |= SUPPORTED_1000baseKX_Full | SUPPORTED_Autoneg; - *advertising |= ADVERTISED_1000baseKX_Full | - ADVERTISED_Autoneg; + *advertising |= ADVERTISED_Autoneg; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + *advertising |= ADVERTISED_1000baseKX_Full; } } @@ -453,6 +461,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, case I40E_PHY_TYPE_10GBASE_SFPP_CU: case I40E_PHY_TYPE_10GBASE_AOC: ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->advertising = SUPPORTED_10000baseT_Full; break; case I40E_PHY_TYPE_SGMII: ecmd->supported = SUPPORTED_Autoneg | @@ -663,6 +672,7 @@ static int i40e_set_settings(struct net_device *netdev, if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && hw->phy.media_type != I40E_MEDIA_TYPE_FIBER && hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE && + hw->phy.media_type != I40E_MEDIA_TYPE_DA && hw->phy.link_info.link_info & I40E_AQ_LINK_UP) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 501f15d9f..d0b3a1bb8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -31,12 
+31,7 @@ /* Local includes */ #include "i40e.h" #include "i40e_diag.h" -#if IS_ENABLED(CONFIG_VXLAN) -#include <net/vxlan.h> -#endif -#if IS_ENABLED(CONFIG_GENEVE) -#include <net/geneve.h> -#endif +#include <net/udp_tunnel.h> const char i40e_driver_name[] = "i40e"; static const char i40e_driver_string[] = @@ -45,8 +40,8 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 5 -#define DRV_VERSION_BUILD 16 +#define DRV_VERSION_MINOR 6 +#define DRV_VERSION_BUILD 11 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -91,7 +86,6 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0}, - {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_I_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0}, /* required last entry */ @@ -1280,8 +1274,9 @@ int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr, (is_vf == f->is_vf) && (is_netdev == f->is_netdev)) { f->counter--; - f->changed = true; changed = 1; + if (f->counter == 0) + f->state = I40E_FILTER_REMOVE; } } if (changed) { @@ -1297,29 +1292,32 @@ int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr, * @vsi: the PF Main VSI - inappropriate for any other VSI * @macaddr: the MAC address * - * Some older firmware configurations set up a default promiscuous VLAN - * filter that needs to be removed. + * Remove whatever filter the firmware set up so the driver can manage + * its own filtering intelligently. **/ -static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) +static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr) { struct i40e_aqc_remove_macvlan_element_data element; struct i40e_pf *pf = vsi->back; - i40e_status ret; /* Only appropriate for the PF main VSI */ if (vsi->type != I40E_VSI_MAIN) - return -EINVAL; + return; memset(&element, 0, sizeof(element)); ether_addr_copy(element.mac_addr, macaddr); element.vlan_tag = 0; - element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | - I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; - ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); - if (ret) - return -ENOENT; + /* Ignore error returns, some firmware does it this way... */ + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; + i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); - return 0; + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, macaddr); + element.vlan_tag = 0; + /* ...and some firmware does it this way. */ + element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH | + I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; + i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); } /** @@ -1340,6 +1338,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, bool is_vf, bool is_netdev) { struct i40e_mac_filter *f; + int changed = false; if (!vsi || !macaddr) return NULL; @@ -1359,8 +1358,15 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, ether_addr_copy(f->macaddr, macaddr); f->vlan = vlan; - f->changed = true; - + /* If we're in overflow promisc mode, set the state directly + * to failed, so we don't bother to try sending the filter + * to the hardware. 
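This replaces the old boolean `changed` flag with an explicit per-filter life cycle, which the rest of the series keys off. A condensed sketch of the states and the one transition firmware decides (names shortened here; the real enum is i40e_filter_state in i40e.h):

enum ex_filter_state {
	EX_FILTER_INVALID = 0,	/* invalid state */
	EX_FILTER_NEW,		/* not yet pushed to firmware */
	EX_FILTER_ACTIVE,	/* accepted by firmware */
	EX_FILTER_FAILED,	/* rejected by firmware */
	EX_FILTER_REMOVE,	/* queued for deletion */
	/* no "removed" state: a deleted filter is simply freed */
};

struct ex_filter {
	enum ex_filter_state state;
};

/* On a firmware reply, NEW becomes either ACTIVE or FAILED. */
static void ex_filter_fw_reply(struct ex_filter *f, bool accepted)
{
	if (f->state == EX_FILTER_NEW)
		f->state = accepted ? EX_FILTER_ACTIVE : EX_FILTER_FAILED;
}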
+ */ + if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)) + f->state = I40E_FILTER_FAILED; + else + f->state = I40E_FILTER_NEW; + changed = true; INIT_LIST_HEAD(&f->list); list_add_tail(&f->list, &vsi->mac_filter_list); } @@ -1380,10 +1386,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, f->counter++; } - /* changed tells sync_filters_subtask to - * push the filter down to the firmware - */ - if (f->changed) { + if (changed) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; vsi->back->flags |= I40E_FLAG_FILTER_SYNC; } @@ -1402,6 +1405,9 @@ add_filter_out: * * NOTE: This function is expected to be called with mac_filter_list_lock * being held. + * ANOTHER NOTE: This function MUST be called from within the context of + * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe() + * instead of list_for_each_entry(). **/ void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan, @@ -1441,9 +1447,18 @@ void i40e_del_filter(struct i40e_vsi *vsi, * remove the filter from the firmware's list */ if (f->counter == 0) { - f->changed = true; - vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; - vsi->back->flags |= I40E_FLAG_FILTER_SYNC; + if ((f->state == I40E_FILTER_FAILED) || + (f->state == I40E_FILTER_NEW)) { + /* this one never got added by the FW. Just remove it, + * no need to sync anything. + */ + list_del(&f->list); + kfree(f); + } else { + f->state = I40E_FILTER_REMOVE; + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + vsi->back->flags |= I40E_FLAG_FILTER_SYNC; + } } } @@ -1465,7 +1480,6 @@ static int i40e_set_mac(struct net_device *netdev, void *p) struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct sockaddr *addr = p; - struct i40e_mac_filter *f; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; @@ -1486,52 +1500,23 @@ static int i40e_set_mac(struct net_device *netdev, void *p) else netdev_info(netdev, "set new mac address %pM\n", addr->sa_data); + spin_lock_bh(&vsi->mac_filter_list_lock); + i40e_del_mac_all_vlan(vsi, netdev->dev_addr, false, true); + i40e_put_mac_in_vlan(vsi, addr->sa_data, false, true); + spin_unlock_bh(&vsi->mac_filter_list_lock); + ether_addr_copy(netdev->dev_addr, addr->sa_data); if (vsi->type == I40E_VSI_MAIN) { i40e_status ret; ret = i40e_aq_mac_address_write(&vsi->back->hw, I40E_AQC_WRITE_TYPE_LAA_WOL, addr->sa_data, NULL); - if (ret) { - netdev_info(netdev, - "Addr change for Main VSI failed: %d\n", - ret); - return -EADDRNOTAVAIL; - } - } - - if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) { - struct i40e_aqc_remove_macvlan_element_data element; - - memset(&element, 0, sizeof(element)); - ether_addr_copy(element.mac_addr, netdev->dev_addr); - element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; - i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); - } else { - spin_lock_bh(&vsi->mac_filter_list_lock); - i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, - false, false); - spin_unlock_bh(&vsi->mac_filter_list_lock); - } - - if (ether_addr_equal(addr->sa_data, hw->mac.addr)) { - struct i40e_aqc_add_macvlan_element_data element; - - memset(&element, 0, sizeof(element)); - ether_addr_copy(element.mac_addr, hw->mac.addr); - element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); - i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); - } else { - spin_lock_bh(&vsi->mac_filter_list_lock); - f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, - false, false); - if (f) - f->is_laa = true; - spin_unlock_bh(&vsi->mac_filter_list_lock); + if (ret) + netdev_info(netdev, 
"Ignoring error from firmware on LAA update, status %s, AQ ret %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); } - ether_addr_copy(netdev->dev_addr, addr->sa_data); - /* schedule our worker thread which will take care of * applying the new filter changes */ @@ -1591,14 +1576,8 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, vsi->tc_config.numtc = numtc; vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; /* Number of queues per enabled TC */ - /* In MFP case we can have a much lower count of MSIx - * vectors available and so we need to lower the used - * q count. - */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) - qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); - else - qcount = vsi->alloc_queue_pairs; + qcount = vsi->alloc_queue_pairs; + num_tc_qps = qcount / numtc; num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf)); @@ -1768,28 +1747,6 @@ bottom_of_search_loop: } /** - * i40e_mac_filter_entry_clone - Clones a MAC filter entry - * @src: source MAC filter entry to be clones - * - * Returns the pointer to newly cloned MAC filter entry or NULL - * in case of error - **/ -static struct i40e_mac_filter *i40e_mac_filter_entry_clone( - struct i40e_mac_filter *src) -{ - struct i40e_mac_filter *f; - - f = kzalloc(sizeof(*f), GFP_ATOMIC); - if (!f) - return NULL; - *f = *src; - - INIT_LIST_HEAD(&f->list); - - return f; -} - -/** * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries * @vsi: pointer to vsi struct * @from: Pointer to list which contains MAC filter entries - changes to @@ -1803,41 +1760,61 @@ static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi, struct i40e_mac_filter *f, *ftmp; list_for_each_entry_safe(f, ftmp, from, list) { - f->changed = true; /* Move the element back into MAC filter list*/ list_move_tail(&f->list, &vsi->mac_filter_list); } } /** - * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries - * @vsi: pointer to vsi struct + * i40e_update_filter_state - Update filter state based on return data + * from firmware + * @count: Number of filters added + * @add_list: return data from fw + * @head: pointer to first filter in current batch + * @aq_err: status from fw * - * MAC filter entries from list were slated to be added from device. + * MAC filter entries from list were slated to be added to device. Returns + * number of successful filters. Note that 0 does NOT mean success! **/ -static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi) +static int +i40e_update_filter_state(int count, + struct i40e_aqc_add_macvlan_element_data *add_list, + struct i40e_mac_filter *add_head, int aq_err) { - struct i40e_mac_filter *f, *ftmp; - - list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { - if (!f->changed && f->counter) - f->changed = true; - } -} + int retval = 0; + int i; -/** - * i40e_cleanup_add_list - Deletes the element from add list and release - * memory - * @add_list: Pointer to list which contains MAC filter entries - **/ -static void i40e_cleanup_add_list(struct list_head *add_list) -{ - struct i40e_mac_filter *f, *ftmp; - list_for_each_entry_safe(f, ftmp, add_list, list) { - list_del(&f->list); - kfree(f); + if (!aq_err) { + retval = count; + /* Everything's good, mark all filters active. */ + for (i = 0; i < count ; i++) { + add_head->state = I40E_FILTER_ACTIVE; + add_head = list_next_entry(add_head, list); + } + } else if (aq_err == I40E_AQ_RC_ENOSPC) { + /* Device ran out of filter space. 
Check the return value + * for each filter to see which ones are active. + */ + for (i = 0; i < count ; i++) { + if (add_list[i].match_method == + I40E_AQC_MM_ERR_NO_RES) { + add_head->state = I40E_FILTER_FAILED; + } else { + add_head->state = I40E_FILTER_ACTIVE; + retval++; + } + add_head = list_next_entry(add_head, list); + } + } else { + /* Some other horrible thing happened, fail all filters */ + retval = 0; + for (i = 0; i < count ; i++) { + add_head->state = I40E_FILTER_FAILED; + add_head = list_next_entry(add_head, list); + } } + return retval; } /** @@ -1850,20 +1827,22 @@ static void i40e_cleanup_add_list(struct list_head *add_list) **/ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) { - struct list_head tmp_del_list, tmp_add_list; - struct i40e_mac_filter *f, *ftmp, *fclone; - bool promisc_forced_on = false; - bool add_happened = false; + struct i40e_mac_filter *f, *ftmp, *add_head = NULL; + struct list_head tmp_add_list, tmp_del_list; + struct i40e_hw *hw = &vsi->back->hw; + bool promisc_changed = false; + char vsi_name[16] = "PF"; int filter_list_len = 0; u32 changed_flags = 0; i40e_status aq_ret = 0; - bool err_cond = false; int retval = 0; struct i40e_pf *pf; int num_add = 0; int num_del = 0; int aq_err = 0; u16 cmd_flags; + int list_size; + int fcnt; /* empty array typed pointers, kcalloc later */ struct i40e_aqc_add_macvlan_element_data *add_list; @@ -1878,72 +1857,46 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) vsi->current_netdev_flags = vsi->netdev->flags; } - INIT_LIST_HEAD(&tmp_del_list); INIT_LIST_HEAD(&tmp_add_list); + INIT_LIST_HEAD(&tmp_del_list); + + if (vsi->type == I40E_VSI_SRIOV) + snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id); + else if (vsi->type != I40E_VSI_MAIN) + snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid); if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) { vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED; spin_lock_bh(&vsi->mac_filter_list_lock); + /* Create a list of filters to delete. 
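The loop below is the classic splice-out pattern: entries are moved onto private lists while the spinlock is held, and the slow admin-queue traffic then runs with the lock dropped. A minimal sketch under hypothetical types:

#include <linux/list.h>
#include <linux/spinlock.h>

#define EX_FILTER_REMOVE	4	/* hypothetical state value */

struct ex_filter {
	struct list_head list;
	int state;
};

struct ex_vsi {
	spinlock_t lock;
	struct list_head filters;
};

static void ex_collect_removals(struct ex_vsi *vsi, struct list_head *tmp)
{
	struct ex_filter *f, *next;

	/* Move doomed entries to the caller's list under the lock;
	 * firmware calls happen later, lock-free.
	 */
	spin_lock_bh(&vsi->lock);
	list_for_each_entry_safe(f, next, &vsi->filters, list) {
		if (f->state == EX_FILTER_REMOVE)
			list_move_tail(&f->list, tmp);
	}
	spin_unlock_bh(&vsi->lock);
}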
*/ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { - if (!f->changed) - continue; - - if (f->counter != 0) - continue; - f->changed = false; - - /* Move the element into temporary del_list */ - list_move_tail(&f->list, &tmp_del_list); - } - - list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { - if (!f->changed) - continue; - - if (f->counter == 0) - continue; - f->changed = false; - - /* Clone MAC filter entry and add into temporary list */ - fclone = i40e_mac_filter_entry_clone(f); - if (!fclone) { - err_cond = true; - break; + if (f->state == I40E_FILTER_REMOVE) { + WARN_ON(f->counter != 0); + /* Move the element into temporary del_list */ + list_move_tail(&f->list, &tmp_del_list); + vsi->active_filters--; + } + if (f->state == I40E_FILTER_NEW) { + WARN_ON(f->counter == 0); + /* Move the element into temporary add_list */ + list_move_tail(&f->list, &tmp_add_list); } - list_add_tail(&fclone->list, &tmp_add_list); - } - - /* if failed to clone MAC filter entry - undo */ - if (err_cond) { - i40e_undo_del_filter_entries(vsi, &tmp_del_list); - i40e_undo_add_filter_entries(vsi); } spin_unlock_bh(&vsi->mac_filter_list_lock); - - if (err_cond) { - i40e_cleanup_add_list(&tmp_add_list); - retval = -ENOMEM; - goto out; - } } /* Now process 'del_list' outside the lock */ if (!list_empty(&tmp_del_list)) { - int del_list_size; - - filter_list_len = pf->hw.aq.asq_buf_size / + filter_list_len = hw->aq.asq_buf_size / sizeof(struct i40e_aqc_remove_macvlan_element_data); - del_list_size = filter_list_len * + list_size = filter_list_len * sizeof(struct i40e_aqc_remove_macvlan_element_data); - del_list = kzalloc(del_list_size, GFP_ATOMIC); + del_list = kzalloc(list_size, GFP_ATOMIC); if (!del_list) { - i40e_cleanup_add_list(&tmp_add_list); - /* Undo VSI's MAC filter entry element updates */ spin_lock_bh(&vsi->mac_filter_list_lock); i40e_undo_del_filter_entries(vsi, &tmp_del_list); - i40e_undo_add_filter_entries(vsi); spin_unlock_bh(&vsi->mac_filter_list_lock); retval = -ENOMEM; goto out; @@ -1954,9 +1907,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) /* add to delete list */ ether_addr_copy(del_list[num_del].mac_addr, f->macaddr); - del_list[num_del].vlan_tag = - cpu_to_le16((u16)(f->vlan == - I40E_VLAN_ANY ? 0 : f->vlan)); + if (f->vlan == I40E_VLAN_ANY) { + del_list[num_del].vlan_tag = 0; + cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; + } else { + del_list[num_del].vlan_tag = + cpu_to_le16((u16)(f->vlan)); + } cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; del_list[num_del].flags = cmd_flags; @@ -1964,21 +1921,23 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) /* flush a full buffer */ if (num_del == filter_list_len) { - aq_ret = i40e_aq_remove_macvlan(&pf->hw, - vsi->seid, + aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list, - num_del, - NULL); - aq_err = pf->hw.aq.asq_last_status; + num_del, NULL); + aq_err = hw->aq.asq_last_status; num_del = 0; - memset(del_list, 0, del_list_size); + memset(del_list, 0, list_size); - if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) { + /* Explicitly ignore and do not report when + * firmware returns ENOENT. 
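Deletes are batched into an array sized from the admin queue buffer and flushed whenever it fills, and ENOENT from a delete only means the filter was already gone, so it is deliberately not counted as a failure. Roughly, with hypothetical names and a stubbed-out firmware call:

#include <linux/string.h>
#include <linux/errno.h>

#define EX_BATCH 32	/* hypothetical; really asq_buf_size / elem size */

struct ex_elem { unsigned char mac[6]; unsigned short vlan; };

static int ex_fw_remove(struct ex_elem *buf, int n)
{
	return 0;	/* stand-in for the real admin-queue call */
}

static int ex_push_removal(struct ex_elem *buf, int *num,
			   const struct ex_elem *e)
{
	int err;

	buf[(*num)++] = *e;
	if (*num < EX_BATCH)
		return 0;

	/* flush a full buffer, then reset it for the next batch */
	err = ex_fw_remove(buf, *num);
	memset(buf, 0, sizeof(buf[0]) * EX_BATCH);
	*num = 0;

	/* a filter that is already gone is not an error */
	return (err == -ENOENT) ? 0 : err;
}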
+ */ + if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) { retval = -EIO; - dev_err(&pf->pdev->dev, - "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); + dev_info(&pf->pdev->dev, + "ignoring delete macvlan error on %s, err %s, aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, aq_err)); } } /* Release memory for MAC filter entries which were @@ -1989,17 +1948,22 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) } if (num_del) { - aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, - del_list, num_del, - NULL); - aq_err = pf->hw.aq.asq_last_status; + aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list, + num_del, NULL); + aq_err = hw->aq.asq_last_status; num_del = 0; - if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) + /* Explicitly ignore and do not report when firmware + * returns ENOENT. + */ + if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) { + retval = -EIO; dev_info(&pf->pdev->dev, - "ignoring delete macvlan error, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); + "ignoring delete macvlan error on %s, err %s aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, aq_err)); + } } kfree(del_list); @@ -2007,84 +1971,117 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) } if (!list_empty(&tmp_add_list)) { - int add_list_size; - - /* do all the adds now */ - filter_list_len = pf->hw.aq.asq_buf_size / - sizeof(struct i40e_aqc_add_macvlan_element_data), - add_list_size = filter_list_len * + /* Do all the adds now. */ + filter_list_len = hw->aq.asq_buf_size / + sizeof(struct i40e_aqc_add_macvlan_element_data); + list_size = filter_list_len * sizeof(struct i40e_aqc_add_macvlan_element_data); - add_list = kzalloc(add_list_size, GFP_ATOMIC); + add_list = kzalloc(list_size, GFP_ATOMIC); if (!add_list) { - /* Purge element from temporary lists */ - i40e_cleanup_add_list(&tmp_add_list); - - /* Undo add filter entries from VSI MAC filter list */ - spin_lock_bh(&vsi->mac_filter_list_lock); - i40e_undo_add_filter_entries(vsi); - spin_unlock_bh(&vsi->mac_filter_list_lock); retval = -ENOMEM; goto out; } - - list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) { - - add_happened = true; - cmd_flags = 0; - + num_add = 0; + list_for_each_entry(f, &tmp_add_list, list) { + if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, + &vsi->state)) { + f->state = I40E_FILTER_FAILED; + continue; + } /* add to add array */ + if (num_add == 0) + add_head = f; + cmd_flags = 0; ether_addr_copy(add_list[num_add].mac_addr, f->macaddr); - add_list[num_add].vlan_tag = - cpu_to_le16( - (u16)(f->vlan == I40E_VLAN_ANY ? 
0 : f->vlan)); + if (f->vlan == I40E_VLAN_ANY) { + add_list[num_add].vlan_tag = 0; + cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN; + } else { + add_list[num_add].vlan_tag = + cpu_to_le16((u16)(f->vlan)); + } add_list[num_add].queue_number = 0; - cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH; add_list[num_add].flags = cpu_to_le16(cmd_flags); num_add++; /* flush a full buffer */ if (num_add == filter_list_len) { - aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + aq_ret = i40e_aq_add_macvlan(hw, vsi->seid, add_list, num_add, NULL); - aq_err = pf->hw.aq.asq_last_status; + aq_err = hw->aq.asq_last_status; + fcnt = i40e_update_filter_state(num_add, + add_list, + add_head, + aq_ret); + vsi->active_filters += fcnt; + + if (fcnt != num_add) { + promisc_changed = true; + set_bit(__I40E_FILTER_OVERFLOW_PROMISC, + &vsi->state); + vsi->promisc_threshold = + (vsi->active_filters * 3) / 4; + dev_warn(&pf->pdev->dev, + "Error %s adding RX filters on %s, promiscuous mode forced on\n", + i40e_aq_str(hw, aq_err), + vsi_name); + } + memset(add_list, 0, list_size); num_add = 0; - - if (aq_ret) - break; - memset(add_list, 0, add_list_size); } - /* Entries from tmp_add_list were cloned from MAC - * filter list, hence clean those cloned entries - */ - list_del(&f->list); - kfree(f); } - if (num_add) { - aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, + aq_ret = i40e_aq_add_macvlan(hw, vsi->seid, add_list, num_add, NULL); - aq_err = pf->hw.aq.asq_last_status; - num_add = 0; + aq_err = hw->aq.asq_last_status; + fcnt = i40e_update_filter_state(num_add, add_list, + add_head, aq_ret); + vsi->active_filters += fcnt; + if (fcnt != num_add) { + promisc_changed = true; + set_bit(__I40E_FILTER_OVERFLOW_PROMISC, + &vsi->state); + vsi->promisc_threshold = + (vsi->active_filters * 3) / 4; + dev_warn(&pf->pdev->dev, + "Error %s adding RX filters on %s, promiscuous mode forced on\n", + i40e_aq_str(hw, aq_err), vsi_name); + } } + /* Now move all of the filters from the temp add list back to + * the VSI's list. + */ + spin_lock_bh(&vsi->mac_filter_list_lock); + list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) { + list_move_tail(&f->list, &vsi->mac_filter_list); + } + spin_unlock_bh(&vsi->mac_filter_list_lock); kfree(add_list); add_list = NULL; + } - if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) { - retval = i40e_aq_rc_to_posix(aq_ret, aq_err); + /* Check to see if we can drop out of overflow promiscuous mode. */ + if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) && + (vsi->active_filters < vsi->promisc_threshold)) { + int failed_count = 0; + /* See if we have any failed filters. We can't drop out of + * promiscuous until these have all been deleted. 
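Dropping back out of overflow promiscuous therefore needs two conditions, as the comment above says: the filter population must be back under the recorded threshold, and no FAILED entries may remain to be retried. As a self-contained sketch with hypothetical types:

#include <linux/list.h>
#include <linux/types.h>

#define EX_FILTER_FAILED	3	/* hypothetical state value */

struct ex_filter {
	struct list_head list;
	int state;
};

struct ex_vsi {
	struct list_head filters;
	u32 active_filters;
	u32 promisc_threshold;
};

static bool ex_can_exit_promisc(struct ex_vsi *vsi)
{
	struct ex_filter *f;

	if (vsi->active_filters >= vsi->promisc_threshold)
		return false;

	/* any FAILED filter still needs a retry slot in hardware */
	list_for_each_entry(f, &vsi->filters, list) {
		if (f->state == EX_FILTER_FAILED)
			return false;
	}
	return true;
}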
+ */ + spin_lock_bh(&vsi->mac_filter_list_lock); + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if (f->state == I40E_FILTER_FAILED) + failed_count++; + } + spin_unlock_bh(&vsi->mac_filter_list_lock); + if (!failed_count) { dev_info(&pf->pdev->dev, - "add filter failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, aq_err)); - if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && - !test_bit(__I40E_FILTER_OVERFLOW_PROMISC, - &vsi->state)) { - promisc_forced_on = true; - set_bit(__I40E_FILTER_OVERFLOW_PROMISC, - &vsi->state); - dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); - } + "filter logjam cleared on %s, leaving overflow promiscuous mode\n", + vsi_name); + clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); + promisc_changed = true; + vsi->promisc_threshold = 0; } } @@ -2105,15 +2102,17 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) NULL); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, - pf->hw.aq.asq_last_status); + hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "set multi promisc failed, err %s aq_err %s\n", - i40e_stat_str(&pf->hw, aq_ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + "set multi promisc failed on %s, err %s aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); } } - if ((changed_flags & IFF_PROMISC) || promisc_forced_on) { + if ((changed_flags & IFF_PROMISC) || + (promisc_changed && + test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) { bool cur_promisc; cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || @@ -2129,35 +2128,72 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) */ if (pf->cur_promisc != cur_promisc) { pf->cur_promisc = cur_promisc; - set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + if (cur_promisc) + aq_ret = + i40e_aq_set_default_vsi(hw, + vsi->seid, + NULL); + else + aq_ret = + i40e_aq_clear_default_vsi(hw, + vsi->seid, + NULL); + if (aq_ret) { + retval = i40e_aq_rc_to_posix(aq_ret, + hw->aq.asq_last_status); + dev_info(&pf->pdev->dev, + "Set default VSI failed on %s, err %s, aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); + } } } else { aq_ret = i40e_aq_set_vsi_unicast_promiscuous( - &vsi->back->hw, + hw, vsi->seid, cur_promisc, NULL, true); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, - pf->hw.aq.asq_last_status); + hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "set unicast promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "set unicast promisc failed on %s, err %s, aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); } aq_ret = i40e_aq_set_vsi_multicast_promiscuous( - &vsi->back->hw, + hw, vsi->seid, cur_promisc, NULL); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, - pf->hw.aq.asq_last_status); + hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "set multicast promisc failed, err %d, aq_err %d\n", - aq_ret, pf->hw.aq.asq_last_status); + "set multicast promisc failed on %s, err %s, aq_err %s\n", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); } } + aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, + vsi->seid, + cur_promisc, NULL); + if (aq_ret) { + retval = i40e_aq_rc_to_posix(aq_ret, + pf->hw.aq.asq_last_status); + dev_info(&pf->pdev->dev, + "set brdcast promisc failed, err %s, aq_err %s\n", + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); + } } out: /* if something went wrong then set the 
changed flag so we try again */ @@ -2325,7 +2361,7 @@ static void i40e_vlan_rx_register(struct net_device *netdev, u32 features) **/ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) { - struct i40e_mac_filter *f, *add_f; + struct i40e_mac_filter *f, *ftmp, *add_f; bool is_netdev, is_vf; is_vf = (vsi->type == I40E_VSI_SRIOV); @@ -2346,7 +2382,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) } } - list_for_each_entry(f, &vsi->mac_filter_list, list) { + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev); if (!add_f) { dev_info(&vsi->back->pdev->dev, @@ -2360,7 +2396,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) /* Now if we add a vlan tag, make sure to check if it is the first * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag" * with 0, so we now accept untagged and specified tagged traffic - * (and not any taged and untagged) + * (and not all tags along with untagged) */ if (vid > 0) { if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr, @@ -2382,7 +2418,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) /* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */ if (vid > 0 && !vsi->info.pvid) { - list_for_each_entry(f, &vsi->mac_filter_list, list) { + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY, is_vf, is_netdev)) continue; @@ -2419,7 +2455,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid) int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) { struct net_device *netdev = vsi->netdev; - struct i40e_mac_filter *f, *add_f; + struct i40e_mac_filter *f, *ftmp, *add_f; bool is_vf, is_netdev; int filter_count = 0; @@ -2432,7 +2468,7 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) if (is_netdev) i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev); - list_for_each_entry(f, &vsi->mac_filter_list, list) + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev); /* go through all the filters for this VSI and if there is only @@ -2465,7 +2501,7 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid) } if (!filter_count) { - list_for_each_entry(f, &vsi->mac_filter_list, list) { + list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev); add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY, is_vf, is_netdev); @@ -2510,8 +2546,6 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev, if (vid > 4095) return -EINVAL; - netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); - /* If the network stack called us with vid = 0 then * it is asking to receive priority tagged packets with * vlan id 0. 
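The list_for_each_entry_safe() conversions in the hunks above exist because i40e_del_filter() can now free the entry being visited; the _safe variant caches the next node before the body runs. A minimal illustration with hypothetical types:

#include <linux/list.h>
#include <linux/slab.h>

struct ex_filter {
	struct list_head list;
};

static void ex_purge_all(struct list_head *head)
{
	struct ex_filter *f, *ftmp;

	/* ftmp already points at the next node, so freeing f is safe;
	 * the plain iterator would dereference freed memory here.
	 */
	list_for_each_entry_safe(f, ftmp, head, list) {
		list_del(&f->list);
		kfree(f);
	}
}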
Our HW receives them by default when configured @@ -2545,8 +2579,6 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; - netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid); - /* return code is ignored as there is nothing a user * can do about failure to remove and a log message was * already printed from the other function @@ -2559,6 +2591,44 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev, } /** + * i40e_macaddr_init - explicitly write the mac address filters + * + * @vsi: pointer to the vsi + * @macaddr: the MAC address + * + * This is needed when the macaddr has been obtained by other + * means than the default, e.g., from Open Firmware or IDPROM. + * Returns 0 on success, negative on failure + **/ +static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr) +{ + int ret; + struct i40e_aqc_add_macvlan_element_data element; + + ret = i40e_aq_mac_address_write(&vsi->back->hw, + I40E_AQC_WRITE_TYPE_LAA_WOL, + macaddr, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "Addr change for VSI failed: %d\n", ret); + return -EADDRNOTAVAIL; + } + + memset(&element, 0, sizeof(element)); + ether_addr_copy(element.mac_addr, macaddr); + element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); + ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "add filter failed err %s aq_err %s\n", + i40e_stat_str(&vsi->back->hw, ret), + i40e_aq_str(&vsi->back->hw, + vsi->back->hw.aq.asq_last_status)); + } + return ret; +} + +/** * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up * @vsi: the vsi being brought back up **/ @@ -3004,8 +3074,19 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) **/ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi) { + struct i40e_pf *pf = vsi->back; + int err; + if (vsi->netdev) i40e_set_rx_mode(vsi->netdev); + + if (!!(pf->flags & I40E_FLAG_PF_MAC)) { + err = i40e_macaddr_init(vsi, pf->hw.mac.addr); + if (err) { + dev_warn(&pf->pdev->dev, + "could not set up macaddr; err %d\n", err); + } + } } /** @@ -3947,6 +4028,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi) /* clear the affinity_mask in the IRQ descriptor */ irq_set_affinity_hint(pf->msix_entries[vector].vector, NULL); + synchronize_irq(pf->msix_entries[vector].vector); free_irq(pf->msix_entries[vector].vector, vsi->q_vectors[i]); @@ -4472,23 +4554,38 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) **/ static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg) { + int i, tc_unused = 0; u8 num_tc = 0; - int i; + u8 ret = 0; /* Scan the ETS Config Priority Table to find * traffic class enabled for a given priority - * and use the traffic class index to get the - * number of traffic classes enabled + * and create a bitmask of enabled TCs */ - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - if (dcbcfg->etscfg.prioritytable[i] > num_tc) - num_tc = dcbcfg->etscfg.prioritytable[i]; - } + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) + num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]); - /* Traffic class index starts from zero so - * increment to return the actual count + /* Now scan the bitmask to check for + * contiguous TCs starting with TC0 */ - return num_tc + 1; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (num_tc & BIT(i)) { + if (!tc_unused) { + ret++; + } else { + pr_err("Non-contiguous TC - Disabling DCB\n"); + return 1; + } + } else { + tc_unused = 1; + } + } 
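The scan above accepts only TC sets that are contiguous from TC0. An equivalent check can be written as a bit trick, since a mask whose set bits are contiguous from bit 0 satisfies (mask & (mask + 1)) == 0. A sketch of that formulation (hypothetical helper, not the driver's own code):

#include <linux/bitops.h>
#include <linux/types.h>

static u8 ex_num_contiguous_tcs(const u8 *prio_table, int entries)
{
	u8 mask = 0;
	int i;

	/* fold the per-priority TC assignments into one bitmask */
	for (i = 0; i < entries; i++)
		mask |= BIT(prio_table[i]);

	/* a hole in the mask means non-contiguous TCs: fall back to 1 */
	if (mask & (mask + 1))
		return 1;

	return mask ? hweight8(mask) : 1;
}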
+ + /* There is always at least TC0 */ + if (!ret) + ret = 1; + + return ret; } /** @@ -4953,7 +5050,6 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf) if (pf->vsi[v]->netdev) i40e_dcbnl_set_all(pf->vsi[v]); } - i40e_notify_client_of_l2_param_changes(pf->vsi[v]); } } @@ -5017,9 +5113,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) DCB_CAP_DCBX_VER_IEEE; pf->flags |= I40E_FLAG_DCB_CAPABLE; - /* Enable DCB tagging only when more than one TC */ + /* Enable DCB tagging only when more than one TC + * or explicitly disable if only one TC + */ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) pf->flags |= I40E_FLAG_DCB_ENABLED; + else + pf->flags &= ~I40E_FLAG_DCB_ENABLED; dev_dbg(&pf->pdev->dev, "DCBX offload is supported for this PF.\n"); } @@ -5178,12 +5278,6 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi) usleep_range(1000, 2000); i40e_down(vsi); - /* Give a VF some time to respond to the reset. The - * two second wait is based upon the watchdog cycle in - * the VF driver. - */ - if (vsi->type == I40E_VSI_SRIOV) - msleep(2000); i40e_up(vsi); clear_bit(__I40E_CONFIG_BUSY, &pf->state); } @@ -5226,6 +5320,9 @@ void i40e_down(struct i40e_vsi *vsi) i40e_clean_tx_ring(vsi->tx_rings[i]); i40e_clean_rx_ring(vsi->rx_rings[i]); } + + i40e_notify_client_of_netdev_close(vsi, false); + } /** @@ -5337,15 +5434,7 @@ int i40e_open(struct net_device *netdev) TCP_FLAG_CWR) >> 16); wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); -#ifdef CONFIG_I40E_VXLAN - vxlan_get_rx_port(netdev); -#endif -#ifdef CONFIG_I40E_GENEVE - if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE) - geneve_get_rx_port(netdev); -#endif - - i40e_notify_client_of_netdev_open(vsi); + udp_tunnel_get_rx_info(netdev); return 0; } @@ -5631,7 +5720,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, u8 type; /* Not DCB capable or capability disabled */ - if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) + if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) return ret; /* Ignore if event is not for Nearest Bridge */ @@ -5711,6 +5800,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, i40e_service_event_schedule(pf); } else { i40e_pf_unquiesce_all_vsi(pf); + /* Notify the client for the DCB changes */ + i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]); } exit: @@ -5935,7 +6026,6 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) if (I40E_DEBUG_FD & pf->hw.debug_mask) dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); } - } /** @@ -7052,7 +7142,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) **/ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) { -#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) struct i40e_hw *hw = &pf->hw; i40e_status ret; __be16 port; @@ -7087,7 +7176,6 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) } } } -#endif } /** @@ -7169,7 +7257,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) vsi->alloc_queue_pairs = 1; vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT, I40E_REQ_DESCRIPTOR_MULTIPLE); - vsi->num_q_vectors = 1; + vsi->num_q_vectors = pf->num_fdsb_msix; break; case I40E_VSI_VMDQ2: @@ -7553,9 +7641,11 @@ static int i40e_init_msix(struct i40e_pf *pf) /* reserve one vector for sideband flow director */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { if (vectors_left) { + pf->num_fdsb_msix = 1; v_budget++; vectors_left--; } else { + pf->num_fdsb_msix = 0; pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; } } @@ -7810,6 +7900,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) #endif 
I40E_FLAG_RSS_ENABLED | I40E_FLAG_DCB_CAPABLE | + I40E_FLAG_DCB_ENABLED | I40E_FLAG_SRIOV_ENABLED | I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | @@ -7906,7 +7997,6 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, u8 *rss_lut; int ret, i; - memset(&rss_key, 0, sizeof(rss_key)); memcpy(&rss_key, seed, sizeof(rss_key)); rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL); @@ -8580,7 +8670,9 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) /* Enable filters and mark for reset */ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) need_reset = true; - pf->flags |= I40E_FLAG_FD_SB_ENABLED; + /* enable FD_SB only if there is MSI-X vector */ + if (pf->num_fdsb_msix > 0) + pf->flags |= I40E_FLAG_FD_SB_ENABLED; } else { /* turn off filters, mark for reset and clear SW filter list */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { @@ -8629,7 +8721,6 @@ static int i40e_set_features(struct net_device *netdev, return 0; } -#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) /** * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port * @pf: board private structure @@ -8649,21 +8740,18 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port) return i; } -#endif - -#if IS_ENABLED(CONFIG_VXLAN) /** - * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up + * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up * @netdev: This physical port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: New UDP port number that VXLAN started listening to + * @ti: Tunnel endpoint information **/ -static void i40e_add_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) +static void i40e_udp_tunnel_add(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + __be16 port = ti->port; u8 next_idx; u8 idx; @@ -8671,7 +8759,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev, /* Check if port already exists */ if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "vxlan port %d already offloaded\n", + netdev_info(netdev, "port %d already offloaded\n", ntohs(port)); return; } @@ -8680,131 +8768,75 @@ static void i40e_add_vxlan_port(struct net_device *netdev, next_idx = i40e_get_udp_port_idx(pf, 0); if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n", + netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n", ntohs(port)); return; } - /* New port: add it and mark its index in the bitmap */ - pf->udp_ports[next_idx].index = port; - pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; - pf->pending_udp_bitmap |= BIT_ULL(next_idx); - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; -} - -/** - * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away - * @netdev: This physical port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: UDP port number that VXLAN stopped listening to - **/ -static void i40e_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) -{ - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - u8 idx; - - idx = i40e_get_udp_port_idx(pf, port); - - /* Check if port already exists */ - if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - /* if port exists, set it to 0 
(mark for deletion) - * and make it pending - */ - pf->udp_ports[idx].index = 0; - pf->pending_udp_bitmap |= BIT_ULL(idx); - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; - } else { - netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", - ntohs(port)); - } -} -#endif - -#if IS_ENABLED(CONFIG_GENEVE) -/** - * i40e_add_geneve_port - Get notifications about GENEVE ports that come up - * @netdev: This physical port's netdev - * @sa_family: Socket Family that GENEVE is notifying us about - * @port: New UDP port number that GENEVE started listening to - **/ -static void i40e_add_geneve_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) -{ - struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_vsi *vsi = np->vsi; - struct i40e_pf *pf = vsi->back; - u8 next_idx; - u8 idx; - - if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) - return; - - idx = i40e_get_udp_port_idx(pf, port); - - /* Check if port already exists */ - if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "udp port %d already offloaded\n", - ntohs(port)); - return; - } - - /* Now check if there is space to add the new port */ - next_idx = i40e_get_udp_port_idx(pf, 0); - - if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n", - ntohs(port)); + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) + return; + pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; + break; + default: return; } /* New port: add it and mark its index in the bitmap */ pf->udp_ports[next_idx].index = port; - pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE; pf->pending_udp_bitmap |= BIT_ULL(next_idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; - - dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port)); } /** - * i40e_del_geneve_port - Get notifications about GENEVE ports that go away + * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away * @netdev: This physical port's netdev - * @sa_family: Socket Family that GENEVE is notifying us about - * @port: UDP port number that GENEVE stopped listening to + * @ti: Tunnel endpoint information **/ -static void i40e_del_geneve_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) +static void i40e_udp_tunnel_del(struct net_device *netdev, + struct udp_tunnel_info *ti) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + __be16 port = ti->port; u8 idx; - if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) - return; - idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ - if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) { - /* if port exists, set it to 0 (mark for deletion) - * and make it pending - */ - pf->udp_ports[idx].index = 0; - pf->pending_udp_bitmap |= BIT_ULL(idx); - pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS) + goto not_found; - dev_info(&pf->pdev->dev, "deleting geneve port %d\n", - ntohs(port)); - } else { - netdev_warn(netdev, "geneve port %d was not found, not deleting\n", - ntohs(port)); + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN) + goto not_found; + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE) + goto not_found; + break; + default: + 
goto not_found; } + + /* if port exists, set it to 0 (mark for deletion) + * and make it pending + */ + pf->udp_ports[idx].index = 0; + pf->pending_udp_bitmap |= BIT_ULL(idx); + pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; + + return; +not_found: + netdev_warn(netdev, "UDP port %d was not found, not deleting\n", + ntohs(port)); } -#endif static int i40e_get_phys_port_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) @@ -9034,14 +9066,8 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, .ndo_set_vf_trust = i40e_ndo_set_vf_trust, -#if IS_ENABLED(CONFIG_VXLAN) - .ndo_add_vxlan_port = i40e_add_vxlan_port, - .ndo_del_vxlan_port = i40e_del_vxlan_port, -#endif -#if IS_ENABLED(CONFIG_GENEVE) - .ndo_add_geneve_port = i40e_add_geneve_port, - .ndo_del_geneve_port = i40e_del_geneve_port, -#endif + .ndo_udp_tunnel_add = i40e_udp_tunnel_add, + .ndo_udp_tunnel_del = i40e_udp_tunnel_del, .ndo_get_phys_port_id = i40e_get_phys_port_id, .ndo_fdb_add = i40e_ndo_fdb_add, .ndo_features_check = i40e_features_check, @@ -9057,7 +9083,6 @@ static const struct net_device_ops i40e_netdev_ops = { **/ static int i40e_config_netdev(struct i40e_vsi *vsi) { - u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_netdev_priv *np; @@ -9121,18 +9146,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) * default a MAC-VLAN filter that accepts any tagged packet * which must be replaced by a normal filter. */ - if (!i40e_rm_default_mac_filter(vsi, mac_addr)) { - spin_lock_bh(&vsi->mac_filter_list_lock); - i40e_add_filter(vsi, mac_addr, - I40E_VLAN_ANY, false, true); - spin_unlock_bh(&vsi->mac_filter_list_lock); - } - } else if ((pf->hw.aq.api_maj_ver > 1) || - ((pf->hw.aq.api_maj_ver == 1) && - (pf->hw.aq.api_min_ver > 4))) { - /* Supported in FW API version higher than 1.4 */ - pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; - pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; + i40e_rm_default_mac_filter(vsi, mac_addr); + spin_lock_bh(&vsi->mac_filter_list_lock); + i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true); + spin_unlock_bh(&vsi->mac_filter_list_lock); } else { /* relate the VSI_VMDQ name to the VSI_MAIN name */ snprintf(netdev->name, IFNAMSIZ, "%sv%%d", @@ -9144,10 +9161,6 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) spin_unlock_bh(&vsi->mac_filter_list_lock); } - spin_lock_bh(&vsi->mac_filter_list_lock); - i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false); - spin_unlock_bh(&vsi->mac_filter_list_lock); - ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); @@ -9226,8 +9239,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) { int ret = -ENODEV; i40e_status aq_ret = 0; - u8 laa_macaddr[ETH_ALEN]; - bool found_laa_mac_filter = false; struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; struct i40e_vsi_context ctxt; @@ -9428,41 +9439,16 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) } } + vsi->active_filters = 0; + clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); spin_lock_bh(&vsi->mac_filter_list_lock); /* If macvlan filters already exist, force them to get loaded */ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) { - f->changed = true; + f->state = I40E_FILTER_NEW; f_count++; - - /* Expected to have only one MAC filter entry for LAA in list */ - if (f->is_laa && vsi->type == I40E_VSI_MAIN) { - 
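A minimal sketch of the combined UDP tunnel callback that this conversion moves the drivers to; the foo_* names are hypothetical, while struct udp_tunnel_info, the UDP_TUNNEL_TYPE_* constants and udp_tunnel_get_rx_info() are the generic API used in the hunks above:

#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

/* hypothetical driver helper: program one port into the HW offload table */
static void foo_program_port(struct net_device *netdev, __be16 port)
{
}

static void foo_udp_tunnel_add(struct net_device *netdev,
			       struct udp_tunnel_info *ti)
{
	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
	case UDP_TUNNEL_TYPE_GENEVE:
		foo_program_port(netdev, ti->port);
		break;
	default:
		/* unknown tunnel type: ignore the notification */
		break;
	}
}

At open time the driver calls udp_tunnel_get_rx_info(netdev) under RTNL and the core replays every live tunnel port through the add callback, which is why the explicit vxlan_get_rx_port()/geneve_get_rx_port() calls disappear.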
ether_addr_copy(laa_macaddr, f->macaddr); - found_laa_mac_filter = true; - } } spin_unlock_bh(&vsi->mac_filter_list_lock); - if (found_laa_mac_filter) { - struct i40e_aqc_remove_macvlan_element_data element; - - memset(&element, 0, sizeof(element)); - ether_addr_copy(element.mac_addr, laa_macaddr); - element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; - ret = i40e_aq_remove_macvlan(hw, vsi->seid, - &element, 1, NULL); - if (ret) { - /* some older FW has a different default */ - element.flags |= - I40E_AQC_MACVLAN_DEL_IGNORE_VLAN; - i40e_aq_remove_macvlan(hw, vsi->seid, - &element, 1, NULL); - } - - i40e_aq_mac_address_write(hw, - I40E_AQC_WRITE_TYPE_LAA_WOL, - laa_macaddr, NULL); - } - if (f_count) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; pf->flags |= I40E_FLAG_FILTER_SYNC; @@ -9673,6 +9659,8 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); + if (vsi->type == I40E_VSI_MAIN) + i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr); /* assign it some queues */ ret = i40e_alloc_rings(vsi); @@ -9698,44 +9686,6 @@ err_vsi: } /** - * i40e_macaddr_init - explicitly write the mac address filters. - * - * @vsi: pointer to the vsi. - * @macaddr: the MAC address - * - * This is needed when the macaddr has been obtained by other - * means than the default, e.g., from Open Firmware or IDPROM. - * Returns 0 on success, negative on failure - **/ -static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr) -{ - int ret; - struct i40e_aqc_add_macvlan_element_data element; - - ret = i40e_aq_mac_address_write(&vsi->back->hw, - I40E_AQC_WRITE_TYPE_LAA_WOL, - macaddr, NULL); - if (ret) { - dev_info(&vsi->back->pdev->dev, - "Addr change for VSI failed: %d\n", ret); - return -EADDRNOTAVAIL; - } - - memset(&element, 0, sizeof(element)); - ether_addr_copy(element.mac_addr, macaddr); - element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH); - ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL); - if (ret) { - dev_info(&vsi->back->pdev->dev, - "add filter failed err %s aq_err %s\n", - i40e_stat_str(&vsi->back->hw, ret), - i40e_aq_str(&vsi->back->hw, - vsi->back->hw.aq.asq_last_status)); - } - return ret; -} - -/** * i40e_vsi_setup - Set up a VSI by a given type * @pf: board private structure * @type: VSI type @@ -10147,14 +10097,14 @@ void i40e_veb_release(struct i40e_veb *veb) static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { struct i40e_pf *pf = veb->pf; - bool is_default = veb->pf->cur_promisc; bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); int ret; - /* get a VEB from the hardware */ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, - veb->enabled_tc, is_default, + veb->enabled_tc, false, &veb->seid, enable_stats, NULL); + + /* get a VEB from the hardware */ if (ret) { dev_info(&pf->pdev->dev, "couldn't add VEB, err %s aq_err %s\n", @@ -10557,6 +10507,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_CAPABLE | + I40E_FLAG_DCB_ENABLED | I40E_FLAG_SRIOV_ENABLED | I40E_FLAG_VMDQ_ENABLED); } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | @@ -10580,7 +10531,8 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) /* Not enough queues for all TCs */ if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && (queues_left < I40E_MAX_TRAFFIC_CLASS)) { - pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + 
pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | + I40E_FLAG_DCB_ENABLED); dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); } pf->num_lan_qps = max_t(int, pf->rss_size_max, @@ -10703,12 +10655,8 @@ static void i40e_print_features(struct i40e_pf *pf) } if (pf->flags & I40E_FLAG_DCB_CAPABLE) i += snprintf(&buf[i], REMAIN(i), " DCB"); -#if IS_ENABLED(CONFIG_VXLAN) i += snprintf(&buf[i], REMAIN(i), " VxLAN"); -#endif -#if IS_ENABLED(CONFIG_GENEVE) i += snprintf(&buf[i], REMAIN(i), " Geneve"); -#endif if (pf->flags & I40E_FLAG_PTP) i += snprintf(&buf[i], REMAIN(i), " PTP"); #ifdef I40E_FCOE @@ -10783,8 +10731,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } /* set up pci connections */ - err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), i40e_driver_name); + err = pci_request_mem_regions(pdev, i40e_driver_name); if (err) { dev_info(&pdev->dev, "pci_request_selected_regions failed %d\n", err); @@ -10982,7 +10929,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = i40e_init_pf_dcb(pf); if (err) { dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); - pf->flags &= ~I40E_FLAG_DCB_CAPABLE; + pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED); /* Continue without DCB enabled */ } #endif /* CONFIG_I40E_DCB */ @@ -11281,8 +11228,7 @@ err_ioremap: kfree(pf); err_pf_alloc: pci_disable_pcie_error_reporting(pdev); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@ -11393,8 +11339,7 @@ static void i40e_remove(struct pci_dev *pdev) iounmap(hw->hw_addr); kfree(pf); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); @@ -11539,6 +11484,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) { struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; + int retval = 0; set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_DOWN, &pf->state); @@ -11550,10 +11496,16 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ?
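The operator fixed in the probe path above deserves a note: for two disjoint single-bit flags, A & B is 0, so x &= ~(A & B) masks with all-ones and clears nothing, while x &= ~(A | B) clears both bits. A self-contained illustration in plain C:

#include <stdio.h>

#define FLAG_CAPABLE (1u << 0)
#define FLAG_ENABLED (1u << 1)

int main(void)
{
	unsigned int flags = FLAG_CAPABLE | FLAG_ENABLED;

	flags &= ~(FLAG_CAPABLE & FLAG_ENABLED);	/* mask is ~0u: no-op */
	printf("%#x\n", flags);				/* prints 0x3 */

	flags &= ~(FLAG_CAPABLE | FLAG_ENABLED);	/* clears both bits */
	printf("%#x\n", flags);				/* prints 0 */
	return 0;
}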
I40E_PFPM_WUFC_MAG_MASK : 0)); + i40e_stop_misc_vector(pf); + + retval = pci_save_state(pdev); + if (retval) + return retval; + pci_wake_from_d3(pdev, pf->wol_en); pci_set_power_state(pdev, PCI_D3hot); - return 0; + return retval; } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 80403c6ee..4660c5abc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -98,6 +98,8 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id, + struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, bool qualified_modules, bool report_init, struct i40e_aq_get_phy_abilities_resp *abilities, diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index a8868e1bf..df7ecc957 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -740,14 +740,12 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, tx_ring->q_vector->tx.total_packets += total_packets; if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { - unsigned int j = 0; - /* check to see if there are < 4 descriptors * waiting to be written back, then kick the hardware to force * them to be written back in case we stay in NAPI. * In this mode on X722 we do not enable Interrupt. */ - j = i40e_get_tx_pending(tx_ring, false); + unsigned int j = i40e_get_tx_pending(tx_ring, false); if (budget && ((j / (WB_STRIDE + 1)) == 0) && (j != 0) && diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 1fcafcfa8..6fcbf764f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -665,6 +665,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) goto error_alloc_vsi_res; } if (type == I40E_VSI_SRIOV) { + u64 hena = i40e_pf_get_default_rss_hena(pf); + vf->lan_vsi_idx = vsi->idx; vf->lan_vsi_id = vsi->id; /* If the port VLAN has been configured and then the @@ -687,6 +689,10 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) vf->default_lan_addr.addr, vf->vf_id); } spin_unlock_bh(&vsi->mac_filter_list_lock); + i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), + (u32)hena); + i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), + (u32)(hena >> 32)); } /* program mac filter */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 8f6420400..4db0c0326 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -59,7 +59,6 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_SFP_I_X722: - case I40E_DEV_ID_QSFP_I_X722: hw->mac.type = I40E_MAC_X722; break; case I40E_DEV_ID_X722_VF: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h index d34972bab..702357069 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -45,7 +45,6 @@ #define 
I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 -#define I40E_DEV_ID_QSFP_I_X722 0x37D4 #define I40E_DEV_ID_X722_VF 0x37CD #define I40E_DEV_ID_X722_VF_HV 0x37D9 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 79d99cd91..a579193b2 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -259,13 +259,12 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, tx_ring->q_vector->tx.total_packets += total_packets; if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { - unsigned int j = 0; /* check to see if there are < 4 descriptors * waiting to be written back, then kick the hardware to force * them to be written back in case we stay in NAPI. * In this mode on X722 we do not enable Interrupt. */ - j = i40evf_get_tx_pending(tx_ring, false); + unsigned int j = i40evf_get_tx_pending(tx_ring, false); if (budget && ((j / (WB_STRIDE + 1)) == 0) && (j > 0) && diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 16c552952..600fb9c4a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -37,8 +37,8 @@ static const char i40evf_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 5 -#define DRV_VERSION_BUILD 10 +#define DRV_VERSION_MINOR 6 +#define DRV_VERSION_BUILD 11 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ @@ -57,7 +57,9 @@ static const char i40evf_copyright[] = */ static const struct pci_device_id i40evf_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF_HV), 0}, /* required last entry */ {0, } }; @@ -825,7 +827,7 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, ether_addr_copy(f->macaddr, macaddr); - list_add(&f->list, &adapter->mac_filter_list); + list_add_tail(&f->list, &adapter->mac_filter_list); f->add = true; adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; } diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index f13445691..d76c221d4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -434,6 +434,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter) ether_addr_copy(veal->list[i].addr, f->macaddr); i++; f->add = false; + if (i == count) + break; } } if (!more) @@ -497,6 +499,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter) i++; list_del(&f->list); kfree(f); + if (i == count) + break; } } if (!more) @@ -560,6 +564,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter) vvfl->vlan_id[i] = f->vlan; i++; f->add = false; + if (i == count) + break; } } if (!more) @@ -623,6 +629,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) i++; list_del(&f->list); kfree(f); + if (i == count) + break; } } if (!more) diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index b9609afa5..5387b3a96 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -445,6 +445,7 @@ struct igb_adapter { unsigned long ptp_tx_start; unsigned long last_rx_ptp_check; unsigned long 
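The list_add() to list_add_tail() change in i40evf_add_filter() makes the MAC filter list FIFO, so pending filters reach the PF in the order the stack requested them. A minimal sketch of the difference with the kernel list API (foo_* names hypothetical):

#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/types.h>

struct foo_filter {
	struct list_head list;
	u8 macaddr[ETH_ALEN];
};

static LIST_HEAD(foo_filter_list);

static void foo_add_filter(struct foo_filter *f)
{
	/* tail insertion keeps a front-to-back walk in insertion order;
	 * list_add() would make the newest entry the first one visited
	 */
	list_add_tail(&f->list, &foo_filter_list);
}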
last_rx_timestamp; + unsigned int ptp_flags; spinlock_t tmreg_lock; struct cyclecounter cc; struct timecounter tc; @@ -474,12 +475,15 @@ struct igb_adapter { u16 eee_advert; }; +/* flags controlling PTP/1588 function */ +#define IGB_PTP_ENABLED BIT(0) +#define IGB_PTP_OVERFLOW_CHECK BIT(1) + #define IGB_FLAG_HAS_MSI BIT(0) #define IGB_FLAG_DCA_ENABLED BIT(1) #define IGB_FLAG_QUAD_PORT_A BIT(2) #define IGB_FLAG_QUEUE_PAIRS BIT(3) #define IGB_FLAG_DMAC BIT(4) -#define IGB_FLAG_PTP BIT(5) #define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6) #define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7) #define IGB_FLAG_WOL_SUPPORTED BIT(8) @@ -546,6 +550,7 @@ void igb_set_fw_version(struct igb_adapter *); void igb_ptp_init(struct igb_adapter *adapter); void igb_ptp_stop(struct igb_adapter *adapter); void igb_ptp_reset(struct igb_adapter *adapter); +void igb_ptp_suspend(struct igb_adapter *adapter); void igb_ptp_rx_hang(struct igb_adapter *adapter); void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ef3d642f5..942a89fb0 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2027,7 +2027,8 @@ void igb_reset(struct igb_adapter *adapter) wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); /* Re-enable PTP, where applicable. */ - igb_ptp_reset(adapter); + if (adapter->ptp_flags & IGB_PTP_ENABLED) + igb_ptp_reset(adapter); igb_get_phy_info(hw); } @@ -2323,9 +2324,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } - err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), - igb_driver_name); + err = pci_request_mem_regions(pdev, igb_driver_name); if (err) goto err_pci_reg; @@ -2749,8 +2748,7 @@ err_sw_init: err_ioremap: free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: pci_disable_device(pdev); @@ -2915,8 +2913,7 @@ static void igb_remove(struct pci_dev *pdev) pci_iounmap(pdev, adapter->io_addr); if (hw->flash_address) iounmap(hw->flash_address); - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); kfree(adapter->shadow_vfta); free_netdev(netdev); @@ -6855,12 +6852,12 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, **/ static bool igb_add_rx_frag(struct igb_ring *rx_ring, struct igb_rx_buffer *rx_buffer, + unsigned int size, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) { struct page *page = rx_buffer->page; unsigned char *va = page_address(page) + rx_buffer->page_offset; - unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); #if (PAGE_SIZE < 8192) unsigned int truesize = IGB_RX_BUFSZ; #else @@ -6912,6 +6909,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) { + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); struct igb_rx_buffer *rx_buffer; struct page *page; @@ -6947,11 +6945,11 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring, dma_sync_single_range_for_cpu(rx_ring->dev, rx_buffer->dma, rx_buffer->page_offset, - IGB_RX_BUFSZ, + size, DMA_FROM_DEVICE); /* pull page into skb */ - if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) { 
/* hand second half of page back to the ring */ igb_reuse_rx_page(rx_ring, rx_buffer); } else { @@ -7527,6 +7525,8 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, if (netif_running(netdev)) __igb_close(netdev, true); + igb_ptp_suspend(adapter); + igb_clear_interrupt_scheme(adapter); #ifdef CONFIG_PM diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 3c7bcdf76..336c103ae 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -684,6 +684,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter) u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL); unsigned long rx_event; + /* Other hardware uses per-packet timestamps */ if (hw->mac.type != e1000_82576) return; @@ -1062,6 +1063,13 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr) -EFAULT : 0; } +/** + * igb_ptp_init - Initialize PTP functionality + * @adapter: Board private structure + * + * This function is called at device probe to initialize the PTP + * functionality. + */ void igb_ptp_init(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; @@ -1084,8 +1092,7 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->cc.mask = CYCLECOUNTER_MASK(64); adapter->cc.mult = 1; adapter->cc.shift = IGB_82576_TSYNC_SHIFT; - /* Dial the nominal frequency. */ - wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); + adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK; break; case e1000_82580: case e1000_i354: @@ -1104,8 +1111,7 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580); adapter->cc.mult = 1; adapter->cc.shift = 0; - /* Enable the timer functions by clearing bit 31. */ - wr32(E1000_TSAUXC, 0x0); + adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK; break; case e1000_i210: case e1000_i211: @@ -1130,44 +1136,24 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->ptp_caps.settime64 = igb_ptp_settime_i210; adapter->ptp_caps.enable = igb_ptp_feature_enable_i210; adapter->ptp_caps.verify = igb_ptp_verify_pin; - /* Enable the timer functions by clearing bit 31. */ - wr32(E1000_TSAUXC, 0x0); break; default: adapter->ptp_clock = NULL; return; } - wrfl(); - spin_lock_init(&adapter->tmreg_lock); INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); - /* Initialize the clock and overflow work for devices that need it. */ - if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { - struct timespec64 ts = ktime_to_timespec64(ktime_get_real()); - - igb_ptp_settime_i210(&adapter->ptp_caps, &ts); - } else { - timecounter_init(&adapter->tc, &adapter->cc, - ktime_to_ns(ktime_get_real())); - + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) INIT_DELAYED_WORK(&adapter->ptp_overflow_work, igb_ptp_overflow_check); - schedule_delayed_work(&adapter->ptp_overflow_work, - IGB_SYSTIM_OVERFLOW_PERIOD); - } - - /* Initialize the time sync interrupts for devices that support it. 
*/ - if (hw->mac.type >= e1000_82580) { - wr32(E1000_TSIM, TSYNC_INTERRUPTS); - wr32(E1000_IMS, E1000_IMS_TS); - } - adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + igb_ptp_reset(adapter); + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, &adapter->pdev->dev); if (IS_ERR(adapter->ptp_clock)) { @@ -1176,32 +1162,24 @@ void igb_ptp_init(struct igb_adapter *adapter) } else { dev_info(&adapter->pdev->dev, "added PHC on %s\n", adapter->netdev->name); - adapter->flags |= IGB_FLAG_PTP; + adapter->ptp_flags |= IGB_PTP_ENABLED; } } /** - * igb_ptp_stop - Disable PTP device and stop the overflow check. - * @adapter: Board private structure. + * igb_ptp_suspend - Disable PTP work items and prepare for suspend + * @adapter: Board private structure * - * This function stops the PTP support and cancels the delayed work. - **/ -void igb_ptp_stop(struct igb_adapter *adapter) + * This function stops the overflow check work and PTP Tx timestamp work, and + * will prepare the device for OS suspend. + */ +void igb_ptp_suspend(struct igb_adapter *adapter) { - switch (adapter->hw.mac.type) { - case e1000_82576: - case e1000_82580: - case e1000_i354: - case e1000_i350: - cancel_delayed_work_sync(&adapter->ptp_overflow_work); - break; - case e1000_i210: - case e1000_i211: - /* No delayed work to cancel. */ - break; - default: + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) return; - } + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + cancel_delayed_work_sync(&adapter->ptp_overflow_work); cancel_work_sync(&adapter->ptp_tx_work); if (adapter->ptp_tx_skb) { @@ -1209,12 +1187,23 @@ void igb_ptp_stop(struct igb_adapter *adapter) adapter->ptp_tx_skb = NULL; clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); } +} + +/** + * igb_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. 
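The ptp_flags rework pairs the cancel and the reschedule of the overflow work behind a single flag, so igb_ptp_reset() restarts exactly what igb_ptp_suspend() stopped. A condensed sketch of that pattern, with hypothetical foo_* names and only the real workqueue API:

#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define FOO_PTP_ENABLED		BIT(0)
#define FOO_PTP_OVERFLOW_CHECK	BIT(1)

struct foo_adapter {
	unsigned int ptp_flags;
	struct delayed_work overflow_work;
};

static void foo_ptp_suspend(struct foo_adapter *adapter)
{
	if (!(adapter->ptp_flags & FOO_PTP_ENABLED))
		return;
	if (adapter->ptp_flags & FOO_PTP_OVERFLOW_CHECK)
		cancel_delayed_work_sync(&adapter->overflow_work);
}

static void foo_ptp_reset(struct foo_adapter *adapter)
{
	if (adapter->ptp_flags & FOO_PTP_OVERFLOW_CHECK)
		schedule_delayed_work(&adapter->overflow_work, HZ);
}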
+ **/ +void igb_ptp_stop(struct igb_adapter *adapter) +{ + igb_ptp_suspend(adapter); if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); dev_info(&adapter->pdev->dev, "removed PHC on %s\n", adapter->netdev->name); - adapter->flags &= ~IGB_FLAG_PTP; + adapter->ptp_flags &= ~IGB_PTP_ENABLED; } } @@ -1229,9 +1218,6 @@ void igb_ptp_reset(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; unsigned long flags; - if (!(adapter->flags & IGB_FLAG_PTP)) - return; - /* reset the tstamp_config */ igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); @@ -1268,4 +1254,10 @@ } out: spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + wrfl(); + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + schedule_delayed_work(&adapter->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 9f2db1855..9475ff905 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -804,8 +804,6 @@ struct ixgbe_adapter { #define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)]; - - bool need_crosstalk_fix; }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 47afed74a..63b25006a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1813,9 +1813,6 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) /* We need to run link autotry after the driver loads */ hw->mac.autotry_restart = true; - if (ret_val) - return ret_val; - return ixgbe_verify_fw_version_82599(hw); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 902d2061c..c47b605e8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -277,6 +277,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) { s32 ret_val; u32 ctrl_ext; + u16 device_caps; /* Set the media type */ hw->phy.media_type = hw->mac.ops.get_media_type(hw); @@ -301,6 +302,22 @@ if (ret_val) return ret_val; + /* Cache bit indicating need for crosstalk fix */ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + hw->mac.ops.get_device_caps(hw, &device_caps); + if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) + hw->need_crosstalk_fix = false; + else + hw->need_crosstalk_fix = true; + break; + default: + hw->need_crosstalk_fix = false; + break; + } + /* Clear adapter stopped flag */ hw->adapter_stopped = false; @@ -763,6 +780,9 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) { u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + if (index > 3) + return IXGBE_ERR_PARAM; + /* To turn on the LED, set mode to ON. */ led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); @@ -781,6 +801,9 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) { u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + if (index > 3) + return IXGBE_ERR_PARAM; + /* To turn off the LED, set mode to OFF.
*/ led_reg &= ~IXGBE_LED_MODE_MASK(index); led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); @@ -2657,7 +2680,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) **/ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) { - int secrxreg; + u32 secrxreg; secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; @@ -2698,6 +2721,9 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) bool locked = false; s32 ret_val; + if (index > 3) + return IXGBE_ERR_PARAM; + /* * Link must be up to auto-blink the LEDs; * Force it if link is down. @@ -2741,6 +2767,9 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) bool locked = false; s32 ret_val; + if (index > 3) + return IXGBE_ERR_PARAM; + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); if (ret_val) return ret_val; @@ -2929,8 +2958,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) } /* was that the last pool using this rar? */ - if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) + if (mpsar_lo == 0 && mpsar_hi == 0 && + rar != 0 && rar != hw->mac.san_mac_rar_index) hw->mac.ops.clear_rar(hw, rar); + return 0; } @@ -3188,6 +3219,31 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) } /** + * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix + * @hw: pointer to hardware structure + * + * Contains the logic to identify if we need to verify link for the + * crosstalk fix + **/ +static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) +{ + /* Does FW say we need the fix */ + if (!hw->need_crosstalk_fix) + return false; + + /* Only consider SFP+ PHYs i.e. media type fiber */ + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + case ixgbe_media_type_fiber_qsfp: + break; + default: + return false; + } + + return true; +} + +/** * ixgbe_check_mac_link_generic - Determine link and speed status * @hw: pointer to hardware structure * @speed: pointer to link speed @@ -3202,6 +3258,35 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, u32 links_reg, links_orig; u32 i; + /* If Crosstalk fix enabled do the sanity check of making sure + * the SFP+ cage is full. 
+ */ + if (ixgbe_need_crosstalk_fix(hw)) { + u32 sfp_cage_full; + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP2; + break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP0; + break; + default: + /* sanity check - No SFP+ devices here */ + sfp_cage_full = false; + break; + } + + if (!sfp_cage_full) { + *link_up = false; + *speed = IXGBE_LINK_SPEED_UNKNOWN; + return 0; + } + } + /* clear the old state */ links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 59b771b9b..0d7209eb5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2204,11 +2204,11 @@ static int ixgbe_set_phys_id(struct net_device *netdev, return 2; case ETHTOOL_ID_ON: - hw->mac.ops.led_on(hw, IXGBE_LED_ON); + hw->mac.ops.led_on(hw, hw->bus.func); break; case ETHTOOL_ID_OFF: - hw->mac.ops.led_off(hw, IXGBE_LED_ON); + hw->mac.ops.led_off(hw, hw->bus.func); break; case ETHTOOL_ID_INACTIVE: @@ -2991,10 +2991,15 @@ static int ixgbe_get_ts_info(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); + /* we always support timestamping disabled */ + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); + switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_x550em_a: + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); + /* fallthrough */ case ixgbe_mac_X540: case ixgbe_mac_82599EB: info->so_timestamping = @@ -3014,8 +3019,7 @@ static int ixgbe_get_ts_info(struct net_device *dev, BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); - info->rx_filters = - BIT(HWTSTAMP_FILTER_NONE) | + info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 58153e818..b4f03748a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -50,7 +50,7 @@ #include <linux/if_bridge.h> #include <linux/prefetch.h> #include <scsi/fc/fc_fcoe.h> -#include <net/vxlan.h> +#include <net/udp_tunnel.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_mirred.h> @@ -3084,7 +3084,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) free_irq(entry->vector, q_vector); } - free_irq(adapter->msix_entries[vector++].vector, adapter); + free_irq(adapter->msix_entries[vector].vector, adapter); } /** @@ -5631,7 +5631,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) struct pci_dev *pdev = adapter->pdev; unsigned int rss, fdir; u32 fwsm; - u16 device_caps; int i; /* PCI config space info */ @@ -5728,9 +5727,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) #ifdef CONFIG_IXGBE_DCA adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE; #endif -#ifdef CONFIG_IXGBE_VXLAN adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE; -#endif break; default: break; @@ -5779,22 +5776,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->tx_ring_count = IXGBE_DEFAULT_TXD; adapter->rx_ring_count = IXGBE_DEFAULT_RXD; - /* Cache bit indicating need for crosstalk fix */ - switch (hw->mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_x550em_a: - hw->mac.ops.get_device_caps(hw, &device_caps); - if 
(device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) - adapter->need_crosstalk_fix = false; - else - adapter->need_crosstalk_fix = true; - break; - default: - adapter->need_crosstalk_fix = false; - break; - } - /* set default work limits */ adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; @@ -6164,9 +6145,7 @@ int ixgbe_open(struct net_device *netdev) ixgbe_up_complete(adapter); ixgbe_clear_vxlan_port(adapter); -#ifdef CONFIG_IXGBE_VXLAN - vxlan_get_rx_port(netdev); -#endif + udp_tunnel_get_rx_info(netdev); return 0; @@ -6717,18 +6696,6 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) link_up = true; } - /* If Crosstalk fix enabled do the sanity check of making sure - * the SFP+ cage is empty. - */ - if (adapter->need_crosstalk_fix) { - u32 sfp_cage_full; - - sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & - IXGBE_ESDP_SDP2; - if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full) - link_up = false; - } - if (adapter->ixgbe_ieee_pfc) pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); @@ -7075,16 +7042,6 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; s32 err; - /* If crosstalk fix enabled verify the SFP+ cage is full */ - if (adapter->need_crosstalk_fix) { - u32 sfp_cage_full; - - sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & - IXGBE_ESDP_SDP2; - if (!sfp_cage_full) - return; - } - /* not searching for SFP so there is nothing to do here */ if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) @@ -7268,14 +7225,12 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_service_event_complete(adapter); return; } -#ifdef CONFIG_IXGBE_VXLAN - rtnl_lock(); if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) { + rtnl_lock(); adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED; - vxlan_get_rx_port(adapter->netdev); + udp_tunnel_get_rx_info(adapter->netdev); + rtnl_unlock(); } - rtnl_unlock(); -#endif /* CONFIG_IXGBE_VXLAN */ ixgbe_reset_subtask(adapter); ixgbe_phy_interrupt_subtask(adapter); ixgbe_sfp_detection_subtask(adapter); @@ -7703,7 +7658,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring, /* snag network header to get L4 type and address */ skb = first->skb; hdr.network = skb_network_header(skb); -#ifdef CONFIG_IXGBE_VXLAN if (skb->encapsulation && first->protocol == htons(ETH_P_IP) && hdr.ipv4->protocol != IPPROTO_UDP) { @@ -7714,7 +7668,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring, udp_hdr(skb)->dest == adapter->vxlan_port) hdr.network = skb_inner_network_header(skb); } -#endif /* CONFIG_IXGBE_VXLAN */ /* Currently only IPv4/IPv6 with TCP is supported */ switch (hdr.ipv4->version) { @@ -8314,14 +8267,53 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, struct tc_cls_u32_offload *cls) { + u32 hdl = cls->knode.handle; u32 uhtid = TC_U32_USERHTID(cls->knode.handle); - u32 loc; - int err; + u32 loc = cls->knode.handle & 0xfffff; + int err = 0, i, j; + struct ixgbe_jump_table *jump = NULL; + + if (loc > IXGBE_MAX_HW_ENTRIES) + return -EINVAL; if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE)) return -EINVAL; - loc = cls->knode.handle & 0xfffff; + /* Clear this filter in the link data it is associated with */ + if (uhtid != 0x800) { + jump = adapter->jump_tables[uhtid]; + if (!jump) + return -EINVAL; + if (!test_bit(loc - 1, jump->child_loc_map)) + return -EINVAL; + clear_bit(loc - 1, jump->child_loc_map); + } + + /* Check if the filter being deleted is a link */ + for (i = 1; i 
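The child_loc_map added to struct ixgbe_jump_table is plain bitmap bookkeeping: set_bit() marks a filter location as occupied when a child filter is installed, and the delete path walks the map with test_bit()/clear_bit(). The pattern in isolation (foo_* names hypothetical):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define FOO_MAX_ENTRIES 2048

static DECLARE_BITMAP(foo_loc_map, FOO_MAX_ENTRIES);

static int foo_claim_loc(unsigned int loc)
{
	if (loc >= FOO_MAX_ENTRIES)
		return -EINVAL;
	if (test_and_set_bit(loc, foo_loc_map))
		return -EBUSY;	/* location already holds a filter */
	return 0;
}

static void foo_release_loc(unsigned int loc)
{
	clear_bit(loc, foo_loc_map);
}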
< IXGBE_MAX_LINK_HANDLE; i++) { + jump = adapter->jump_tables[i]; + if (jump && jump->link_hdl == hdl) { + /* Delete filters in the hardware in the child hash + * table associated with this link + */ + for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) { + if (!test_bit(j, jump->child_loc_map)) + continue; + spin_lock(&adapter->fdir_perfect_lock); + err = ixgbe_update_ethtool_fdir_entry(adapter, + NULL, + j + 1); + spin_unlock(&adapter->fdir_perfect_lock); + clear_bit(j, jump->child_loc_map); + } + /* Remove resources for this link */ + kfree(jump->input); + kfree(jump->mask); + kfree(jump); + adapter->jump_tables[i] = NULL; + return err; + } + } spin_lock(&adapter->fdir_perfect_lock); err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc); @@ -8404,12 +8396,14 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, struct tcf_exts *exts, u64 *action, u8 *queue) { const struct tc_action *a; + LIST_HEAD(actions); int err; if (tc_no_actions(exts)) return -EINVAL; - tc_for_each_action(a, exts) { + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { /* Drop action */ if (is_tcf_gact_shot(a)) { @@ -8555,6 +8549,18 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, if (!test_bit(link_uhtid - 1, &adapter->tables)) return err; + /* Multiple filters as links to the same hash table are not + * supported. To add a new filter with the same next header + * but different match/jump conditions, create a new hash table + * and link to it. + */ + if (adapter->jump_tables[link_uhtid] && + (adapter->jump_tables[link_uhtid])->link_hdl) { + e_err(drv, "Link filter exists for link: %x\n", + link_uhtid); + return err; + } + for (i = 0; nexthdr[i].jump; i++) { if (nexthdr[i].o != cls->knode.sel->offoff || nexthdr[i].s != cls->knode.sel->offshift || @@ -8576,6 +8582,8 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, } jump->input = input; jump->mask = mask; + jump->link_hdl = cls->knode.handle; + err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, &nexthdr[i]); if (!err) { @@ -8603,6 +8611,20 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, if ((adapter->jump_tables[uhtid])->mask) memcpy(mask, (adapter->jump_tables[uhtid])->mask, sizeof(*mask)); + + /* Lookup in all child hash tables if this location is already + * filled with a filter + */ + for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) { + struct ixgbe_jump_table *link = adapter->jump_tables[i]; + + if (link && (test_bit(loc - 1, link->child_loc_map))) { + e_err(drv, "Filter exists in location: %x\n", + loc); + err = -EINVAL; + goto err_out; + } + } } err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL); if (err) @@ -8634,6 +8656,9 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); spin_unlock(&adapter->fdir_perfect_lock); + if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) + set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map); + kfree(mask); return err; err_out_w_lock: @@ -8776,14 +8801,12 @@ static int ixgbe_set_features(struct net_device *netdev, netdev->features = features; -#ifdef CONFIG_IXGBE_VXLAN if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { if (features & NETIF_F_RXCSUM) adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; else ixgbe_clear_vxlan_port(adapter); } -#endif /* CONFIG_IXGBE_VXLAN */ if (need_reset) ixgbe_do_reset(netdev); @@ -8794,23 +8817,25 @@ static int ixgbe_set_features(struct net_device *netdev, return 0; } -#ifdef 
CONFIG_IXGBE_VXLAN /** * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up * @dev: The port's netdev - * @sa_family: Socket Family that VXLAN is notifiying us about - * @port: New UDP port number that VXLAN started listening to + * @ti: Tunnel endpoint information **/ -static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, - __be16 port) +static void ixgbe_add_vxlan_port(struct net_device *dev, + struct udp_tunnel_info *ti) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; + __be16 port = ti->port; - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return; - if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) + return; + + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) return; if (adapter->vxlan_port == port) @@ -8830,30 +8855,31 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, /** * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away * @dev: The port's netdev - * @sa_family: Socket Family that VXLAN is notifying us about - * @port: UDP port number that VXLAN stopped listening to + * @ti: Tunnel endpoint information **/ -static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, - __be16 port) +static void ixgbe_del_vxlan_port(struct net_device *dev, + struct udp_tunnel_info *ti) { struct ixgbe_adapter *adapter = netdev_priv(dev); - if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return; - if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) return; - if (adapter->vxlan_port != port) { + if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) + return; + + if (adapter->vxlan_port != ti->port) { netdev_info(dev, "Port %d was not found, not deleting\n", - ntohs(port)); + ntohs(ti->port)); return; } ixgbe_clear_vxlan_port(adapter); adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; } -#endif /* CONFIG_IXGBE_VXLAN */ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, @@ -9166,10 +9192,8 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, .ndo_dfwd_add_station = ixgbe_fwd_add, .ndo_dfwd_del_station = ixgbe_fwd_del, -#ifdef CONFIG_IXGBE_VXLAN - .ndo_add_vxlan_port = ixgbe_add_vxlan_port, - .ndo_del_vxlan_port = ixgbe_del_vxlan_port, -#endif /* CONFIG_IXGBE_VXLAN */ + .ndo_udp_tunnel_add = ixgbe_add_vxlan_port, + .ndo_udp_tunnel_del = ixgbe_del_vxlan_port, .ndo_features_check = ixgbe_features_check, }; @@ -9337,8 +9361,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_using_dac = 0; } - err = pci_request_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM), ixgbe_driver_name); + err = pci_request_mem_regions(pdev, ixgbe_driver_name); if (err) { dev_err(&pdev->dev, "pci_request_selected_regions failed 0x%x\n", err); @@ -9725,8 +9748,7 @@ err_ioremap: disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, - pci_select_bars(pdev, IORESOURCE_MEM)); + pci_release_mem_regions(pdev); err_pci_reg: err_dma: if (!adapter || disable_dev) @@ -9793,8 +9815,7 @@ static void ixgbe_remove(struct pci_dev *pdev) #endif iounmap(adapter->io_addr); - pci_release_selected_regions(pdev, pci_select_bars(pdev, - IORESOURCE_MEM)); + pci_release_mem_regions(pdev); e_dev_info("complete\n"); @@ -10058,6 +10079,7 
@@ static int __init ixgbe_init_module(void) ret = pci_register_driver(&ixgbe_driver); if (ret) { + destroy_workqueue(ixgbe_wq); ixgbe_dbg_exit(); return ret; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h index a8bed3d88..538a1c547 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h @@ -42,8 +42,12 @@ struct ixgbe_jump_table { struct ixgbe_mat_field *mat; struct ixgbe_fdir_filter *input; union ixgbe_atr_input *mask; + u32 link_hdl; + unsigned long child_loc_map[32]; }; +#define IXGBE_MAX_HW_ENTRIES 2045 + static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, u32 val, u32 m) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index c5caacdd1..8618599df 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -954,6 +954,7 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, struct ixgbe_hw *hw = &adapter->hw; hw->mac.ops.set_mac_anti_spoofing(hw, false, vf); + hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); } } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index da3d8358f..1248a9936 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3525,6 +3525,7 @@ struct ixgbe_hw { bool force_full_reset; bool allow_unsupported_sfp; bool wol_enabled; + bool need_crosstalk_fix; }; struct ixgbe_info { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 19b75cd98..4716ca499 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1618,6 +1618,8 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; + mac->ops.setup_fc = ixgbe_setup_fc_x550em; + switch (mac->ops.get_media_type(hw)) { case ixgbe_media_type_fiber: /* CS4227 does not support autoneg, so disable the laser control @@ -1627,7 +1629,6 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) mac->ops.enable_tx_laser = NULL; mac->ops.flap_tx_laser = NULL; mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; - mac->ops.setup_fc = ixgbe_setup_fc_x550em; switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_SFP_N: mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n; @@ -1655,7 +1656,6 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) mac->ops.setup_link = ixgbe_setup_sgmii; break; default: - mac->ops.setup_fc = ixgbe_setup_fc_x550em; break; } } diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index ae09d60e7..8617cae2f 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -32,6 +32,7 @@ #define IXGBE_DEV_ID_X540_VF 0x1515 #define IXGBE_DEV_ID_X550_VF 0x1565 #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 #define IXGBE_DEV_ID_82599_VF_HV 0x152E #define IXGBE_DEV_ID_X540_VF_HV 0x1530 diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index d5944c391..be52f5976 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -457,6 +457,7 @@ enum ixgbevf_boards { board_X550_vf_hv, 
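Adding the X550EM_A VF in the surrounding hunks follows the usual three-part pattern for a new PCI device: an ID macro, a board enum value, and a pci_device_id entry whose driver_data indexes the matching info structure. In general form (foo_* names and the info contents are hypothetical; the ID is the one this patch adds):

#include <linux/module.h>
#include <linux/pci.h>

#define FOO_DEV_ID_X550EM_A_VF	0x15C5

enum foo_boards { board_x550em_a_vf };

struct foo_info { const char *name; };

static const struct foo_info foo_x550em_a_vf_info = {
	.name = "X550EM_A VF",
};

static const struct foo_info *foo_info_tbl[] = {
	[board_x550em_a_vf] = &foo_x550em_a_vf_info,
};

static const struct pci_device_id foo_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, FOO_DEV_ID_X550EM_A_VF), board_x550em_a_vf},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, foo_pci_tbl);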
board_X550EM_x_vf, board_X550EM_x_vf_hv, + board_x550em_a_vf, }; enum ixgbevf_xcast_modes { @@ -470,6 +471,7 @@ extern const struct ixgbevf_info ixgbevf_X540_vf_info; extern const struct ixgbevf_info ixgbevf_X550_vf_info; extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info; extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; +extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info; extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info; extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index acc24010c..d9d6616f0 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -56,7 +56,7 @@ const char ixgbevf_driver_name[] = "ixgbevf"; static const char ixgbevf_driver_string[] = "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; -#define DRV_VERSION "2.12.1-k" +#define DRV_VERSION "3.2.2-k" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = "Copyright (c) 2009 - 2015 Intel Corporation."; @@ -70,6 +70,7 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info, [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info, + [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info, }; /* ixgbevf_pci_tbl - PCI Device ID Table @@ -89,6 +90,7 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf }, /* required last entry */ {0, } }; @@ -1800,16 +1802,19 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, **/ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) { - int i; struct ixgbe_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; + int i, ret; ixgbevf_setup_psrtype(adapter); if (hw->mac.type >= ixgbe_mac_X550_vf) ixgbevf_setup_vfmrqc(adapter); /* notify the PF of our intent to use this size of frame */ - hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); + ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); + if (ret) + dev_err(&adapter->pdev->dev, + "Failed to set MTU at %d\n", netdev->mtu); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring @@ -2772,12 +2777,15 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) /* If we're already down or resetting, just bail */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || + test_bit(__IXGBEVF_REMOVING, &adapter->state) || test_bit(__IXGBEVF_RESETTING, &adapter->state)) return; adapter->tx_timeout_count++; + rtnl_lock(); ixgbevf_reinit_locked(adapter); + rtnl_unlock(); } /** @@ -3732,6 +3740,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) struct ixgbe_hw *hw = &adapter->hw; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; + int ret; switch (adapter->hw.api_version) { case ixgbe_mbox_api_11: @@ -3748,14 +3757,17 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) if ((new_mtu < 68) || (max_frame > max_possible_frame)) return -EINVAL; + /* notify the PF of our intent to use this size of frame */ + ret = 
hw->mac.ops.set_rlpml(hw, max_frame); + if (ret) + return -EINVAL; + hw_dbg(hw, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); + /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; - /* notify the PF of our intent to use this size of frame */ - hw->mac.ops.set_rlpml(hw, max_frame); - return 0; } diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index e670d3b19..a52f70ec4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -33,6 +33,18 @@ */ #define IXGBE_HV_RESET_OFFSET 0x201 +static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg, + u32 *retmsg, u16 size) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 retval = mbx->ops.write_posted(hw, msg, size); + + if (retval) + return retval; + + return mbx->ops.read_posted(hw, retmsg, size); +} + /** * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure @@ -255,8 +267,7 @@ static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) { - struct ixgbe_mbx_info *mbx = &hw->mbx; - u32 msgbuf[3]; + u32 msgbuf[3], msgbuf_chk; u8 *msg_addr = (u8 *)(&msgbuf[1]); s32 ret_val; @@ -268,19 +279,18 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) */ msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT; msgbuf[0] |= IXGBE_VF_SET_MACVLAN; + msgbuf_chk = msgbuf[0]; + if (addr) ether_addr_copy(msg_addr, addr); - ret_val = mbx->ops.write_posted(hw, msgbuf, 3); - if (!ret_val) - ret_val = mbx->ops.read_posted(hw, msgbuf, 3); + ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3); + if (!ret_val) { + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; - msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; - - if (!ret_val) - if (msgbuf[0] == - (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK)) - ret_val = -ENOMEM; + if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK)) + return -ENOMEM; + } return ret_val; } @@ -423,7 +433,6 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq) { - struct ixgbe_mbx_info *mbx = &hw->mbx; u32 msgbuf[3]; u8 *msg_addr = (u8 *)(&msgbuf[1]); s32 ret_val; @@ -431,10 +440,8 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, memset(msgbuf, 0, sizeof(msgbuf)); msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; ether_addr_copy(msg_addr, addr); - ret_val = mbx->ops.write_posted(hw, msgbuf, 3); - if (!ret_val) - ret_val = mbx->ops.read_posted(hw, msgbuf, 3); + ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3); msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; @@ -468,17 +475,6 @@ static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, return -EOPNOTSUPP; } -static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, - u32 *msg, u16 size) -{ - struct ixgbe_mbx_info *mbx = &hw->mbx; - u32 retmsg[IXGBE_VFMAILBOX_SIZE]; - s32 retval = mbx->ops.write_posted(hw, msg, size); - - if (!retval) - mbx->ops.read_posted(hw, retmsg, size); -} - /** * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses * @hw: pointer to the HW structure @@ -519,7 +515,7 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr); } - ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); + ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE); return 0; } @@ -542,7 +538,6 @@ static s32
ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, **/ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) { - struct ixgbe_mbx_info *mbx = &hw->mbx; u32 msgbuf[2]; s32 err; @@ -556,11 +551,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE; msgbuf[1] = xcast_mode; - err = mbx->ops.write_posted(hw, msgbuf, 2); - if (err) - return err; - - err = mbx->ops.read_posted(hw, msgbuf, 2); + err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); if (err) return err; @@ -589,7 +580,6 @@ static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) { - struct ixgbe_mbx_info *mbx = &hw->mbx; u32 msgbuf[2]; s32 err; @@ -598,11 +588,7 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; - err = mbx->ops.write_posted(hw, msgbuf, 2); - if (err) - goto mbx_err; - - err = mbx->ops.read_posted(hw, msgbuf, 2); + err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); if (err) goto mbx_err; @@ -797,13 +783,22 @@ out: * @hw: pointer to the HW structure * @max_size: value to assign to max frame size **/ -static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) +static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) { u32 msgbuf[2]; + s32 ret_val; msgbuf[0] = IXGBE_VF_SET_LPE; msgbuf[1] = max_size; - ixgbevf_write_msg_read_ack(hw, msgbuf, 2); + + ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2); + if (ret_val) + return ret_val; + if ((msgbuf[0] & IXGBE_VF_SET_LPE) && + (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK)) + return IXGBE_ERR_MBX; + + return 0; } /** @@ -812,7 +807,7 @@ static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) * @max_size: value to assign to max frame size * Hyper-V variant. 
**/ -static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) +static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) { u32 reg; @@ -823,6 +818,8 @@ static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) /* CRC == 4 */ reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN); IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg); + + return 0; } /** @@ -839,11 +836,8 @@ static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) msg[0] = IXGBE_VF_API_NEGOTIATE; msg[1] = api; msg[2] = 0; - err = hw->mbx.ops.write_posted(hw, msg, 3); - - if (!err) - err = hw->mbx.ops.read_posted(hw, msg, 3); + err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3); if (!err) { msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; @@ -892,11 +886,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, /* Fetch queue configuration from the PF */ msg[0] = IXGBE_VF_GET_QUEUE; msg[1] = msg[2] = msg[3] = msg[4] = 0; - err = hw->mbx.ops.write_posted(hw, msg, 5); - - if (!err) - err = hw->mbx.ops.read_posted(hw, msg, 5); + err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5); if (!err) { msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; @@ -1005,3 +996,8 @@ const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = { .mac = ixgbe_mac_X550EM_x_vf, .mac_ops = &ixgbevf_hv_mac_ops, }; + +const struct ixgbevf_info ixgbevf_x550em_a_vf_info = { + .mac = ixgbe_mac_x550em_a_vf, + .mac_ops = &ixgbevf_mac_ops, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 2cac610f3..04d8d4ee4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -69,7 +69,7 @@ struct ixgbe_mac_operations { s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); - void (*set_rlpml)(struct ixgbe_hw *, u16); + s32 (*set_rlpml)(struct ixgbe_hw *, u16); }; enum ixgbe_mac_type { @@ -78,6 +78,7 @@ enum ixgbe_mac_type { ixgbe_mac_X540_vf, ixgbe_mac_X550_vf, ixgbe_mac_X550EM_x_vf, + ixgbe_mac_x550em_a_vf, ixgbe_num_macs }; diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index dc82b1b19..91e09d68b 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -102,7 +102,6 @@ struct ltq_etop_priv { struct resource *res; struct mii_bus *mii_bus; - struct phy_device *phydev; struct ltq_etop_chan ch[MAX_DMA_CHAN]; int tx_free[MAX_DMA_CHAN >> 1]; @@ -305,34 +304,16 @@ ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) } static int -ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - - return phy_ethtool_gset(priv->phydev, cmd); -} - -static int -ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct ltq_etop_priv *priv = netdev_priv(dev); - - return phy_ethtool_sset(priv->phydev, cmd); -} - -static int ltq_etop_nway_reset(struct net_device *dev) { - struct ltq_etop_priv *priv = netdev_priv(dev); - - return phy_start_aneg(priv->phydev); + return phy_start_aneg(dev->phydev); } static const struct ethtool_ops ltq_etop_ethtool_ops = { .get_drvinfo = ltq_etop_get_drvinfo, - .get_settings = ltq_etop_get_settings, - .set_settings = ltq_etop_set_settings, .nway_reset = ltq_etop_nway_reset, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int @@ -401,7 +382,6 @@ ltq_etop_mdio_probe(struct net_device *dev) | SUPPORTED_TP); phydev->advertising 
= phydev->supported; - priv->phydev = phydev; phy_attached_info(phydev); return 0; @@ -411,7 +391,6 @@ static int ltq_etop_mdio_init(struct net_device *dev) { struct ltq_etop_priv *priv = netdev_priv(dev); - int i; int err; priv->mii_bus = mdiobus_alloc(); @@ -451,7 +430,7 @@ ltq_etop_mdio_cleanup(struct net_device *dev) { struct ltq_etop_priv *priv = netdev_priv(dev); - phy_disconnect(priv->phydev); + phy_disconnect(dev->phydev); mdiobus_unregister(priv->mii_bus); mdiobus_free(priv->mii_bus); } @@ -470,7 +449,7 @@ ltq_etop_open(struct net_device *dev) ltq_dma_open(&ch->dma); napi_enable(&ch->napi); } - phy_start(priv->phydev); + phy_start(dev->phydev); netif_tx_start_all_queues(dev); return 0; } @@ -482,7 +461,7 @@ ltq_etop_stop(struct net_device *dev) int i; netif_tx_stop_all_queues(dev); - phy_stop(priv->phydev); + phy_stop(dev->phydev); for (i = 0; i < MAX_DMA_CHAN; i++) { struct ltq_etop_chan *ch = &priv->ch[i]; @@ -557,10 +536,8 @@ ltq_etop_change_mtu(struct net_device *dev, int new_mtu) static int ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct ltq_etop_priv *priv = netdev_priv(dev); - /* TODO: mii-tool reports "No MII transceiver present!." ?!*/ - return phy_mii_ioctl(priv->phydev, rq, cmd); + return phy_mii_ioctl(dev->phydev, rq, cmd); } static int diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index f92018b13..d41c28d00 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4118,6 +4118,7 @@ static int mvneta_probe(struct platform_device *pdev) pp->bm_priv = NULL; } } + of_node_put(bm_node); err = mvneta_init(&pdev->dev, pp); if (err < 0) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 868a957f2..60227a345 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -699,7 +699,6 @@ struct mvpp2_port { u16 rx_ring_size; struct mvpp2_pcpu_stats __percpu *stats; - struct phy_device *phy_dev; phy_interface_t phy_interface; struct device_node *phy_node; unsigned int link; @@ -4850,7 +4849,7 @@ static irqreturn_t mvpp2_isr(int irq, void *dev_id) static void mvpp2_link_event(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); - struct phy_device *phydev = port->phy_dev; + struct phy_device *phydev = dev->phydev; int status_change = 0; u32 val; @@ -5416,6 +5415,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) /* Set hw internals when starting port */ static void mvpp2_start_dev(struct mvpp2_port *port) { + struct net_device *ndev = port->dev; + mvpp2_gmac_max_rx_size_set(port); mvpp2_txp_max_tx_size_set(port); @@ -5425,13 +5426,15 @@ static void mvpp2_start_dev(struct mvpp2_port *port) mvpp2_interrupts_enable(port); mvpp2_port_enable(port); - phy_start(port->phy_dev); + phy_start(ndev->phydev); netif_tx_start_all_queues(port->dev); } /* Set hw internals when stopping port */ static void mvpp2_stop_dev(struct mvpp2_port *port) { + struct net_device *ndev = port->dev; + /* Stop new packets from arriving to RXQs */ mvpp2_ingress_disable(port); @@ -5447,7 +5450,7 @@ static void mvpp2_stop_dev(struct mvpp2_port *port) mvpp2_egress_disable(port); mvpp2_port_disable(port); - phy_stop(port->phy_dev); + phy_stop(ndev->phydev); } /* Return positive if MTU is valid */ @@ -5535,7 +5538,6 @@ static int mvpp2_phy_connect(struct mvpp2_port *port) phy_dev->supported &= PHY_GBIT_FEATURES; phy_dev->advertising = phy_dev->supported; - port->phy_dev = phy_dev; port->link = 0;
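
The lantiq_etop, mvpp2 and pxa168 hunks in this stretch all perform the same conversion: the driver-private phy_device pointer is dropped in favour of the dev->phydev pointer that phylib already maintains once phy_connect() succeeds, which is what lets the generic link_ksettings helpers replace the per-driver get/set_settings callbacks. A minimal sketch of the resulting pattern; the foo_* names are hypothetical and not taken from any of these drivers:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>
	#include <linux/phy.h>

	static int foo_nway_reset(struct net_device *dev)
	{
		if (!dev->phydev)		/* phy_connect() has not run yet */
			return -ENODEV;
		return phy_start_aneg(dev->phydev);
	}

	static const struct ethtool_ops foo_ethtool_ops = {
		.nway_reset		= foo_nway_reset,
		/* both generic helpers walk dev->phydev, so no driver glue is needed */
		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	};
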
port->duplex = 0; port->speed = 0; @@ -5545,8 +5547,9 @@ static int mvpp2_phy_connect(struct mvpp2_port *port) static void mvpp2_phy_disconnect(struct mvpp2_port *port) { - phy_disconnect(port->phy_dev); - port->phy_dev = NULL; + struct net_device *ndev = port->dev; + + phy_disconnect(ndev->phydev); } static int mvpp2_open(struct net_device *dev) @@ -5796,13 +5799,12 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct mvpp2_port *port = netdev_priv(dev); int ret; - if (!port->phy_dev) + if (!dev->phydev) return -ENOTSUPP; - ret = phy_mii_ioctl(port->phy_dev, ifr, cmd); + ret = phy_mii_ioctl(dev->phydev, ifr, cmd); if (!ret) mvpp2_link_event(dev); @@ -5811,28 +5813,6 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /* Ethtool methods */ -/* Get settings (phy address, speed) for ethtools */ -static int mvpp2_ethtool_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct mvpp2_port *port = netdev_priv(dev); - - if (!port->phy_dev) - return -ENODEV; - return phy_ethtool_gset(port->phy_dev, cmd); -} - -/* Set settings (phy address, speed) for ethtools */ -static int mvpp2_ethtool_set_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct mvpp2_port *port = netdev_priv(dev); - - if (!port->phy_dev) - return -ENODEV; - return phy_ethtool_sset(port->phy_dev, cmd); -} - /* Set interrupt coalescing for ethtools */ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) @@ -5967,13 +5947,13 @@ static const struct net_device_ops mvpp2_netdev_ops = { static const struct ethtool_ops mvpp2_eth_tool_ops = { .get_link = ethtool_op_get_link, - .get_settings = mvpp2_ethtool_get_settings, - .set_settings = mvpp2_ethtool_set_settings, .set_coalesce = mvpp2_ethtool_set_coalesce, .get_coalesce = mvpp2_ethtool_get_coalesce, .get_drvinfo = mvpp2_ethtool_get_drvinfo, .get_ringparam = mvpp2_ethtool_get_ringparam, .set_ringparam = mvpp2_ethtool_set_ringparam, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; /* Driver initialization */ @@ -6254,6 +6234,7 @@ err_free_stats: err_free_irq: irq_dispose_mapping(port->irq); err_free_netdev: + of_node_put(phy_node); free_netdev(dev); return err; } @@ -6264,6 +6245,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port) int i; unregister_netdev(port->dev); + of_node_put(port->phy_node); free_percpu(port->pcpu); free_percpu(port->stats); for (i = 0; i < txq_number; i++) diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 54d5154ac..5d5000c8e 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -247,7 +247,6 @@ struct pxa168_eth_private { */ struct timer_list timeout; struct mii_bus *smi_bus; - struct phy_device *phy; /* clock */ struct clk *clk; @@ -275,8 +274,8 @@ enum hash_table_entry { HASH_ENTRY_RECEIVE_DISCARD_BIT = 2 }; -static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); -static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd); +static int pxa168_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd); static int pxa168_init_hw(struct pxa168_eth_private *pep); static int pxa168_init_phy(struct net_device *dev); static void eth_port_reset(struct net_device *dev); @@ -644,7 +643,7 @@ static void 
eth_port_start(struct net_device *dev) struct pxa168_eth_private *pep = netdev_priv(dev); int tx_curr_desc, rx_curr_desc; - phy_start(pep->phy); + phy_start(dev->phydev); /* Assignment of Tx CTRP of given queue */ tx_curr_desc = pep->tx_curr_desc_q; @@ -700,7 +699,7 @@ static void eth_port_reset(struct net_device *dev) val &= ~PCR_EN; wrl(pep, PORT_CONFIG, val); - phy_stop(pep->phy); + phy_stop(dev->phydev); } /* @@ -943,7 +942,7 @@ static int set_port_config_ext(struct pxa168_eth_private *pep) static void pxa168_eth_adjust_link(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); - struct phy_device *phy = pep->phy; + struct phy_device *phy = dev->phydev; u32 cfg, cfg_o = rdl(pep, PORT_CONFIG); u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT); @@ -972,35 +971,37 @@ static void pxa168_eth_adjust_link(struct net_device *dev) static int pxa168_init_phy(struct net_device *dev) { struct pxa168_eth_private *pep = netdev_priv(dev); - struct ethtool_cmd cmd; + struct ethtool_link_ksettings cmd; + struct phy_device *phy = NULL; int err; - if (pep->phy) + if (dev->phydev) return 0; - pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr); - if (IS_ERR(pep->phy)) - return PTR_ERR(pep->phy); + phy = mdiobus_scan(pep->smi_bus, pep->phy_addr); + if (IS_ERR(phy)) + return PTR_ERR(phy); - err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link, + err = phy_connect_direct(dev, phy, pxa168_eth_adjust_link, pep->phy_intf); if (err) return err; - err = pxa168_get_settings(dev, &cmd); + err = pxa168_get_link_ksettings(dev, &cmd); if (err) return err; - cmd.phy_address = pep->phy_addr; - cmd.speed = pep->phy_speed; - cmd.duplex = pep->phy_duplex; - cmd.advertising = PHY_BASIC_FEATURES; - cmd.autoneg = AUTONEG_ENABLE; + cmd.base.phy_address = pep->phy_addr; + cmd.base.speed = pep->phy_speed; + cmd.base.duplex = pep->phy_duplex; + ethtool_convert_legacy_u32_to_link_mode(cmd.link_modes.advertising, + PHY_BASIC_FEATURES); + cmd.base.autoneg = AUTONEG_ENABLE; - if (cmd.speed != 0) - cmd.autoneg = AUTONEG_DISABLE; + if (cmd.base.speed != 0) + cmd.base.autoneg = AUTONEG_DISABLE; - return pxa168_set_settings(dev, &cmd); + return phy_ethtool_set_link_ksettings(dev, &cmd); } static int pxa168_init_hw(struct pxa168_eth_private *pep) @@ -1366,32 +1367,24 @@ static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum, static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct pxa168_eth_private *pep = netdev_priv(dev); - if (pep->phy != NULL) - return phy_mii_ioctl(pep->phy, ifr, cmd); + if (dev->phydev != NULL) + return phy_mii_ioctl(dev->phydev, ifr, cmd); return -EOPNOTSUPP; } -static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int pxa168_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { - struct pxa168_eth_private *pep = netdev_priv(dev); int err; - err = phy_read_status(pep->phy); + err = phy_read_status(dev->phydev); if (err == 0) - err = phy_ethtool_gset(pep->phy, cmd); + err = phy_ethtool_ksettings_get(dev->phydev, cmd); return err; } -static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct pxa168_eth_private *pep = netdev_priv(dev); - - return phy_ethtool_sset(pep->phy, cmd); -} - static void pxa168_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -1402,11 +1395,11 @@ static void pxa168_get_drvinfo(struct net_device *dev, } static const struct ethtool_ops pxa168_ethtool_ops = { - .get_settings = 
pxa168_get_settings, - .set_settings = pxa168_set_settings, .get_drvinfo = pxa168_get_drvinfo, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = pxa168_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct net_device_ops pxa168_eth_netdev_ops = { @@ -1513,6 +1506,7 @@ static int pxa168_eth_probe(struct platform_device *pdev) } of_property_read_u32(np, "reg", &pep->phy_addr); pep->phy_intf = of_get_phy_mode(pdev->dev.of_node); + of_node_put(np); } /* Hardware supports only 3 ports */ @@ -1569,8 +1563,8 @@ static int pxa168_eth_remove(struct platform_device *pdev) pep->htpr, pep->htpr_dma); pep->htpr = NULL; } - if (pep->phy) - phy_disconnect(pep->phy); + if (dev->phydev) + phy_disconnect(dev->phydev); if (pep->clk) { clk_disable_unprepare(pep->clk); } diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index d1cdc2d76..3743af8f1 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -50,6 +50,10 @@ static const struct mtk_ethtool_stats { MTK_ETHTOOL_STAT(rx_flow_control_packets), }; +static const char * const mtk_clks_source_name[] = { + "ethif", "esw", "gp1", "gp2" +}; + void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) { __raw_writel(val, eth->base + reg); @@ -76,8 +80,8 @@ static int mtk_mdio_busy_wait(struct mtk_eth *eth) return -1; } -u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, - u32 phy_register, u32 write_data) +static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, + u32 phy_register, u32 write_data) { if (mtk_mdio_busy_wait(eth)) return -1; @@ -95,7 +99,7 @@ u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, return 0; } -u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg) +static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg) { u32 d; @@ -245,12 +249,16 @@ static int mtk_phy_connect(struct mtk_mac *mac) case PHY_INTERFACE_MODE_MII: ge_mode = 1; break; - case PHY_INTERFACE_MODE_RMII: + case PHY_INTERFACE_MODE_REVMII: ge_mode = 2; break; + case PHY_INTERFACE_MODE_RMII: + if (!mac->id) + goto err_phy; + ge_mode = 3; + break; default: - dev_err(eth->dev, "invalid phy_mode\n"); - return -1; + goto err_phy; } /* put the gmac into the right mode */ @@ -263,19 +271,31 @@ static int mtk_phy_connect(struct mtk_mac *mac) mac->phy_dev->autoneg = AUTONEG_ENABLE; mac->phy_dev->speed = 0; mac->phy_dev->duplex = 0; + + if (of_phy_is_fixed_link(mac->of_node)) + mac->phy_dev->supported |= + SUPPORTED_Pause | SUPPORTED_Asym_Pause; + mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause; mac->phy_dev->advertising = mac->phy_dev->supported | ADVERTISED_Autoneg; phy_start_aneg(mac->phy_dev); + of_node_put(np); + return 0; + +err_phy: + of_node_put(np); + dev_err(eth->dev, "invalid phy_mode\n"); + return -EINVAL; } static int mtk_mdio_init(struct mtk_eth *eth) { struct device_node *mii_np; - int err; + int ret; mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); if (!mii_np) { @@ -284,13 +304,13 @@ static int mtk_mdio_init(struct mtk_eth *eth) } if (!of_device_is_available(mii_np)) { - err = 0; + ret = -ENODEV; goto err_put_node; } - eth->mii_bus = mdiobus_alloc(); + eth->mii_bus = devm_mdiobus_alloc(eth->dev); if (!eth->mii_bus) { - err = -ENOMEM; + ret = -ENOMEM; goto err_put_node; } @@ -301,19 +321,11 @@ static int mtk_mdio_init(struct mtk_eth *eth) eth->mii_bus->parent = eth->dev; 
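
The devm_mdiobus_alloc() conversion above is what allows the err_free_bus label to disappear just below: the bus allocation is now tied to the device lifetime, so the error path only has to balance the of_node_put(). A condensed sketch of the shape mtk_mdio_init() ends up with, with the bus name/read/write setup elided and foo_* as placeholder names:

	#include <linux/of.h>
	#include <linux/of_mdio.h>
	#include <linux/phy.h>

	static int foo_mdio_init(struct device *dev, struct mii_bus **bus)
	{
		struct device_node *np;
		int ret;

		np = of_get_child_by_name(dev->of_node, "mdio-bus");
		if (!np)
			return -ENODEV;

		*bus = devm_mdiobus_alloc(dev);	/* freed automatically on unbind */
		if (!*bus) {
			ret = -ENOMEM;
			goto put_node;
		}
		/* (*bus)->name, ->read, ->write and ->id setup elided */

		ret = of_mdiobus_register(*bus, np);	/* no mdiobus_free() on failure */
	put_node:
		of_node_put(np);	/* balance of_get_child_by_name() on every path */
		return ret;
	}
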
snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name); - err = of_mdiobus_register(eth->mii_bus, mii_np); - if (err) - goto err_free_bus; - - return 0; - -err_free_bus: - mdiobus_free(eth->mii_bus); + ret = of_mdiobus_register(eth->mii_bus, mii_np); err_put_node: of_node_put(mii_np); - eth->mii_bus = NULL; - return err; + return ret; } static void mtk_mdio_cleanup(struct mtk_eth *eth) @@ -322,28 +334,28 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth) return; mdiobus_unregister(eth->mii_bus); - of_node_put(eth->mii_bus->dev.of_node); - mdiobus_free(eth->mii_bus); } static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) { + unsigned long flags; u32 val; + spin_lock_irqsave(ð->irq_lock, flags); val = mtk_r32(eth, MTK_QDMA_INT_MASK); mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK); - /* flush write */ - mtk_r32(eth, MTK_QDMA_INT_MASK); + spin_unlock_irqrestore(ð->irq_lock, flags); } static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask) { + unsigned long flags; u32 val; + spin_lock_irqsave(ð->irq_lock, flags); val = mtk_r32(eth, MTK_QDMA_INT_MASK); mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK); - /* flush write */ - mtk_r32(eth, MTK_QDMA_INT_MASK); + spin_unlock_irqrestore(ð->irq_lock, flags); } static int mtk_set_mac_address(struct net_device *dev, void *p) @@ -540,15 +552,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, return &ring->buf[idx]; } -static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf) +static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf) { if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { - dma_unmap_single(dev, + dma_unmap_single(eth->dev, dma_unmap_addr(tx_buf, dma_addr0), dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE); } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { - dma_unmap_page(dev, + dma_unmap_page(eth->dev, dma_unmap_addr(tx_buf, dma_addr0), dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE); @@ -570,14 +582,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, dma_addr_t mapped_addr; unsigned int nr_frags; int i, n_desc = 1; - u32 txd4 = 0; + u32 txd4 = 0, fport; itxd = ring->next_free; if (itxd == ring->last_free) return -ENOMEM; /* set the forward port */ - txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT; + fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT; + txd4 |= fport; tx_buf = mtk_desc_to_tx_buf(ring, itxd); memset(tx_buf, 0, sizeof(*tx_buf)); @@ -593,9 +606,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, if (skb_vlan_tag_present(skb)) txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); - mapped_addr = dma_map_single(&dev->dev, skb->data, + mapped_addr = dma_map_single(eth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) + if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) return -ENOMEM; WRITE_ONCE(itxd->txd1, mapped_addr); @@ -621,10 +634,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, n_desc++; frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); - mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset, + mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, frag_map_size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) + if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) goto err_dma; if (i == nr_frags - 1 && @@ -635,7 +648,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, WRITE_ONCE(txd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(frag_map_size) | last_frag * TX_DMA_LS0)); - WRITE_ONCE(txd->txd4, 
0); + WRITE_ONCE(txd->txd4, fport); tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; tx_buf = mtk_desc_to_tx_buf(ring, txd); @@ -677,7 +690,7 @@ err_dma: tx_buf = mtk_desc_to_tx_buf(ring, itxd); /* unmap dma */ - mtk_tx_unmap(&dev->dev, tx_buf); + mtk_tx_unmap(eth, tx_buf); itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); @@ -798,7 +811,7 @@ drop: } static int mtk_poll_rx(struct napi_struct *napi, int budget, - struct mtk_eth *eth, u32 rx_intr) + struct mtk_eth *eth) { struct mtk_rx_ring *ring = ð->rx_ring; int idx = ring->calc_idx; @@ -834,11 +847,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, netdev->stats.rx_dropped++; goto release_desc; } - dma_addr = dma_map_single(ð->netdev[mac]->dev, + dma_addr = dma_map_single(eth->dev, new_data + NET_SKB_PAD, ring->buf_size, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) { + if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { skb_free_frag(new_data); netdev->stats.rx_dropped++; goto release_desc; @@ -847,13 +860,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, /* receive data */ skb = build_skb(data, ring->frag_size); if (unlikely(!skb)) { - put_page(virt_to_head_page(new_data)); + skb_free_frag(new_data); netdev->stats.rx_dropped++; goto release_desc; } skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); - dma_unmap_single(&netdev->dev, trxd.rxd1, + dma_unmap_single(eth->dev, trxd.rxd1, ring->buf_size, DMA_FROM_DEVICE); pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); skb->dev = netdev; @@ -886,22 +899,22 @@ release_desc: } if (done < budget) - mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS); + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS); return done; } -static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again) +static int mtk_poll_tx(struct mtk_eth *eth, int budget) { struct mtk_tx_ring *ring = ð->tx_ring; struct mtk_tx_dma *desc; struct sk_buff *skb; struct mtk_tx_buf *tx_buf; - int total = 0, done[MTK_MAX_DEVS]; + unsigned int done[MTK_MAX_DEVS]; unsigned int bytes[MTK_MAX_DEVS]; u32 cpu, dma; static int condition; - int i; + int total = 0, i; memset(done, 0, sizeof(done)); memset(bytes, 0, sizeof(bytes)); @@ -935,7 +948,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again) done[mac]++; budget--; } - mtk_tx_unmap(eth->dev, tx_buf); + mtk_tx_unmap(eth, tx_buf); ring->last_free = desc; atomic_inc(&ring->free_count); @@ -952,15 +965,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again) total += done[i]; } - /* read hw index again make sure no new tx packet */ - if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR)) - *tx_again = true; - else - mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS); - - if (!total) - return 0; - if (mtk_queue_stopped(eth) && (atomic_read(&ring->free_count) > ring->thresh)) mtk_wake_queue(eth); @@ -968,49 +972,75 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again) return total; } -static int mtk_poll(struct napi_struct *napi, int budget) +static void mtk_handle_status_irq(struct mtk_eth *eth) { - struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); - u32 status, status2, mask, tx_intr, rx_intr, status_intr; - int tx_done, rx_done; - bool tx_again = false; + u32 status2 = mtk_r32(eth, MTK_INT_STATUS2); - status = mtk_r32(eth, MTK_QMTK_INT_STATUS); - status2 = mtk_r32(eth, MTK_INT_STATUS2); - tx_intr = MTK_TX_DONE_INT; - rx_intr = MTK_RX_DONE_INT; - status_intr = (MTK_GDM1_AF | MTK_GDM2_AF); - tx_done = 0; - rx_done = 0; - 
tx_again = 0; + if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) { + mtk_stats_update(eth); + mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF), + MTK_INT_STATUS2); + } +} - if (status & tx_intr) - tx_done = mtk_poll_tx(eth, budget, &tx_again); +static int mtk_napi_tx(struct napi_struct *napi, int budget) +{ + struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi); + u32 status, mask; + int tx_done = 0; - if (status & rx_intr) - rx_done = mtk_poll_rx(napi, budget, eth, rx_intr); + mtk_handle_status_irq(eth); + mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS); + tx_done = mtk_poll_tx(eth, budget); - if (unlikely(status2 & status_intr)) { - mtk_stats_update(eth); - mtk_w32(eth, status_intr, MTK_INT_STATUS2); + if (unlikely(netif_msg_intr(eth))) { + status = mtk_r32(eth, MTK_QMTK_INT_STATUS); + mask = mtk_r32(eth, MTK_QDMA_INT_MASK); + dev_info(eth->dev, + "done tx %d, intr 0x%08x/0x%x\n", + tx_done, status, mask); } + if (tx_done == budget) + return budget; + + status = mtk_r32(eth, MTK_QMTK_INT_STATUS); + if (status & MTK_TX_DONE_INT) + return budget; + + napi_complete(napi); + mtk_irq_enable(eth, MTK_TX_DONE_INT); + + return tx_done; +} + +static int mtk_napi_rx(struct napi_struct *napi, int budget) +{ + struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); + u32 status, mask; + int rx_done = 0; + + mtk_handle_status_irq(eth); + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS); + rx_done = mtk_poll_rx(napi, budget, eth); + if (unlikely(netif_msg_intr(eth))) { + status = mtk_r32(eth, MTK_QMTK_INT_STATUS); mask = mtk_r32(eth, MTK_QDMA_INT_MASK); - netdev_info(eth->netdev[0], - "done tx %d, rx %d, intr 0x%08x/0x%x\n", - tx_done, rx_done, status, mask); + dev_info(eth->dev, + "done rx %d, intr 0x%08x/0x%x\n", + rx_done, status, mask); } - if (tx_again || rx_done == budget) + if (rx_done == budget) return budget; status = mtk_r32(eth, MTK_QMTK_INT_STATUS); - if (status & (tx_intr | rx_intr)) + if (status & MTK_RX_DONE_INT) return budget; napi_complete(napi); - mtk_irq_enable(eth, tx_intr | rx_intr); + mtk_irq_enable(eth, MTK_RX_DONE_INT); return rx_done; } @@ -1073,7 +1103,7 @@ static void mtk_tx_clean(struct mtk_eth *eth) if (ring->buf) { for (i = 0; i < MTK_DMA_SIZE; i++) - mtk_tx_unmap(eth->dev, &ring->buf[i]); + mtk_tx_unmap(eth, &ring->buf[i]); kfree(ring->buf); ring->buf = NULL; } @@ -1246,22 +1276,26 @@ static void mtk_tx_timeout(struct net_device *dev) schedule_work(ð->pending_work); } -static irqreturn_t mtk_handle_irq(int irq, void *_eth) +static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth) { struct mtk_eth *eth = _eth; - u32 status; - status = mtk_r32(eth, MTK_QMTK_INT_STATUS); - if (unlikely(!status)) - return IRQ_NONE; + if (likely(napi_schedule_prep(ð->rx_napi))) { + __napi_schedule(ð->rx_napi); + mtk_irq_disable(eth, MTK_RX_DONE_INT); + } - if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) { - if (likely(napi_schedule_prep(ð->rx_napi))) - __napi_schedule(ð->rx_napi); - } else { - mtk_w32(eth, status, MTK_QMTK_INT_STATUS); + return IRQ_HANDLED; +} + +static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth) +{ + struct mtk_eth *eth = _eth; + + if (likely(napi_schedule_prep(ð->tx_napi))) { + __napi_schedule(ð->tx_napi); + mtk_irq_disable(eth, MTK_TX_DONE_INT); } - mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT)); return IRQ_HANDLED; } @@ -1274,7 +1308,7 @@ static void mtk_poll_controller(struct net_device *dev) u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT; mtk_irq_disable(eth, int_mask); - mtk_handle_irq(dev->irq, dev); + 
mtk_handle_irq_rx(eth->irq[2], dev); mtk_irq_enable(eth, int_mask); } #endif @@ -1310,6 +1344,7 @@ static int mtk_open(struct net_device *dev) if (err) return err; + napi_enable(ð->tx_napi); napi_enable(ð->rx_napi); mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); } @@ -1358,6 +1393,7 @@ static int mtk_stop(struct net_device *dev) return 0; mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); + napi_disable(ð->tx_napi); napi_disable(ð->rx_napi); mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); @@ -1395,7 +1431,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth) /* Enable RX VLan Offloading */ mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); - err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0, + err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0, + dev_name(eth->dev), eth); + if (err) + return err; + err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0, dev_name(eth->dev), eth); if (err) return err; @@ -1411,7 +1451,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth) mtk_w32(eth, 0, MTK_RST_GL); /* FE int grouping */ - mtk_w32(eth, 0, MTK_FE_INT_GRP); + mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1); + mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2); + mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1); + mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2); + mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); for (i = 0; i < 2; i++) { u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); @@ -1457,9 +1501,7 @@ static void mtk_uninit(struct net_device *dev) struct mtk_eth *eth = mac->hw; phy_disconnect(mac->phy_dev); - mtk_mdio_cleanup(eth); mtk_irq_disable(eth, ~0); - free_irq(dev->irq, dev); } static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) @@ -1717,6 +1759,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) goto free_netdev; } spin_lock_init(&mac->hw_stats->stats_lock); + u64_stats_init(&mac->hw_stats->syncp); mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; SET_NETDEV_DEV(eth->netdev[id], eth->dev); @@ -1733,10 +1776,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) dev_err(eth->dev, "error bringing up device\n"); goto free_netdev; } - eth->netdev[id]->irq = eth->irq; + eth->netdev[id]->irq = eth->irq[0]; netif_info(eth, probe, eth->netdev[id], "mediatek frame engine at 0x%08lx, irq %d\n", - eth->netdev[id]->base_addr, eth->netdev[id]->irq); + eth->netdev[id]->base_addr, eth->irq[0]); return 0; @@ -1753,6 +1796,7 @@ static int mtk_probe(struct platform_device *pdev) struct mtk_soc_data *soc; struct mtk_eth *eth; int err; + int i; match = of_match_device(of_mtk_match, &pdev->dev); soc = (struct mtk_soc_data *)match->data; @@ -1761,11 +1805,13 @@ static int mtk_probe(struct platform_device *pdev) if (!eth) return -ENOMEM; + eth->dev = &pdev->dev; eth->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(eth->base)) return PTR_ERR(eth->base); spin_lock_init(ð->page_lock); + spin_lock_init(ð->irq_lock); eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "mediatek,ethsys"); @@ -1787,26 +1833,28 @@ static int mtk_probe(struct platform_device *pdev) return PTR_ERR(eth->rstc); } - eth->irq = platform_get_irq(pdev, 0); - if (eth->irq < 0) { - dev_err(&pdev->dev, "no IRQ resource found\n"); - return -ENXIO; + for (i = 0; i < 3; i++) { + eth->irq[i] = platform_get_irq(pdev, i); + if (eth->irq[i] < 0) { + dev_err(&pdev->dev, "no IRQ%d resource found\n", i); + return -ENXIO; + } + } + for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { + eth->clks[i] = devm_clk_get(eth->dev, + 
mtk_clks_source_name[i]); + if (IS_ERR(eth->clks[i])) { + if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) + return -EPROBE_DEFER; + return -ENODEV; + } } - eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif"); - eth->clk_esw = devm_clk_get(&pdev->dev, "esw"); - eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1"); - eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2"); - if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) || - IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif)) - return -ENODEV; - - clk_prepare_enable(eth->clk_ethif); - clk_prepare_enable(eth->clk_esw); - clk_prepare_enable(eth->clk_gp1); - clk_prepare_enable(eth->clk_gp2); + clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]); + clk_prepare_enable(eth->clks[MTK_CLK_ESW]); + clk_prepare_enable(eth->clks[MTK_CLK_GP1]); + clk_prepare_enable(eth->clks[MTK_CLK_GP2]); - eth->dev = &pdev->dev; eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); INIT_WORK(ð->pending_work, mtk_pending_work); @@ -1831,7 +1879,9 @@ static int mtk_probe(struct platform_device *pdev) * for NAPI to work */ init_dummy_netdev(ð->dummy_dev); - netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_poll, + netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx, + MTK_NAPI_WEIGHT); + netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx, MTK_NAPI_WEIGHT); platform_set_drvdata(pdev, eth); @@ -1846,14 +1896,24 @@ err_free_dev: static int mtk_remove(struct platform_device *pdev) { struct mtk_eth *eth = platform_get_drvdata(pdev); + int i; - clk_disable_unprepare(eth->clk_ethif); - clk_disable_unprepare(eth->clk_esw); - clk_disable_unprepare(eth->clk_gp1); - clk_disable_unprepare(eth->clk_gp2); + /* stop all devices to make sure that dma is properly shut down */ + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + mtk_stop(eth->netdev[i]); + } + clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]); + clk_disable_unprepare(eth->clks[MTK_CLK_ESW]); + clk_disable_unprepare(eth->clks[MTK_CLK_GP1]); + clk_disable_unprepare(eth->clks[MTK_CLK_GP2]); + + netif_napi_del(ð->tx_napi); netif_napi_del(ð->rx_napi); mtk_cleanup(eth); + mtk_mdio_cleanup(eth); platform_set_drvdata(pdev, NULL); return 0; @@ -1863,13 +1923,13 @@ const struct of_device_id of_mtk_match[] = { { .compatible = "mediatek,mt7623-eth" }, {}, }; +MODULE_DEVICE_TABLE(of, of_mtk_match); static struct platform_driver mtk_driver = { .probe = mtk_probe, .remove = mtk_remove, .driver = { .name = "mtk_soc_eth", - .owner = THIS_MODULE, .of_match_table = of_mtk_match, }, }; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index a5eb7c623..6e1ade7a2 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -68,6 +68,10 @@ /* Unicast Filter MAC Address Register - High */ #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000)) +/* PDMA Interrupt grouping registers */ +#define MTK_PDMA_INT_GRP1 0xa50 +#define MTK_PDMA_INT_GRP2 0xa54 + /* QDMA TX Queue Configuration Registers */ #define MTK_QTX_CFG(x) (0x1800 + (x * 0x10)) #define QDMA_RES_THRES 4 @@ -125,6 +129,11 @@ #define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \ MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3) +/* QDMA Interrupt grouping registers */ +#define MTK_QDMA_INT_GRP1 0x1a20 +#define MTK_QDMA_INT_GRP2 0x1a24 +#define MTK_RLS_DONE_INT BIT(0) + /* QDMA Interrupt Status Register */ #define MTK_QDMA_INT_MASK 0x1A1C @@ -281,6 +290,17 @@ enum mtk_tx_flags { MTK_TX_FLAGS_PAGE0 = 0x02, }; +/* This enum allows us to identify how the clock is defined on the array of 
the + * clock in the order + */ +enum mtk_clks_map { + MTK_CLK_ETHIF, + MTK_CLK_ESW, + MTK_CLK_GP1, + MTK_CLK_GP2, + MTK_CLK_MAX +}; + /* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at * by the TX descriptors * @skb: The SKB pointer of the packet being sent @@ -356,14 +376,12 @@ struct mtk_rx_ring { * @dma_refcnt: track how many netdevs are using the DMA engine * @tx_ring: Pointer to the memory holding info about the TX ring * @rx_ring: Pointer to the memory holding info about the RX ring - * @rx_napi: The NAPI struct + * @tx_napi: The TX NAPI struct + * @rx_napi: The RX NAPI struct * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring * @phy_scratch_ring: physical address of scratch_ring * @scratch_head: The scratch memory that scratch_ring points to. - * @clk_ethif: The ethif clock - * @clk_esw: The switch clock - * @clk_gp1: The gmac1 clock - * @clk_gp2: The gmac2 clock + * @clks: clock array for all clocks required * @mii_bus: If there is a bus we need to create an instance for it * @pending_work: The workqueue used to reset the dma ring */ @@ -373,10 +391,11 @@ struct mtk_eth { void __iomem *base; struct reset_control *rstc; spinlock_t page_lock; + spinlock_t irq_lock; struct net_device dummy_dev; struct net_device *netdev[MTK_MAX_DEVS]; struct mtk_mac *mac[MTK_MAX_DEVS]; - int irq; + int irq[3]; u32 msg_enable; unsigned long sysclk; struct regmap *ethsys; @@ -384,14 +403,13 @@ struct mtk_eth { atomic_t dma_refcnt; struct mtk_tx_ring tx_ring; struct mtk_rx_ring rx_ring; + struct napi_struct tx_napi; struct napi_struct rx_napi; struct mtk_tx_dma *scratch_ring; dma_addr_t phy_scratch_ring; void *scratch_head; - struct clk *clk_ethif; - struct clk *clk_esw; - struct clk *clk_gp1; - struct clk *clk_gp2; + struct clk *clks[MTK_CLK_MAX]; + struct mii_bus *mii_bus; struct work_struct pending_work; }; diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index 9ca3734eb..5098e7f21 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -24,13 +24,6 @@ config MLX4_EN_DCB If unsure, set to Y -config MLX4_EN_VXLAN - bool "VXLAN offloads Support" - default y - depends on MLX4_EN && VXLAN && !(MLX4_EN=y && VXLAN=m) - ---help--- - Say Y here if you want to use VXLAN offloads in the driver.
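
Removing MLX4_EN_VXLAN is possible because, later in this patch, the driver moves from the VXLAN-specific ndo hooks to the generic UDP tunnel offload API, which is built unconditionally: the callback now receives a struct udp_tunnel_info and filters on tunnel type itself. A sketch of the new callback shape, with foo as a hypothetical driver:

	#include <net/udp_tunnel.h>

	static void foo_udp_tunnel_add(struct net_device *dev,
				       struct udp_tunnel_info *ti)
	{
		/* one ndo now covers all tunnel types, so filter explicitly */
		if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
			return;
		if (ti->sa_family != AF_INET)	/* mlx4 offloads IPv4 VXLAN only */
			return;
		/* program ti->port into the hardware packet parser here */
	}
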
- config MLX4_CORE tristate depends on PCI diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c index f01918c63..b04760a50 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c @@ -37,6 +37,11 @@ #include "mlx4_en.h" #include "fw_qos.h" +enum { + MLX4_CEE_STATE_DOWN = 0, + MLX4_CEE_STATE_UP = 1, +}; + /* Definitions for QCN */ @@ -80,13 +85,205 @@ struct mlx4_congestion_control_mb_prio_802_1_qau_statistics { __be32 reserved3[4]; }; +static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + switch (capid) { + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_DCBX: + *cap = priv->dcbx_cap; + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 1 << mlx4_max_tc(priv->mdev->dev); + break; + default: + *cap = false; + break; + } + + return 0; +} + +static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + + return priv->cee_config.pfc_state; +} + +static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + + priv->cee_config.pfc_state = state; +} + +static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, + u8 *setting) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + + *setting = priv->cee_config.dcb_pfc[priority]; +} + +static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, + u8 setting) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + + priv->cee_config.dcb_pfc[priority] = setting; + priv->cee_config.pfc_state = true; +} + +static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + + if (!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED)) + return -EINVAL; + + if (tcid == DCB_NUMTCS_ATTR_PFC) + *num = mlx4_max_tc(priv->mdev->dev); + else + *num = 0; + + return 0; +} + +static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + struct mlx4_en_dev *mdev = priv->mdev; + + if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return 1; + + if (priv->cee_config.pfc_state) { + int tc; + + priv->prof->rx_pause = 0; + priv->prof->tx_pause = 0; + for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) { + u8 tc_mask = 1 << tc; + + switch (priv->cee_config.dcb_pfc[tc]) { + case pfc_disabled: + priv->prof->tx_ppp &= ~tc_mask; + priv->prof->rx_ppp &= ~tc_mask; + break; + case pfc_enabled_full: + priv->prof->tx_ppp |= tc_mask; + priv->prof->rx_ppp |= tc_mask; + break; + case pfc_enabled_tx: + priv->prof->tx_ppp |= tc_mask; + priv->prof->rx_ppp &= ~tc_mask; + break; + case pfc_enabled_rx: + priv->prof->tx_ppp &= ~tc_mask; + priv->prof->rx_ppp |= tc_mask; + break; + default: + break; + } + } + en_dbg(DRV, priv, "Set pfc on\n"); + } else { + priv->prof->rx_pause = 1; + priv->prof->tx_pause = 1; + en_dbg(DRV, priv, "Set pfc off\n"); + } + + if (mlx4_SET_PORT_general(mdev->dev, priv->port, + priv->rx_skb_size + ETH_FCS_LEN, + priv->prof->tx_pause, + priv->prof->tx_ppp, + priv->prof->rx_pause, + priv->prof->rx_ppp)) { + en_err(priv, "Failed setting pause params\n"); + return 1; + } + + return 0; +} + +static u8 mlx4_en_dcbnl_get_state(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + if (priv->flags & MLX4_EN_FLAG_DCB_ENABLED) + return MLX4_CEE_STATE_UP; + + return MLX4_CEE_STATE_DOWN; +} + 
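
The per-priority translation in mlx4_en_dcbnl_set_all() above is easiest to verify against a concrete configuration; a small worked example with assumed values, not taken from the patch:

	/* Assumed CEE configuration and the masks set_all() derives from it:
	 *   dcb_pfc[0] = pfc_enabled_full  ->  tx_ppp bit 0 = 1, rx_ppp bit 0 = 1
	 *   dcb_pfc[1] = pfc_enabled_tx    ->  tx_ppp bit 1 = 1, rx_ppp bit 1 = 0
	 *   dcb_pfc[2] = pfc_enabled_rx    ->  tx_ppp bit 2 = 0, rx_ppp bit 2 = 1
	 *   dcb_pfc[3..7] = pfc_disabled   ->  both bits cleared
	 * Result: tx_ppp = 0x3, rx_ppp = 0x5, and rx_pause = tx_pause = 0, because
	 * per-priority flow control and global pause are mutually exclusive here.
	 */
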
+static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + int num_tcs = 0; + + if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return 1; + + if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED)) + return 0; + + if (state) { + priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; + num_tcs = IEEE_8021QAZ_MAX_TCS; + } else { + priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; + } + + if (mlx4_en_setup_tc(dev, num_tcs)) + return 1; + + return 0; +} + +/* On success returns a non-zero 802.1p user priority bitmap + * otherwise returns 0 as the invalid user priority bitmap to + * indicate an error. + */ +static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + struct dcb_app app = { + .selector = idtype, + .protocol = id, + }; + if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return 0; + + return dcb_getapp(netdev, &app); +} + +static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype, + u16 id, u8 up) +{ + struct mlx4_en_priv *priv = netdev_priv(netdev); + struct dcb_app app; + + if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return -EINVAL; + + memset(&app, 0, sizeof(struct dcb_app)); + app.selector = idtype; + app.protocol = id; + app.priority = up; + + return dcb_setapp(netdev, &app); +} + static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) { struct mlx4_en_priv *priv = netdev_priv(dev); struct ieee_ets *my_ets = &priv->ets; - /* No IEEE PFC settings available */ if (!my_ets) return -EINVAL; @@ -237,18 +434,51 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev, static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev) { - return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; + struct mlx4_en_priv *priv = netdev_priv(dev); + + return priv->dcbx_cap; } static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode) { + struct mlx4_en_priv *priv = netdev_priv(dev); + struct ieee_ets ets = {0}; + struct ieee_pfc pfc = {0}; + + if (mode == priv->dcbx_cap) + return 0; + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || - (mode & DCB_CAP_DCBX_VER_CEE) || - !(mode & DCB_CAP_DCBX_VER_IEEE) || + ((mode & DCB_CAP_DCBX_VER_IEEE) && + (mode & DCB_CAP_DCBX_VER_CEE)) || !(mode & DCB_CAP_DCBX_HOST)) - return 1; + goto err; + + priv->dcbx_cap = mode; + + ets.ets_cap = IEEE_8021QAZ_MAX_TCS; + pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS; + + if (mode & DCB_CAP_DCBX_VER_IEEE) { + if (mlx4_en_dcbnl_ieee_setets(dev, &ets)) + goto err; + if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc)) + goto err; + } else if (mode & DCB_CAP_DCBX_VER_CEE) { + if (mlx4_en_dcbnl_set_all(dev)) + goto err; + } else { + if (mlx4_en_dcbnl_ieee_setets(dev, &ets)) + goto err; + if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc)) + goto err; + if (mlx4_en_setup_tc(dev, 0)) + goto err; + } return 0; +err: + return 1; } #define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */ @@ -463,24 +693,46 @@ static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev, } const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = { - .ieee_getets = mlx4_en_dcbnl_ieee_getets, - .ieee_setets = mlx4_en_dcbnl_ieee_setets, - .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate, - .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate, - .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, - .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, + .ieee_getets = mlx4_en_dcbnl_ieee_getets, + .ieee_setets = mlx4_en_dcbnl_ieee_setets, + .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate, + .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate, + 
.ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn, + .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn, + .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats, + .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, + .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, + + .getstate = mlx4_en_dcbnl_get_state, + .setstate = mlx4_en_dcbnl_set_state, + .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg, + .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg, + .setall = mlx4_en_dcbnl_set_all, + .getcap = mlx4_en_dcbnl_getcap, + .getnumtcs = mlx4_en_dcbnl_getnumtcs, + .getpfcstate = mlx4_en_dcbnl_getpfcstate, + .setpfcstate = mlx4_en_dcbnl_setpfcstate, + .getapp = mlx4_en_dcbnl_getapp, + .setapp = mlx4_en_dcbnl_setapp, .getdcbx = mlx4_en_dcbnl_getdcbx, .setdcbx = mlx4_en_dcbnl_setdcbx, - .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn, - .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn, - .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats, }; const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = { .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc, .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc, + .setstate = mlx4_en_dcbnl_set_state, + .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg, + .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg, + .setall = mlx4_en_dcbnl_set_all, + .getnumtcs = mlx4_en_dcbnl_getnumtcs, + .getpfcstate = mlx4_en_dcbnl_getpfcstate, + .setpfcstate = mlx4_en_dcbnl_setpfcstate, + .getapp = mlx4_en_dcbnl_getapp, + .setapp = mlx4_en_dcbnl_setapp, + .getdcbx = mlx4_en_dcbnl_getdcbx, .setdcbx = mlx4_en_dcbnl_setdcbx, }; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 44cf16d01..bdda17d2e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1112,7 +1112,7 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); - return priv->rx_ring_num; + return rounddown_pow_of_two(priv->rx_ring_num); } static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev) @@ -1146,19 +1146,17 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key, u8 *hfunc) { struct mlx4_en_priv *priv = netdev_priv(dev); - struct mlx4_en_rss_map *rss_map = &priv->rss_map; - int rss_rings; - size_t n = priv->rx_ring_num; + u32 n = mlx4_en_get_rxfh_indir_size(dev); + u32 i, rss_rings; int err = 0; - rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num; - rss_rings = 1 << ilog2(rss_rings); + rss_rings = priv->prof->rss_rings ?: n; + rss_rings = rounddown_pow_of_two(rss_rings); - while (n--) { + for (i = 0; i < n; i++) { if (!ring_index) break; - ring_index[n] = rss_map->qps[n % rss_rings].qpn - - rss_map->base_qpn; + ring_index[i] = i % rss_rings; } if (key) memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE); @@ -1171,6 +1169,7 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, const u8 *key, const u8 hfunc) { struct mlx4_en_priv *priv = netdev_priv(dev); + u32 n = mlx4_en_get_rxfh_indir_size(dev); struct mlx4_en_dev *mdev = priv->mdev; int port_up = 0; int err = 0; @@ -1180,18 +1179,18 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index, /* Calculate RSS table size and make sure flows are spread evenly * between rings */ - for (i = 0; i < priv->rx_ring_num; i++) { + for (i = 0; i < n; i++) { if (!ring_index) - continue; + break; if (i > 0 && !ring_index[i] && !rss_rings) rss_rings = i; - if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num))) + if (ring_index[i] != (i % (rss_rings ?: n))) return -EINVAL; } if (!rss_rings) - rss_rings = priv->rx_ring_num; + rss_rings = 
n; /* RSS table size must be an order of 2 */ if (!is_power_of_2(rss_rings)) @@ -1730,6 +1729,12 @@ static int mlx4_en_set_channels(struct net_device *dev, !channel->tx_count || !channel->rx_count) return -EINVAL; + if (channel->tx_count * MLX4_EN_NUM_UP <= priv->xdp_ring_num) { + en_err(priv, "Minimum %d tx channels required with XDP on\n", + priv->xdp_ring_num / MLX4_EN_NUM_UP + 1); + return -EINVAL; + } + tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) return -ENOMEM; @@ -1751,7 +1756,8 @@ static int mlx4_en_set_channels(struct net_device *dev, mlx4_en_safe_replace_resources(priv, tmp); - netif_set_real_num_tx_queues(dev, priv->tx_ring_num); + netif_set_real_num_tx_queues(dev, priv->tx_ring_num - + priv->xdp_ring_num); netif_set_real_num_rx_queues(dev, priv->rx_ring_num); if (dev->num_tc) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 8359e9e51..fedb82927 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -31,6 +31,7 @@ * */ +#include <linux/bpf.h> #include <linux/etherdevice.h> #include <linux/tcp.h> #include <linux/if_vlan.h> @@ -67,6 +68,18 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up) offset += priv->num_tx_rings_p_up; } +#ifdef CONFIG_MLX4_EN_DCB + if (!mlx4_is_slave(priv->mdev->dev)) { + if (up) { + if (priv->dcbx_cap) + priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; + } else { + priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED; + priv->cee_config.pfc_state = false; + } + } +#endif /* CONFIG_MLX4_EN_DCB */ + return 0; } @@ -1201,8 +1214,8 @@ static void mlx4_en_netpoll(struct net_device *dev) struct mlx4_en_cq *cq; int i; - for (i = 0; i < priv->rx_ring_num; i++) { - cq = priv->rx_cq[i]; + for (i = 0; i < priv->tx_ring_num; i++) { + cq = priv->tx_cq[i]; napi_schedule(&cq->napi); } } @@ -1510,6 +1523,24 @@ static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx) free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask); } +static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv, + int tx_ring_idx) +{ + struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[tx_ring_idx]; + int rr_index; + + rr_index = (priv->xdp_ring_num - priv->tx_ring_num) + tx_ring_idx; + if (rr_index >= 0) { + tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc; + tx_ring->recycle_ring = priv->rx_ring[rr_index]; + en_dbg(DRV, priv, + "Set tx_ring[%d]->recycle_ring = rx_ring[%d]\n", + tx_ring_idx, rr_index); + } else { + tx_ring->recycle_ring = NULL; + } +} + int mlx4_en_start_port(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); @@ -1632,6 +1663,8 @@ int mlx4_en_start_port(struct net_device *dev) } tx_ring->tx_queue = netdev_get_tx_queue(dev, i); + mlx4_en_init_recycle_ring(priv, i); + /* Arm CQ for TX completions */ mlx4_en_arm_cq(priv, cq); @@ -1696,10 +1729,9 @@ int mlx4_en_start_port(struct net_device *dev) /* Schedule multicast task to populate multicast list */ queue_work(mdev->workqueue, &priv->rx_mode_task); -#ifdef CONFIG_MLX4_EN_VXLAN if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) - vxlan_get_rx_port(dev); -#endif + udp_tunnel_get_rx_info(dev); + priv->port_up = true; netif_tx_start_all_queues(dev); netif_device_attach(dev); @@ -2177,6 +2209,11 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) en_err(priv, "Bad MTU size:%d.\n", new_mtu); return -EPERM; } + if (priv->xdp_ring_num && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) { + en_err(priv, "MTU size:%d requires frags but XDP 
running\n", + new_mtu); + return -EOPNOTSUPP; + } dev->mtu = new_mtu; if (netif_running(dev)) { @@ -2434,7 +2471,6 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev, return 0; } -#ifdef CONFIG_MLX4_EN_VXLAN static void mlx4_en_add_vxlan_offloads(struct work_struct *work) { int ret; @@ -2484,15 +2520,19 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work) } static void mlx4_en_add_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 port = ti->port; __be16 current_port; - if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return; - if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) + return; + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) return; current_port = priv->vxlan_port; @@ -2507,15 +2547,19 @@ static void mlx4_en_add_vxlan_port(struct net_device *dev, } static void mlx4_en_del_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx4_en_priv *priv = netdev_priv(dev); + __be16 port = ti->port; __be16 current_port; - if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) return; - if (sa_family == AF_INET6) + if (ti->sa_family != AF_INET) + return; + + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) return; current_port = priv->vxlan_port; @@ -2550,7 +2594,6 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, return features; } -#endif static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate) { @@ -2579,6 +2622,103 @@ static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 m return err; } +static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + struct bpf_prog *old_prog; + int xdp_ring_num; + int port_up = 0; + int err; + int i; + + xdp_ring_num = prog ? ALIGN(priv->rx_ring_num, MLX4_EN_NUM_UP) : 0; + + /* No need to reconfigure buffers when simply swapping the + * program for a new one. 
+ */ + if (priv->xdp_ring_num == xdp_ring_num) { + if (prog) { + prog = bpf_prog_add(prog, priv->rx_ring_num - 1); + if (IS_ERR(prog)) + return PTR_ERR(prog); + } + for (i = 0; i < priv->rx_ring_num; i++) { + /* This xchg is paired with READ_ONCE in the fastpath */ + old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + } + return 0; + } + + if (priv->num_frags > 1) { + en_err(priv, "Cannot set XDP if MTU requires multiple frags\n"); + return -EOPNOTSUPP; + } + + if (priv->tx_ring_num < xdp_ring_num + MLX4_EN_NUM_UP) { + en_err(priv, + "Minimum %d tx channels required to run XDP\n", + (xdp_ring_num + MLX4_EN_NUM_UP) / MLX4_EN_NUM_UP); + return -EINVAL; + } + + if (prog) { + prog = bpf_prog_add(prog, priv->rx_ring_num - 1); + if (IS_ERR(prog)) + return PTR_ERR(prog); + } + + mutex_lock(&mdev->state_lock); + if (priv->port_up) { + port_up = 1; + mlx4_en_stop_port(dev, 1); + } + + priv->xdp_ring_num = xdp_ring_num; + netif_set_real_num_tx_queues(dev, priv->tx_ring_num - + priv->xdp_ring_num); + + for (i = 0; i < priv->rx_ring_num; i++) { + old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + } + + if (port_up) { + err = mlx4_en_start_port(dev); + if (err) { + en_err(priv, "Failed starting port %d for XDP change\n", + priv->port); + queue_work(mdev->workqueue, &priv->watchdog_task); + } + } + + mutex_unlock(&mdev->state_lock); + return 0; +} + +static bool mlx4_xdp_attached(struct net_device *dev) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + return !!priv->xdp_ring_num; +} + +static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return mlx4_xdp_set(dev, xdp->prog); + case XDP_QUERY_PROG: + xdp->prog_attached = mlx4_xdp_attached(dev); + return 0; + default: + return -EINVAL; + } +} + static const struct net_device_ops mlx4_netdev_ops = { .ndo_open = mlx4_en_open, .ndo_stop = mlx4_en_close, @@ -2603,12 +2743,11 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, -#ifdef CONFIG_MLX4_EN_VXLAN - .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, - .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, + .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, .ndo_features_check = mlx4_en_features_check, -#endif .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, + .ndo_xdp = mlx4_xdp, }; static const struct net_device_ops mlx4_netdev_ops_master = { @@ -2641,12 +2780,11 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, -#ifdef CONFIG_MLX4_EN_VXLAN - .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, - .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, + .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port, + .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, .ndo_features_check = mlx4_en_features_check, -#endif .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, + .ndo_xdp = mlx4_xdp, }; struct mlx4_en_bond { @@ -2936,10 +3074,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); -#ifdef CONFIG_MLX4_EN_VXLAN INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads); INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads); -#endif #ifdef 
CONFIG_RFS_ACCEL INIT_LIST_HEAD(&priv->filters); spin_lock_init(&priv->filters_lock); @@ -2979,6 +3115,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->msg_enable = MLX4_EN_MSG_LEVEL; #ifdef CONFIG_MLX4_EN_DCB if (!mlx4_is_slave(priv->mdev->dev)) { + priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST | + DCB_CAP_DCBX_VER_IEEE; + priv->flags |= MLX4_EN_DCB_ENABLED; + priv->cee_config.pfc_state = false; + + for (i = 0; i < MLX4_EN_NUM_UP; i++) + priv->cee_config.dcb_pfc[i] = pfc_disabled; + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { dev->dcbnl_ops = &mlx4_en_dcbnl_ops; } else { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 99b5407f2..2040dad86 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -32,6 +32,7 @@ */ #include <net/busy_poll.h> +#include <linux/bpf.h> #include <linux/mlx4/cq.h> #include <linux/slab.h> #include <linux/mlx4/qp.h> @@ -57,7 +58,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv, struct page *page; dma_addr_t dma; - for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) { + for (order = frag_info->order; ;) { gfp_t gfp = _gfp; if (order) @@ -70,7 +71,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv, return -ENOMEM; } dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order, - PCI_DMA_FROMDEVICE); + frag_info->dma_dir); if (dma_mapping_error(priv->ddev, dma)) { put_page(page); return -ENOMEM; @@ -124,7 +125,8 @@ out: while (i--) { if (page_alloc[i].page != ring_alloc[i].page) { dma_unmap_page(priv->ddev, page_alloc[i].dma, - page_alloc[i].page_size, PCI_DMA_FROMDEVICE); + page_alloc[i].page_size, + priv->frag_info[i].dma_dir); page = page_alloc[i].page; /* Revert changes done by mlx4_alloc_pages */ page_ref_sub(page, page_alloc[i].page_size / @@ -145,7 +147,7 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv, if (next_frag_end > frags[i].page_size) dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size, - PCI_DMA_FROMDEVICE); + frag_info->dma_dir); if (frags[i].page) put_page(frags[i].page); @@ -176,7 +178,8 @@ out: page_alloc = &ring->page_alloc[i]; dma_unmap_page(priv->ddev, page_alloc->dma, - page_alloc->page_size, PCI_DMA_FROMDEVICE); + page_alloc->page_size, + priv->frag_info[i].dma_dir); page = page_alloc->page; /* Revert changes done by mlx4_alloc_pages */ page_ref_sub(page, page_alloc->page_size / @@ -201,7 +204,7 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv, i, page_count(page_alloc->page)); dma_unmap_page(priv->ddev, page_alloc->dma, - page_alloc->page_size, PCI_DMA_FROMDEVICE); + page_alloc->page_size, frag_info->dma_dir); while (page_alloc->page_offset + frag_info->frag_stride < page_alloc->page_size) { put_page(page_alloc->page); @@ -244,6 +247,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, struct mlx4_en_rx_alloc *frags = ring->rx_info + (index << priv->log_rx_info); + if (ring->page_cache.index > 0) { + frags[0] = ring->page_cache.buf[--ring->page_cache.index]; + rx_desc->data[0].addr = cpu_to_be64(frags[0].dma); + return 0; + } + return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp); } @@ -502,13 +511,35 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv) } } +/* When the rx ring is running in page-per-packet mode, a released frame can go + * directly into a small cache, to avoid unmapping or touching the page + * allocator. 
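The page cache described in this comment is a small per-ring LIFO; the matching pop side is the page_cache.index check added to mlx4_en_prepare_rx_desc() above. A self-contained sketch of the push/pop pair (names abbreviated for illustration; no locking is needed because both ends run in the same NAPI context):

#define CACHE_SIZE (2 * NAPI_POLL_WEIGHT)	/* a multiple of the napi budget */

struct frame_cache {
	u32 index;
	struct mlx4_en_rx_alloc buf[CACHE_SIZE];
};

static bool frame_cache_put(struct frame_cache *c,
			    const struct mlx4_en_rx_alloc *f)
{
	if (c->index >= CACHE_SIZE)
		return false;	/* full: caller unmaps and frees the page */
	c->buf[c->index++] = *f;
	return true;
}

static bool frame_cache_get(struct frame_cache *c, struct mlx4_en_rx_alloc *f)
{
	if (!c->index)
		return false;	/* empty: caller allocates a fresh page */
	*f = c->buf[--c->index];
	return true;
}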
In bpf prog performance scenarios, buffers are either forwarded + * or dropped, never converted to skbs, so every page can come directly from + * this cache when it is sized to be a multiple of the napi budget. + */ +bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring, + struct mlx4_en_rx_alloc *frame) +{ + struct mlx4_en_page_cache *cache = &ring->page_cache; + + if (cache->index >= MLX4_EN_CACHE_SIZE) + return false; + + cache->buf[cache->index++] = *frame; + return true; +} + void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring **pring, u32 size, u16 stride) { struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_rx_ring *ring = *pring; + struct bpf_prog *old_prog; + old_prog = READ_ONCE(ring->xdp_prog); + if (old_prog) + bpf_prog_put(old_prog); mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE); vfree(ring->rx_info); ring->rx_info = NULL; @@ -519,6 +550,16 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring) { + int i; + + for (i = 0; i < ring->page_cache.index; i++) { + struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i]; + + dma_unmap_page(priv->ddev, frame->dma, frame->page_size, + priv->frag_info[0].dma_dir); + put_page(frame->page); + } + ring->page_cache.index = 0; mlx4_en_free_rx_buf(priv, ring); if (ring->stride <= TXBB_SIZE) ring->buf -= TXBB_SIZE; @@ -740,7 +781,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; struct mlx4_en_rx_alloc *frags; struct mlx4_en_rx_desc *rx_desc; + struct bpf_prog *xdp_prog; + int doorbell_pending; struct sk_buff *skb; + int tx_index; int index; int nr; unsigned int length; @@ -756,6 +800,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud if (budget <= 0) return polled; + xdp_prog = READ_ONCE(ring->xdp_prog); + doorbell_pending = 0; + tx_index = (priv->tx_ring_num - priv->xdp_ring_num) + cq->ring; + /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx * descriptor offset can be deduced from the CQE index instead of * reading 'cqe->index' */ @@ -832,6 +880,43 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) && (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL)); + /* A bpf program gets first chance to drop the packet. It may + * read bytes but not past the end of the frag. 
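The verdicts handled by the switch below come from the attached program itself. For reference, a minimal XDP program in restricted C (section name and build flags are loader-dependent assumptions); the verifier enforces exactly the bound this comment describes, rejecting any read past data_end:

#include <linux/bpf.h>
#include <linux/if_ether.h>

/* Built with clang -O2 -target bpf; returns one of the verdicts the
 * driver switch handles. On this driver, XDP_DROP feeds the page
 * straight back into the recycle cache above. */
__attribute__((section("xdp"), used))
int xdp_drop_all(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	if (data + ETH_HLEN > data_end)
		return XDP_ABORTED;	/* malformed frame; should not happen */
	return XDP_DROP;
}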
+ */ + if (xdp_prog) { + struct xdp_buff xdp; + dma_addr_t dma; + u32 act; + + dma = be64_to_cpu(rx_desc->data[0].addr); + dma_sync_single_for_cpu(priv->ddev, dma, + priv->frag_info[0].frag_size, + DMA_FROM_DEVICE); + + xdp.data = page_address(frags[0].page) + + frags[0].page_offset; + xdp.data_end = xdp.data + length; + + act = bpf_prog_run_xdp(xdp_prog, &xdp); + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + if (!mlx4_en_xmit_frame(frags, dev, + length, tx_index, + &doorbell_pending)) + goto consumed; + break; + default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + case XDP_DROP: + if (mlx4_en_rx_recycle(ring, frags)) + goto consumed; + goto next; + } + } + if (likely(dev->features & NETIF_F_RXCSUM)) { if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | MLX4_CQE_STATUS_UDP)) { @@ -983,6 +1068,7 @@ next: for (nr = 0; nr < priv->num_frags; nr++) mlx4_en_free_frag(priv, frags, nr); +consumed: ++cq->mcq.cons_index; index = (cq->mcq.cons_index) & ring->size_mask; cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor; @@ -991,6 +1077,9 @@ next: } out: + if (doorbell_pending) + mlx4_en_xmit_doorbell(priv->tx_ring[tx_index]); + AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled); mlx4_cq_set_ci(&cq->mcq); wmb(); /* ensure HW sees CQ consumer before we post new buffers */ @@ -1058,22 +1147,35 @@ static const int frag_sizes[] = { void mlx4_en_calc_rx_buf(struct net_device *dev) { + enum dma_data_direction dma_dir = PCI_DMA_FROMDEVICE; struct mlx4_en_priv *priv = netdev_priv(dev); - /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple - * headers. (For example: ETH_P_8021Q and ETH_P_8021AD). - */ - int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN); + int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu); + int order = MLX4_EN_ALLOC_PREFER_ORDER; + u32 align = SMP_CACHE_BYTES; int buf_size = 0; int i = 0; + /* bpf requires buffers to be set up as 1 packet per page. + * This only works when num_frags == 1. + */ + if (priv->xdp_ring_num) { + dma_dir = PCI_DMA_BIDIRECTIONAL; + /* This will gain efficient xdp frame recycling at the expense + * of more costly truesize accounting + */ + align = PAGE_SIZE; + order = 0; + } + while (buf_size < eff_mtu) { + priv->frag_info[i].order = order; priv->frag_info[i].frag_size = (eff_mtu > buf_size + frag_sizes[i]) ? 
frag_sizes[i] : eff_mtu - buf_size; priv->frag_info[i].frag_prefix_size = buf_size; priv->frag_info[i].frag_stride = - ALIGN(priv->frag_info[i].frag_size, - SMP_CACHE_BYTES); + ALIGN(priv->frag_info[i].frag_size, align); + priv->frag_info[i].dma_dir = dma_dir; buf_size += priv->frag_info[i].frag_size; i++; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 76aa4d271..e2509bba3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -196,6 +196,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, ring->last_nr_txbb = 1; memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); memset(ring->buf, 0, ring->buf_size); + ring->free_tx_desc = mlx4_en_free_tx_desc; ring->qp_state = MLX4_QP_STATE_RST; ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8); @@ -265,10 +266,10 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv, } -static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, - struct mlx4_en_tx_ring *ring, - int index, u8 owner, u64 timestamp, - int napi_mode) +u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner, u64 timestamp, + int napi_mode) { struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; @@ -344,6 +345,27 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, return tx_info->nr_txbb; } +u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner, u64 timestamp, + int napi_mode) +{ + struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; + struct mlx4_en_rx_alloc frame = { + .page = tx_info->page, + .dma = tx_info->map0_dma, + .page_offset = 0, + .page_size = PAGE_SIZE, + }; + + if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) { + dma_unmap_page(priv->ddev, tx_info->map0_dma, + PAGE_SIZE, priv->frag_info[0].dma_dir); + put_page(tx_info->page); + } + + return tx_info->nr_txbb; +} int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) { @@ -362,7 +384,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) } while (ring->cons != ring->prod) { - ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring, + ring->last_nr_txbb = ring->free_tx_desc(priv, ring, ring->cons & ring->size_mask, !!(ring->cons & ring->size), 0, 0 /* Non-NAPI caller */); @@ -444,7 +466,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, timestamp = mlx4_en_get_cqe_ts(cqe); /* free next descriptor */ - last_nr_txbb = mlx4_en_free_tx_desc( + last_nr_txbb = ring->free_tx_desc( priv, ring, ring_index, !!((ring_cons + txbbs_skipped) & ring->size), timestamp, napi_budget); @@ -476,6 +498,9 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb; ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped; + if (ring->free_tx_desc == mlx4_en_recycle_tx_desc) + return done < budget; + netdev_tx_completed_queue(ring->tx_queue, packets, bytes); /* Wakeup Tx queue if this stopped, and ring is not full. 
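The ring->free_tx_desc indirection introduced in this file lets XDP TX rings skip skb freeing and BQL accounting entirely; the early return above likewise bypasses netdev_tx_completed_queue() for them. The handler is chosen once when a ring is set up rather than branching per completion. A hypothetical setup sketch (the diff shows only the default skb assignment; the recycle assignment happens where XDP TX rings are paired with their RX rings):

/* Select the completion handler once per ring. An XDP TX ring hands
 * finished pages back to its paired RX ring's cache instead of
 * freeing skbs. */
static void setup_tx_completion(struct mlx4_en_tx_ring *ring,
				struct mlx4_en_rx_ring *paired_rx)
{
	if (paired_rx) {		/* XDP_TX ring */
		ring->recycle_ring = paired_rx;
		ring->free_tx_desc = mlx4_en_recycle_tx_desc;
	} else {			/* regular skb ring */
		ring->recycle_ring = NULL;
		ring->free_tx_desc = mlx4_en_free_tx_desc;
	}
}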
@@ -631,8 +656,7 @@ static int get_real_size(const struct sk_buff *skb, static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, const struct sk_buff *skb, const struct skb_shared_info *shinfo, - int real_size, u16 *vlan_tag, - int tx_ind, void *fragptr) + void *fragptr) { struct mlx4_wqe_inline_seg *inl = &tx_desc->inl; int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl; @@ -700,10 +724,66 @@ static void mlx4_bf_copy(void __iomem *dst, const void *src, __iowrite64_copy(dst, src, bytecnt / 8); } +void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring) +{ + wmb(); + /* Since there is no iowrite*_native() that writes the + * value as is, without byteswapping - using the one + * that doesn't do byteswapping in the relevant arch + * endianness. + */ +#if defined(__LITTLE_ENDIAN) + iowrite32( +#else + iowrite32be( +#endif + ring->doorbell_qpn, + ring->bf.uar->map + MLX4_SEND_DOORBELL); +} + +static void mlx4_en_tx_write_desc(struct mlx4_en_tx_ring *ring, + struct mlx4_en_tx_desc *tx_desc, + union mlx4_wqe_qpn_vlan qpn_vlan, + int desc_size, int bf_index, + __be32 op_own, bool bf_ok, + bool send_doorbell) +{ + tx_desc->ctrl.qpn_vlan = qpn_vlan; + + if (bf_ok) { + op_own |= htonl((bf_index & 0xffff) << 8); + /* Ensure new descriptor hits memory + * before setting ownership of this descriptor to HW + */ + dma_wmb(); + tx_desc->ctrl.owner_opcode = op_own; + + wmb(); + + mlx4_bf_copy(ring->bf.reg + ring->bf.offset, &tx_desc->ctrl, + desc_size); + + wmb(); + + ring->bf.offset ^= ring->bf.buf_size; + } else { + /* Ensure new descriptor hits memory + * before setting ownership of this descriptor to HW + */ + dma_wmb(); + tx_desc->ctrl.owner_opcode = op_own; + if (send_doorbell) + mlx4_en_xmit_doorbell(ring); + else + ring->xmit_more++; + } +} + netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) { struct skb_shared_info *shinfo = skb_shinfo(skb); struct mlx4_en_priv *priv = netdev_priv(dev); + union mlx4_wqe_qpn_vlan qpn_vlan = {}; struct device *ddev = priv->ddev; struct mlx4_en_tx_ring *ring; struct mlx4_en_tx_desc *tx_desc; @@ -715,7 +795,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) int real_size; u32 index, bf_index; __be32 op_own; - u16 vlan_tag = 0; u16 vlan_proto = 0; int i_frag; int lso_header_size; @@ -725,6 +804,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) bool stop_queue; bool inline_ok; u32 ring_cons; + bool bf_ok; tx_ind = skb_get_queue_mapping(skb); ring = priv->tx_ring[tx_ind]; @@ -738,7 +818,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) real_size = get_real_size(skb, shinfo, dev, &lso_header_size, &inline_ok, &fragptr); if (unlikely(!real_size)) - goto tx_drop; + goto tx_drop_count; /* Align descriptor to TXBB size */ desc_size = ALIGN(real_size, TXBB_SIZE); @@ -746,12 +826,20 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(nr_txbb > MAX_DESC_TXBBS)) { if (netif_msg_tx_err(priv)) en_warn(priv, "Oversized header or SG list\n"); - goto tx_drop; + goto tx_drop_count; } + bf_ok = ring->bf_enabled; if (skb_vlan_tag_present(skb)) { - vlan_tag = skb_vlan_tag_get(skb); + qpn_vlan.vlan_tag = cpu_to_be16(skb_vlan_tag_get(skb)); vlan_proto = be16_to_cpu(skb->vlan_proto); + if (vlan_proto == ETH_P_8021AD) + qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN; + else if (vlan_proto == ETH_P_8021Q) + qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN; + else + qpn_vlan.ins_vlan = 0; + bf_ok = false; } netdev_txq_bql_enqueue_prefetchw(ring->tx_queue); @@ -771,6
+859,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) else { tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf; bounce = true; + bf_ok = false; } /* Save skb in tx_info ring */ @@ -907,8 +996,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); if (tx_info->inl) - build_inline_wqe(tx_desc, skb, shinfo, real_size, &vlan_tag, - tx_ind, fragptr); + build_inline_wqe(tx_desc, skb, shinfo, fragptr); if (skb->encapsulation) { union { @@ -946,60 +1034,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) real_size = (real_size / 16) & 0x3f; - if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && - !skb_vlan_tag_present(skb) && send_doorbell) { - tx_desc->ctrl.bf_qpn = ring->doorbell_qpn | - cpu_to_be32(real_size); + bf_ok &= desc_size <= MAX_BF && send_doorbell; - op_own |= htonl((bf_index & 0xffff) << 8); - /* Ensure new descriptor hits memory - * before setting ownership of this descriptor to HW - */ - dma_wmb(); - tx_desc->ctrl.owner_opcode = op_own; - - wmb(); - - mlx4_bf_copy(ring->bf.reg + ring->bf.offset, &tx_desc->ctrl, - desc_size); - - wmb(); - - ring->bf.offset ^= ring->bf.buf_size; - } else { - tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag); - if (vlan_proto == ETH_P_8021AD) - tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN; - else if (vlan_proto == ETH_P_8021Q) - tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN; - else - tx_desc->ctrl.ins_vlan = 0; + if (bf_ok) + qpn_vlan.bf_qpn = ring->doorbell_qpn | cpu_to_be32(real_size); + else + qpn_vlan.fence_size = real_size; - tx_desc->ctrl.fence_size = real_size; - - /* Ensure new descriptor hits memory - * before setting ownership of this descriptor to HW - */ - dma_wmb(); - tx_desc->ctrl.owner_opcode = op_own; - if (send_doorbell) { - wmb(); - /* Since there is no iowrite*_native() that writes the - * value as is, without byteswapping - using the one - * the doesn't do byteswapping in the relevant arch - * endianness. 
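The endianness comment being removed here (and re-added in the new mlx4_en_xmit_doorbell() helper above) is worth unpacking: ring->doorbell_qpn is stored as __be32, so the MMIO write must not swap it a second time. A standalone sketch of the idiom:

/* qpn already holds big-endian bytes. On a little-endian host,
 * iowrite32() writes the raw 32-bit value without swapping, so the
 * byte order seen by the device is preserved; on a big-endian host,
 * iowrite32be() produces the same bytes on the bus. */
static inline void doorbell_ring(__be32 qpn, void __iomem *db)
{
#if defined(__LITTLE_ENDIAN)
	iowrite32((__force u32)qpn, db);
#else
	iowrite32be((__force u32)qpn, db);
#endif
}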
- */ -#if defined(__LITTLE_ENDIAN) - iowrite32( -#else - iowrite32be( -#endif - ring->doorbell_qpn, - ring->bf.uar->map + MLX4_SEND_DOORBELL); - } else { - ring->xmit_more++; - } - } + mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, desc_size, bf_index, + op_own, bf_ok, send_doorbell); if (unlikely(stop_queue)) { /* If queue was emptied after the if (stop_queue) , and before @@ -1028,9 +1071,114 @@ tx_drop_unmap: PCI_DMA_TODEVICE); } +tx_drop_count: + ring->tx_dropped++; tx_drop: dev_kfree_skb_any(skb); - ring->tx_dropped++; return NETDEV_TX_OK; } +netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame, + struct net_device *dev, unsigned int length, + int tx_ind, int *doorbell_pending) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + union mlx4_wqe_qpn_vlan qpn_vlan = {}; + struct mlx4_en_tx_ring *ring; + struct mlx4_en_tx_desc *tx_desc; + struct mlx4_wqe_data_seg *data; + struct mlx4_en_tx_info *tx_info; + int index, bf_index; + bool send_doorbell; + int nr_txbb = 1; + bool stop_queue; + dma_addr_t dma; + int real_size; + __be32 op_own; + u32 ring_cons; + bool bf_ok; + + BUILD_BUG_ON_MSG(ALIGN(CTRL_SIZE + DS_SIZE, TXBB_SIZE) != TXBB_SIZE, + "mlx4_en_xmit_frame requires minimum size tx desc"); + + ring = priv->tx_ring[tx_ind]; + + if (!priv->port_up) + goto tx_drop; + + if (mlx4_en_is_tx_ring_full(ring)) + goto tx_drop_count; + + /* fetch ring->cons far ahead before needing it to avoid stall */ + ring_cons = READ_ONCE(ring->cons); + + index = ring->prod & ring->size_mask; + tx_info = &ring->tx_info[index]; + + bf_ok = ring->bf_enabled; + + /* Track current inflight packets for performance analysis */ + AVG_PERF_COUNTER(priv->pstats.inflight_avg, + (u32)(ring->prod - ring_cons - 1)); + + bf_index = ring->prod; + tx_desc = ring->buf + index * TXBB_SIZE; + data = &tx_desc->data; + + dma = frame->dma; + + tx_info->page = frame->page; + frame->page = NULL; + tx_info->map0_dma = dma; + tx_info->map0_byte_count = length; + tx_info->nr_txbb = nr_txbb; + tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN); + tx_info->data_offset = (void *)data - (void *)tx_desc; + tx_info->ts_requested = 0; + tx_info->nr_maps = 1; + tx_info->linear = 1; + tx_info->inl = 0; + + dma_sync_single_for_device(priv->ddev, dma, length, PCI_DMA_TODEVICE); + + data->addr = cpu_to_be64(dma); + data->lkey = ring->mr_key; + dma_wmb(); + data->byte_count = cpu_to_be32(length); + + /* tx completion can avoid cache line miss for common cases */ + tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; + + op_own = cpu_to_be32(MLX4_OPCODE_SEND) | + ((ring->prod & ring->size) ? + cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); + + ring->packets++; + ring->bytes += tx_info->nr_bytes; + AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, length); + + ring->prod += nr_txbb; + + stop_queue = mlx4_en_is_tx_ring_full(ring); + send_doorbell = stop_queue || + *doorbell_pending > MLX4_EN_DOORBELL_BUDGET; + bf_ok &= send_doorbell; + + real_size = ((CTRL_SIZE + nr_txbb * DS_SIZE) / 16) & 0x3f; + + if (bf_ok) + qpn_vlan.bf_qpn = ring->doorbell_qpn | cpu_to_be32(real_size); + else + qpn_vlan.fence_size = real_size; + + mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, TXBB_SIZE, bf_index, + op_own, bf_ok, send_doorbell); + *doorbell_pending = send_doorbell ? 
0 : *doorbell_pending + 1; + + return NETDEV_TX_OK; + +tx_drop_count: + ring->tx_dropped++; +tx_drop: + return NETDEV_TX_BUSY; +} diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index f61397745..cf8f8a72a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -1305,8 +1305,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) return 0; err_out_unmap: - while (i >= 0) - mlx4_free_eq(dev, &priv->eq_table.eq[i--]); + while (i > 0) + mlx4_free_eq(dev, &priv->eq_table.eq[--i]); #ifdef CONFIG_RFS_ACCEL for (i = 1; i <= dev->caps.num_ports; i++) { if (mlx4_priv(dev)->port[i].rmap) { diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index e97094598..d728704d0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -721,6 +721,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0 #define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c +#define QUERY_DEV_CAP_DIAG_RPRT_PER_PORT 0x9c #define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d #define QUERY_DEV_CAP_VXLAN 0x9e #define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0 @@ -935,6 +936,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; if (field32 & (1 << 7)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT; + MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT); + if (field32 & (1 << 17)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT; MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); if (field & 1<<6) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; @@ -1128,6 +1132,7 @@ int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_c port_cap->max_pkeys = 1 << (field & 0xf); MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET); port_cap->max_vl = field & 0xf; + port_cap->max_tc_eth = field >> 4; MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET); port_cap->log_max_macs = field & 0xf; port_cap->log_max_vlans = field >> 4; @@ -2456,6 +2461,42 @@ int mlx4_NOP(struct mlx4_dev *dev) MLX4_CMD_NATIVE); } +int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier, + const u32 offset[], + u32 value[], size_t array_len, u8 port) +{ + struct mlx4_cmd_mailbox *mailbox; + u32 *outbox; + size_t i; + int ret; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + outbox = mailbox->buf; + + ret = mlx4_cmd_box(dev, 0, mailbox->dma, port, op_modifier, + MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (ret) + goto out; + + for (i = 0; i < array_len; i++) { + if (offset[i] > MLX4_MAILBOX_SIZE) { + ret = -EINVAL; + goto out; + } + + MLX4_GET(value[i], outbox, offset[i]); + } + +out: + mlx4_free_cmd_mailbox(dev, mailbox); + return ret; +} +EXPORT_SYMBOL(mlx4_query_diag_counters); + int mlx4_get_phys_port_id(struct mlx4_dev *dev) { u8 port; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 7ea258af6..cdbd76f10 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -53,6 +53,7 @@ struct mlx4_port_cap { int ib_mtu; int max_port_width; int max_vl; + int max_tc_eth; int max_gids; int max_pkeys; u64 def_mac; diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index 
dec77d6f0..0e8b7c449 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -147,7 +147,7 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable) if (enable) { dev->flags |= MLX4_FLAG_BONDED; } else { - ret = mlx4_virt2phy_port_map(dev, 1, 2); + ret = mlx4_virt2phy_port_map(dev, 1, 2); if (ret) { mlx4_err(dev, "Fail to reset port map\n"); return ret; @@ -218,6 +218,9 @@ void mlx4_unregister_device(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_interface *intf; + if (!(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)) + return; + mlx4_stop_catas_poll(dev); mutex_lock(&intf_mutex); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 546fab0ec..7183ac413 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -292,6 +292,7 @@ static int _mlx4_dev_port(struct mlx4_dev *dev, int port, dev->caps.pkey_table_len[port] = port_cap->max_pkeys; dev->caps.port_width_cap[port] = port_cap->max_port_width; dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu; + dev->caps.max_tc_eth = port_cap->max_tc_eth; dev->caps.def_mac[port] = port_cap->def_mac; dev->caps.supported_type[port] = port_cap->supported_port_types; dev->caps.suggested_type[port] = port_cap->suggested_type; @@ -2599,7 +2600,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev) err = mlx4_init_uar_table(dev); if (err) { mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); - return err; + return err; } err = mlx4_uar_alloc(dev, &priv->driver_uar); @@ -2969,6 +2970,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) mlx4_err(dev, "Failed to create mtu file for port %d\n", port); device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); + devlink_port_unregister(&info->devlink_port); info->port = -1; } @@ -2983,6 +2985,8 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info) device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); device_remove_file(&info->dev->persist->pdev->dev, &info->port_mtu_attr); + devlink_port_unregister(&info->devlink_port); + #ifdef CONFIG_RFS_ACCEL free_irq_cpu_rmap(info->rmap); info->rmap = NULL; diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index f2d092001..94b891c11 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -618,8 +618,8 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, err = mlx4_READ_ENTRY(dev, entry->index, mailbox); - if (err) - goto out_mailbox; + if (err) + goto out_mailbox; members_count = be32_to_cpu(mgm->members_count) & 0xffffff; @@ -657,8 +657,8 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox); - if (err) - goto out_mailbox; + if (err) + goto out_mailbox; } } } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 13d297ee3..9099dbd04 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -132,6 +132,7 @@ enum { MLX4_EN_NUM_UP) #define MLX4_EN_DEFAULT_TX_WORK 256 +#define MLX4_EN_DOORBELL_BUDGET 8 /* Target number of packets to coalesce with interrupt moderation */ #define MLX4_EN_RX_COAL_TARGET 44 @@ -164,6 +165,10 @@ enum { #define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN) #define MLX4_EN_MIN_MTU 46 +/* VLAN_HLEN is added twice, to
support skb vlan tagged with multiple + * headers. (For example: ETH_P_8021Q and ETH_P_8021AD). + */ +#define MLX4_EN_EFF_MTU(mtu) ((mtu) + ETH_HLEN + (2 * VLAN_HLEN)) #define ETH_BCAST 0xffffffffffffULL #define MLX4_EN_LOOPBACK_RETRIES 5 @@ -215,7 +220,10 @@ enum cq_type { struct mlx4_en_tx_info { - struct sk_buff *skb; + union { + struct sk_buff *skb; + struct page *page; + }; dma_addr_t map0_dma; u32 map0_byte_count; u32 nr_txbb; @@ -255,6 +263,14 @@ struct mlx4_en_rx_alloc { u32 page_size; }; +#define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT) +struct mlx4_en_page_cache { + u32 index; + struct mlx4_en_rx_alloc buf[MLX4_EN_CACHE_SIZE]; +}; + +struct mlx4_en_priv; + struct mlx4_en_tx_ring { /* cache line used and dirtied in tx completion * (mlx4_en_free_tx_buf()) @@ -288,6 +304,11 @@ struct mlx4_en_tx_ring { __be32 mr_key; void *buf; struct mlx4_en_tx_info *tx_info; + struct mlx4_en_rx_ring *recycle_ring; + u32 (*free_tx_desc)(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner, + u64 timestamp, int napi_mode); u8 *bounce_buf; struct mlx4_qp_context context; int qpn; @@ -319,6 +340,8 @@ struct mlx4_en_rx_ring { u8 fcs_del; void *buf; void *rx_info; + struct bpf_prog *xdp_prog; + struct mlx4_en_page_cache page_cache; unsigned long bytes; unsigned long packets; unsigned long csum_ok; @@ -440,7 +463,9 @@ struct mlx4_en_mc_list { struct mlx4_en_frag_info { u16 frag_size; u16 frag_prefix_size; - u16 frag_stride; + u32 frag_stride; + enum dma_data_direction dma_dir; + int order; }; #ifdef CONFIG_MLX4_EN_DCB @@ -450,6 +475,17 @@ struct mlx4_en_frag_info { #define MLX4_EN_TC_ETS 7 +enum dcb_pfc_type { + pfc_disabled = 0, + pfc_enabled_full, + pfc_enabled_tx, + pfc_enabled_rx +}; + +struct mlx4_en_cee_config { + bool pfc_state; + enum dcb_pfc_type dcb_pfc[MLX4_EN_NUM_UP]; +}; #endif struct ethtool_flow_id { @@ -469,6 +505,9 @@ enum { MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3), MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4), MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5), +#ifdef CONFIG_MLX4_EN_DCB + MLX4_EN_FLAG_DCB_ENABLED = (1 << 6), +#endif }; #define PORT_BEACON_MAX_LIMIT (65535) @@ -536,6 +575,7 @@ struct mlx4_en_priv { struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS]; u16 num_frags; u16 log_rx_info; + int xdp_ring_num; struct mlx4_en_tx_ring **tx_ring; struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS]; @@ -547,10 +587,8 @@ struct mlx4_en_priv { struct work_struct linkstate_task; struct delayed_work stats_task; struct delayed_work service_task; -#ifdef CONFIG_MLX4_EN_VXLAN struct work_struct vxlan_add_task; struct work_struct vxlan_del_task; -#endif struct mlx4_en_perf_stats pstats; struct mlx4_en_pkt_stats pkstats; struct mlx4_en_counter_stats pf_stats; @@ -572,9 +610,12 @@ struct mlx4_en_priv { u32 counter_index; #ifdef CONFIG_MLX4_EN_DCB +#define MLX4_EN_DCB_ENABLED 0x3 struct ieee_ets ets; u16 maxrate[IEEE_8021QAZ_MAX_TCS]; enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS]; + struct mlx4_en_cee_config cee_config; + u8 dcbx_cap; #endif #ifdef CONFIG_RFS_ACCEL spinlock_t filters_lock; @@ -644,6 +685,12 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq); u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback); netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); +netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame, + struct net_device *dev, unsigned int length, + int tx_ind, int *doorbell_pending); +void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring); +bool 
mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring, + struct mlx4_en_rx_alloc *frame); int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring **pring, @@ -672,6 +719,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, int budget); int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget); +u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner, u64 timestamp, + int napi_mode); +u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring, + int index, u8 owner, u64 timestamp, + int napi_mode); void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, int is_tx, int rss, int qpn, int cqn, int user_prio, struct mlx4_qp_context *context); diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 93195191f..395b5463c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -248,7 +248,7 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) offset, order); return; } - __mlx4_free_mtt_range(dev, offset, order); + __mlx4_free_mtt_range(dev, offset, order); } void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c index b3cc3ab63..6fc156a39 100644 --- a/drivers/net/ethernet/mellanox/mlx4/pd.c +++ b/drivers/net/ethernet/mellanox/mlx4/pd.c @@ -205,7 +205,9 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node) goto free_uar; } - uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT); + uar->bf_map = io_mapping_map_wc(priv->bf_mapping, + uar->index << PAGE_SHIFT, + PAGE_SIZE); if (!uar->bf_map) { err = -ENOMEM; goto unamp_uar; diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 087b23b32..c5b206429 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -52,6 +52,7 @@ #define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2 #define MLX4_IGNORE_FCS_MASK 0x1 +#define MLX4_TC_MAX_NUMBER 8 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table) { @@ -2015,3 +2016,14 @@ out: return ret; } EXPORT_SYMBOL(mlx4_get_module_info); + +int mlx4_max_tc(struct mlx4_dev *dev) +{ + u8 num_tc = dev->caps.max_tc_eth; + + if (!num_tc) + num_tc = MLX4_TC_MAX_NUMBER; + + return num_tc; +} +EXPORT_SYMBOL(mlx4_max_tc); diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index cd9b2b28d..8b81114bd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -2372,16 +2372,15 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, __mlx4_mpt_release(dev, index); break; case RES_OP_MAP_ICM: - index = get_param_l(&in_param); - id = index & mpt_mask(dev); - err = mr_res_start_move_to(dev, slave, id, - RES_MPT_RESERVED, &mpt); - if (err) - return err; - - __mlx4_mpt_free_icm(dev, mpt->key); - res_end_move(dev, slave, RES_MPT, id); + index = get_param_l(&in_param); + id = index & mpt_mask(dev); + err = mr_res_start_move_to(dev, slave, id, + RES_MPT_RESERVED, &mpt); + if (err) return err; + + __mlx4_mpt_free_icm(dev, mpt->key); + res_end_move(dev, slave, RES_MPT, id); break; default: err = -EINVAL; @@ -4253,9 +4252,8 @@ int 
mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) && !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) { - mlx4_warn(dev, - "Src check LB for slave %d isn't supported\n", - slave); + mlx4_warn(dev, "Src check LB for slave %d isn't supported\n", + slave); return -ENOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 1cf722eba..aae46884b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -4,6 +4,7 @@ config MLX5_CORE tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver" + depends on MAY_USE_DEVLINK depends on PCI default n ---help--- diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 9ea7b5830..05cc1effc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -1,11 +1,13 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ - health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ - mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o fs_counters.o + health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ + mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ + fs_counters.o rl.o -mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ - en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ - en_txrx.o en_clock.o vxlan.o en_tc.o en_arfs.o +mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \ + en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \ + en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \ + en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 943b1bd43..bf722aa88 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -44,6 +44,7 @@ #include <linux/mlx5/vport.h> #include <linux/mlx5/transobj.h> #include <linux/rhashtable.h> +#include <net/switchdev.h> #include "wq.h" #include "mlx5_core.h" #include "en_stats.h" @@ -72,13 +73,18 @@ #define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER) #define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \ MLX5_MPWRQ_WQE_PAGE_ORDER) -#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \ - BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW)) + +#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2) +#define MLX5E_REQUIRED_MTTS(rqs, wqes)\ + (rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8)) +#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX) + #define MLX5_UMR_ALIGN (2048) #define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128) #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 +#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20 @@ -88,6 +94,7 @@ #define MLX5E_LOG_INDIR_RQT_SIZE 0x7 #define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) #define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1) +#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) #define MLX5E_TX_CQ_POLL_BUDGET 128 #define MLX5E_UPDATE_STATS_INTERVAL 200 /* 
msecs */ #define MLX5E_SQ_BF_BUDGET 16 @@ -126,6 +133,12 @@ static inline int mlx5_max_log_rq_size(int wq_type) } } +enum { + MLX5E_INLINE_MODE_L2, + MLX5E_INLINE_MODE_VPORT_CONTEXT, + MLX5_INLINE_MODE_NOT_REQUIRED, +}; + struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_eth_seg eth; @@ -143,10 +156,31 @@ struct mlx5e_umr_wqe { struct mlx5_wqe_data_seg data; }; +static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = { + "rx_cqe_moder", +}; + +enum mlx5e_priv_flag { + MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0), +}; + +#define MLX5E_SET_PRIV_FLAG(priv, pflag, enable) \ + do { \ + if (enable) \ + priv->pflags |= pflag; \ + else \ + priv->pflags &= ~pflag; \ + } while (0) + #ifdef CONFIG_MLX5_CORE_EN_DCB #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ #endif +struct mlx5e_cq_moder { + u16 usec; + u16 pkts; +}; + struct mlx5e_params { u8 log_sq_size; u8 rq_wq_type; @@ -155,16 +189,16 @@ struct mlx5e_params { u8 log_rq_size; u16 num_channels; u8 num_tc; + u8 rx_cq_period_mode; bool rx_cqe_compress_admin; bool rx_cqe_compress; - u16 rx_cq_moderation_usec; - u16 rx_cq_moderation_pkts; - u16 tx_cq_moderation_usec; - u16 tx_cq_moderation_pkts; + struct mlx5e_cq_moder rx_cq_moderation; + struct mlx5e_cq_moder tx_cq_moderation; u16 min_rx_wqes; bool lro_en; u32 lro_wqe_sz; u16 tx_max_inline; + u8 tx_min_inline_mode; u8 rss_hfunc; u8 toeplitz_hash_key[40]; u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; @@ -172,6 +206,7 @@ struct mlx5e_params { #ifdef CONFIG_MLX5_CORE_EN_DCB struct ieee_ets ets; #endif + bool rx_am_enabled; }; struct mlx5e_tstamp { @@ -188,9 +223,9 @@ struct mlx5e_tstamp { }; enum { - MLX5E_RQ_STATE_POST_WQES_ENABLE, + MLX5E_RQ_STATE_FLUSH, MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, - MLX5E_RQ_STATE_FLUSH_TIMEOUT, + MLX5E_RQ_STATE_AM, }; struct mlx5e_cq { @@ -198,6 +233,7 @@ struct mlx5e_cq { struct mlx5_cqwq wq; /* data path - accessed per napi poll */ + u16 event_ctr; struct napi_struct *napi; struct mlx5_core_cq mcq; struct mlx5e_channel *channel; @@ -227,6 +263,30 @@ struct mlx5e_dma_info { dma_addr_t addr; }; +struct mlx5e_rx_am_stats { + int ppms; /* packets per msec */ + int epms; /* events per msec */ +}; + +struct mlx5e_rx_am_sample { + ktime_t time; + unsigned int pkt_ctr; + u16 event_ctr; +}; + +struct mlx5e_rx_am { /* Adaptive Moderation */ + u8 state; + struct mlx5e_rx_am_stats prev_stats; + struct mlx5e_rx_am_sample start_sample; + struct work_struct work; + u8 profile_ix; + u8 mode; + u8 tune_state; + u8 steps_right; + u8 steps_left; + u8 tired; +}; + struct mlx5e_rq { /* data path */ struct mlx5_wq_ll wq; @@ -247,6 +307,9 @@ struct mlx5e_rq { unsigned long state; int ix; + u32 mpwqe_mtt_offset; + + struct mlx5e_rx_am am; /* Adaptive Moderation */ /* control */ struct mlx5_wq_ctrl wq_ctrl; @@ -306,9 +369,8 @@ struct mlx5e_sq_dma { }; enum { - MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, + MLX5E_SQ_STATE_FLUSH, MLX5E_SQ_STATE_BF_ENABLE, - MLX5E_SQ_STATE_TX_TIMEOUT, }; struct mlx5e_ico_wqe_info { @@ -346,6 +408,7 @@ struct mlx5e_sq { u32 sqn; u16 bf_buf_size; u16 max_inline; + u8 min_inline_mode; u16 edge; struct device *pdev; struct mlx5e_tstamp *tstamp; @@ -358,6 +421,7 @@ struct mlx5e_sq { struct mlx5e_channel *channel; int tc; struct mlx5e_ico_wqe_info *ico_wqe_info; + u32 rate_limit; } ____cacheline_aligned_in_smp; static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n) @@ -495,8 +559,24 @@ enum { MLX5E_ARFS_FT_LEVEL }; +struct mlx5e_ethtool_table { + struct mlx5_flow_table *ft; + int num_rules; +}; + +#define ETHTOOL_NUM_L3_L4_FTS 7 
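The mlx5e_rx_am_sample and mlx5e_rx_am_stats structs above feed the adaptive-moderation state machine added in en_rx_am.c: two samples are reduced to packet and event rates, and the tune_state/steps fields track the search across moderation profiles. Roughly, with illustrative arithmetic (the exact math in en_rx_am.c may differ):

/* Derive packets/events per millisecond from two samples. */
static void am_sample_to_stats(const struct mlx5e_rx_am_sample *start,
			       const struct mlx5e_rx_am_sample *end,
			       struct mlx5e_rx_am_stats *curr)
{
	u64 delta_us = ktime_us_delta(end->time, start->time);

	if (!delta_us)
		return;		/* window too short to judge */
	curr->ppms = div_u64((u64)(end->pkt_ctr - start->pkt_ctr) * 1000,
			     (u32)delta_us);
	curr->epms = div_u64((u64)(u16)(end->event_ctr - start->event_ctr) * 1000,
			     (u32)delta_us);
}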
+#define ETHTOOL_NUM_L2_FTS 4 + +struct mlx5e_ethtool_steering { + struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS]; + struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS]; + struct list_head rules; + int tot_num_rules; +}; + struct mlx5e_flow_steering { struct mlx5_flow_namespace *ns; + struct mlx5e_ethtool_steering ethtool; struct mlx5e_tc_table tc; struct mlx5e_vlan_table vlan; struct mlx5e_l2_table l2; @@ -504,9 +584,15 @@ struct mlx5e_flow_steering { struct mlx5e_arfs_tables arfs; }; -struct mlx5e_direct_tir { - u32 tirn; +struct mlx5e_rqt { u32 rqtn; + bool enabled; +}; + +struct mlx5e_tir { + u32 tirn; + struct mlx5e_rqt rqt; + struct list_head list; }; enum { @@ -514,6 +600,22 @@ enum { MLX5E_NIC_PRIO }; +struct mlx5e_profile { + void (*init)(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, void *ppriv); + void (*cleanup)(struct mlx5e_priv *priv); + int (*init_rx)(struct mlx5e_priv *priv); + void (*cleanup_rx)(struct mlx5e_priv *priv); + int (*init_tx)(struct mlx5e_priv *priv); + void (*cleanup_tx)(struct mlx5e_priv *priv); + void (*enable)(struct mlx5e_priv *priv); + void (*disable)(struct mlx5e_priv *priv); + void (*update_stats)(struct mlx5e_priv *priv); + int (*max_nch)(struct mlx5_core_dev *mdev); + int max_tc; +}; + struct mlx5e_priv { /* priv data path fields - start */ struct mlx5e_sq **txq_to_sq_map; @@ -522,18 +624,15 @@ struct mlx5e_priv { unsigned long state; struct mutex state_lock; /* Protects Interface state */ - struct mlx5_uar cq_uar; - u32 pdn; - u32 tdn; - struct mlx5_core_mkey mkey; struct mlx5_core_mkey umr_mkey; struct mlx5e_rq drop_rq; struct mlx5e_channel **channel; u32 tisn[MLX5E_MAX_NUM_TC]; - u32 indir_rqtn; - u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; - struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; + struct mlx5e_rqt indir_rqt; + struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS]; + struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; + u32 tx_rates[MLX5E_MAX_NUM_SQS]; struct mlx5e_flow_steering fs; struct mlx5e_vxlan_db vxlan; @@ -545,11 +644,14 @@ struct mlx5e_priv { struct work_struct tx_timeout_work; struct delayed_work update_stats_work; + u32 pflags; struct mlx5_core_dev *mdev; struct net_device *netdev; struct mlx5e_stats stats; struct mlx5e_tstamp tstamp; u16 q_counter; + const struct mlx5e_profile *profile; + void *ppriv; }; enum mlx5e_link_mode { @@ -567,6 +669,7 @@ enum mlx5e_link_mode { MLX5E_10GBASE_ER = 14, MLX5E_40GBASE_SR4 = 15, MLX5E_40GBASE_LR4 = 16, + MLX5E_50GBASE_SR2 = 18, MLX5E_100GBASE_CR4 = 20, MLX5E_100GBASE_SR4 = 21, MLX5E_100GBASE_KR4 = 22, @@ -584,6 +687,9 @@ enum mlx5e_link_mode { #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) + +void mlx5e_build_ptys2ethtool_map(void); + void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw); u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback); @@ -595,7 +701,6 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget); bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); void mlx5e_free_tx_descs(struct mlx5e_sq *sq); -void mlx5e_free_rx_descs(struct mlx5e_rq *rq); void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); @@ -621,12 +726,26 @@ void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi); struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); +void 
mlx5e_rx_am(struct mlx5e_rq *rq); +void mlx5e_rx_am_work(struct work_struct *work); +struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode); + void mlx5e_update_stats(struct mlx5e_priv *priv); int mlx5e_create_flow_steering(struct mlx5e_priv *priv); void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); void mlx5e_init_l2_addr(struct mlx5e_priv *priv); void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft); +int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, + int location); +int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, + struct ethtool_rxnfc *info, u32 *rule_locs); +int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, + struct ethtool_rx_flow_spec *fs); +int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, + int location); +void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv); +void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv); void mlx5e_set_rx_mode_work(struct work_struct *work); void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp, @@ -656,6 +775,9 @@ void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev, int num_channels); int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); +void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, + u8 cq_period_mode); + static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz) { @@ -694,11 +816,6 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) MLX5E_MAX_NUM_CHANNELS); } -static inline int mlx5e_get_mtt_octw(int npages) -{ - return ALIGN(npages, 8) / 2; -} - extern const struct ethtool_ops mlx5e_ethtool_ops; #ifdef CONFIG_MLX5_CORE_EN_DCB extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; @@ -732,5 +849,39 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, #endif u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev); +int mlx5e_create_tir(struct mlx5_core_dev *mdev, + struct mlx5e_tir *tir, u32 *in, int inlen); +void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, + struct mlx5e_tir *tir); +int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev); +void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); +int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev); + +struct mlx5_eswitch_rep; +int mlx5e_vport_rep_load(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep); +void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep); +int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep); +void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep); +int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv); +void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv); +int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr); + +int mlx5e_create_direct_rqts(struct mlx5e_priv *priv); +void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); +int mlx5e_create_direct_tirs(struct mlx5e_priv *priv); +void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv); +int mlx5e_create_tises(struct mlx5e_priv *priv); +void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv); +int mlx5e_close(struct net_device *netdev); +int mlx5e_open(struct net_device *netdev); +void mlx5e_update_stats_work(struct work_struct *work); +void *mlx5e_create_netdev(struct mlx5_core_dev *mdev, + const struct mlx5e_profile *profile, void *ppriv); +void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv); +struct rtnl_link_stats64 * 
+mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); #endif /* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 3515e78ba..a8cb38789 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -93,14 +93,14 @@ static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type) static int arfs_disable(struct mlx5e_priv *priv) { struct mlx5_flow_destination dest; - u32 *tirn = priv->indir_tirn; + struct mlx5e_tir *tir = priv->indir_tir; int err = 0; int tt; int i; dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; for (i = 0; i < ARFS_NUM_TYPES; i++) { - dest.tir_num = tirn[i]; + dest.tir_num = tir[i].tirn; tt = arfs_get_tt(i); /* Modify ttc rules destination to bypass the aRFS tables*/ err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt], @@ -175,15 +175,12 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, { struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type]; struct mlx5_flow_destination dest; - u8 match_criteria_enable = 0; - u32 *tirn = priv->indir_tirn; - u32 *match_criteria; - u32 *match_value; + struct mlx5e_tir *tir = priv->indir_tir; + struct mlx5_flow_spec *spec; int err = 0; - match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - if (!match_value || !match_criteria) { + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { netdev_err(priv->netdev, "%s: alloc failed\n", __func__); err = -ENOMEM; goto out; @@ -192,24 +189,23 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; switch (type) { case ARFS_IPV4_TCP: - dest.tir_num = tirn[MLX5E_TT_IPV4_TCP]; + dest.tir_num = tir[MLX5E_TT_IPV4_TCP].tirn; break; case ARFS_IPV4_UDP: - dest.tir_num = tirn[MLX5E_TT_IPV4_UDP]; + dest.tir_num = tir[MLX5E_TT_IPV4_UDP].tirn; break; case ARFS_IPV6_TCP: - dest.tir_num = tirn[MLX5E_TT_IPV6_TCP]; + dest.tir_num = tir[MLX5E_TT_IPV6_TCP].tirn; break; case ARFS_IPV6_UDP: - dest.tir_num = tirn[MLX5E_TT_IPV6_UDP]; + dest.tir_num = tir[MLX5E_TT_IPV6_UDP].tirn; break; default: err = -EINVAL; goto out; } - arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, match_criteria_enable, - match_criteria, match_value, + arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_DEFAULT_FLOW_TAG, &dest); @@ -220,8 +216,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, __func__, type); } out: - kvfree(match_criteria); - kvfree(match_value); + kvfree(spec); return err; } @@ -475,23 +470,20 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv, struct mlx5_flow_rule *rule = NULL; struct mlx5_flow_destination dest; struct arfs_table *arfs_table; - u8 match_criteria_enable = 0; + struct mlx5_flow_spec *spec; struct mlx5_flow_table *ft; - u32 *match_criteria; - u32 *match_value; int err = 0; - match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - if (!match_value || !match_criteria) { + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { netdev_err(priv->netdev, "%s: alloc failed\n", __func__); err = -ENOMEM; goto out; } - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, match_criteria, + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, 
outer_headers.ethertype); - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, ntohs(tuple->etype)); arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype); if (!arfs_table) { @@ -501,59 +493,58 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv, ft = arfs_table->ft.t; if (tuple->ip_proto == IPPROTO_TCP) { - MLX5_SET_TO_ONES(fte_match_param, match_criteria, + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.tcp_dport); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.tcp_sport); - MLX5_SET(fte_match_param, match_value, outer_headers.tcp_dport, + MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport, ntohs(tuple->dst_port)); - MLX5_SET(fte_match_param, match_value, outer_headers.tcp_sport, + MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport, ntohs(tuple->src_port)); } else { - MLX5_SET_TO_ONES(fte_match_param, match_criteria, + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_sport); - MLX5_SET(fte_match_param, match_value, outer_headers.udp_dport, + MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, ntohs(tuple->dst_port)); - MLX5_SET(fte_match_param, match_value, outer_headers.udp_sport, + MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport, ntohs(tuple->src_port)); } if (tuple->etype == htons(ETH_P_IP)) { - memcpy(MLX5_ADDR_OF(fte_match_param, match_value, + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), &tuple->src_ipv4, 4); - memcpy(MLX5_ADDR_OF(fte_match_param, match_value, + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), &tuple->dst_ipv4, 4); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); } else { - memcpy(MLX5_ADDR_OF(fte_match_param, match_value, + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), &tuple->src_ipv6, 16); - memcpy(MLX5_ADDR_OF(fte_match_param, match_value, + memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), &tuple->dst_ipv6, 16); - memset(MLX5_ADDR_OF(fte_match_param, match_criteria, + memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16); - memset(MLX5_ADDR_OF(fte_match_param, match_criteria, + memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16); } dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn; - rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria, - match_value, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_DEFAULT_FLOW_TAG, &dest); if (IS_ERR(rule)) { @@ -563,8 +554,7 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv, } out: - 
kvfree(match_criteria); - kvfree(match_value); + kvfree(spec); return err ? ERR_PTR(err) : rule; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c new file mode 100644 index 000000000..9cce153e1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "en.h" + +/* mlx5e global resources should be placed in this file. + * Global resources are common to all the netdevices created on the same NIC.
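Before the new file continues, one note on the en_arfs.c hunks just above: they convert two separately vzalloc'd buffers (match_criteria and match_value) into a single mlx5_flow_spec object. The resulting call pattern, assembled from the fields the diff touches:

struct mlx5_flow_spec *spec;

spec = mlx5_vzalloc(sizeof(*spec));
if (!spec)
	return -ENOMEM;

/* one allocation now carries criteria enable, criteria and value */
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
		 outer_headers.ethertype);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
	 ETH_P_IP);

rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
			  MLX5_FS_DEFAULT_FLOW_TAG, &dest);
kvfree(spec);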
+ */ + +int mlx5e_create_tir(struct mlx5_core_dev *mdev, + struct mlx5e_tir *tir, u32 *in, int inlen) +{ + int err; + + err = mlx5_core_create_tir(mdev, in, inlen, &tir->tirn); + if (err) + return err; + + list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list); + + return 0; +} + +void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, + struct mlx5e_tir *tir) +{ + mlx5_core_destroy_tir(mdev, tir->tirn); + list_del(&tir->list); +} + +static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, + struct mlx5_core_mkey *mkey) +{ + struct mlx5_create_mkey_mbox_in *in; + int err; + + in = mlx5_vzalloc(sizeof(*in)); + if (!in) + return -ENOMEM; + + in->seg.flags = MLX5_PERM_LOCAL_WRITE | + MLX5_PERM_LOCAL_READ | + MLX5_ACCESS_MODE_PA; + in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); + in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + + err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL, + NULL); + + kvfree(in); + + return err; +} + +int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev) +{ + struct mlx5e_resources *res = &mdev->mlx5e_res; + int err; + + err = mlx5_alloc_map_uar(mdev, &res->cq_uar, false); + if (err) { + mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); + return err; + } + + err = mlx5_core_alloc_pd(mdev, &res->pdn); + if (err) { + mlx5_core_err(mdev, "alloc pd failed, %d\n", err); + goto err_unmap_free_uar; + } + + err = mlx5_core_alloc_transport_domain(mdev, &res->td.tdn); + if (err) { + mlx5_core_err(mdev, "alloc td failed, %d\n", err); + goto err_dealloc_pd; + } + + err = mlx5e_create_mkey(mdev, res->pdn, &res->mkey); + if (err) { + mlx5_core_err(mdev, "create mkey failed, %d\n", err); + goto err_dealloc_transport_domain; + } + + INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); + + return 0; + +err_dealloc_transport_domain: + mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); +err_dealloc_pd: + mlx5_core_dealloc_pd(mdev, res->pdn); +err_unmap_free_uar: + mlx5_unmap_free_uar(mdev, &res->cq_uar); + + return err; +} + +void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev) +{ + struct mlx5e_resources *res = &mdev->mlx5e_res; + + mlx5_core_destroy_mkey(mdev, &res->mkey); + mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); + mlx5_core_dealloc_pd(mdev, res->pdn); + mlx5_unmap_free_uar(mdev, &res->cq_uar); +} + +int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev) +{ + struct mlx5e_tir *tir; + void *in; + int inlen; + int err = 0; + + inlen = MLX5_ST_SZ_BYTES(modify_tir_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); + + list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { + err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen); + if (err) + goto out; + } + +out: + kvfree(in); + + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index c585349e0..762af16ed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -127,29 +127,40 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw); } -static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets) +static int mlx5e_dbcnl_validate_ets(struct net_device *netdev, + struct ieee_ets *ets) { int bw_sum = 0; int i; /* Validate Priority */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) + if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) { + 
netdev_err(netdev, + "Failed to validate ETS: priority value greater than max(%d)\n", + MLX5E_MAX_PRIORITY); return -EINVAL; + } } /* Validate Bandwidth Sum */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { - if (!ets->tc_tx_bw[i]) + if (!ets->tc_tx_bw[i]) { + netdev_err(netdev, + "Failed to validate ETS: BW 0 is illegal\n"); return -EINVAL; + } bw_sum += ets->tc_tx_bw[i]; } } - if (bw_sum != 0 && bw_sum != 100) + if (bw_sum != 0 && bw_sum != 100) { + netdev_err(netdev, + "Failed to validate ETS: BW sum is illegal\n"); return -EINVAL; + } return 0; } @@ -159,7 +170,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev, struct mlx5e_priv *priv = netdev_priv(netdev); int err; - err = mlx5e_dbcnl_validate_ets(ets); + err = mlx5e_dbcnl_validate_ets(netdev, ets); if (err) return err; @@ -195,7 +206,6 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; - enum mlx5_port_status ps; u8 curr_pfc_en; int ret; @@ -204,14 +214,8 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev, if (pfc->pfc_en == curr_pfc_en) return 0; - mlx5_query_port_admin_status(mdev, &ps); - if (ps == MLX5_PORT_UP) - mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); - ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en); - - if (ps == MLX5_PORT_UP) - mlx5_set_port_admin_status(mdev, MLX5_PORT_UP); + mlx5_toggle_port_link(mdev); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index e667a870e..7a346bb2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -48,123 +48,85 @@ static void mlx5e_get_drvinfo(struct net_device *dev, sizeof(drvinfo->bus_info)); } -static const struct { - u32 supported; - u32 advertised; +struct ptys2ethtool_config { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised); u32 speed; -} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = { - [MLX5E_1000BASE_CX_SGMII] = { - .supported = SUPPORTED_1000baseKX_Full, - .advertised = ADVERTISED_1000baseKX_Full, - .speed = 1000, - }, - [MLX5E_1000BASE_KX] = { - .supported = SUPPORTED_1000baseKX_Full, - .advertised = ADVERTISED_1000baseKX_Full, - .speed = 1000, - }, - [MLX5E_10GBASE_CX4] = { - .supported = SUPPORTED_10000baseKX4_Full, - .advertised = ADVERTISED_10000baseKX4_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_KX4] = { - .supported = SUPPORTED_10000baseKX4_Full, - .advertised = ADVERTISED_10000baseKX4_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_KR] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_20GBASE_KR2] = { - .supported = SUPPORTED_20000baseKR2_Full, - .advertised = ADVERTISED_20000baseKR2_Full, - .speed = 20000, - }, - [MLX5E_40GBASE_CR4] = { - .supported = SUPPORTED_40000baseCR4_Full, - .advertised = ADVERTISED_40000baseCR4_Full, - .speed = 40000, - }, - [MLX5E_40GBASE_KR4] = { - .supported = SUPPORTED_40000baseKR4_Full, - .advertised = ADVERTISED_40000baseKR4_Full, - .speed = 40000, - }, - [MLX5E_56GBASE_R4] = { - .supported = SUPPORTED_56000baseKR4_Full, - .advertised = ADVERTISED_56000baseKR4_Full, - .speed = 56000, - }, - [MLX5E_10GBASE_CR] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_SR] = { - .supported = 
SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_10GBASE_ER] = { - .supported = SUPPORTED_10000baseKR_Full, - .advertised = ADVERTISED_10000baseKR_Full, - .speed = 10000, - }, - [MLX5E_40GBASE_SR4] = { - .supported = SUPPORTED_40000baseSR4_Full, - .advertised = ADVERTISED_40000baseSR4_Full, - .speed = 40000, - }, - [MLX5E_40GBASE_LR4] = { - .supported = SUPPORTED_40000baseLR4_Full, - .advertised = ADVERTISED_40000baseLR4_Full, - .speed = 40000, - }, - [MLX5E_100GBASE_CR4] = { - .speed = 100000, - }, - [MLX5E_100GBASE_SR4] = { - .speed = 100000, - }, - [MLX5E_100GBASE_KR4] = { - .speed = 100000, - }, - [MLX5E_100GBASE_LR4] = { - .speed = 100000, - }, - [MLX5E_100BASE_TX] = { - .speed = 100, - }, - [MLX5E_1000BASE_T] = { - .supported = SUPPORTED_1000baseT_Full, - .advertised = ADVERTISED_1000baseT_Full, - .speed = 1000, - }, - [MLX5E_10GBASE_T] = { - .supported = SUPPORTED_10000baseT_Full, - .advertised = ADVERTISED_10000baseT_Full, - .speed = 1000, - }, - [MLX5E_25GBASE_CR] = { - .speed = 25000, - }, - [MLX5E_25GBASE_KR] = { - .speed = 25000, - }, - [MLX5E_25GBASE_SR] = { - .speed = 25000, - }, - [MLX5E_50GBASE_CR2] = { - .speed = 50000, - }, - [MLX5E_50GBASE_KR2] = { - .speed = 50000, - }, }; +static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER]; + +#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) \ + ({ \ + struct ptys2ethtool_config *cfg; \ + const unsigned int modes[] = { __VA_ARGS__ }; \ + unsigned int i; \ + cfg = &ptys2ethtool_table[reg_]; \ + cfg->speed = speed_; \ + bitmap_zero(cfg->supported, \ + __ETHTOOL_LINK_MODE_MASK_NBITS); \ + bitmap_zero(cfg->advertised, \ + __ETHTOOL_LINK_MODE_MASK_NBITS); \ + for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \ + __set_bit(modes[i], cfg->supported); \ + __set_bit(modes[i], cfg->advertised); \ + } \ + }) + +void mlx5e_build_ptys2ethtool_map(void) +{ + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, SPEED_20000, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, SPEED_56000, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, SPEED_50000, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT); + 
MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, SPEED_100000, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, SPEED_25000, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, SPEED_25000, + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, SPEED_25000, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, SPEED_50000, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT); + MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, SPEED_50000, + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT); +} + static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@ -177,6 +139,18 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv) return err ? 0 : pfc_en_tx | pfc_en_rx; } +static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 rx_pause; + u32 tx_pause; + int err; + + err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause); + + return err ? false : rx_pause | tx_pause; +} + #define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter)) #define MLX5E_NUM_RQ_STATS(priv) \ (NUM_RQ_STATS * priv->params.num_channels * \ @@ -185,8 +159,8 @@ static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv) (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \ test_bit(MLX5E_STATE_OPENED, &priv->state)) #define MLX5E_NUM_PFC_COUNTERS(priv) \ - (hweight8(mlx5e_query_pfc_combined(priv)) * \ - NUM_PPORT_PER_PRIO_PFC_COUNTERS) + ((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \ + NUM_PPORT_PER_PRIO_PFC_COUNTERS) static int mlx5e_get_sset_count(struct net_device *dev, int sset) { @@ -200,6 +174,8 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) MLX5E_NUM_RQ_STATS(priv) + MLX5E_NUM_SQ_STATS(priv) + MLX5E_NUM_PFC_COUNTERS(priv); + case ETH_SS_PRIV_FLAGS: + return ARRAY_SIZE(mlx5e_priv_flags); /* fallthrough */ default: return -EOPNOTSUPP; @@ -246,8 +222,18 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) pfc_combined = mlx5e_query_pfc_combined(priv); for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) { for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { + char pfc_string[ETH_GSTRING_LEN]; + + snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio); + sprintf(data + (idx++) * ETH_GSTRING_LEN, + pport_per_prio_pfc_stats_desc[i].format, pfc_string); + } + } + + if (mlx5e_query_global_pause_combined(priv)) { + for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { sprintf(data + (idx++) * ETH_GSTRING_LEN, - pport_per_prio_pfc_stats_desc[i].format, prio); + pport_per_prio_pfc_stats_desc[i].format, "global"); } } @@ -272,9 +258,12 @@ static void mlx5e_get_strings(struct net_device *dev, uint32_t stringset, uint8_t *data) { struct mlx5e_priv *priv = netdev_priv(dev); + int i; switch (stringset) { case ETH_SS_PRIV_FLAGS: + for (i = 0; i < ARRAY_SIZE(mlx5e_priv_flags); 
i++) + strcpy(data + i * ETH_GSTRING_LEN, mlx5e_priv_flags[i]); break; case ETH_SS_TEST: @@ -339,6 +328,13 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, } } + if (mlx5e_query_global_pause_combined(priv)) { + for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0], + pport_per_prio_pfc_stats_desc, i); + } + } + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return; @@ -356,15 +352,61 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, sq_stats_desc, j); } +static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type, + int num_wqe) +{ + int packets_per_wqe; + int stride_size; + int num_strides; + int wqe_size; + + if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) + return num_wqe; + + stride_size = 1 << priv->params.mpwqe_log_stride_sz; + num_strides = 1 << priv->params.mpwqe_log_num_strides; + wqe_size = stride_size * num_strides; + + packets_per_wqe = wqe_size / + ALIGN(ETH_DATA_LEN, stride_size); + return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1)); +} + +static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type, + int num_packets) +{ + int packets_per_wqe; + int stride_size; + int num_strides; + int wqe_size; + int num_wqes; + + if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) + return num_packets; + + stride_size = 1 << priv->params.mpwqe_log_stride_sz; + num_strides = 1 << priv->params.mpwqe_log_num_strides; + wqe_size = stride_size * num_strides; + + num_packets = (1 << order_base_2(num_packets)); + + packets_per_wqe = wqe_size / + ALIGN(ETH_DATA_LEN, stride_size); + num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe); + return 1 << (order_base_2(num_wqes)); +} + static void mlx5e_get_ringparam(struct net_device *dev, struct ethtool_ringparam *param) { struct mlx5e_priv *priv = netdev_priv(dev); int rq_wq_type = priv->params.rq_wq_type; - param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type); + param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, + 1 << mlx5_max_log_rq_size(rq_wq_type)); param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE; - param->rx_pending = 1 << priv->params.log_rq_size; + param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, + 1 << priv->params.log_rq_size); param->tx_pending = 1 << priv->params.log_sq_size; } @@ -374,9 +416,13 @@ static int mlx5e_set_ringparam(struct net_device *dev, struct mlx5e_priv *priv = netdev_priv(dev); bool was_opened; int rq_wq_type = priv->params.rq_wq_type; + u32 rx_pending_wqes; + u32 min_rq_size; + u32 max_rq_size; u16 min_rx_wqes; u8 log_rq_size; u8 log_sq_size; + u32 num_mtts; int err = 0; if (param->rx_jumbo_pending) { @@ -389,18 +435,36 @@ static int mlx5e_set_ringparam(struct net_device *dev, __func__); return -EINVAL; } - if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) { + + min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, + 1 << mlx5_min_log_rq_size(rq_wq_type)); + max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type, + 1 << mlx5_max_log_rq_size(rq_wq_type)); + rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type, + param->rx_pending); + + if (param->rx_pending < min_rq_size) { netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n", __func__, param->rx_pending, - 1 << mlx5_min_log_rq_size(rq_wq_type)); + min_rq_size); return -EINVAL; } - if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) { + if (param->rx_pending > max_rq_size) { netdev_info(dev, "%s: rx_pending (%d) > max 
(%d)\n", __func__, param->rx_pending, - 1 << mlx5_max_log_rq_size(rq_wq_type)); + max_rq_size); return -EINVAL; } + + num_mtts = MLX5E_REQUIRED_MTTS(priv->params.num_channels, + rx_pending_wqes); + if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && + !MLX5E_VALID_NUM_MTTS(num_mtts)) { + netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n", + __func__, param->rx_pending); + return -EINVAL; + } + if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) { netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n", __func__, param->tx_pending, @@ -414,9 +478,9 @@ static int mlx5e_set_ringparam(struct net_device *dev, return -EINVAL; } - log_rq_size = order_base_2(param->rx_pending); + log_rq_size = order_base_2(rx_pending_wqes); log_sq_size = order_base_2(param->tx_pending); - min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending); + min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes); if (log_rq_size == priv->params.log_rq_size && log_sq_size == priv->params.log_sq_size && @@ -458,6 +522,7 @@ static int mlx5e_set_channels(struct net_device *dev, unsigned int count = ch->combined_count; bool arfs_enabled; bool was_opened; + u32 num_mtts; int err = 0; if (!count) { @@ -476,6 +541,14 @@ static int mlx5e_set_channels(struct net_device *dev, return -EINVAL; } + num_mtts = MLX5E_REQUIRED_MTTS(count, BIT(priv->params.log_rq_size)); + if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ && + !MLX5E_VALID_NUM_MTTS(num_mtts)) { + netdev_info(dev, "%s: rx count (%d) request can't be satisfied, try to reduce.\n", + __func__, count); + return -EINVAL; + } + if (priv->params.num_channels == count) return 0; @@ -519,10 +592,11 @@ static int mlx5e_get_coalesce(struct net_device *netdev, if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) return -ENOTSUPP; - coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec; - coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts; - coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec; - coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts; + coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec; + coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts; + coal->tx_coalesce_usecs = priv->params.tx_cq_moderation.usec; + coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation.pkts; + coal->use_adaptive_rx_coalesce = priv->params.rx_am_enabled; return 0; } @@ -533,6 +607,10 @@ static int mlx5e_set_coalesce(struct net_device *netdev, struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_channel *c; + bool restart = + !!coal->use_adaptive_rx_coalesce != priv->params.rx_am_enabled; + bool was_opened; + int err = 0; int tc; int i; @@ -540,12 +618,19 @@ static int mlx5e_set_coalesce(struct net_device *netdev, return -ENOTSUPP; mutex_lock(&priv->state_lock); - priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs; - priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames; - priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs; - priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames; - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened && restart) { + mlx5e_close_locked(netdev); + priv->params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce; + } + + priv->params.tx_cq_moderation.usec = coal->tx_coalesce_usecs; + priv->params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames; + 
priv->params.rx_cq_moderation.usec = coal->rx_coalesce_usecs; + priv->params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames; + + if (!was_opened || restart) goto out; for (i = 0; i < priv->params.num_channels; ++i) { @@ -564,35 +649,39 @@ static int mlx5e_set_coalesce(struct net_device *netdev, } out: + if (was_opened && restart) + err = mlx5e_open_locked(netdev); + mutex_unlock(&priv->state_lock); - return 0; + return err; } -static u32 ptys2ethtool_supported_link(u32 eth_proto_cap) +static void ptys2ethtool_supported_link(unsigned long *supported_modes, + u32 eth_proto_cap) { - int i; - u32 supported_modes = 0; + unsigned long proto_cap = eth_proto_cap; + int proto; - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (eth_proto_cap & MLX5E_PROT_MASK(i)) - supported_modes |= ptys2ethtool_table[i].supported; - } - return supported_modes; + for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER) + bitmap_or(supported_modes, supported_modes, + ptys2ethtool_table[proto].supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); } -static u32 ptys2ethtool_adver_link(u32 eth_proto_cap) +static void ptys2ethtool_adver_link(unsigned long *advertising_modes, + u32 eth_proto_cap) { - int i; - u32 advertising_modes = 0; + unsigned long proto_cap = eth_proto_cap; + int proto; - for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (eth_proto_cap & MLX5E_PROT_MASK(i)) - advertising_modes |= ptys2ethtool_table[i].advertised; - } - return advertising_modes; + for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER) + bitmap_or(advertising_modes, advertising_modes, + ptys2ethtool_table[proto].advertised, + __ETHTOOL_LINK_MODE_MASK_NBITS); } -static u32 ptys2ethtool_supported_port(u32 eth_proto_cap) +static void ptys2ethtool_supported_port(struct ethtool_link_ksettings *link_ksettings, + u32 eth_proto_cap) { if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR) | MLX5E_PROT_MASK(MLX5E_10GBASE_SR) @@ -600,7 +689,7 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap) | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) { - return SUPPORTED_FIBRE; + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, FIBRE); } if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4) @@ -608,9 +697,8 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap) | MLX5E_PROT_MASK(MLX5E_10GBASE_KR) | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) { - return SUPPORTED_Backplane; + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Backplane); } - return 0; } int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) @@ -634,7 +722,7 @@ int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) static void get_speed_duplex(struct net_device *netdev, u32 eth_proto_oper, - struct ethtool_cmd *cmd) + struct ethtool_link_ksettings *link_ksettings) { int i; u32 speed = SPEED_UNKNOWN; @@ -651,23 +739,32 @@ static void get_speed_duplex(struct net_device *netdev, } } out: - ethtool_cmd_speed_set(cmd, speed); - cmd->duplex = duplex; + link_ksettings->base.speed = speed; + link_ksettings->base.duplex = duplex; } -static void get_supported(u32 eth_proto_cap, u32 *supported) +static void get_supported(u32 eth_proto_cap, + struct ethtool_link_ksettings *link_ksettings) { - *supported |= ptys2ethtool_supported_port(eth_proto_cap); - *supported |= ptys2ethtool_supported_link(eth_proto_cap); - *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + unsigned long *supported = 
link_ksettings->link_modes.supported; + + ptys2ethtool_supported_port(link_ksettings, eth_proto_cap); + ptys2ethtool_supported_link(supported, eth_proto_cap); + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause); } static void get_advertising(u32 eth_proto_cap, u8 tx_pause, - u8 rx_pause, u32 *advertising) + u8 rx_pause, + struct ethtool_link_ksettings *link_ksettings) { - *advertising |= ptys2ethtool_adver_link(eth_proto_cap); - *advertising |= tx_pause ? ADVERTISED_Pause : 0; - *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0; + unsigned long *advertising = link_ksettings->link_modes.advertising; + + ptys2ethtool_adver_link(advertising, eth_proto_cap); + if (tx_pause) + ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); + if (tx_pause ^ rx_pause) + ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause); } static u8 get_connector_port(u32 eth_proto) @@ -695,13 +792,16 @@ static u8 get_connector_port(u32 eth_proto) return PORT_OTHER; } -static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising) +static void get_lp_advertising(u32 eth_proto_lp, + struct ethtool_link_ksettings *link_ksettings) { - *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp); + unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; + + ptys2ethtool_adver_link(lp_advertising, eth_proto_lp); } -static int mlx5e_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) +static int mlx5e_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *link_ksettings) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; @@ -710,6 +810,8 @@ static int mlx5e_get_settings(struct net_device *netdev, u32 eth_proto_admin; u32 eth_proto_lp; u32 eth_proto_oper; + u8 an_disable_admin; + u8 an_status; int err; err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); @@ -720,35 +822,49 @@ static int mlx5e_get_settings(struct net_device *netdev, goto err_query_ptys; } - eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); - eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); - eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); - eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); + eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability); + eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin); + eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); + eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); + an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); + an_status = MLX5_GET(ptys_reg, out, an_status); - cmd->supported = 0; - cmd->advertising = 0; + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); - get_supported(eth_proto_cap, &cmd->supported); - get_advertising(eth_proto_admin, 0, 0, &cmd->advertising); - get_speed_duplex(netdev, eth_proto_oper, cmd); + get_supported(eth_proto_cap, link_ksettings); + get_advertising(eth_proto_admin, 0, 0, link_ksettings); + get_speed_duplex(netdev, eth_proto_oper, link_ksettings); eth_proto_oper = eth_proto_oper ? 
eth_proto_oper : eth_proto_cap; - cmd->port = get_connector_port(eth_proto_oper); - get_lp_advertising(eth_proto_lp, &cmd->lp_advertising); + link_ksettings->base.port = get_connector_port(eth_proto_oper); + get_lp_advertising(eth_proto_lp, link_ksettings); + + if (an_status == MLX5_AN_COMPLETE) + ethtool_link_ksettings_add_link_mode(link_ksettings, + lp_advertising, Autoneg); - cmd->transceiver = XCVR_INTERNAL; + link_ksettings->base.autoneg = an_disable_admin ? AUTONEG_DISABLE : + AUTONEG_ENABLE; + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, + Autoneg); + if (!an_disable_admin) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, Autoneg); err_query_ptys: return err; } -static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes) +static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) { u32 i, ptys_modes = 0; for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) { - if (ptys2ethtool_table[i].advertised & link_modes) + if (bitmap_intersects(ptys2ethtool_table[i].advertised, + link_modes, + __ETHTOOL_LINK_MODE_MASK_NBITS)) ptys_modes |= MLX5E_PROT_MASK(i); } @@ -767,21 +883,25 @@ static u32 mlx5e_ethtool2ptys_speed_link(u32 speed) return speed_links; } -static int mlx5e_set_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) +static int mlx5e_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *link_ksettings) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; + u32 eth_proto_cap, eth_proto_admin; + bool an_changes = false; + u8 an_disable_admin; + u8 an_disable_cap; + bool an_disable; u32 link_modes; + u8 an_status; u32 speed; - u32 eth_proto_cap, eth_proto_admin; - enum mlx5_port_status ps; int err; - speed = ethtool_cmd_speed(cmd); + speed = link_ksettings->base.speed; - link_modes = cmd->autoneg == AUTONEG_ENABLE ? - mlx5e_ethtool2ptys_adver_link(cmd->advertising) : + link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ? 
+ mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) : mlx5e_ethtool2ptys_speed_link(speed); err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN); @@ -806,15 +926,18 @@ static int mlx5e_set_settings(struct net_device *netdev, goto out; } - if (link_modes == eth_proto_admin) + mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status, + &an_disable_cap, &an_disable_admin); + + an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE; + an_changes = ((!an_disable && an_disable_admin) || + (an_disable && !an_disable_admin)); + + if (!an_changes && link_modes == eth_proto_admin) goto out; - mlx5_query_port_admin_status(mdev, &ps); - if (ps == MLX5_PORT_UP) - mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); - mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN); - if (ps == MLX5_PORT_UP) - mlx5_set_port_admin_status(mdev, MLX5_PORT_UP); + mlx5_set_port_ptys(mdev, an_disable, link_modes, MLX5_PTYS_EN); + mlx5_toggle_port_link(mdev); out: return err; @@ -861,7 +984,7 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) mlx5e_build_tir_ctx_hash(tirc, priv); for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) - mlx5_core_modify_tir(mdev, priv->indir_tirn[i], in, inlen); + mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen); } static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, @@ -883,7 +1006,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, mutex_lock(&priv->state_lock); if (indir) { - u32 rqtn = priv->indir_rqtn; + u32 rqtn = priv->indir_rqt.rqtn; memcpy(priv->params.indirection_rqt, indir, sizeof(priv->params.indirection_rqt)); @@ -916,6 +1039,15 @@ static int mlx5e_get_rxnfc(struct net_device *netdev, case ETHTOOL_GRXRINGS: info->data = priv->params.num_channels; break; + case ETHTOOL_GRXCLSRLCNT: + info->rule_cnt = priv->fs.ethtool.tot_num_rules; + break; + case ETHTOOL_GRXCLSRULE: + err = mlx5e_ethtool_get_flow(priv, info, info->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs); + break; default: err = -EOPNOTSUPP; break; @@ -1272,6 +1404,107 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev, return 0; } +typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable); + +static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + bool rx_mode_changed; + u8 rx_cq_period_mode; + int err = 0; + bool reset; + + rx_cq_period_mode = enable ?
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE : + MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + rx_mode_changed = rx_cq_period_mode != priv->params.rx_cq_period_mode; + + if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE && + !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe)) + return -ENOTSUPP; + + if (!rx_mode_changed) + return 0; + + reset = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (reset) + mlx5e_close_locked(netdev); + + mlx5e_set_rx_cq_mode_params(&priv->params, rx_cq_period_mode); + + if (reset) + err = mlx5e_open_locked(netdev); + + return err; +} + +static int mlx5e_handle_pflag(struct net_device *netdev, + u32 wanted_flags, + enum mlx5e_priv_flag flag, + mlx5e_pflag_handler pflag_handler) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + bool enable = !!(wanted_flags & flag); + u32 changes = wanted_flags ^ priv->pflags; + int err; + + if (!(changes & flag)) + return 0; + + err = pflag_handler(netdev, enable); + if (err) { + netdev_err(netdev, "%s private flag 0x%x failed err %d\n", + enable ? "Enable" : "Disable", flag, err); + return err; + } + + MLX5E_SET_PRIV_FLAG(priv, flag, enable); + return 0; +} + +static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + mutex_lock(&priv->state_lock); + + err = mlx5e_handle_pflag(netdev, pflags, + MLX5E_PFLAG_RX_CQE_BASED_MODER, + set_pflag_rx_cqe_based_moder); + + mutex_unlock(&priv->state_lock); + return err ? -EINVAL : 0; +} + +static u32 mlx5e_get_priv_flags(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return priv->pflags; +} + +static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + int err = 0; + struct mlx5e_priv *priv = netdev_priv(dev); + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = mlx5e_ethtool_flow_replace(priv, &cmd->fs); + break; + case ETHTOOL_SRXCLSRLDEL: + err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, @@ -1284,13 +1517,14 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .set_channels = mlx5e_set_channels, .get_coalesce = mlx5e_get_coalesce, .set_coalesce = mlx5e_set_coalesce, - .get_settings = mlx5e_get_settings, - .set_settings = mlx5e_set_settings, + .get_link_ksettings = mlx5e_get_link_ksettings, + .set_link_ksettings = mlx5e_set_link_ksettings, .get_rxfh_key_size = mlx5e_get_rxfh_key_size, .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size, .get_rxfh = mlx5e_get_rxfh, .set_rxfh = mlx5e_set_rxfh, .get_rxnfc = mlx5e_get_rxnfc, + .set_rxnfc = mlx5e_set_rxnfc, .get_tunable = mlx5e_get_tunable, .set_tunable = mlx5e_set_tunable, .get_pauseparam = mlx5e_get_pauseparam, @@ -1301,4 +1535,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .set_wol = mlx5e_set_wol, .get_module_info = mlx5e_get_module_info, .get_module_eeprom = mlx5e_get_module_eeprom, + .get_priv_flags = mlx5e_get_priv_flags, + .set_priv_flags = mlx5e_set_priv_flags }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index b32740092..1587a9fd5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -156,19 +156,18 @@ enum mlx5e_vlan_rule_type { static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, enum mlx5e_vlan_rule_type rule_type, - u16 vid, u32 *mc, u32 *mv) + u16 vid, struct 
mlx5_flow_spec *spec) { struct mlx5_flow_table *ft = priv->fs.vlan.ft.t; struct mlx5_flow_destination dest; - u8 match_criteria_enable = 0; struct mlx5_flow_rule **rule_p; int err = 0; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.ft = priv->fs.l2.ft.t; - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); switch (rule_type) { case MLX5E_VLAN_RULE_TYPE_UNTAGGED: @@ -176,17 +175,19 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, break; case MLX5E_VLAN_RULE_TYPE_ANY_VID: rule_p = &priv->fs.vlan.any_vlan_rule; - MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1); break; default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ rule_p = &priv->fs.vlan.active_vlans_rule[vid]; - MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); - MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.first_vid); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, + vid); break; } - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, + *rule_p = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_DEFAULT_FLOW_TAG, &dest); @@ -203,27 +204,21 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, enum mlx5e_vlan_rule_type rule_type, u16 vid) { - u32 *match_criteria; - u32 *match_value; + struct mlx5_flow_spec *spec; int err = 0; - match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - if (!match_value || !match_criteria) { + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { netdev_err(priv->netdev, "%s: alloc failed\n", __func__); - err = -ENOMEM; - goto add_vlan_rule_out; + return -ENOMEM; } if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID) mlx5e_vport_context_update_vlans(priv); - err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria, - match_value); + err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec); -add_vlan_rule_out: - kvfree(match_criteria); - kvfree(match_value); + kvfree(spec); return err; } @@ -598,32 +593,27 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, u8 proto) { struct mlx5_flow_rule *rule; - u8 match_criteria_enable = 0; - u32 *match_criteria; - u32 *match_value; + struct mlx5_flow_spec *spec; int err = 0; - match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - if (!match_value || !match_criteria) { + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { netdev_err(priv->netdev, "%s: alloc failed\n", __func__); - err = -ENOMEM; - goto out; + return ERR_PTR(-ENOMEM); } if (proto) { - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ip_protocol); - MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, proto); + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol); + MLX5_SET(fte_match_param, 
spec->match_value, outer_headers.ip_protocol, proto); } if (etype) { - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype); - MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, etype); + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype); } - rule = mlx5_add_flow_rule(ft, match_criteria_enable, - match_criteria, match_value, + rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_DEFAULT_FLOW_TAG, dest); @@ -631,9 +621,8 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, err = PTR_ERR(rule); netdev_err(priv->netdev, "%s: add rule failed\n", __func__); } -out: - kvfree(match_criteria); - kvfree(match_value); + + kvfree(spec); return err ? ERR_PTR(err) : rule; } @@ -655,7 +644,7 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv) if (tt == MLX5E_TT_ANY) dest.tir_num = priv->direct_tir[0].tirn; else - dest.tir_num = priv->indir_tirn[tt]; + dest.tir_num = priv->indir_tir[tt].tirn; rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, ttc_rules[tt].etype, ttc_rules[tt].proto); @@ -792,24 +781,20 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, { struct mlx5_flow_table *ft = priv->fs.l2.ft.t; struct mlx5_flow_destination dest; - u8 match_criteria_enable = 0; - u32 *match_criteria; - u32 *match_value; + struct mlx5_flow_spec *spec; int err = 0; u8 *mc_dmac; u8 *mv_dmac; - match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - if (!match_value || !match_criteria) { + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { netdev_err(priv->netdev, "%s: alloc failed\n", __func__); - err = -ENOMEM; - goto add_l2_rule_out; + return -ENOMEM; } - mc_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, + mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers.dmac_47_16); - mv_dmac = MLX5_ADDR_OF(fte_match_param, match_value, + mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.dmac_47_16); dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; @@ -817,13 +802,13 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, switch (type) { case MLX5E_FULLMATCH: - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; eth_broadcast_addr(mc_dmac); ether_addr_copy(mv_dmac, ai->addr); break; case MLX5E_ALLMULTI: - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; mc_dmac[0] = 0x01; mv_dmac[0] = 0x01; break; @@ -832,8 +817,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, break; } - ai->rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria, - match_value, + ai->rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FS_DEFAULT_FLOW_TAG, &dest); if (IS_ERR(ai->rule)) { @@ -843,9 +827,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, ai->rule = NULL; } -add_l2_rule_out: - kvfree(match_criteria); - kvfree(match_value); + kvfree(spec); return err; } @@ -1102,6 +1084,8 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv) goto err_destroy_l2_table; } + mlx5e_ethtool_init_steering(priv); + return 0; err_destroy_l2_table: @@ -1121,4 +1105,5 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv 
*priv) mlx5e_destroy_l2_table(priv); mlx5e_destroy_ttc_table(priv); mlx5e_arfs_destroy_tables(priv); + mlx5e_ethtool_cleanup_steering(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c new file mode 100644 index 000000000..d17c24227 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -0,0 +1,586 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/mlx5/fs.h> +#include "en.h" + +struct mlx5e_ethtool_rule { + struct list_head list; + struct ethtool_rx_flow_spec flow_spec; + struct mlx5_flow_rule *rule; + struct mlx5e_ethtool_table *eth_ft; +}; + +static void put_flow_table(struct mlx5e_ethtool_table *eth_ft) +{ + if (!--eth_ft->num_rules) { + mlx5_destroy_flow_table(eth_ft->ft); + eth_ft->ft = NULL; + } +} + +#define MLX5E_ETHTOOL_L3_L4_PRIO 0 +#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS) +#define MLX5E_ETHTOOL_NUM_ENTRIES 64000 +#define MLX5E_ETHTOOL_NUM_GROUPS 10 +static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv, + struct ethtool_rx_flow_spec *fs, + int num_tuples) +{ + struct mlx5e_ethtool_table *eth_ft; + struct mlx5_flow_namespace *ns; + struct mlx5_flow_table *ft; + int max_tuples; + int table_size; + int prio; + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + max_tuples = ETHTOOL_NUM_L3_L4_FTS; + prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples); + eth_ft = &priv->fs.ethtool.l3_l4_ft[prio]; + break; + case IP_USER_FLOW: + max_tuples = ETHTOOL_NUM_L3_L4_FTS; + prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples); + eth_ft = &priv->fs.ethtool.l3_l4_ft[prio]; + break; + case ETHER_FLOW: + max_tuples = ETHTOOL_NUM_L2_FTS; + prio = max_tuples - num_tuples; + eth_ft = &priv->fs.ethtool.l2_ft[prio]; + prio += MLX5E_ETHTOOL_L2_PRIO; + break; + default: + return ERR_PTR(-EINVAL); + } + + eth_ft->num_rules++; + if (eth_ft->ft) + return eth_ft; + + ns = mlx5_get_flow_namespace(priv->mdev, + MLX5_FLOW_NAMESPACE_ETHTOOL); + if (!ns) + return ERR_PTR(-ENOTSUPP); + + table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev, + flow_table_properties_nic_receive.log_max_ft_size)), + MLX5E_ETHTOOL_NUM_ENTRIES); + ft = mlx5_create_auto_grouped_flow_table(ns, prio, + table_size, + MLX5E_ETHTOOL_NUM_GROUPS, 0); + if (IS_ERR(ft)) + return (void *)ft; + + eth_ft->ft = ft; + return eth_ft; +} + +static void mask_spec(u8 *mask, u8 *val, size_t size) +{ + unsigned int i; + + for (i = 0; i < size; i++, mask++, val++) + *((u8 *)val) = *((u8 *)mask) & *((u8 *)val); +} + +static void set_ips(void *outer_headers_v, void *outer_headers_c, __be32 ip4src_m, + __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v) +{ + if (ip4src_m) { + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, + src_ipv4_src_ipv6.ipv4_layout.ipv4), + &ip4src_v, sizeof(ip4src_v)); + memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, + src_ipv4_src_ipv6.ipv4_layout.ipv4), + 0xff, sizeof(ip4src_m)); + } + if (ip4dst_m) { + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + &ip4dst_v, sizeof(ip4dst_v)); + memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + 0xff, sizeof(ip4dst_m)); + } + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, + ethertype, ETH_P_IP); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, + ethertype, 0xffff); +} + +static int set_flow_attrs(u32 *match_c, u32 *match_v, + struct ethtool_rx_flow_spec *fs) +{ + void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c, + outer_headers); + void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v, + outer_headers); + u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + struct ethtool_tcpip4_spec *l4_mask; + struct ethtool_tcpip4_spec *l4_val; + struct ethtool_usrip4_spec *l3_mask; + struct ethtool_usrip4_spec *l3_val; + struct ethhdr *eth_val; + 
struct ethhdr *eth_mask; + + switch (flow_type) { + case TCP_V4_FLOW: + l4_mask = &fs->m_u.tcp_ip4_spec; + l4_val = &fs->h_u.tcp_ip4_spec; + set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src, + l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst); + + if (l4_mask->psrc) { + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport, + 0xffff); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport, + ntohs(l4_val->psrc)); + } + if (l4_mask->pdst) { + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport, + 0xffff); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport, + ntohs(l4_val->pdst)); + } + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol, + 0xffff); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol, + IPPROTO_TCP); + break; + case UDP_V4_FLOW: + l4_mask = &fs->m_u.tcp_ip4_spec; + l4_val = &fs->h_u.tcp_ip4_spec; + set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src, + l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst); + + if (l4_mask->psrc) { + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport, + 0xffff); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport, + ntohs(l4_val->psrc)); + } + if (l4_mask->pdst) { + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport, + 0xffff); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport, + ntohs(l4_val->pdst)); + } + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol, + 0xffff); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol, + IPPROTO_UDP); + break; + case IP_USER_FLOW: + l3_mask = &fs->m_u.usr_ip4_spec; + l3_val = &fs->h_u.usr_ip4_spec; + set_ips(outer_headers_v, outer_headers_c, l3_mask->ip4src, + l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst); + break; + case ETHER_FLOW: + eth_mask = &fs->m_u.ether_spec; + eth_val = &fs->h_u.ether_spec; + + mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask)); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, + outer_headers_c, smac_47_16), + eth_mask->h_source); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, + outer_headers_v, smac_47_16), + eth_val->h_source); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, + outer_headers_c, dmac_47_16), + eth_mask->h_dest); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, + outer_headers_v, dmac_47_16), + eth_val->h_dest); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ethertype, + ntohs(eth_mask->h_proto)); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ethertype, + ntohs(eth_val->h_proto)); + break; + default: + return -EINVAL; + } + + if ((fs->flow_type & FLOW_EXT) && + (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) { + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, + vlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, + vlan_tag, 1); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, + first_vid, 0xfff); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, + first_vid, ntohs(fs->h_ext.vlan_tci)); + } + if (fs->flow_type & FLOW_MAC_EXT && + !is_zero_ether_addr(fs->m_ext.h_dest)) { + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, + outer_headers_c, dmac_47_16), + fs->m_ext.h_dest); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, + outer_headers_v, dmac_47_16), + fs->h_ext.h_dest); + } + + return 0; +} + +static void add_rule_to_list(struct mlx5e_priv *priv, + struct mlx5e_ethtool_rule *rule) +{ + struct mlx5e_ethtool_rule *iter; + struct list_head *head = &priv->fs.ethtool.rules; + + list_for_each_entry(iter, &priv->fs.ethtool.rules, list) { + if (iter->flow_spec.location > 
rule->flow_spec.location) + break; + head = &iter->list; + } + priv->fs.ethtool.tot_num_rules++; + list_add(&rule->list, head); +} + +static bool outer_header_zero(u32 *match_criteria) +{ + int size = MLX5_ST_SZ_BYTES(fte_match_param); + char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria, + outer_headers); + + return outer_headers_c[0] == 0 && !memcmp(outer_headers_c, + outer_headers_c + 1, + size - 1); +} + +static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv, + struct mlx5_flow_table *ft, + struct ethtool_rx_flow_spec *fs) +{ + struct mlx5_flow_destination *dst = NULL; + struct mlx5_flow_spec *spec; + struct mlx5_flow_rule *rule; + int err = 0; + u32 action; + + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) + return ERR_PTR(-ENOMEM); + err = set_flow_attrs(spec->match_criteria, spec->match_value, + fs); + if (err) + goto free; + + if (fs->ring_cookie == RX_CLS_FLOW_DISC) { + action = MLX5_FLOW_CONTEXT_ACTION_DROP; + } else { + dst = kzalloc(sizeof(*dst), GFP_KERNEL); + if (!dst) { + err = -ENOMEM; + goto free; + } + + dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; + dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn; + action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + } + + spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria)); + rule = mlx5_add_flow_rule(ft, spec, action, + MLX5_FS_DEFAULT_FLOW_TAG, dst); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n", + __func__, err); + goto free; + } +free: + kvfree(spec); + kfree(dst); + return err ? ERR_PTR(err) : rule; +} + +static void del_ethtool_rule(struct mlx5e_priv *priv, + struct mlx5e_ethtool_rule *eth_rule) +{ + if (eth_rule->rule) + mlx5_del_flow_rule(eth_rule->rule); + list_del(&eth_rule->list); + priv->fs.ethtool.tot_num_rules--; + put_flow_table(eth_rule->eth_ft); + kfree(eth_rule); +} + +static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv, + int location) +{ + struct mlx5e_ethtool_rule *iter; + + list_for_each_entry(iter, &priv->fs.ethtool.rules, list) { + if (iter->flow_spec.location == location) + return iter; + } + return NULL; +} + +static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv, + int location) +{ + struct mlx5e_ethtool_rule *eth_rule; + + eth_rule = find_ethtool_rule(priv, location); + if (eth_rule) + del_ethtool_rule(priv, eth_rule); + + eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL); + if (!eth_rule) + return ERR_PTR(-ENOMEM); + + add_rule_to_list(priv, eth_rule); + return eth_rule; +} + +#define MAX_NUM_OF_ETHTOOL_RULES BIT(10) + +#define all_ones(field) (field == (__force typeof(field))-1) +#define all_zeros_or_all_ones(field) \ + ((field) == 0 || (field) == (__force typeof(field))-1) + +static int validate_flow(struct mlx5e_priv *priv, + struct ethtool_rx_flow_spec *fs) +{ + struct ethtool_tcpip4_spec *l4_mask; + struct ethtool_usrip4_spec *l3_mask; + struct ethhdr *eth_mask; + int num_tuples = 0; + + if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES) + return -EINVAL; + + if (fs->ring_cookie >= priv->params.num_channels && + fs->ring_cookie != RX_CLS_FLOW_DISC) + return -EINVAL; + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case ETHER_FLOW: + eth_mask = &fs->m_u.ether_spec; + if (!is_zero_ether_addr(eth_mask->h_dest)) + num_tuples++; + if (!is_zero_ether_addr(eth_mask->h_source)) + num_tuples++; + if (eth_mask->h_proto) + num_tuples++; + break; + case TCP_V4_FLOW: + case UDP_V4_FLOW: + if (fs->m_u.tcp_ip4_spec.tos) + return
-EINVAL; + l4_mask = &fs->m_u.tcp_ip4_spec; + if (l4_mask->ip4src) { + if (!all_ones(l4_mask->ip4src)) + return -EINVAL; + num_tuples++; + } + if (l4_mask->ip4dst) { + if (!all_ones(l4_mask->ip4dst)) + return -EINVAL; + num_tuples++; + } + if (l4_mask->psrc) { + if (!all_ones(l4_mask->psrc)) + return -EINVAL; + num_tuples++; + } + if (l4_mask->pdst) { + if (!all_ones(l4_mask->pdst)) + return -EINVAL; + num_tuples++; + } + /* Flow is TCP/UDP */ + num_tuples++; + break; + case IP_USER_FLOW: + l3_mask = &fs->m_u.usr_ip4_spec; + if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto || + fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4) + return -EINVAL; + if (l3_mask->ip4src) { + if (!all_ones(l3_mask->ip4src)) + return -EINVAL; + num_tuples++; + } + if (l3_mask->ip4dst) { + if (!all_ones(l3_mask->ip4dst)) + return -EINVAL; + num_tuples++; + } + /* Flow is IPv4 */ + num_tuples++; + break; + default: + return -EINVAL; + } + if ((fs->flow_type & FLOW_EXT)) { + if (fs->m_ext.vlan_etype || + (fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))) + return -EINVAL; + + if (fs->m_ext.vlan_tci) { + if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) + return -EINVAL; + } + num_tuples++; + } + + if (fs->flow_type & FLOW_MAC_EXT && + !is_zero_ether_addr(fs->m_ext.h_dest)) + num_tuples++; + + return num_tuples; +} + +int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, + struct ethtool_rx_flow_spec *fs) +{ + struct mlx5e_ethtool_table *eth_ft; + struct mlx5e_ethtool_rule *eth_rule; + struct mlx5_flow_rule *rule; + int num_tuples; + int err; + + num_tuples = validate_flow(priv, fs); + if (num_tuples <= 0) { + netdev_warn(priv->netdev, "%s: flow is not valid\n", __func__); + return -EINVAL; + } + + eth_ft = get_flow_table(priv, fs, num_tuples); + if (IS_ERR(eth_ft)) + return PTR_ERR(eth_ft); + + eth_rule = get_ethtool_rule(priv, fs->location); + if (IS_ERR(eth_rule)) { + put_flow_table(eth_ft); + return PTR_ERR(eth_rule); + } + + eth_rule->flow_spec = *fs; + eth_rule->eth_ft = eth_ft; + if (!eth_ft->ft) { + err = -EINVAL; + goto del_ethtool_rule; + } + rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + goto del_ethtool_rule; + } + + eth_rule->rule = rule; + + return 0; + +del_ethtool_rule: + del_ethtool_rule(priv, eth_rule); + + return err; +} + +int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, + int location) +{ + struct mlx5e_ethtool_rule *eth_rule; + int err = 0; + + if (location >= MAX_NUM_OF_ETHTOOL_RULES) + return -ENOSPC; + + eth_rule = find_ethtool_rule(priv, location); + if (!eth_rule) { + err = -ENOENT; + goto out; + } + + del_ethtool_rule(priv, eth_rule); +out: + return err; +} + +int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, + int location) +{ + struct mlx5e_ethtool_rule *eth_rule; + + if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES) + return -EINVAL; + + list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) { + if (eth_rule->flow_spec.location == location) { + info->fs = eth_rule->flow_spec; + return 0; + } + } + + return -ENOENT; +} + +int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, + u32 *rule_locs) +{ + int location = 0; + int idx = 0; + int err = 0; + + while ((!err || err == -ENOENT) && idx < info->rule_cnt) { + err = mlx5e_ethtool_get_flow(priv, info, location); + if (!err) + rule_locs[idx++] = location; + location++; + } + return err; +} + +void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) +{ + struct mlx5e_ethtool_rule *iter; + struct 
mlx5e_ethtool_rule *temp; + + list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list) + del_ethtool_rule(priv, iter); +} + +void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) +{ + INIT_LIST_HEAD(&priv->fs.ethtool.rules); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5a4d88c2c..2459c7f3d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -39,22 +39,17 @@ #include "eswitch.h" #include "vxlan.h" -enum { - MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000, - MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20, - MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS / - MLX5_EN_QP_FLUSH_MSLEEP_QUANT, -}; - struct mlx5e_rq_param { - u32 rqc[MLX5_ST_SZ_DW(rqc)]; - struct mlx5_wq_param wq; + u32 rqc[MLX5_ST_SZ_DW(rqc)]; + struct mlx5_wq_param wq; + bool am_enabled; }; struct mlx5e_sq_param { u32 sqc[MLX5_ST_SZ_DW(sqc)]; struct mlx5_wq_param wq; u16 max_inline; + u8 min_inline_mode; bool icosq; }; @@ -62,6 +57,7 @@ struct mlx5e_cq_param { u32 cqc[MLX5_ST_SZ_DW(cqc)]; struct mlx5_wq_param wq; u16 eq_ix; + u8 cq_period_mode; }; struct mlx5e_channel_param { @@ -159,6 +155,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; s->tx_queue_dropped += sq_stats->dropped; + s->tx_xmit_more += sq_stats->xmit_more; s->tx_csum_partial_inner += sq_stats->csum_partial_inner; tx_offload_none += sq_stats->csum_none; } @@ -254,14 +251,14 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) mlx5e_update_sw_counters(priv); } -static void mlx5e_update_stats_work(struct work_struct *work) +void mlx5e_update_stats_work(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv, update_stats_work); mutex_lock(&priv->state_lock); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { - mlx5e_update_stats(priv); + priv->profile->update_stats(priv); queue_delayed_work(priv->wq, dwork, msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL)); } @@ -337,6 +334,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, rq->alloc_wqe = mlx5e_alloc_rx_mpwqe; rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; + rq->mpwqe_mtt_offset = c->ix * + MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size)); + rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz); rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides); rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides; @@ -367,6 +367,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, wqe->data.byte_count = cpu_to_be32(byte_count); } + INIT_WORK(&rq->am.work, mlx5e_rx_am_work); + rq->am.mode = priv->params.rx_cq_period_mode; + rq->wq_type = priv->params.rq_wq_type; rq->pdev = c->pdev; rq->netdev = c->netdev; @@ -422,7 +425,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); - MLX5_SET(rqc, rqc, flush_in_error_en, 1); MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable); MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); @@ -519,6 +521,27 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq) return -ETIMEDOUT; } +static void mlx5e_free_rx_descs(struct mlx5e_rq *rq) +{ + struct mlx5_wq_ll *wq = &rq->wq; + struct mlx5e_rx_wqe *wqe; + __be16 wqe_ix_be; + u16 wqe_ix; + + /* UMR WQE (if in progress) is always at wq->head */ + 
if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) + mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]); + + while (!mlx5_wq_ll_is_empty(wq)) { + wqe_ix_be = *wq->tail_next; + wqe_ix = be16_to_cpu(wqe_ix_be); + wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix); + rq->dealloc_wqe(rq, wqe_ix); + mlx5_wq_ll_pop(&rq->wq, wqe_ix_be, + &wqe->next.next_wqe_index); + } +} + static int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_rq_param *param, struct mlx5e_rq *rq) @@ -539,7 +562,8 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, if (err) goto err_disable_rq; - set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); + if (param->am_enabled) + set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP; sq->ico_wqe_info[pi].num_wqebbs = 1; @@ -557,22 +581,9 @@ err_destroy_rq: static void mlx5e_close_rq(struct mlx5e_rq *rq) { - int tout = 0; - int err; - - clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); + set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state); napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ - - err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); - while (!mlx5_wq_ll_is_empty(&rq->wq) && !err && - tout++ < MLX5_EN_QP_FLUSH_MAX_ITER) - msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT); - - if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER) - set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state); - - /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */ - napi_synchronize(&rq->channel->napi); + cancel_work_sync(&rq->am.work); mlx5e_disable_rq(rq); mlx5e_free_rx_descs(rq); @@ -639,6 +650,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, } sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; sq->max_inline = param->max_inline; + sq->min_inline_mode = + MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5E_INLINE_MODE_VPORT_CONTEXT ? + param->min_inline_mode : 0; err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu)); if (err) @@ -721,6 +735,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]); MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn); + MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode); MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 
0 : 1); MLX5_SET(sqc, sqc, flush_in_error_en, 1); @@ -741,7 +756,8 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) return err; } -static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) +static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, + int next_state, bool update_rl, int rl_index) { struct mlx5e_channel *c = sq->channel; struct mlx5e_priv *priv = c->priv; @@ -761,6 +777,10 @@ static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state) MLX5_SET(modify_sq_in, in, sq_state, curr_state); MLX5_SET(sqc, sqc, state, next_state); + if (update_rl && next_state == MLX5_SQC_STATE_RDY) { + MLX5_SET64(modify_sq_in, in, modify_bitmask, 1); + MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index); + } err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen); @@ -776,6 +796,8 @@ static void mlx5e_disable_sq(struct mlx5e_sq *sq) struct mlx5_core_dev *mdev = priv->mdev; mlx5_core_destroy_sq(mdev, sq->sqn); + if (sq->rate_limit) + mlx5_rl_remove_rate(mdev, sq->rate_limit); } static int mlx5e_open_sq(struct mlx5e_channel *c, @@ -793,12 +815,12 @@ static int mlx5e_open_sq(struct mlx5e_channel *c, if (err) goto err_destroy_sq; - err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY); + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY, + false, 0); if (err) goto err_disable_sq; if (sq->txq) { - set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); netdev_tx_reset_queue(sq->txq); netif_tx_start_queue(sq->txq); } @@ -822,38 +844,20 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq) static void mlx5e_close_sq(struct mlx5e_sq *sq) { - int tout = 0; - int err; + set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state); + /* prevent netif_tx_wake_queue */ + napi_synchronize(&sq->channel->napi); if (sq->txq) { - clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); - /* prevent netif_tx_wake_queue */ - napi_synchronize(&sq->channel->napi); netif_tx_disable_queue(sq->txq); - /* ensure hw is notified of all pending wqes */ + /* last doorbell out, godspeed .. 
*/ if (mlx5e_sq_has_room_for(sq, 1)) mlx5e_send_nop(sq, true); - - err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, - MLX5_SQC_STATE_ERR); - if (err) - set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); } - /* wait till sq is empty, unless a TX timeout occurred on this SQ */ - while (sq->cc != sq->pc && - !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) { - msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT); - if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER) - set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); - } - - /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */ - napi_synchronize(&sq->channel->napi); - - mlx5e_free_tx_descs(sq); mlx5e_disable_sq(sq); + mlx5e_free_tx_descs(sq); mlx5e_destroy_sq(sq); } @@ -891,7 +895,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c, mcq->comp = mlx5e_completion_event; mcq->event = mlx5e_cq_error_event; mcq->irqn = irqn; - mcq->uar = &priv->cq_uar; + mcq->uar = &mdev->mlx5e_res.cq_uar; for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); @@ -938,6 +942,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); + MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); MLX5_SET(cqc, cqc, c_eqn, eqn); MLX5_SET(cqc, cqc, uar_page, mcq->uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - @@ -967,8 +972,7 @@ static void mlx5e_disable_cq(struct mlx5e_cq *cq) static int mlx5e_open_cq(struct mlx5e_channel *c, struct mlx5e_cq_param *param, struct mlx5e_cq *cq, - u16 moderation_usecs, - u16 moderation_frames) + struct mlx5e_cq_moder moderation) { int err; struct mlx5e_priv *priv = c->priv; @@ -984,8 +988,8 @@ static int mlx5e_open_cq(struct mlx5e_channel *c, if (MLX5_CAP_GEN(mdev, cq_moderation)) mlx5_core_modify_cq_moderation(mdev, &cq->mcq, - moderation_usecs, - moderation_frames); + moderation.usec, + moderation.pkts); return 0; err_destroy_cq: @@ -1014,8 +1018,7 @@ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, for (tc = 0; tc < c->num_tc; tc++) { err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq, - priv->params.tx_cq_moderation_usec, - priv->params.tx_cq_moderation_pkts); + priv->params.tx_cq_moderation); if (err) goto err_close_tx_cqs; } @@ -1070,19 +1073,96 @@ static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix) { int i; - for (i = 0; i < MLX5E_MAX_NUM_TC; i++) + for (i = 0; i < priv->profile->max_tc; i++) priv->channeltc_to_txq_map[ix][i] = ix + i * priv->params.num_channels; } +static int mlx5e_set_sq_maxrate(struct net_device *dev, + struct mlx5e_sq *sq, u32 rate) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + u16 rl_index = 0; + int err; + + if (rate == sq->rate_limit) + /* nothing to do */ + return 0; + + if (sq->rate_limit) + /* remove current rl index to free space to next ones */ + mlx5_rl_remove_rate(mdev, sq->rate_limit); + + sq->rate_limit = 0; + + if (rate) { + err = mlx5_rl_add_rate(mdev, rate, &rl_index); + if (err) { + netdev_err(dev, "Failed configuring rate %u: %d\n", + rate, err); + return err; + } + } + + err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, + MLX5_SQC_STATE_RDY, true, rl_index); + if (err) { + netdev_err(dev, "Failed configuring rate %u: %d\n", + rate, err); + /* remove the rate from the table */ + if (rate) + mlx5_rl_remove_rate(mdev, rate); + return err; + } + + sq->rate_limit = rate; + return 0; +} + +static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) +{ + struct 
mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_sq *sq = priv->txq_to_sq_map[index]; + int err = 0; + + if (!mlx5_rl_is_supported(mdev)) { + netdev_err(dev, "Rate limiting is not supported on this device\n"); + return -EINVAL; + } + + /* rate is given in Mb/sec, HW config is in Kb/sec */ + rate = rate << 10; + + /* Check whether rate in valid range, 0 is always valid */ + if (rate && !mlx5_rl_is_in_range(mdev, rate)) { + netdev_err(dev, "TX rate %u, is not in range\n", rate); + return -ERANGE; + } + + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + err = mlx5e_set_sq_maxrate(dev, sq, rate); + if (!err) + priv->tx_rates[index] = rate; + mutex_unlock(&priv->state_lock); + + return err; +} + static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_channel_param *cparam, struct mlx5e_channel **cp) { + struct mlx5e_cq_moder icosq_cq_moder = {0, 0}; struct net_device *netdev = priv->netdev; + struct mlx5e_cq_moder rx_cq_profile; int cpu = mlx5e_get_cpu(priv, ix); struct mlx5e_channel *c; + struct mlx5e_sq *sq; int err; + int i; c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); if (!c) @@ -1093,14 +1173,19 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->cpu = cpu; c->pdev = &priv->mdev->pdev->dev; c->netdev = priv->netdev; - c->mkey_be = cpu_to_be32(priv->mkey.key); + c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); c->num_tc = priv->params.num_tc; + if (priv->params.rx_am_enabled) + rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); + else + rx_cq_profile = priv->params.rx_cq_moderation; + mlx5e_build_channeltc_to_txq_map(priv, ix); netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); - err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0); + err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder); if (err) goto err_napi_del; @@ -1109,8 +1194,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, goto err_close_icosq_cq; err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq, - priv->params.rx_cq_moderation_usec, - priv->params.rx_cq_moderation_pkts); + rx_cq_profile); if (err) goto err_close_tx_cqs; @@ -1124,6 +1208,16 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, if (err) goto err_close_icosq; + for (i = 0; i < priv->params.num_tc; i++) { + u32 txq_ix = priv->channeltc_to_txq_map[ix][i]; + + if (priv->tx_rates[txq_ix]) { + sq = priv->txq_to_sq_map[txq_ix]; + mlx5e_set_sq_maxrate(priv->netdev, sq, + priv->tx_rates[txq_ix]); + } + } + err = mlx5e_open_rq(c, &cparam->rq, &c->rq); if (err) goto err_close_sqs; @@ -1195,11 +1289,13 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); - MLX5_SET(wq, wq, pd, priv->pdn); + MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn); MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter); param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); param->wq.linear = 1; + + param->am_enabled = priv->params.rx_am_enabled; } static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) @@ -1218,7 +1314,7 @@ static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, void *wq = MLX5_ADDR_OF(sqc, sqc, wq); MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); - MLX5_SET(wq, wq, pd, priv->pdn); + MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn); 
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); } @@ -1233,6 +1329,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv, MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); param->max_inline = priv->params.tx_max_inline; + param->min_inline_mode = priv->params.tx_min_inline_mode; } static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, @@ -1240,7 +1337,7 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, { void *cqc = param->cqc; - MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index); + MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index); } static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, @@ -1265,6 +1362,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, } mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = priv->params.rx_cq_period_mode; } static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, @@ -1275,6 +1374,8 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; } static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, @@ -1286,6 +1387,8 @@ static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, MLX5_SET(cqc, cqc, log_cq_size, log_wq_size); mlx5e_build_common_cq_param(priv, param); + + param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; } static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, @@ -1432,7 +1535,8 @@ static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc, MLX5_SET(rqtc, rqtc, rq_num[0], rqn); } -static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn) +static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, + int ix, struct mlx5e_rqt *rqt) { struct mlx5_core_dev *mdev = priv->mdev; void *rqtc; @@ -1455,34 +1559,36 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn) else mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix); - err = mlx5_core_create_rqt(mdev, in, inlen, rqtn); + err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn); + if (!err) + rqt->enabled = true; kvfree(in); return err; } -static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, u32 rqtn) +void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt) { - mlx5_core_destroy_rqt(priv->mdev, rqtn); + rqt->enabled = false; + mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn); } -static int mlx5e_create_rqts(struct mlx5e_priv *priv) +static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv) { - int nch = mlx5e_get_max_num_channels(priv->mdev); - u32 *rqtn; + struct mlx5e_rqt *rqt = &priv->indir_rqt; + + return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt); +} + +int mlx5e_create_direct_rqts(struct mlx5e_priv *priv) +{ + struct mlx5e_rqt *rqt; int err; int ix; - /* Indirect RQT */ - rqtn = &priv->indir_rqtn; - err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqtn); - if (err) - return err; - - /* Direct RQTs */ - for (ix = 0; ix < nch; ix++) { - rqtn = &priv->direct_tir[ix].rqtn; - err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqtn); + for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) { + rqt = &priv->direct_tir[ix].rqt; + err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqt); if (err) goto err_destroy_rqts; } @@ -1491,24 +1597,11 @@ static int mlx5e_create_rqts(struct mlx5e_priv *priv) err_destroy_rqts: for (ix--; ix >= 0; ix--) - mlx5e_destroy_rqt(priv, priv->direct_tir[ix].rqtn); - - 
mlx5e_destroy_rqt(priv, priv->indir_rqtn); + mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt); return err; } -static void mlx5e_destroy_rqts(struct mlx5e_priv *priv) -{ - int nch = mlx5e_get_max_num_channels(priv->mdev); - int i; - - for (i = 0; i < nch; i++) - mlx5e_destroy_rqt(priv, priv->direct_tir[i].rqtn); - - mlx5e_destroy_rqt(priv, priv->indir_rqtn); -} - int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix) { struct mlx5_core_dev *mdev = priv->mdev; @@ -1544,10 +1637,15 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv) u32 rqtn; int ix; - rqtn = priv->indir_rqtn; - mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); + if (priv->indir_rqt.enabled) { + rqtn = priv->indir_rqt.rqtn; + mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); + } + for (ix = 0; ix < priv->params.num_channels; ix++) { - rqtn = priv->direct_tir[ix].rqtn; + if (!priv->direct_tir[ix].rqt.enabled) + continue; + rqtn = priv->direct_tir[ix].rqt.rqtn; mlx5e_redirect_rqt(priv, rqtn, 1, ix); } } @@ -1607,13 +1705,13 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) mlx5e_build_tir_ctx_lro(tirc, priv); for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - err = mlx5_core_modify_tir(mdev, priv->indir_tirn[tt], in, + err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen); if (err) goto free_in; } - for (ix = 0; ix < mlx5e_get_max_num_channels(mdev); ix++) { + for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) { err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in, inlen); if (err) @@ -1626,40 +1724,6 @@ free_in: return err; } -static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv) -{ - void *in; - int inlen; - int err; - int i; - - inlen = MLX5_ST_SZ_BYTES(modify_tir_in); - in = mlx5_vzalloc(inlen); - if (!in) - return -ENOMEM; - - MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); - - for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) { - err = mlx5_core_modify_tir(priv->mdev, priv->indir_tirn[i], in, - inlen); - if (err) - return err; - } - - for (i = 0; i < priv->params.num_channels; i++) { - err = mlx5_core_modify_tir(priv->mdev, - priv->direct_tir[i].tirn, in, - inlen); - if (err) - return err; - } - - kvfree(in); - - return 0; -} - static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu) { struct mlx5_core_dev *mdev = priv->mdev; @@ -1731,6 +1795,7 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev) int mlx5e_open_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; int num_txqs; int err; @@ -1742,10 +1807,6 @@ int mlx5e_open_locked(struct net_device *netdev) netif_set_real_num_tx_queues(netdev, num_txqs); netif_set_real_num_rx_queues(netdev, priv->params.num_channels); - err = mlx5e_set_dev_port_mtu(netdev); - if (err) - goto err_clear_state_opened_flag; - err = mlx5e_open_channels(priv); if (err) { netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n", @@ -1753,7 +1814,7 @@ int mlx5e_open_locked(struct net_device *netdev) goto err_clear_state_opened_flag; } - err = mlx5e_refresh_tirs_self_loopback_enable(priv); + err = mlx5e_refresh_tirs_self_loopback_enable(priv->mdev); if (err) { netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n", __func__, err); @@ -1766,9 +1827,14 @@ int mlx5e_open_locked(struct net_device *netdev) #ifdef CONFIG_RFS_ACCEL priv->netdev->rx_cpu_rmap = priv->mdev->rmap; #endif + if (priv->profile->update_stats) + queue_delayed_work(priv->wq, &priv->update_stats_work, 0); - 
queue_delayed_work(priv->wq, &priv->update_stats_work, 0); - + if (MLX5_CAP_GEN(mdev, vport_group_manager)) { + err = mlx5e_add_sqs_fwd_rules(priv); + if (err) + goto err_close_channels; + } return 0; err_close_channels: @@ -1778,7 +1844,7 @@ err_clear_state_opened_flag: return err; } -static int mlx5e_open(struct net_device *netdev) +int mlx5e_open(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); int err; @@ -1793,6 +1859,7 @@ static int mlx5e_open(struct net_device *netdev) int mlx5e_close_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; /* May already be CLOSED in case a previous configuration operation * (e.g RX/TX queue size change) that involves close&open failed. @@ -1802,6 +1869,9 @@ int mlx5e_close_locked(struct net_device *netdev) clear_bit(MLX5E_STATE_OPENED, &priv->state); + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + mlx5e_remove_sqs_fwd_rules(priv); + mlx5e_timestamp_cleanup(priv); netif_carrier_off(priv->netdev); mlx5e_redirect_rqts(priv); @@ -1810,7 +1880,7 @@ int mlx5e_close_locked(struct net_device *netdev) return 0; } -static int mlx5e_close(struct net_device *netdev) +int mlx5e_close(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); int err; @@ -1869,7 +1939,7 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv, mcq->comp = mlx5e_completion_event; mcq->event = mlx5e_cq_error_event; mcq->irqn = irqn; - mcq->uar = &priv->cq_uar; + mcq->uar = &mdev->mlx5e_res.cq_uar; cq->priv = priv; @@ -1935,7 +2005,7 @@ static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc) memset(in, 0, sizeof(in)); MLX5_SET(tisc, tisc, prio, tc << 1); - MLX5_SET(tisc, tisc, transport_domain, priv->tdn); + MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn); return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); } @@ -1945,12 +2015,12 @@ static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc) mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]); } -static int mlx5e_create_tises(struct mlx5e_priv *priv) +int mlx5e_create_tises(struct mlx5e_priv *priv) { int err; int tc; - for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) { + for (tc = 0; tc < priv->profile->max_tc; tc++) { err = mlx5e_create_tis(priv, tc); if (err) goto err_close_tises; @@ -1965,11 +2035,11 @@ err_close_tises: return err; } -static void mlx5e_destroy_tises(struct mlx5e_priv *priv) +void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) { int tc; - for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) + for (tc = 0; tc < priv->profile->max_tc; tc++) mlx5e_destroy_tis(priv, tc); } @@ -1978,7 +2048,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, { void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); - MLX5_SET(tirc, tirc, transport_domain, priv->tdn); + MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); #define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ MLX5_HASH_FIELD_SEL_DST_IP) @@ -1995,7 +2065,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, mlx5e_build_tir_ctx_lro(tirc, priv); MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); - MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqtn); + MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); mlx5e_build_tir_ctx_hash(tirc, priv); switch (tt) { @@ -2085,7 +2155,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, u32 rqtn) { - 
MLX5_SET(tirc, tirc, transport_domain, priv->tdn); + MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn); mlx5e_build_tir_ctx_lro(tirc, priv); @@ -2094,15 +2164,13 @@ static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); } -static int mlx5e_create_tirs(struct mlx5e_priv *priv) +static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv) { - int nch = mlx5e_get_max_num_channels(priv->mdev); + struct mlx5e_tir *tir; void *tirc; int inlen; - u32 *tirn; int err; u32 *in; - int ix; int tt; inlen = MLX5_ST_SZ_BYTES(create_tir_in); @@ -2110,25 +2178,51 @@ static int mlx5e_create_tirs(struct mlx5e_priv *priv) if (!in) return -ENOMEM; - /* indirect tirs */ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { memset(in, 0, inlen); - tirn = &priv->indir_tirn[tt]; + tir = &priv->indir_tir[tt]; tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); mlx5e_build_indir_tir_ctx(priv, tirc, tt); - err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn); + err = mlx5e_create_tir(priv->mdev, tir, in, inlen); if (err) goto err_destroy_tirs; } - /* direct tirs */ + kvfree(in); + + return 0; + +err_destroy_tirs: + for (tt--; tt >= 0; tt--) + mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]); + + kvfree(in); + + return err; +} + +int mlx5e_create_direct_tirs(struct mlx5e_priv *priv) +{ + int nch = priv->profile->max_nch(priv->mdev); + struct mlx5e_tir *tir; + void *tirc; + int inlen; + int err; + u32 *in; + int ix; + + inlen = MLX5_ST_SZ_BYTES(create_tir_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + for (ix = 0; ix < nch; ix++) { memset(in, 0, inlen); - tirn = &priv->direct_tir[ix].tirn; + tir = &priv->direct_tir[ix]; tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); mlx5e_build_direct_tir_ctx(priv, tirc, - priv->direct_tir[ix].rqtn); - err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn); + priv->direct_tir[ix].rqt.rqtn); + err = mlx5e_create_tir(priv->mdev, tir, in, inlen); if (err) goto err_destroy_ch_tirs; } @@ -2139,27 +2233,28 @@ static int mlx5e_create_tirs(struct mlx5e_priv *priv) err_destroy_ch_tirs: for (ix--; ix >= 0; ix--) - mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[ix].tirn); - -err_destroy_tirs: - for (tt--; tt >= 0; tt--) - mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[tt]); + mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]); kvfree(in); return err; } -static void mlx5e_destroy_tirs(struct mlx5e_priv *priv) +static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) { - int nch = mlx5e_get_max_num_channels(priv->mdev); int i; - for (i = 0; i < nch; i++) - mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[i].tirn); - for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) - mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[i]); + mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); +} + +void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv) +{ + int nch = priv->profile->max_nch(priv->mdev); + int i; + + for (i = 0; i < nch; i++) + mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]); } int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd) @@ -2233,7 +2328,7 @@ mqprio: return mlx5e_setup_tc(dev, tc->tc); } -static struct rtnl_link_stats64 * +struct rtnl_link_stats64 * mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -2455,6 +2550,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) u16 max_mtu; u16 min_mtu; int err = 0; + bool reset; mlx5_query_port_max_mtu(mdev, &max_mtu, 1); @@ 
-2470,13 +2566,18 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) mutex_lock(&priv->state_lock); + reset = !priv->params.lro_en && + (priv->params.rq_wq_type != + MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - if (was_opened) + if (was_opened && reset) mlx5e_close_locked(netdev); netdev->mtu = new_mtu; + mlx5e_set_dev_port_mtu(netdev); - if (was_opened) + if (was_opened && reset) err = mlx5e_open_locked(netdev); mutex_unlock(&priv->state_lock); @@ -2585,25 +2686,31 @@ static int mlx5e_get_vf_stats(struct net_device *dev, } static void mlx5e_add_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev); + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + if (!mlx5e_vxlan_allowed(priv->mdev)) return; - mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1); + mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1); } static void mlx5e_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev); + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + if (!mlx5e_vxlan_allowed(priv->mdev)) return; - mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0); + mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0); } static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv, @@ -2670,7 +2777,7 @@ static void mlx5e_tx_timeout(struct net_device *dev) if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i))) continue; sched_work = true; - set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state); + set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state); netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n", i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc); } @@ -2693,6 +2800,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = { .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, + .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif @@ -2713,8 +2821,9 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, - .ndo_add_vxlan_port = mlx5e_add_vxlan_port, - .ndo_del_vxlan_port = mlx5e_del_vxlan_port, + .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, + .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, + .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, .ndo_features_check = mlx5e_features_check, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, @@ -2844,13 +2953,48 @@ static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw) (pci_bw < 40000) && (pci_bw < link_speed)); } -static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, - struct net_device *netdev, - int num_channels) +void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +{ + params->rx_cq_period_mode = cq_period_mode; + + params->rx_cq_moderation.pkts = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; + params->rx_cq_moderation.usec = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; + + if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) + params->rx_cq_moderation.usec = + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; +} + +static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev, + u8 *min_inline_mode) +{ + switch 
(MLX5_CAP_ETH(mdev, wqe_inline_mode)) { + case MLX5E_INLINE_MODE_L2: + *min_inline_mode = MLX5_INLINE_MODE_L2; + break; + case MLX5E_INLINE_MODE_VPORT_CONTEXT: + mlx5_query_nic_vport_min_inline(mdev, + min_inline_mode); + break; + case MLX5_INLINE_MODE_NOT_REQUIRED: + *min_inline_mode = MLX5_INLINE_MODE_NONE; + break; + } +} + +static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) { struct mlx5e_priv *priv = netdev_priv(netdev); u32 link_speed = 0; u32 pci_bw = 0; + u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? + MLX5_CQ_PERIOD_MODE_START_FROM_CQE : + MLX5_CQ_PERIOD_MODE_START_FROM_EQE; priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; @@ -2896,15 +3040,16 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, BIT(priv->params.log_rq_size)); - priv->params.rx_cq_moderation_usec = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; - priv->params.rx_cq_moderation_pkts = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; - priv->params.tx_cq_moderation_usec = + + priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); + mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode); + + priv->params.tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; - priv->params.tx_cq_moderation_pkts = + priv->params.tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); + mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode); priv->params.num_tc = 1; priv->params.rss_hfunc = ETH_RSS_HASH_XOR; @@ -2912,14 +3057,20 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, sizeof(priv->params.toeplitz_hash_key)); mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt, - MLX5E_INDIR_RQT_SIZE, num_channels); + MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev)); priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + /* Initialize pflags */ + MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, + priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); + priv->mdev = mdev; priv->netdev = netdev; - priv->params.num_channels = num_channels; + priv->params.num_channels = profile->max_nch(mdev); + priv->profile = profile; + priv->ppriv = ppriv; #ifdef CONFIG_MLX5_CORE_EN_DCB mlx5e_ets_init(priv); @@ -2945,7 +3096,11 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) } } -static void mlx5e_build_netdev(struct net_device *netdev) +static const struct switchdev_ops mlx5e_switchdev_ops = { + .switchdev_port_attr_get = mlx5e_attr_get, +}; + +static void mlx5e_build_nic_netdev(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; @@ -3026,31 +3181,11 @@ static void mlx5e_build_netdev(struct net_device *netdev) netdev->priv_flags |= IFF_UNICAST_FLT; mlx5e_set_netdev_dev_addr(netdev); -} -static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, - struct mlx5_core_mkey *mkey) -{ - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5_create_mkey_mbox_in *in; - int err; - - in = mlx5_vzalloc(sizeof(*in)); - if (!in) - return -ENOMEM; - - in->seg.flags = MLX5_PERM_LOCAL_WRITE | - MLX5_PERM_LOCAL_READ | - MLX5_ACCESS_MODE_PA; - in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); - in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - - err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL, - 
NULL); - - kvfree(in); - - return err; +#ifdef CONFIG_NET_SWITCHDEV + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + netdev->switchdev_ops = &mlx5e_switchdev_ops; +#endif } static void mlx5e_create_q_counter(struct mlx5e_priv *priv) @@ -3079,8 +3214,8 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv) struct mlx5_create_mkey_mbox_in *in; struct mlx5_mkey_seg *mkc; int inlen = sizeof(*in); - u64 npages = - mlx5e_get_max_num_channels(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS; + u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev), + BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW)); int err; in = mlx5_vzalloc(inlen); @@ -3094,10 +3229,12 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv) MLX5_PERM_LOCAL_WRITE | MLX5_ACCESS_MODE_MTT; + npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages); + mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); - mkc->flags_pd = cpu_to_be32(priv->pdn); + mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn); mkc->len = cpu_to_be64(npages << PAGE_SHIFT); - mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages)); + mkc->xlt_oct_size = cpu_to_be32(MLX5_MTT_OCTW(npages)); mkc->log2_page_size = PAGE_SHIFT; err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL, @@ -3108,160 +3245,236 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv) return err; } -static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) +static void mlx5e_nic_init(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) { - struct net_device *netdev; - struct mlx5e_priv *priv; - int nch = mlx5e_get_max_num_channels(mdev); - int err; - - if (mlx5e_check_required_hca_cap(mdev)) - return NULL; + struct mlx5e_priv *priv = netdev_priv(netdev); - netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), - nch * MLX5E_MAX_NUM_TC, - nch); - if (!netdev) { - mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); - return NULL; - } + mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv); + mlx5e_build_nic_netdev(netdev); + mlx5e_vxlan_init(priv); +} - mlx5e_build_netdev_priv(mdev, netdev, nch); - mlx5e_build_netdev(netdev); +static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_eswitch *esw = mdev->priv.eswitch; - netif_carrier_off(netdev); + mlx5e_vxlan_cleanup(priv); - priv = netdev_priv(netdev); + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + mlx5_eswitch_unregister_vport_rep(esw, 0); +} - priv->wq = create_singlethread_workqueue("mlx5e"); - if (!priv->wq) - goto err_free_netdev; +static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + int err; + int i; - err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false); + err = mlx5e_create_indirect_rqts(priv); if (err) { - mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); - goto err_destroy_wq; + mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err); + return err; } - err = mlx5_core_alloc_pd(mdev, &priv->pdn); + err = mlx5e_create_direct_rqts(priv); if (err) { - mlx5_core_err(mdev, "alloc pd failed, %d\n", err); - goto err_unmap_free_uar; + mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err); + goto err_destroy_indirect_rqts; } - err = mlx5_core_alloc_transport_domain(mdev, &priv->tdn); + err = mlx5e_create_indirect_tirs(priv); if (err) { - mlx5_core_err(mdev, "alloc td failed, %d\n", err); - goto err_dealloc_pd; + mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err); + goto err_destroy_direct_rqts; } - err = mlx5e_create_mkey(priv, 
priv->pdn, &priv->mkey); + err = mlx5e_create_direct_tirs(priv); if (err) { - mlx5_core_err(mdev, "create mkey failed, %d\n", err); - goto err_dealloc_transport_domain; + mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err); + goto err_destroy_indirect_tirs; } - err = mlx5e_create_umr_mkey(priv); + err = mlx5e_create_flow_steering(priv); if (err) { - mlx5_core_err(mdev, "create umr mkey failed, %d\n", err); - goto err_destroy_mkey; + mlx5_core_warn(mdev, "create flow steering failed, %d\n", err); + goto err_destroy_direct_tirs; } + err = mlx5e_tc_init(priv); + if (err) + goto err_destroy_flow_steering; + + return 0; + +err_destroy_flow_steering: + mlx5e_destroy_flow_steering(priv); +err_destroy_direct_tirs: + mlx5e_destroy_direct_tirs(priv); +err_destroy_indirect_tirs: + mlx5e_destroy_indirect_tirs(priv); +err_destroy_direct_rqts: + for (i = 0; i < priv->profile->max_nch(mdev); i++) + mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); +err_destroy_indirect_rqts: + mlx5e_destroy_rqt(priv, &priv->indir_rqt); + return err; +} + +static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) +{ + int i; + + mlx5e_tc_cleanup(priv); + mlx5e_destroy_flow_steering(priv); + mlx5e_destroy_direct_tirs(priv); + mlx5e_destroy_indirect_tirs(priv); + for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) + mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); + mlx5e_destroy_rqt(priv, &priv->indir_rqt); +} + +static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) +{ + int err; + err = mlx5e_create_tises(priv); if (err) { - mlx5_core_warn(mdev, "create tises failed, %d\n", err); - goto err_destroy_umr_mkey; + mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err); + return err; } - err = mlx5e_open_drop_rq(priv); - if (err) { - mlx5_core_err(mdev, "open drop rq failed, %d\n", err); - goto err_destroy_tises; +#ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets); +#endif + return 0; +} + +static void mlx5e_nic_enable(struct mlx5e_priv *priv) +{ + struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_eswitch *esw = mdev->priv.eswitch; + struct mlx5_eswitch_rep rep; + + if (mlx5e_vxlan_allowed(mdev)) { + rtnl_lock(); + udp_tunnel_get_rx_info(netdev); + rtnl_unlock(); } - err = mlx5e_create_rqts(priv); - if (err) { - mlx5_core_warn(mdev, "create rqts failed, %d\n", err); - goto err_close_drop_rq; + mlx5e_enable_async_events(priv); + queue_work(priv->wq, &priv->set_rx_mode_work); + + if (MLX5_CAP_GEN(mdev, vport_group_manager)) { + mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id); + rep.load = mlx5e_nic_rep_load; + rep.unload = mlx5e_nic_rep_unload; + rep.vport = 0; + rep.priv_data = priv; + mlx5_eswitch_register_vport_rep(esw, &rep); + } +} + +static void mlx5e_nic_disable(struct mlx5e_priv *priv) +{ + queue_work(priv->wq, &priv->set_rx_mode_work); + mlx5e_disable_async_events(priv); +} + +static const struct mlx5e_profile mlx5e_nic_profile = { + .init = mlx5e_nic_init, + .cleanup = mlx5e_nic_cleanup, + .init_rx = mlx5e_init_nic_rx, + .cleanup_rx = mlx5e_cleanup_nic_rx, + .init_tx = mlx5e_init_nic_tx, + .cleanup_tx = mlx5e_cleanup_nic_tx, + .enable = mlx5e_nic_enable, + .disable = mlx5e_nic_disable, + .update_stats = mlx5e_update_stats, + .max_nch = mlx5e_get_max_num_channels, + .max_tc = MLX5E_MAX_NUM_TC, +}; + +void *mlx5e_create_netdev(struct mlx5_core_dev *mdev, + const struct mlx5e_profile *profile, void *ppriv) +{ + struct net_device *netdev; + struct mlx5e_priv *priv; + int nch = profile->max_nch(mdev); + int err; 
+ + netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), + nch * profile->max_tc, + nch); + if (!netdev) { + mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); + return NULL; } - err = mlx5e_create_tirs(priv); + profile->init(mdev, netdev, profile, ppriv); + + netif_carrier_off(netdev); + + priv = netdev_priv(netdev); + + priv->wq = create_singlethread_workqueue("mlx5e"); + if (!priv->wq) + goto err_free_netdev; + + err = mlx5e_create_umr_mkey(priv); if (err) { - mlx5_core_warn(mdev, "create tirs failed, %d\n", err); - goto err_destroy_rqts; + mlx5_core_err(mdev, "create umr mkey failed, %d\n", err); + goto err_destroy_wq; } - err = mlx5e_create_flow_steering(priv); + err = profile->init_tx(priv); + if (err) + goto err_destroy_umr_mkey; + + err = mlx5e_open_drop_rq(priv); if (err) { - mlx5_core_warn(mdev, "create flow steering failed, %d\n", err); - goto err_destroy_tirs; + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); + goto err_cleanup_tx; } + err = profile->init_rx(priv); + if (err) + goto err_close_drop_rq; + mlx5e_create_q_counter(priv); mlx5e_init_l2_addr(priv); - mlx5e_vxlan_init(priv); - - err = mlx5e_tc_init(priv); - if (err) - goto err_dealloc_q_counters; - -#ifdef CONFIG_MLX5_CORE_EN_DCB - mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets); -#endif + mlx5e_set_dev_port_mtu(netdev); err = register_netdev(netdev); if (err) { mlx5_core_err(mdev, "register_netdev failed, %d\n", err); - goto err_tc_cleanup; + goto err_dealloc_q_counters; } - if (mlx5e_vxlan_allowed(mdev)) { - rtnl_lock(); - vxlan_get_rx_port(netdev); - rtnl_unlock(); - } - - mlx5e_enable_async_events(priv); - queue_work(priv->wq, &priv->set_rx_mode_work); + if (profile->enable) + profile->enable(priv); return priv; -err_tc_cleanup: - mlx5e_tc_cleanup(priv); - err_dealloc_q_counters: mlx5e_destroy_q_counter(priv); - mlx5e_destroy_flow_steering(priv); - -err_destroy_tirs: - mlx5e_destroy_tirs(priv); - -err_destroy_rqts: - mlx5e_destroy_rqts(priv); + profile->cleanup_rx(priv); err_close_drop_rq: mlx5e_close_drop_rq(priv); -err_destroy_tises: - mlx5e_destroy_tises(priv); +err_cleanup_tx: + profile->cleanup_tx(priv); err_destroy_umr_mkey: mlx5_core_destroy_mkey(mdev, &priv->umr_mkey); -err_destroy_mkey: - mlx5_core_destroy_mkey(mdev, &priv->mkey); - -err_dealloc_transport_domain: - mlx5_core_dealloc_transport_domain(mdev, priv->tdn); - -err_dealloc_pd: - mlx5_core_dealloc_pd(mdev, priv->pdn); - -err_unmap_free_uar: - mlx5_unmap_free_uar(mdev, &priv->cq_uar); - err_destroy_wq: destroy_workqueue(priv->wq); @@ -3271,15 +3484,63 @@ err_free_netdev: return NULL; } -static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) +static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev) { - struct mlx5e_priv *priv = vpriv; + struct mlx5_eswitch *esw = mdev->priv.eswitch; + int total_vfs = MLX5_TOTAL_VPORTS(mdev); + int vport; + u8 mac[ETH_ALEN]; + + if (!MLX5_CAP_GEN(mdev, vport_group_manager)) + return; + + mlx5_query_nic_vport_mac_address(mdev, 0, mac); + + for (vport = 1; vport < total_vfs; vport++) { + struct mlx5_eswitch_rep rep; + + rep.load = mlx5e_vport_rep_load; + rep.unload = mlx5e_vport_rep_unload; + rep.vport = vport; + ether_addr_copy(rep.hw_id, mac); + mlx5_eswitch_register_vport_rep(esw, &rep); + } +} + +static void *mlx5e_add(struct mlx5_core_dev *mdev) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + void *ppriv = NULL; + void *ret; + + if (mlx5e_check_required_hca_cap(mdev)) + return NULL; + + if (mlx5e_create_mdev_resources(mdev)) + return NULL; + + 
mlx5e_register_vport_rep(mdev); + + if (MLX5_CAP_GEN(mdev, vport_group_manager)) + ppriv = &esw->offloads.vport_reps[0]; + + ret = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv); + if (!ret) { + mlx5e_destroy_mdev_resources(mdev); + return NULL; + } + return ret; +} + +void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv) +{ + const struct mlx5e_profile *profile = priv->profile; struct net_device *netdev = priv->netdev; set_bit(MLX5E_STATE_DESTROYING, &priv->state); + if (profile->disable) + profile->disable(priv); - queue_work(priv->wq, &priv->set_rx_mode_work); - mlx5e_disable_async_events(priv); flush_workqueue(priv->wq); if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { netif_device_detach(netdev); @@ -3288,26 +3549,35 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) unregister_netdev(netdev); } - mlx5e_tc_cleanup(priv); - mlx5e_vxlan_cleanup(priv); mlx5e_destroy_q_counter(priv); - mlx5e_destroy_flow_steering(priv); - mlx5e_destroy_tirs(priv); - mlx5e_destroy_rqts(priv); + profile->cleanup_rx(priv); mlx5e_close_drop_rq(priv); - mlx5e_destroy_tises(priv); + profile->cleanup_tx(priv); mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey); - mlx5_core_destroy_mkey(priv->mdev, &priv->mkey); - mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn); - mlx5_core_dealloc_pd(priv->mdev, priv->pdn); - mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); cancel_delayed_work_sync(&priv->update_stats_work); destroy_workqueue(priv->wq); + if (profile->cleanup) + profile->cleanup(priv); if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) free_netdev(netdev); } +static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + int total_vfs = MLX5_TOTAL_VPORTS(mdev); + struct mlx5e_priv *priv = vpriv; + int vport; + + mlx5e_destroy_netdev(mdev, priv); + + for (vport = 1; vport < total_vfs; vport++) + mlx5_eswitch_unregister_vport_rep(esw, vport); + + mlx5e_destroy_mdev_resources(mdev); +} + static void *mlx5e_get_netdev(void *vpriv) { struct mlx5e_priv *priv = vpriv; @@ -3316,8 +3586,8 @@ static void *mlx5e_get_netdev(void *vpriv) } static struct mlx5_interface mlx5e_interface = { - .add = mlx5e_create_netdev, - .remove = mlx5e_destroy_netdev, + .add = mlx5e_add, + .remove = mlx5e_remove, .event = mlx5e_async_event, .protocol = MLX5_INTERFACE_PROTOCOL_ETH, .get_dev = mlx5e_get_netdev, @@ -3325,6 +3595,7 @@ static struct mlx5_interface mlx5e_interface = { void mlx5e_init(void) { + mlx5e_build_ptys2ethtool_map(); mlx5_register_interface(&mlx5e_interface); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c new file mode 100644 index 000000000..134de4a11 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -0,0 +1,431 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <generated/utsrelease.h> +#include <linux/mlx5/fs.h> +#include <net/switchdev.h> +#include <net/pkt_cls.h> + +#include "eswitch.h" +#include "en.h" +#include "en_tc.h" + +static const char mlx5e_rep_driver_name[] = "mlx5e_rep"; + +static void mlx5e_rep_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, mlx5e_rep_driver_name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); +} + +static const struct counter_desc sw_rep_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) }, +}; + +#define NUM_VPORT_REP_COUNTERS ARRAY_SIZE(sw_rep_stats_desc) + +static void mlx5e_rep_get_strings(struct net_device *dev, + u32 stringset, uint8_t *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++) + strcpy(data + (i * ETH_GSTRING_LEN), + sw_rep_stats_desc[i].format); + break; + } +} + +static void mlx5e_update_sw_rep_counters(struct mlx5e_priv *priv) +{ + struct mlx5e_sw_stats *s = &priv->stats.sw; + struct mlx5e_rq_stats *rq_stats; + struct mlx5e_sq_stats *sq_stats; + int i, j; + + memset(s, 0, sizeof(*s)); + for (i = 0; i < priv->params.num_channels; i++) { + rq_stats = &priv->channel[i]->rq.stats; + + s->rx_packets += rq_stats->packets; + s->rx_bytes += rq_stats->bytes; + + for (j = 0; j < priv->params.num_tc; j++) { + sq_stats = &priv->channel[i]->sq[j].stats; + + s->tx_packets += sq_stats->packets; + s->tx_bytes += sq_stats->bytes; + } + } +} + +static void mlx5e_rep_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int i; + + if (!data) + return; + + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_update_sw_rep_counters(priv); + mutex_unlock(&priv->state_lock); + + for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++) + data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, + sw_rep_stats_desc, i); +} + +static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return NUM_VPORT_REP_COUNTERS; + default: + return -EOPNOTSUPP; + } +} + +static const struct ethtool_ops mlx5e_rep_ethtool_ops = { + .get_drvinfo = mlx5e_rep_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = mlx5e_rep_get_strings, + .get_sset_count = mlx5e_rep_get_sset_count, + .get_ethtool_stats = mlx5e_rep_get_ethtool_stats, +}; + +int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_eswitch_rep *rep = 
priv->ppriv; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + + if (esw->mode == SRIOV_NONE) + return -EOPNOTSUPP; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: + attr->u.ppid.id_len = ETH_ALEN; + ether_addr_copy(attr->u.ppid.id, rep->hw_id); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) + +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_eswitch_rep *rep = priv->ppriv; + struct mlx5e_channel *c; + int n, tc, err, num_sqs = 0; + u16 *sqs; + + sqs = kcalloc(priv->params.num_channels * priv->params.num_tc, sizeof(u16), GFP_KERNEL); + if (!sqs) + return -ENOMEM; + + for (n = 0; n < priv->params.num_channels; n++) { + c = priv->channel[n]; + for (tc = 0; tc < c->num_tc; tc++) + sqs[num_sqs++] = c->sq[tc].sqn; + } + + err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs); + + kfree(sqs); + return err; +} + +int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_priv *priv = rep->priv_data; + + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + return mlx5e_add_sqs_fwd_rules(priv); + return 0; +} + +void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_eswitch_rep *rep = priv->ppriv; + + mlx5_eswitch_sqs2vport_stop(esw, rep); +} + +void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_priv *priv = rep->priv_data; + + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_remove_sqs_fwd_rules(priv); + + /* clean (and re-init) existing uplink offloaded TC rules */ + mlx5e_tc_cleanup(priv); + mlx5e_tc_init(priv); +} + +static int mlx5e_rep_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_eswitch_rep *rep = priv->ppriv; + int ret; + + ret = snprintf(buf, len, "%d", rep->vport - 1); + if (ret >= len) + return -EOPNOTSUPP; + + return 0; +} + +static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle, + __be16 proto, struct tc_to_netdev *tc) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) + return -EOPNOTSUPP; + + switch (tc->type) { + case TC_SETUP_CLSFLOWER: + switch (tc->cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return mlx5e_configure_flower(priv, proto, tc->cls_flower); + case TC_CLSFLOWER_DESTROY: + return mlx5e_delete_flower(priv, tc->cls_flower); + case TC_CLSFLOWER_STATS: + return mlx5e_stats_flower(priv, tc->cls_flower); + } + default: + return -EOPNOTSUPP; + } +} + +static const struct switchdev_ops mlx5e_rep_switchdev_ops = { + .switchdev_port_attr_get = mlx5e_attr_get, +}; + +static const struct net_device_ops mlx5e_netdev_ops_rep = { + .ndo_open = mlx5e_open, + .ndo_stop = mlx5e_close, + .ndo_start_xmit = mlx5e_xmit, + .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name, + .ndo_setup_tc = mlx5e_rep_ndo_setup_tc, + .ndo_get_stats64 = mlx5e_get_stats, +}; + +static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE : + MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + + priv->params.log_sq_size = + MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; + priv->params.rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST; + priv->params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; + + priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, + BIT(priv->params.log_rq_size)); + + priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); + mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode); + + priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); + priv->params.num_tc = 1; + + priv->params.lro_wqe_sz = + MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + + priv->mdev = mdev; + priv->netdev = netdev; + priv->params.num_channels = profile->max_nch(mdev); + priv->profile = profile; + priv->ppriv = ppriv; + + mutex_init(&priv->state_lock); + + INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work); +} + +static void mlx5e_build_rep_netdev(struct net_device *netdev) +{ + netdev->netdev_ops = &mlx5e_netdev_ops_rep; + + netdev->watchdog_timeo = 15 * HZ; + + netdev->ethtool_ops = &mlx5e_rep_ethtool_ops; + +#ifdef CONFIG_NET_SWITCHDEV + netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; +#endif + + netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC; + netdev->hw_features |= NETIF_F_HW_TC; + + eth_hw_addr_random(netdev); +} + +static void mlx5e_init_rep(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) +{ + mlx5e_build_rep_netdev_priv(mdev, netdev, profile, ppriv); + mlx5e_build_rep_netdev(netdev); +} + +static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_eswitch_rep *rep = priv->ppriv; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_flow_rule *flow_rule; + int err; + int i; + + err = mlx5e_create_direct_rqts(priv); + if (err) { + mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err); + return err; + } + + err = mlx5e_create_direct_tirs(priv); + if (err) { + mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err); + goto err_destroy_direct_rqts; + } + + flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, + rep->vport, + priv->direct_tir[0].tirn); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + goto err_destroy_direct_tirs; + } + rep->vport_rx_rule = flow_rule; + + err = mlx5e_tc_init(priv); + if (err) + goto err_del_flow_rule; + + return 0; + +err_del_flow_rule: + mlx5_del_flow_rule(rep->vport_rx_rule); +err_destroy_direct_tirs: + mlx5e_destroy_direct_tirs(priv); +err_destroy_direct_rqts: + for (i = 0; i < priv->params.num_channels; i++) + mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); + return err; +} + +static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) +{ + struct mlx5_eswitch_rep *rep = priv->ppriv; + int i; + + mlx5e_tc_cleanup(priv); + mlx5_del_flow_rule(rep->vport_rx_rule); + mlx5e_destroy_direct_tirs(priv); + for (i = 0; i < priv->params.num_channels; i++) + mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); +} + +static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) +{ + int err; + + err = mlx5e_create_tises(priv); + if (err) { + mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err); + return err; + } + return 0; +} + +static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev) +{ +#define MLX5E_PORT_REPRESENTOR_NCH 1 + return MLX5E_PORT_REPRESENTOR_NCH; +} + +static struct mlx5e_profile mlx5e_rep_profile = { + .init = mlx5e_init_rep, + .init_rx = mlx5e_init_rep_rx, + .cleanup_rx = 
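/*
 * This profile is the heart of the representor reuse: the common mlx5e
 * netdev core is parameterized by a table of callbacks, and the rep
 * flavor only swaps the RX/TX setup and stats hooks. Conceptually
 * (sketch with invented names, not the actual mlx5e structures):
 *
 *	struct profile {
 *		void (*init)(...);
 *		int  (*init_rx)(struct priv *p);   rep: direct TIRs plus
 *		                                   the vport RX rule
 *		int  (*init_tx)(struct priv *p);   rep: TISes only
 *		int  max_tc;                       rep: 1
 *	};
 *
 * so mlx5e_create_netdev(dev, &profile, ppriv) can build either a full
 * NIC netdev or a lightweight representor from the same code path.
 */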
mlx5e_cleanup_rep_rx, + .init_tx = mlx5e_init_rep_tx, + .cleanup_tx = mlx5e_cleanup_nic_tx, + .update_stats = mlx5e_update_sw_rep_counters, + .max_nch = mlx5e_get_rep_max_num_channels, + .max_tc = 1, +}; + +int mlx5e_vport_rep_load(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep) +{ + rep->priv_data = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep); + if (!rep->priv_data) { + pr_warn("Failed to create representor for vport %d\n", + rep->vport); + return -EINVAL; + } + return 0; +} + +void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_priv *priv = rep->priv_data; + + mlx5e_destroy_netdev(esw->dev, priv); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index e41a06675..e7c969df3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -324,9 +324,9 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev, } } -static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix) +static u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix) { - return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS + + return rq->mpwqe_mtt_offset + wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8); } @@ -340,7 +340,7 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5_wqe_data_seg *dseg = &wqe->data; struct mlx5e_mpw_info *wi = &rq->wqe_info[ix]; u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS); - u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix); + u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix); memset(wqe, 0, sizeof(*wqe)); cseg->opmod_idx_opcode = @@ -353,9 +353,9 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN; ucseg->klm_octowords = - cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE)); + cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE)); ucseg->bsf_octowords = - cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset)); + cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset)); ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); dseg->lkey = sq->mkey_be; @@ -423,7 +423,7 @@ static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq, { struct mlx5e_mpw_info *wi = &rq->wqe_info[ix]; int mtt_sz = mlx5e_get_wqe_mtt_sz(); - u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT; + u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT; int i; wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) * @@ -506,6 +506,12 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq) struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state); + + if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) { + mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]); + return; + } + mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); rq->stats.mpwqe_frag++; @@ -595,26 +601,9 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) wi->free_wqe(rq, wi); } -void mlx5e_free_rx_descs(struct mlx5e_rq *rq) -{ - struct mlx5_wq_ll *wq = &rq->wq; - struct mlx5e_rx_wqe *wqe; - __be16 wqe_ix_be; - u16 wqe_ix; - - while (!mlx5_wq_ll_is_empty(wq)) { - wqe_ix_be = *wq->tail_next; - wqe_ix = be16_to_cpu(wqe_ix_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix); - rq->dealloc_wqe(rq, wqe_ix); - mlx5_wq_ll_pop(&rq->wq, wqe_ix_be, - &wqe->next.next_wqe_index); - } -} - #define RQ_CANNOT_POST(rq) \ - (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \ - 
test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) + (test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \ + test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) { @@ -924,7 +913,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); int work_done = 0; - if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state))) + if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) return 0; if (cq->decmprs_left) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c new file mode 100644 index 000000000..1fffe48a9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c @@ -0,0 +1,335 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "en.h" + +/* Adaptive moderation profiles */ +#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256 +#define MLX5E_RX_AM_DEF_PROFILE_CQE 1 +#define MLX5E_RX_AM_DEF_PROFILE_EQE 1 +#define MLX5E_PARAMS_AM_NUM_PROFILES 5 + +/* All profiles sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */ +#define MLX5_AM_EQE_PROFILES { \ + {1, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {8, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {64, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ + {256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ +} + +#define MLX5_AM_CQE_PROFILES { \ + {2, 256}, \ + {8, 128}, \ + {16, 64}, \ + {32, 64}, \ + {64, 64} \ +} + +static const struct mlx5e_cq_moder +profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = { + MLX5_AM_EQE_PROFILES, + MLX5_AM_CQE_PROFILES, +}; + +static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix) +{ + return profile[cq_period_mode][ix]; +} + +struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode) +{ + int default_profile_ix; + + if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) + default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE; + else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */ + default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE; + + return profile[rx_cq_period_mode][default_profile_ix]; +} + +/* Adaptive moderation logic */ +enum { + MLX5E_AM_START_MEASURE, + MLX5E_AM_MEASURE_IN_PROGRESS, + MLX5E_AM_APPLY_NEW_PROFILE, +}; + +enum { + MLX5E_AM_PARKING_ON_TOP, + MLX5E_AM_PARKING_TIRED, + MLX5E_AM_GOING_RIGHT, + MLX5E_AM_GOING_LEFT, +}; + +enum { + MLX5E_AM_STATS_WORSE, + MLX5E_AM_STATS_SAME, + MLX5E_AM_STATS_BETTER, +}; + +enum { + MLX5E_AM_STEPPED, + MLX5E_AM_TOO_TIRED, + MLX5E_AM_ON_EDGE, +}; + +static bool mlx5e_am_on_top(struct mlx5e_rx_am *am) +{ + switch (am->tune_state) { + case MLX5E_AM_PARKING_ON_TOP: + case MLX5E_AM_PARKING_TIRED: + WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n"); + return true; + case MLX5E_AM_GOING_RIGHT: + return (am->steps_left > 1) && (am->steps_right == 1); + default: /* MLX5E_AM_GOING_LEFT */ + return (am->steps_right > 1) && (am->steps_left == 1); + } +} + +static void mlx5e_am_turn(struct mlx5e_rx_am *am) +{ + switch (am->tune_state) { + case MLX5E_AM_PARKING_ON_TOP: + case MLX5E_AM_PARKING_TIRED: + WARN_ONCE(true, "mlx5e_am_turn: PARKING\n"); + break; + case MLX5E_AM_GOING_RIGHT: + am->tune_state = MLX5E_AM_GOING_LEFT; + am->steps_left = 0; + break; + case MLX5E_AM_GOING_LEFT: + am->tune_state = MLX5E_AM_GOING_RIGHT; + am->steps_right = 0; + break; + } +} + +static int mlx5e_am_step(struct mlx5e_rx_am *am) +{ + if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2)) + return MLX5E_AM_TOO_TIRED; + + switch (am->tune_state) { + case MLX5E_AM_PARKING_ON_TOP: + case MLX5E_AM_PARKING_TIRED: + WARN_ONCE(true, "mlx5e_am_step: PARKING\n"); + break; + case MLX5E_AM_GOING_RIGHT: + if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1)) + return MLX5E_AM_ON_EDGE; + am->profile_ix++; + am->steps_right++; + break; + case MLX5E_AM_GOING_LEFT: + if (am->profile_ix == 0) + return MLX5E_AM_ON_EDGE; + am->profile_ix--; + am->steps_left++; + break; + } + + am->tired++; + return MLX5E_AM_STEPPED; +} + +static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am) +{ + am->steps_right = 0; + am->steps_left = 0; + am->tired = 0; + am->tune_state = MLX5E_AM_PARKING_ON_TOP; +} + +static void mlx5e_am_park_tired(struct mlx5e_rx_am *am) +{ + am->steps_right = 0; + am->steps_left = 0; + 
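/*
 * Shape of the tuning walk implemented by the step/park helpers in this
 * file, as standalone pseudo-C (hypothetical names, a simplification of
 * the real state machine): the profile index moves one slot per decision,
 * reverses direction when the new stats are worse, and parks either on a
 * local optimum or after too many steps:
 *
 *	int ix = cur, dir = +1, tired = 0;
 *
 *	while (tired++ < 2 * NUM_PROFILES) {
 *		if (stats_got_worse())
 *			dir = -dir;		(mlx5e_am_turn)
 *		if (ix + dir < 0 || ix + dir >= NUM_PROFILES)
 *			break;			(park on edge/top)
 *		ix += dir;			(mlx5e_am_step)
 *	}
 */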
am->tune_state = MLX5E_AM_PARKING_TIRED; +} + +static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am) +{ + am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT : + MLX5E_AM_GOING_RIGHT; + mlx5e_am_step(am); +} + +static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, + struct mlx5e_rx_am_stats *prev) +{ + int diff; + + if (!prev->ppms) + return curr->ppms ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_SAME; + + diff = curr->ppms - prev->ppms; + if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */ + return (diff > 0) ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; + + if (!prev->epms) + return curr->epms ? MLX5E_AM_STATS_WORSE : + MLX5E_AM_STATS_SAME; + + diff = curr->epms - prev->epms; + if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */ + return (diff < 0) ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; + + return MLX5E_AM_STATS_SAME; +} + +static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats, + struct mlx5e_rx_am *am) +{ + int prev_state = am->tune_state; + int prev_ix = am->profile_ix; + int stats_res; + int step_res; + + switch (am->tune_state) { + case MLX5E_AM_PARKING_ON_TOP: + stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats); + if (stats_res != MLX5E_AM_STATS_SAME) + mlx5e_am_exit_parking(am); + break; + + case MLX5E_AM_PARKING_TIRED: + am->tired--; + if (!am->tired) + mlx5e_am_exit_parking(am); + break; + + case MLX5E_AM_GOING_RIGHT: + case MLX5E_AM_GOING_LEFT: + stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats); + if (stats_res != MLX5E_AM_STATS_BETTER) + mlx5e_am_turn(am); + + if (mlx5e_am_on_top(am)) { + mlx5e_am_park_on_top(am); + break; + } + + step_res = mlx5e_am_step(am); + switch (step_res) { + case MLX5E_AM_ON_EDGE: + mlx5e_am_park_on_top(am); + break; + case MLX5E_AM_TOO_TIRED: + mlx5e_am_park_tired(am); + break; + } + + break; + } + + if ((prev_state != MLX5E_AM_PARKING_ON_TOP) || + (am->tune_state != MLX5E_AM_PARKING_ON_TOP)) + am->prev_stats = *curr_stats; + + return am->profile_ix != prev_ix; +} + +static void mlx5e_am_sample(struct mlx5e_rq *rq, + struct mlx5e_rx_am_sample *s) +{ + s->time = ktime_get(); + s->pkt_ctr = rq->stats.packets; + s->event_ctr = rq->cq.event_ctr; +} + +#define MLX5E_AM_NEVENTS 64 + +static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, + struct mlx5e_rx_am_sample *end, + struct mlx5e_rx_am_stats *curr_stats) +{ + /* u32 holds up to 71 minutes, should be enough */ + u32 delta_us = ktime_us_delta(end->time, start->time); + unsigned int npkts = end->pkt_ctr - start->pkt_ctr; + + if (!delta_us) { + WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n"); + return; + } + + curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us; + curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us; +} + +void mlx5e_rx_am_work(struct work_struct *work) +{ + struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am, + work); + struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am); + struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix]; + + mlx5_core_modify_cq_moderation(rq->priv->mdev, &rq->cq.mcq, + cur_profile.usec, cur_profile.pkts); + + am->state = MLX5E_AM_START_MEASURE; +} + +void mlx5e_rx_am(struct mlx5e_rq *rq) +{ + struct mlx5e_rx_am *am = &rq->am; + struct mlx5e_rx_am_sample end_sample; + struct mlx5e_rx_am_stats curr_stats; + u16 nevents; + + switch (am->state) { + case MLX5E_AM_MEASURE_IN_PROGRESS: + nevents = rq->cq.event_ctr - am->start_sample.event_ctr; + if (nevents < MLX5E_AM_NEVENTS) + break; + 
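/*
 * Worked example for mlx5e_am_calc_stats() above (illustrative numbers):
 * a window of MLX5E_AM_NEVENTS = 64 interrupt events that carried 32000
 * packets over delta_us = 2000 gives
 *
 *	ppms = 32000 * USEC_PER_MSEC / 2000 = 16000 packets per msec
 *	epms =    64 * USEC_PER_MSEC / 2000 =    32 events per msec
 *
 * and mlx5e_am_stats_compare() then calls two windows "same" unless one
 * of those rates moved by more than 10%:
 *
 *	diff = curr->ppms - prev->ppms;
 *	if ((100 * abs(diff)) / prev->ppms > 10)
 *		... better or worse, depending on the sign of diff
 */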
mlx5e_am_sample(rq, &end_sample); + mlx5e_am_calc_stats(&am->start_sample, &end_sample, + &curr_stats); + if (mlx5e_am_decision(&curr_stats, am)) { + am->state = MLX5E_AM_APPLY_NEW_PROFILE; + schedule_work(&am->work); + break; + } + /* fall through */ + case MLX5E_AM_START_MEASURE: + mlx5e_am_sample(rq, &am->start_sample); + am->state = MLX5E_AM_MEASURE_IN_PROGRESS; + break; + case MLX5E_AM_APPLY_NEW_PROFILE: + break; + } +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index fcd490cc5..499487ce3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -70,6 +70,7 @@ struct mlx5e_sw_stats { u64 tx_queue_stopped; u64 tx_queue_wake; u64 tx_queue_dropped; + u64 tx_xmit_more; u64 rx_wqe_err; u64 rx_mpwqe_filler; u64 rx_mpwqe_frag; @@ -101,6 +102,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) }, @@ -151,6 +153,22 @@ static const struct counter_desc vport_stats_desc[] = { VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) }, { "tx_vport_broadcast_bytes", VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) }, + { "rx_vport_rdma_unicast_packets", + VPORT_COUNTER_OFF(received_ib_unicast.packets) }, + { "rx_vport_rdma_unicast_bytes", + VPORT_COUNTER_OFF(received_ib_unicast.octets) }, + { "tx_vport_rdma_unicast_packets", + VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) }, + { "tx_vport_rdma_unicast_bytes", + VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) }, + { "rx_vport_rdma_multicast_packets", + VPORT_COUNTER_OFF(received_ib_multicast.packets) }, + { "rx_vport_rdma_multicast_bytes", + VPORT_COUNTER_OFF(received_ib_multicast.octets) }, + { "tx_vport_rdma_multicast_packets", + VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) }, + { "tx_vport_rdma_multicast_bytes", + VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) }, }; #define PPORT_802_3_OFF(c) \ @@ -238,11 +256,12 @@ static const struct counter_desc pport_per_prio_traffic_stats_desc[] = { }; static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { - { "rx_prio%d_pause", PPORT_PER_PRIO_OFF(rx_pause) }, - { "rx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) }, - { "tx_prio%d_pause", PPORT_PER_PRIO_OFF(tx_pause) }, - { "tx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) }, - { "rx_prio%d_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, + /* %s is "global" or "prio{i}" */ + { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) }, + { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) }, + { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) }, + { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) }, + { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, }; struct mlx5e_rq_stats { @@ -281,6 +300,7 @@ struct mlx5e_sq_stats { /* commonly accessed in data path */ u64 packets; u64 bytes; + u64 xmit_more; u64 tso_packets; u64 tso_bytes; u64 tso_inner_packets; @@ -307,6 +327,7 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) }, { 
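/*
 * Checklist sketch for the tx_xmit_more counter added in this header
 * (the same three-touch pattern applies to any new software stat):
 *
 *	1. struct field:     u64 xmit_more;  in mlx5e_sq_stats
 *	2. descriptor entry: { MLX5E_DECLARE_TX_STAT(..., xmit_more) },
 *	3. datapath update:  sq->stats.xmit_more += skb->xmit_more;
 *
 * Miss any one of the three and the counter either never shows up in
 * ethtool -S or always reads zero.
 */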
MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, }; #define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 0db51cc39..22cfc4ac1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -37,8 +37,11 @@ #include <linux/mlx5/fs.h> #include <linux/mlx5/device.h> #include <linux/rhashtable.h> +#include <net/switchdev.h> +#include <net/tc_act/tc_mirred.h> #include "en.h" #include "en_tc.h" +#include "eswitch.h" struct mlx5e_tc_flow { struct rhash_head node; @@ -49,9 +52,9 @@ struct mlx5e_tc_flow { #define MLX5E_TC_TABLE_NUM_ENTRIES 1024 #define MLX5E_TC_TABLE_NUM_GROUPS 4 -static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, - u32 *match_c, u32 *match_v, - u32 action, u32 flow_tag) +static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + u32 action, u32 flow_tag) { struct mlx5_core_dev *dev = priv->mdev; struct mlx5_flow_destination dest = { 0 }; @@ -62,7 +65,7 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.ft = priv->fs.vlan.ft.t; - } else { + } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { counter = mlx5_fc_create(dev, true); if (IS_ERR(counter)) return ERR_CAST(counter); @@ -88,8 +91,8 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, table_created = true; } - rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS, - match_c, match_v, + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + rule = mlx5_add_flow_rule(priv->fs.tc.t, spec, action, flow_tag, &dest); @@ -109,6 +112,22 @@ err_create_ft: return rule; } +static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + u32 action, u32 dst_vport) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct mlx5_eswitch_rep *rep = priv->ppriv; + u32 src_vport; + + if (rep->vport) /* set source vport for the flow */ + src_vport = rep->vport; + else + src_vport = FDB_UPLINK_VPORT; + + return mlx5_eswitch_add_offloaded_rule(esw, spec, action, src_vport, dst_vport); +} + static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, struct mlx5_flow_rule *rule) { @@ -120,18 +139,19 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, mlx5_fc_destroy(priv->mdev, counter); - if (!mlx5e_tc_num_filters(priv)) { + if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { mlx5_destroy_flow_table(priv->fs.tc.t); priv->fs.tc.t = NULL; } } -static int parse_cls_flower(struct mlx5e_priv *priv, - u32 *match_c, u32 *match_v, +static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f) { - void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers); - void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers); + void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + outer_headers); + void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + outer_headers); u16 addr_type = 0; u8 ip_proto = 0; @@ -294,10 +314,11 @@ static int parse_cls_flower(struct mlx5e_priv *priv, return 0; } -static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, - u32 
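/*
 * The conversion running through en_tc.c (and eswitch.c below) replaces
 * the loose match_c/match_v u32-array pair with a single mlx5_flow_spec,
 * so the criteria-enable bits, criteria and values travel together.
 * Roughly (field names as used by this diff; the exact definition lives
 * in mlx5/fs.h):
 *
 *	struct mlx5_flow_spec {
 *		u8  match_criteria_enable;
 *		u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
 *		u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
 *	};
 *
 * allocated with mlx5_vzalloc() and released with kvfree(), which falls
 * back to vmalloc when the large match parameters can't be kmalloc'ed.
 */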
*action, u32 *flow_tag) +static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, + u32 *action, u32 *flow_tag) { const struct tc_action *a; + LIST_HEAD(actions); if (tc_no_actions(exts)) return -EINVAL; @@ -305,7 +326,8 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; *action = 0; - tc_for_each_action(a, exts) { + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { /* Only support a single action per rule */ if (*action) return -EINVAL; @@ -338,17 +360,68 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return 0; } +static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, + u32 *action, u32 *dest_vport) +{ + const struct tc_action *a; + LIST_HEAD(actions); + + if (tc_no_actions(exts)) + return -EINVAL; + + *action = 0; + + tcf_exts_to_list(exts, &actions); + list_for_each_entry(a, &actions, list) { + /* Only support a single action per rule */ + if (*action) + return -EINVAL; + + if (is_tcf_gact_shot(a)) { + *action = MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + continue; + } + + if (is_tcf_mirred_redirect(a)) { + int ifindex = tcf_mirred_ifindex(a); + struct net_device *out_dev; + struct mlx5e_priv *out_priv; + struct mlx5_eswitch_rep *out_rep; + + out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex); + + if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) { + pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", + priv->netdev->name, out_dev->name); + return -EINVAL; + } + + out_priv = netdev_priv(out_dev); + out_rep = out_priv->ppriv; + if (out_rep->vport == 0) + *dest_vport = FDB_UPLINK_VPORT; + else + *dest_vport = out_rep->vport; + *action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + continue; + } + + return -EINVAL; + } + return 0; +} + int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, struct tc_cls_flower_offload *f) { struct mlx5e_tc_table *tc = &priv->fs.tc; - u32 *match_c; - u32 *match_v; int err = 0; - u32 flow_tag; - u32 action; + u32 flow_tag, action, dest_vport = 0; struct mlx5e_tc_flow *flow; + struct mlx5_flow_spec *spec; struct mlx5_flow_rule *old = NULL; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, tc->ht_params); @@ -357,49 +430,53 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, else flow = kzalloc(sizeof(*flow), GFP_KERNEL); - match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); - match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); - if (!match_c || !match_v || !flow) { + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec || !flow) { err = -ENOMEM; goto err_free; } flow->cookie = f->cookie; - err = parse_cls_flower(priv, match_c, match_v, f); + err = parse_cls_flower(priv, spec, f); if (err < 0) goto err_free; - err = parse_tc_actions(priv, f->exts, &action, &flow_tag); - if (err < 0) + if (esw && esw->mode == SRIOV_OFFLOADS) { + err = parse_tc_fdb_actions(priv, f->exts, &action, &dest_vport); + if (err < 0) + goto err_free; + flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, action, dest_vport); + } else { + err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag); + if (err < 0) + goto err_free; + flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag); + } + + if (IS_ERR(flow->rule)) { + err = PTR_ERR(flow->rule); goto err_free; + } err = rhashtable_insert_fast(&tc->ht, &flow->node, tc->ht_params); if (err) - 
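/*
 * Error-path note on the rewrite in this hunk: the hardware rule is now
 * created before the rhashtable insert, so the unwind direction flips,
 * on insert failure the rule must be deleted, instead of (as in the old
 * code being removed here) deleting a hash entry when rule creation
 * failed:
 *
 *	flow->rule = ...add_fdb_flow or add_nic_flow...  may fail: err_free
 *	err = rhashtable_insert_fast(&tc->ht, ...);
 *	if (err)
 *		goto err_del_rule;	mlx5_del_flow_rule(flow->rule)
 */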
goto err_free; - - flow->rule = mlx5e_tc_add_flow(priv, match_c, match_v, action, - flow_tag); - if (IS_ERR(flow->rule)) { - err = PTR_ERR(flow->rule); - goto err_hash_del; - } + goto err_del_rule; if (old) mlx5e_tc_del_flow(priv, old); goto out; -err_hash_del: - rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params); +err_del_rule: + mlx5_del_flow_rule(flow->rule); err_free: if (!old) kfree(flow); out: - kfree(match_c); - kfree(match_v); + kvfree(spec); return err; } @@ -430,6 +507,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow; struct tc_action *a; struct mlx5_fc *counter; + LIST_HEAD(actions); u64 bytes; u64 packets; u64 lastuse; @@ -445,7 +523,8 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); - tc_for_each_action(a, f->exts) + tcf_exts_to_list(f->exts, &actions); + list_for_each_entry(a, &actions, list) tcf_action_stats_update(a, bytes, packets, lastuse); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 5740b465e..eb0e72537 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -128,6 +128,50 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, return priv->channeltc_to_txq_map[channel_ix][up]; } +static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb) +{ +#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN) + + return max(skb_network_offset(skb), MLX5E_MIN_INLINE); +} + +static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb) +{ + struct flow_keys keys; + + if (skb_transport_header_was_set(skb)) + return skb_transport_offset(skb); + else if (skb_flow_dissect_flow_keys(skb, &keys, 0)) + return keys.control.thoff; + else + return mlx5e_skb_l2_header_offset(skb); +} + +static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode, + struct sk_buff *skb) +{ + int hlen; + + switch (mode) { + case MLX5_INLINE_MODE_TCP_UDP: + hlen = eth_get_headlen(skb->data, skb_headlen(skb)); + if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb)) + hlen += VLAN_HLEN; + return hlen; + case MLX5_INLINE_MODE_IP: + /* When transport header is set to zero, it means no transport + * header. When transport header is set to 0xff's, it means + * transport header wasn't set. + */ + if (skb_transport_offset(skb)) + return mlx5e_skb_l3_header_offset(skb); + /* fall through */ + case MLX5_INLINE_MODE_L2: + default: + return mlx5e_skb_l2_header_offset(skb); + } +} + static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, struct sk_buff *skb, bool bf) { @@ -135,8 +179,6 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, * headers and occur before the data gather. 
* Therefore these headers must be copied into the WQE */ -#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN) - if (bf) { u16 ihs = skb_headlen(skb); @@ -146,8 +188,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, if (ihs <= sq->max_inline) return skb_headlen(skb); } - - return max(skb_network_offset(skb), MLX5E_MIN_INLINE); + return mlx5e_calc_min_inline(sq->min_inline_mode, skb); } static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data, @@ -315,6 +356,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) sq->stats.stopped++; } + sq->stats.xmit_more += skb->xmit_more; if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) { int bf_sz = 0; @@ -353,35 +395,6 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) return mlx5e_sq_xmit(sq, skb); } -void mlx5e_free_tx_descs(struct mlx5e_sq *sq) -{ - struct mlx5e_tx_wqe_info *wi; - struct sk_buff *skb; - u16 ci; - int i; - - while (sq->cc != sq->pc) { - ci = sq->cc & sq->wq.sz_m1; - skb = sq->skb[ci]; - wi = &sq->wqe_info[ci]; - - if (!skb) { /* nop */ - sq->cc++; - continue; - } - - for (i = 0; i < wi->num_dma; i++) { - struct mlx5e_sq_dma *dma = - mlx5e_dma_get(sq, sq->dma_fifo_cc++); - - mlx5e_tx_dma_unmap(sq->pdev, dma); - } - - dev_kfree_skb_any(skb); - sq->cc += wi->num_wqebbs; - } -} - bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) { struct mlx5e_sq *sq; @@ -393,7 +406,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) sq = container_of(cq, struct mlx5e_sq, cq); - if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state))) + if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state))) return false; npkts = 0; @@ -471,11 +484,39 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) netdev_tx_completed_queue(sq->txq, npkts, nbytes); if (netif_tx_queue_stopped(sq->txq) && - mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) && - likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) { - netif_tx_wake_queue(sq->txq); - sq->stats.wake++; + mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) { + netif_tx_wake_queue(sq->txq); + sq->stats.wake++; } return (i == MLX5E_TX_CQ_POLL_BUDGET); } + +void mlx5e_free_tx_descs(struct mlx5e_sq *sq) +{ + struct mlx5e_tx_wqe_info *wi; + struct sk_buff *skb; + u16 ci; + int i; + + while (sq->cc != sq->pc) { + ci = sq->cc & sq->wq.sz_m1; + skb = sq->skb[ci]; + wi = &sq->wqe_info[ci]; + + if (!skb) { /* nop */ + sq->cc++; + continue; + } + + for (i = 0; i < wi->num_dma; i++) { + struct mlx5e_sq_dma *dma = + mlx5e_dma_get(sq, sq->dma_fifo_cc++); + + mlx5e_tx_dma_unmap(sq->pdev, dma); + } + + dev_kfree_skb_any(skb); + sq->cc += wi->num_wqebbs; + } +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index c38781fa5..9bf33bb69 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -51,16 +51,18 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq) static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) { + struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq); struct mlx5_wq_cyc *wq; struct mlx5_cqe64 *cqe; - struct mlx5e_sq *sq; u16 sqcc; + if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state))) + return; + cqe = mlx5e_get_cqe(cq); if (likely(!cqe)) return; - sq = container_of(cq, struct mlx5e_sq, cq); wq = &sq->wq; /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), @@ -136,6 +138,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) for (i = 0; i < 
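/*
 * Context for the xmit_more accounting added in en_tx.c above: the driver
 * batches doorbells, deferring the ring until the stack signals the end
 * of a train of skbs, and the new counter records how often that deferral
 * actually happened:
 *
 *	sq->stats.xmit_more += skb->xmit_more;
 *	if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
 *		ring the SQ doorbell (with blueflame when bf_sz != 0)
 *
 * so tx_xmit_more relative to tx_packets gives a quick feel for how well
 * xmit_more batching is engaging.
 */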
c->num_tc; i++) mlx5e_cq_arm(&c->sq[i].cq); + + if (test_bit(MLX5E_RQ_STATE_AM, &c->rq.state)) + mlx5e_rx_am(&c->rq); + mlx5e_cq_arm(&c->rq.cq); mlx5e_cq_arm(&c->icosq.cq); @@ -146,6 +152,7 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq) { struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); + cq->event_ctr++; set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags); napi_schedule(cq->napi); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index aebbd6ccb..b247949df 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -40,17 +40,6 @@ #define UPLINK_VPORT 0xFFFF -#define MLX5_DEBUG_ESWITCH_MASK BIT(3) - -#define esw_info(dev, format, ...) \ - pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__) - -#define esw_warn(dev, format, ...) \ - pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__) - -#define esw_debug(dev, format, ...) \ - mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__) - enum { MLX5_ACTION_NONE = 0, MLX5_ACTION_ADD = 1, @@ -92,6 +81,9 @@ enum { MC_ADDR_CHANGE | \ PROMISC_CHANGE) +int esw_offloads_init(struct mlx5_eswitch *esw, int nvports); +void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports); + static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, u32 events_mask) { @@ -337,25 +329,23 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, MLX5_MATCH_OUTER_HEADERS); struct mlx5_flow_rule *flow_rule = NULL; struct mlx5_flow_destination dest; + struct mlx5_flow_spec *spec; void *mv_misc = NULL; void *mc_misc = NULL; u8 *dmac_v = NULL; u8 *dmac_c = NULL; - u32 *match_v; - u32 *match_c; if (rx_rule) match_header |= MLX5_MATCH_MISC_PARAMETERS; - match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); - match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); - if (!match_v || !match_c) { + + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { pr_warn("FDB: Failed to alloc match parameters\n"); - goto out; + return NULL; } - - dmac_v = MLX5_ADDR_OF(fte_match_param, match_v, + dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers.dmac_47_16); - dmac_c = MLX5_ADDR_OF(fte_match_param, match_c, + dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers.dmac_47_16); if (match_header & MLX5_MATCH_OUTER_HEADERS) { @@ -364,8 +354,10 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, } if (match_header & MLX5_MATCH_MISC_PARAMETERS) { - mv_misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters); - mc_misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters); + mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters); + mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters); MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT); MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port); } @@ -376,11 +368,9 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, esw_debug(esw->dev, "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n", dmac_v, dmac_c, vport); + spec->match_criteria_enable = match_header; flow_rule = - mlx5_add_flow_rule(esw->fdb_table.fdb, - match_header, - match_c, - match_v, + mlx5_add_flow_rule(esw->fdb_table.fdb, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 0, &dest); if (IS_ERR(flow_rule)) { @@ -389,9 +379,8 @@ __esw_fdb_set_vport_rule(struct 
mlx5_eswitch *esw, u32 vport, bool rx_rule, dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); flow_rule = NULL; } -out: - kfree(match_v); - kfree(match_c); + + kvfree(spec); return flow_rule; } @@ -428,7 +417,7 @@ esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport) return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v); } -static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) +static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_core_dev *dev = esw->dev; @@ -479,7 +468,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) esw_warn(dev, "Failed to create flow group err(%d)\n", err); goto out; } - esw->fdb_table.addr_grp = g; + esw->fdb_table.legacy.addr_grp = g; /* Allmulti group : One rule that forwards any mcast traffic */ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, @@ -494,7 +483,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); goto out; } - esw->fdb_table.allmulti_grp = g; + esw->fdb_table.legacy.allmulti_grp = g; /* Promiscuous group : * One rule that forward all unmatched traffic from previous groups @@ -511,17 +500,17 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); goto out; } - esw->fdb_table.promisc_grp = g; + esw->fdb_table.legacy.promisc_grp = g; out: if (err) { - if (!IS_ERR_OR_NULL(esw->fdb_table.allmulti_grp)) { - mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp); - esw->fdb_table.allmulti_grp = NULL; + if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) { + mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp); + esw->fdb_table.legacy.allmulti_grp = NULL; } - if (!IS_ERR_OR_NULL(esw->fdb_table.addr_grp)) { - mlx5_destroy_flow_group(esw->fdb_table.addr_grp); - esw->fdb_table.addr_grp = NULL; + if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) { + mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); + esw->fdb_table.legacy.addr_grp = NULL; } if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) { mlx5_destroy_flow_table(esw->fdb_table.fdb); @@ -533,20 +522,20 @@ out: return err; } -static void esw_destroy_fdb_table(struct mlx5_eswitch *esw) +static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw) { if (!esw->fdb_table.fdb) return; esw_debug(esw->dev, "Destroy FDB Table\n"); - mlx5_destroy_flow_group(esw->fdb_table.promisc_grp); - mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp); - mlx5_destroy_flow_group(esw->fdb_table.addr_grp); + mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp); + mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp); + mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp); mlx5_destroy_flow_table(esw->fdb_table.fdb); esw->fdb_table.fdb = NULL; - esw->fdb_table.addr_grp = NULL; - esw->fdb_table.allmulti_grp = NULL; - esw->fdb_table.promisc_grp = NULL; + esw->fdb_table.legacy.addr_grp = NULL; + esw->fdb_table.legacy.allmulti_grp = NULL; + esw->fdb_table.legacy.promisc_grp = NULL; } /* E-Switch vport UC/MC lists management */ @@ -578,7 +567,8 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) if (err) goto abort; - if (esw->fdb_table.fdb) /* SRIOV is enabled: Forward UC MAC to vport */ + /* SRIOV is enabled: Forward UC MAC to vport */ + if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY) vaddr->flow_rule = 
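/*
 * The fdb_table.legacy.* renames in this hunk mirror the eswitch.h change
 * further down: the three legacy L2 flow groups and the new offloads
 * groups are placed in a union selected by esw->mode, since only one FDB
 * flavor can be instantiated at a time:
 *
 *	union {
 *		struct legacy_fdb   { addr_grp, allmulti_grp, promisc_grp }
 *		struct offloads_fdb { fdb, send_to_vport_grp, miss_grp,
 *		                      miss_rule }
 *	};
 */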
esw_fdb_set_vport_rule(esw, mac, vport); esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n", @@ -1300,9 +1290,8 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, static int esw_vport_ingress_config(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { + struct mlx5_flow_spec *spec; u8 smac[ETH_ALEN]; - u32 *match_v; - u32 *match_c; int err = 0; u8 *smac_v; @@ -1336,9 +1325,8 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n", vport->vport, vport->vlan, vport->qos); - match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); - match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); - if (!match_v || !match_c) { + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { err = -ENOMEM; esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n", vport->vport, err); @@ -1346,22 +1334,20 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, } if (vport->vlan || vport->qos) - MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); if (vport->spoofchk) { - MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_47_16); - MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_15_0); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0); smac_v = MLX5_ADDR_OF(fte_match_param, - match_v, + spec->match_value, outer_headers.smac_47_16); ether_addr_copy(smac_v, smac); } + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; vport->ingress.allow_rule = - mlx5_add_flow_rule(vport->ingress.acl, - MLX5_MATCH_OUTER_HEADERS, - match_c, - match_v, + mlx5_add_flow_rule(vport->ingress.acl, spec, MLX5_FLOW_CONTEXT_ACTION_ALLOW, 0, NULL); if (IS_ERR(vport->ingress.allow_rule)) { @@ -1372,13 +1358,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, goto out; } - memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param)); - memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param)); + memset(spec, 0, sizeof(*spec)); vport->ingress.drop_rule = - mlx5_add_flow_rule(vport->ingress.acl, - 0, - match_c, - match_v, + mlx5_add_flow_rule(vport->ingress.acl, spec, MLX5_FLOW_CONTEXT_ACTION_DROP, 0, NULL); if (IS_ERR(vport->ingress.drop_rule)) { @@ -1392,17 +1374,14 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, out: if (err) esw_vport_cleanup_ingress_rules(esw, vport); - - kfree(match_v); - kfree(match_c); + kvfree(spec); return err; } static int esw_vport_egress_config(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { - u32 *match_v; - u32 *match_c; + struct mlx5_flow_spec *spec; int err = 0; esw_vport_cleanup_egress_rules(esw, vport); @@ -1418,9 +1397,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", vport->vport, vport->vlan, vport->qos); - match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); - match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); - if (!match_v || !match_c) { + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { err = -ENOMEM; esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n", vport->vport, err); @@ -1428,16 +1406,14 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, } /* Allowed vlan rule */ - MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag); - 
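/*
 * Shape of the egress ACL being (re)built in this function, a two-rule
 * whitelist evaluated in order:
 *
 *	rule 0:  criteria  vlan_tag == 1 && first_vid == vport->vlan
 *	         action    ALLOW	(the VST-tagged traffic)
 *	rule 1:  criteria  none (matches everything)
 *	         action    DROP		(the "star" catch-all)
 *
 * The ingress side above follows the same allow-then-drop pattern, with
 * smac spoof-check bits added to the allow rule when enabled.
 */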
MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag); - MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid); - MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->vlan); + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; vport->egress.allowed_vlan = - mlx5_add_flow_rule(vport->egress.acl, - MLX5_MATCH_OUTER_HEADERS, - match_c, - match_v, + mlx5_add_flow_rule(vport->egress.acl, spec, MLX5_FLOW_CONTEXT_ACTION_ALLOW, 0, NULL); if (IS_ERR(vport->egress.allowed_vlan)) { @@ -1449,13 +1425,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, } /* Drop others rule (star rule) */ - memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param)); - memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param)); + memset(spec, 0, sizeof(*spec)); vport->egress.drop_rule = - mlx5_add_flow_rule(vport->egress.acl, - 0, - match_c, - match_v, + mlx5_add_flow_rule(vport->egress.acl, spec, MLX5_FLOW_CONTEXT_ACTION_DROP, 0, NULL); if (IS_ERR(vport->egress.drop_rule)) { @@ -1465,8 +1437,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, vport->egress.drop_rule = NULL; } out: - kfree(match_v); - kfree(match_c); + kvfree(spec); return err; } @@ -1480,7 +1451,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); - if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */ + /* Only VFs need ACLs for VST and spoofchk filtering */ + if (vport_num && esw->mode == SRIOV_LEGACY) { esw_vport_ingress_config(esw, vport); esw_vport_egress_config(esw, vport); } @@ -1531,7 +1503,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) */ esw_vport_change_handle_locked(vport); vport->enabled_events = 0; - if (vport_num) { + if (vport_num && esw->mode == SRIOV_LEGACY) { esw_vport_disable_egress_acl(esw, vport); esw_vport_disable_ingress_acl(esw, vport); } @@ -1540,10 +1512,10 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) } /* Public E-Switch API */ -int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs) +int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { int err; - int i; + int i, enabled_events; if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) @@ -1561,16 +1533,20 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs) if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support)) esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n"); - esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs); - + esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode); + esw->mode = mode; esw_disable_vport(esw, 0); - err = esw_create_fdb_table(esw, nvfs + 1); + if (mode == SRIOV_LEGACY) + err = esw_create_legacy_fdb_table(esw, nvfs + 1); + else + err = esw_offloads_init(esw, nvfs + 1); if (err) goto abort; + enabled_events = (mode == SRIOV_LEGACY) ? 
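/*
 * Mode split wired up in mlx5_eswitch_enable_sriov(): legacy SR-IOV keeps
 * the full L2 switching behavior, while offloads mode leaves forwarding
 * to explicitly offloaded rules and so needs far fewer vport events:
 *
 *	SRIOV_LEGACY    ->  esw_create_legacy_fdb_table(),
 *	                    SRIOV_VPORT_EVENTS (UC/MC/promisc), vport ACLs
 *	SRIOV_OFFLOADS  ->  esw_offloads_init(),
 *	                    UC_ADDR_CHANGE only, no VST/spoofchk ACLs
 */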
SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE; for (i = 0; i <= nvfs; i++) - esw_enable_vport(esw, i, SRIOV_VPORT_EVENTS); + esw_enable_vport(esw, i, enabled_events); esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n", esw->enabled_vports); @@ -1578,22 +1554,25 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs) abort: esw_enable_vport(esw, 0, UC_ADDR_CHANGE); + esw->mode = SRIOV_NONE; return err; } void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) { struct esw_mc_addr *mc_promisc; + int nvports; int i; if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) return; - esw_info(esw->dev, "disable SRIOV: active vports(%d)\n", - esw->enabled_vports); + esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n", + esw->enabled_vports, esw->mode); mc_promisc = esw->mc_promisc; + nvports = esw->enabled_vports; for (i = 0; i < esw->total_vports; i++) esw_disable_vport(esw, i); @@ -1601,8 +1580,12 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) if (mc_promisc && mc_promisc->uplink_rule) mlx5_del_flow_rule(mc_promisc->uplink_rule); - esw_destroy_fdb_table(esw); + if (esw->mode == SRIOV_LEGACY) + esw_destroy_legacy_fdb_table(esw); + else if (esw->mode == SRIOV_OFFLOADS) + esw_offloads_cleanup(esw, nvports); + esw->mode = SRIOV_NONE; /* VPORT 0 (PF) must be enabled back with non-sriov configuration */ esw_enable_vport(esw, 0, UC_ADDR_CHANGE); } @@ -1660,6 +1643,14 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) goto abort; } + esw->offloads.vport_reps = + kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep), + GFP_KERNEL); + if (!esw->offloads.vport_reps) { + err = -ENOMEM; + goto abort; + } + mutex_init(&esw->state_lock); for (vport_num = 0; vport_num < total_vports; vport_num++) { @@ -1673,6 +1664,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) esw->total_vports = total_vports; esw->enabled_vports = 0; + esw->mode = SRIOV_NONE; dev->priv.eswitch = esw; esw_enable_vport(esw, 0, UC_ADDR_CHANGE); @@ -1683,6 +1675,7 @@ abort: destroy_workqueue(esw->work_queue); kfree(esw->l2_table.bitmap); kfree(esw->vports); + kfree(esw->offloads.vport_reps); kfree(esw); return err; } @@ -1700,6 +1693,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) destroy_workqueue(esw->work_queue); kfree(esw->l2_table.bitmap); kfree(esw->mc_promisc); + kfree(esw->offloads.vport_reps); kfree(esw->vports); kfree(esw); } @@ -1775,7 +1769,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, vport, err); mutex_lock(&esw->state_lock); - if (evport->enabled) + if (evport->enabled && esw->mode == SRIOV_LEGACY) err = esw_vport_ingress_config(esw, evport); mutex_unlock(&esw->state_lock); return err; @@ -1847,7 +1841,7 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, mutex_lock(&esw->state_lock); evport->vlan = vlan; evport->qos = qos; - if (evport->enabled) { + if (evport->enabled && esw->mode == SRIOV_LEGACY) { err = esw_vport_ingress_config(esw, evport); if (err) goto out; @@ -1876,10 +1870,11 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, mutex_lock(&esw->state_lock); pschk = evport->spoofchk; evport->spoofchk = spoofchk; - if (evport->enabled) + if (evport->enabled && esw->mode == SRIOV_LEGACY) { err = esw_vport_ingress_config(esw, evport); - if (err) - evport->spoofchk = pschk; + if (err) + evport->spoofchk = pschk; + } mutex_unlock(&esw->state_lock); return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h 
index fd6800256..a96140971 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -35,6 +35,7 @@ #include <linux/if_ether.h> #include <linux/if_link.h> +#include <net/devlink.h> #include <linux/mlx5/device.h> #define MLX5_MAX_UC_PER_VPORT(dev) \ @@ -46,6 +47,8 @@ #define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE)) #define MLX5_L2_ADDR_HASH(addr) (addr[5]) +#define FDB_UPLINK_VPORT 0xffff + /* L2 -mac address based- hash helpers */ struct l2addr_node { struct hlist_node hlist; @@ -134,9 +137,50 @@ struct mlx5_l2_table { struct mlx5_eswitch_fdb { void *fdb; - struct mlx5_flow_group *addr_grp; - struct mlx5_flow_group *allmulti_grp; - struct mlx5_flow_group *promisc_grp; + union { + struct legacy_fdb { + struct mlx5_flow_group *addr_grp; + struct mlx5_flow_group *allmulti_grp; + struct mlx5_flow_group *promisc_grp; + } legacy; + + struct offloads_fdb { + struct mlx5_flow_table *fdb; + struct mlx5_flow_group *send_to_vport_grp; + struct mlx5_flow_group *miss_grp; + struct mlx5_flow_rule *miss_rule; + } offloads; + }; +}; + +enum { + SRIOV_NONE, + SRIOV_LEGACY, + SRIOV_OFFLOADS +}; + +struct mlx5_esw_sq { + struct mlx5_flow_rule *send_to_vport_rule; + struct list_head list; +}; + +struct mlx5_eswitch_rep { + int (*load)(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep); + void (*unload)(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep); + u16 vport; + struct mlx5_flow_rule *vport_rx_rule; + void *priv_data; + struct list_head vport_sqs_list; + bool valid; + u8 hw_id[ETH_ALEN]; +}; + +struct mlx5_esw_offload { + struct mlx5_flow_table *ft_offloads; + struct mlx5_flow_group *vport_rx_group; + struct mlx5_eswitch_rep *vport_reps; }; struct mlx5_eswitch { @@ -153,13 +197,15 @@ struct mlx5_eswitch { */ struct mutex state_lock; struct esw_mc_addr *mc_promisc; + struct mlx5_esw_offload offloads; + int mode; }; /* E-Switch API */ int mlx5_eswitch_init(struct mlx5_core_dev *dev); void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw); void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe); -int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs); +int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode); void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw); int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, int vport, u8 mac[ETH_ALEN]); @@ -177,4 +223,36 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, int vport, struct ifla_vf_stats *vf_stats); +struct mlx5_flow_spec; + +struct mlx5_flow_rule * +mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, + struct mlx5_flow_spec *spec, + u32 action, u32 src_vport, u32 dst_vport); +struct mlx5_flow_rule * +mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn); + +int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, + u16 *sqns_array, int sqns_num); +void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep); + +int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode); +int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode); +void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep); +void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, + int vport); + +#define MLX5_DEBUG_ESWITCH_MASK BIT(3) + +#define esw_info(dev, format, ...) 
\ + pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__) + +#define esw_warn(dev, format, ...) \ + pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__) + +#define esw_debug(dev, format, ...) \ + mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__) #endif /* __MLX5_ESWITCH_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c new file mode 100644 index 000000000..7de40e6b0 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -0,0 +1,646 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/etherdevice.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/mlx5_ifc.h> +#include <linux/mlx5/vport.h> +#include <linux/mlx5/fs.h> +#include "mlx5_core.h" +#include "eswitch.h" + +enum { + FDB_FAST_PATH = 0, + FDB_SLOW_PATH +}; + +struct mlx5_flow_rule * +mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, + struct mlx5_flow_spec *spec, + u32 action, u32 src_vport, u32 dst_vport) +{ + struct mlx5_flow_destination dest = { 0 }; + struct mlx5_fc *counter = NULL; + struct mlx5_flow_rule *rule; + void *misc; + + if (esw->mode != SRIOV_OFFLOADS) + return ERR_PTR(-EOPNOTSUPP); + + if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest.vport_num = dst_vport; + action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + counter = mlx5_fc_create(esw->dev, true); + if (IS_ERR(counter)) + return ERR_CAST(counter); + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter = counter; + } + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + MLX5_SET(fte_match_set_misc, misc, source_port, src_vport); + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | + MLX5_MATCH_MISC_PARAMETERS; + + rule = mlx5_add_flow_rule((struct mlx5_flow_table *)esw->fdb_table.fdb, + spec, action, 0, &dest); + + if (IS_ERR(rule)) + mlx5_fc_destroy(esw->dev, counter); + + return rule; +} + +static struct mlx5_flow_rule * +mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn) +{ + struct mlx5_flow_destination dest; + struct mlx5_flow_rule *flow_rule; + struct mlx5_flow_spec *spec; + void *misc; + + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { + esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n"); + flow_rule = ERR_PTR(-ENOMEM); + goto out; + } + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn); + MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */ + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest.vport_num = vport; + + flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + 0, &dest); + if (IS_ERR(flow_rule)) + esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule)); +out: + kvfree(spec); + return flow_rule; +} + +void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep) +{ + struct mlx5_esw_sq *esw_sq, *tmp; + + if (esw->mode != SRIOV_OFFLOADS) + return; + + list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) { + mlx5_del_flow_rule(esw_sq->send_to_vport_rule); + list_del(&esw_sq->list); + kfree(esw_sq); + } +} + +int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, + u16 *sqns_array, int sqns_num) +{ + struct mlx5_flow_rule *flow_rule; + struct mlx5_esw_sq *esw_sq; + int vport; + int err; + int i; + + if (esw->mode != SRIOV_OFFLOADS) + return 0; + + vport = rep->vport == 0 ? 
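/*
 * Vport numbering note: in the offloads FDB the uplink is addressed as
 * FDB_UPLINK_VPORT (0xffff), while rep->vport == 0 identifies the
 * PF/uplink representor, so its send-to-vport rules must point at the
 * uplink rather than at vport 0, hence the remapping here.
 */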
+ FDB_UPLINK_VPORT : rep->vport; + + for (i = 0; i < sqns_num; i++) { + esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL); + if (!esw_sq) { + err = -ENOMEM; + goto out_err; + } + + /* Add re-inject rule to the PF/representor sqs */ + flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, + vport, + sqns_array[i]); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + kfree(esw_sq); + goto out_err; + } + esw_sq->send_to_vport_rule = flow_rule; + list_add(&esw_sq->list, &rep->vport_sqs_list); + } + return 0; + +out_err: + mlx5_eswitch_sqs2vport_stop(esw, rep); + return err; +} + +static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) +{ + struct mlx5_flow_destination dest; + struct mlx5_flow_rule *flow_rule = NULL; + struct mlx5_flow_spec *spec; + int err = 0; + + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { + esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n"); + err = -ENOMEM; + goto out; + } + + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest.vport_num = 0; + + flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + 0, &dest); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err); + goto out; + } + + esw->fdb_table.offloads.miss_rule = flow_rule; +out: + kvfree(spec); + return err; +} + +#define MAX_PF_SQ 256 +#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */ +#define ESW_OFFLOADS_NUM_GROUPS 4 + +static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *fdb = NULL; + struct mlx5_flow_group *g; + u32 *flow_group_in; + void *match_criteria; + int table_size, ix, err = 0; + + flow_group_in = mlx5_vzalloc(inlen); + if (!flow_group_in) + return -ENOMEM; + + root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); + if (!root_ns) { + esw_warn(dev, "Failed to get FDB flow namespace\n"); + goto ns_err; + } + + esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n", + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); + + fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH, + ESW_OFFLOADS_NUM_ENTRIES, + ESW_OFFLOADS_NUM_GROUPS, 0); + if (IS_ERR(fdb)) { + err = PTR_ERR(fdb); + esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err); + goto fast_fdb_err; + } + esw->fdb_table.fdb = fdb; + + table_size = nvports + MAX_PF_SQ + 1; + fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0); + if (IS_ERR(fdb)) { + err = PTR_ERR(fdb); + esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err); + goto slow_fdb_err; + } + esw->fdb_table.offloads.fdb = fdb; + + /* create send-to-vport group */ + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_MISC_PARAMETERS); + + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); + + MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); + + ix = nvports + MAX_PF_SQ; + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1); + + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err); + goto 
send_vport_err; + } + esw->fdb_table.offloads.send_to_vport_grp = g; + + /* create miss group */ + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0); + + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1); + + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create miss flow group err(%d)\n", err); + goto miss_err; + } + esw->fdb_table.offloads.miss_grp = g; + + err = esw_add_fdb_miss_rule(esw); + if (err) + goto miss_rule_err; + + return 0; + +miss_rule_err: + mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); +miss_err: + mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); +send_vport_err: + mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb); +slow_fdb_err: + mlx5_destroy_flow_table(esw->fdb_table.fdb); +fast_fdb_err: +ns_err: + kvfree(flow_group_in); + return err; +} + +static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw) +{ + if (!esw->fdb_table.fdb) + return; + + esw_debug(esw->dev, "Destroy offloads FDB Table\n"); + mlx5_del_flow_rule(esw->fdb_table.offloads.miss_rule); + mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); + mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); + + mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb); + mlx5_destroy_flow_table(esw->fdb_table.fdb); +} + +static int esw_create_offloads_table(struct mlx5_eswitch *esw) +{ + struct mlx5_flow_namespace *ns; + struct mlx5_flow_table *ft_offloads; + struct mlx5_core_dev *dev = esw->dev; + int err = 0; + + ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS); + if (!ns) { + esw_warn(esw->dev, "Failed to get offloads flow namespace\n"); + return -ENOMEM; + } + + ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0); + if (IS_ERR(ft_offloads)) { + err = PTR_ERR(ft_offloads); + esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err); + return err; + } + + esw->offloads.ft_offloads = ft_offloads; + return 0; +} + +static void esw_destroy_offloads_table(struct mlx5_eswitch *esw) +{ + struct mlx5_esw_offload *offloads = &esw->offloads; + + mlx5_destroy_flow_table(offloads->ft_offloads); +} + +static int esw_create_vport_rx_group(struct mlx5_eswitch *esw) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *g; + struct mlx5_priv *priv = &esw->dev->priv; + u32 *flow_group_in; + void *match_criteria, *misc; + int err = 0; + int nvports = priv->sriov.num_vfs + 2; + + flow_group_in = mlx5_vzalloc(inlen); + if (!flow_group_in) + return -ENOMEM; + + /* create vport rx group */ + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_MISC_PARAMETERS); + + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); + misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1); + + g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in); + + if (IS_ERR(g)) { + err = PTR_ERR(g); + mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err); + goto out; + } + + esw->offloads.vport_rx_group = g; +out: + kfree(flow_group_in); + return err; +} + +static void 
esw_destroy_vport_rx_group(struct mlx5_eswitch *esw) +{ + mlx5_destroy_flow_group(esw->offloads.vport_rx_group); +} + +struct mlx5_flow_rule * +mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn) +{ + struct mlx5_flow_destination dest; + struct mlx5_flow_rule *flow_rule; + struct mlx5_flow_spec *spec; + void *misc; + + spec = mlx5_vzalloc(sizeof(*spec)); + if (!spec) { + esw_warn(esw->dev, "Failed to alloc match parameters\n"); + flow_rule = ERR_PTR(-ENOMEM); + goto out; + } + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); + MLX5_SET(fte_match_set_misc, misc, source_port, vport); + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + dest.tir_num = tirn; + + flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, spec, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + 0, &dest); + if (IS_ERR(flow_rule)) { + esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule)); + goto out; + } + +out: + kvfree(spec); + return flow_rule; +} + +static int esw_offloads_start(struct mlx5_eswitch *esw) +{ + int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; + + if (esw->mode != SRIOV_LEGACY) { + esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n"); + return -EINVAL; + } + + mlx5_eswitch_disable_sriov(esw); + err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); + if (err) { + esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err); + err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); + if (err1) + esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err); + } + return err; +} + +int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) +{ + struct mlx5_eswitch_rep *rep; + int vport; + int err; + + err = esw_create_offloads_fdb_table(esw, nvports); + if (err) + return err; + + err = esw_create_offloads_table(esw); + if (err) + goto create_ft_err; + + err = esw_create_vport_rx_group(esw); + if (err) + goto create_fg_err; + + for (vport = 0; vport < nvports; vport++) { + rep = &esw->offloads.vport_reps[vport]; + if (!rep->valid) + continue; + + err = rep->load(esw, rep); + if (err) + goto err_reps; + } + return 0; + +err_reps: + for (vport--; vport >= 0; vport--) { + rep = &esw->offloads.vport_reps[vport]; + if (!rep->valid) + continue; + rep->unload(esw, rep); + } + esw_destroy_vport_rx_group(esw); + +create_fg_err: + esw_destroy_offloads_table(esw); + +create_ft_err: + esw_destroy_offloads_fdb_table(esw); + return err; +} + +static int esw_offloads_stop(struct mlx5_eswitch *esw) +{ + int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs; + + mlx5_eswitch_disable_sriov(esw); + err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY); + if (err) { + esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err); + err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS); + if (err1) + esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err); + } + + return err; +} + +void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports) +{ + struct mlx5_eswitch_rep *rep; + int vport; + + for (vport = 0; vport < nvports; vport++) { + rep = &esw->offloads.vport_reps[vport]; + if (!rep->valid) + continue; + rep->unload(esw, rep); + } + + esw_destroy_vport_rx_group(esw); + esw_destroy_offloads_table(esw); + 
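/* teardown runs in reverse of esw_offloads_init: reps are unloaded first, then the vport rx group and offloads table, and the two FDB tables (slow and fast path) last */ + 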
esw_destroy_offloads_fdb_table(esw); +} + +static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode) +{ + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + *mlx5_mode = SRIOV_LEGACY; + break; + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + *mlx5_mode = SRIOV_OFFLOADS; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode) +{ + switch (mlx5_mode) { + case SRIOV_LEGACY: + *mode = DEVLINK_ESWITCH_MODE_LEGACY; + break; + case SRIOV_OFFLOADS: + *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; + break; + default: + return -EINVAL; + } + + return 0; +} + +int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) +{ + struct mlx5_core_dev *dev; + u16 cur_mlx5_mode, mlx5_mode = 0; + + dev = devlink_priv(devlink); + + if (!MLX5_CAP_GEN(dev, vport_group_manager)) + return -EOPNOTSUPP; + + cur_mlx5_mode = dev->priv.eswitch->mode; + + if (cur_mlx5_mode == SRIOV_NONE) + return -EOPNOTSUPP; + + if (esw_mode_from_devlink(mode, &mlx5_mode)) + return -EINVAL; + + if (cur_mlx5_mode == mlx5_mode) + return 0; + + if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) + return esw_offloads_start(dev->priv.eswitch); + else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) + return esw_offloads_stop(dev->priv.eswitch); + else + return -EINVAL; +} + +int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct mlx5_core_dev *dev; + + dev = devlink_priv(devlink); + + if (!MLX5_CAP_GEN(dev, vport_group_manager)) + return -EOPNOTSUPP; + + if (dev->priv.eswitch->mode == SRIOV_NONE) + return -EOPNOTSUPP; + + return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); +} + +void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep) +{ + struct mlx5_esw_offload *offloads = &esw->offloads; + + memcpy(&offloads->vport_reps[rep->vport], rep, + sizeof(struct mlx5_eswitch_rep)); + + INIT_LIST_HEAD(&offloads->vport_reps[rep->vport].vport_sqs_list); + offloads->vport_reps[rep->vport].valid = true; +} + +void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, + int vport) +{ + struct mlx5_esw_offload *offloads = &esw->offloads; + struct mlx5_eswitch_rep *rep; + + rep = &offloads->vport_reps[vport]; + + if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport].enabled) + rep->unload(esw, rep); + + offloads->vport_reps[vport].valid = false; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index a5bb6b695..287ade151 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -413,3 +413,70 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, return 0; } + +struct mlx5_cmd_fc_bulk { + u16 id; + int num; + int outlen; + u32 out[0]; +}; + +struct mlx5_cmd_fc_bulk * +mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num) +{ + struct mlx5_cmd_fc_bulk *b; + int outlen = + MLX5_ST_SZ_BYTES(query_flow_counter_out) + + MLX5_ST_SZ_BYTES(traffic_counter) * num; + + b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL); + if (!b) + return NULL; + + b->id = id; + b->num = num; + b->outlen = outlen; + + return b; +} + +void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b) +{ + kfree(b); +} + +int +mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b) +{ + u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_flow_counter_in, in, opcode, + MLX5_CMD_OP_QUERY_FLOW_COUNTER); + MLX5_SET(query_flow_counter_in, in, op_mod, 0); + 
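/* a single QUERY_FLOW_COUNTER command fetches b->num contiguous counters starting at b->id + * (illustrative: a handle from mlx5_cmd_fc_bulk_alloc(dev, 0x10, 8) spans ids 0x10..0x17) */ + 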
MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id); + MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), + b->out, b->outlen); +} + +void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, + struct mlx5_cmd_fc_bulk *b, u16 id, + u64 *packets, u64 *bytes) +{ + int index = id - b->id; + void *stats; + + if (index < 0 || index >= b->num) { + mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n", + id, b->id, b->id + b->num - 1); + return; + } + + stats = MLX5_ADDR_OF(query_flow_counter_out, b->out, + flow_statistics[index]); + *packets = MLX5_GET64(traffic_counter, stats, packets); + *bytes = MLX5_GET64(traffic_counter, stats, octets); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index fc4f7b83f..158844cef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -76,4 +76,16 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id); int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id); int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, u64 *packets, u64 *bytes); + +struct mlx5_cmd_fc_bulk; + +struct mlx5_cmd_fc_bulk * +mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num); +void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b); +int +mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b); +void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, + struct mlx5_cmd_fc_bulk *b, u16 id, + u64 *packets, u64 *bytes); + #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index e912a3d25..3d6c1f65e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -67,13 +67,21 @@ #define FS_REQUIRED_CAPS(...) 
{.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \ .caps = (long[]) {__VA_ARGS__} } +#define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \ + FS_CAP(flow_table_properties_nic_receive.modify_root), \ + FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \ + FS_CAP(flow_table_properties_nic_receive.flow_table_modify)) + #define LEFTOVERS_NUM_LEVELS 1 #define LEFTOVERS_NUM_PRIOS 1 #define BY_PASS_PRIO_NUM_LEVELS 1 -#define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ +#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ LEFTOVERS_NUM_PRIOS) +#define ETHTOOL_PRIO_NUM_LEVELS 1 +#define ETHTOOL_NUM_PRIOS 11 +#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) /* Vlan, mac, ttc, aRFS */ #define KERNEL_NIC_PRIO_NUM_LEVELS 4 #define KERNEL_NIC_NUM_PRIOS 1 @@ -83,6 +91,11 @@ #define ANCHOR_NUM_LEVELS 1 #define ANCHOR_NUM_PRIOS 1 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1) + +#define OFFLOADS_MAX_FT 1 +#define OFFLOADS_NUM_PRIOS 1 +#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1) + struct node_caps { size_t arr_sz; long *caps; @@ -98,24 +111,24 @@ static struct init_tree_node { int num_levels; } root_fs = { .type = FS_TYPE_NAMESPACE, - .ar_size = 4, + .ar_size = 6, .children = (struct init_tree_node[]) { ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, - FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), - FS_CAP(flow_table_properties_nic_receive.modify_root), - FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), - FS_CAP(flow_table_properties_nic_receive.flow_table_modify)), + FS_CHAINING_CAPS, ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_NUM_LEVELS))), + ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {}, + ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))), + ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, + FS_CHAINING_CAPS, + ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS, + ETHTOOL_PRIO_NUM_LEVELS))), ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {}, ADD_NS(ADD_MULTIPLE_PRIO(1, 1), ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS, KERNEL_NIC_PRIO_NUM_LEVELS))), ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, - FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), - FS_CAP(flow_table_properties_nic_receive.modify_root), - FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), - FS_CAP(flow_table_properties_nic_receive.flow_table_modify)), + FS_CHAINING_CAPS, ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))), ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {}, ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))), @@ -1152,9 +1165,7 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest, static struct mlx5_flow_rule * _mlx5_add_flow_rule(struct mlx5_flow_table *ft, - u8 match_criteria_enable, - u32 *match_criteria, - u32 *match_value, + struct mlx5_flow_spec *spec, u32 action, u32 flow_tag, struct mlx5_flow_destination *dest) @@ -1168,22 +1179,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft, nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT); fs_for_each_fg(g, ft) if (compare_match_criteria(g->mask.match_criteria_enable, - match_criteria_enable, + spec->match_criteria_enable, g->mask.match_criteria, - match_criteria)) { - rule = add_rule_fg(g, match_value, + spec->match_criteria)) { + rule = add_rule_fg(g, spec->match_value, action, flow_tag, dest); if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC) goto unlock; } - g = create_autogroup(ft, match_criteria_enable, match_criteria); + g = create_autogroup(ft, 
spec->match_criteria_enable, + spec->match_criteria); if (IS_ERR(g)) { rule = (void *)g; goto unlock; } - rule = add_rule_fg(g, match_value, + rule = add_rule_fg(g, spec->match_value, action, flow_tag, dest); if (IS_ERR(rule)) { /* Remove assumes refcount > 0 and autogroup creates a group @@ -1207,9 +1219,7 @@ static bool fwd_next_prio_supported(struct mlx5_flow_table *ft) struct mlx5_flow_rule * mlx5_add_flow_rule(struct mlx5_flow_table *ft, - u8 match_criteria_enable, - u32 *match_criteria, - u32 *match_value, + struct mlx5_flow_spec *spec, u32 action, u32 flow_tag, struct mlx5_flow_destination *dest) @@ -1240,8 +1250,7 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft, } } - rule = _mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria, - match_value, action, flow_tag, dest); + rule = _mlx5_add_flow_rule(ft, spec, action, flow_tag, dest); if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { if (!IS_ERR_OR_NULL(rule) && @@ -1359,40 +1368,47 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg) struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type) { - struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; + struct mlx5_flow_steering *steering = dev->priv.steering; + struct mlx5_flow_root_namespace *root_ns; int prio; struct fs_prio *fs_prio; struct mlx5_flow_namespace *ns; - if (!root_ns) + if (!steering) return NULL; switch (type) { case MLX5_FLOW_NAMESPACE_BYPASS: + case MLX5_FLOW_NAMESPACE_OFFLOADS: + case MLX5_FLOW_NAMESPACE_ETHTOOL: case MLX5_FLOW_NAMESPACE_KERNEL: case MLX5_FLOW_NAMESPACE_LEFTOVERS: case MLX5_FLOW_NAMESPACE_ANCHOR: prio = type; break; case MLX5_FLOW_NAMESPACE_FDB: - if (dev->priv.fdb_root_ns) - return &dev->priv.fdb_root_ns->ns; + if (steering->fdb_root_ns) + return &steering->fdb_root_ns->ns; else return NULL; case MLX5_FLOW_NAMESPACE_ESW_EGRESS: - if (dev->priv.esw_egress_root_ns) - return &dev->priv.esw_egress_root_ns->ns; + if (steering->esw_egress_root_ns) + return &steering->esw_egress_root_ns->ns; else return NULL; case MLX5_FLOW_NAMESPACE_ESW_INGRESS: - if (dev->priv.esw_ingress_root_ns) - return &dev->priv.esw_ingress_root_ns->ns; + if (steering->esw_ingress_root_ns) + return &steering->esw_ingress_root_ns->ns; else return NULL; default: return NULL; } + root_ns = steering->root_ns; + if (!root_ns) + return NULL; + fs_prio = find_prio(&root_ns->ns, prio); if (!fs_prio) return NULL; @@ -1478,13 +1494,13 @@ static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps) return true; } -static int init_root_tree_recursive(struct mlx5_core_dev *dev, +static int init_root_tree_recursive(struct mlx5_flow_steering *steering, struct init_tree_node *init_node, struct fs_node *fs_parent_node, struct init_tree_node *init_parent_node, int prio) { - int max_ft_level = MLX5_CAP_FLOWTABLE(dev, + int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev, flow_table_properties_nic_receive. 
max_ft_level); struct mlx5_flow_namespace *fs_ns; @@ -1495,7 +1511,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev, if (init_node->type == FS_TYPE_PRIO) { if ((init_node->min_ft_level > max_ft_level) || - !has_required_caps(dev, &init_node->caps)) + !has_required_caps(steering->dev, &init_node->caps)) return 0; fs_get_obj(fs_ns, fs_parent_node); @@ -1516,7 +1532,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev, } prio = 0; for (i = 0; i < init_node->ar_size; i++) { - err = init_root_tree_recursive(dev, &init_node->children[i], + err = init_root_tree_recursive(steering, &init_node->children[i], base, init_node, prio); if (err) return err; @@ -1529,7 +1545,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev, return 0; } -static int init_root_tree(struct mlx5_core_dev *dev, +static int init_root_tree(struct mlx5_flow_steering *steering, struct init_tree_node *init_node, struct fs_node *fs_parent_node) { @@ -1539,7 +1555,7 @@ static int init_root_tree(struct mlx5_core_dev *dev, fs_get_obj(fs_ns, fs_parent_node); for (i = 0; i < init_node->ar_size; i++) { - err = init_root_tree_recursive(dev, &init_node->children[i], + err = init_root_tree_recursive(steering, &init_node->children[i], &fs_ns->node, init_node, i); if (err) @@ -1548,7 +1564,7 @@ static int init_root_tree(struct mlx5_core_dev *dev, return 0; } -static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev, +static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering *steering, enum fs_flow_table_type table_type) { @@ -1560,7 +1576,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev if (!root_ns) return NULL; - root_ns->dev = dev; + root_ns->dev = steering->dev; root_ns->table_type = table_type; ns = &root_ns->ns; @@ -1615,220 +1631,135 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns) #define ANCHOR_PRIO 0 #define ANCHOR_SIZE 1 #define ANCHOR_LEVEL 0 -static int create_anchor_flow_table(struct mlx5_core_dev - *dev) +static int create_anchor_flow_table(struct mlx5_flow_steering *steering) { struct mlx5_flow_namespace *ns = NULL; struct mlx5_flow_table *ft; - ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ANCHOR); + ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR); if (!ns) return -EINVAL; ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL); if (IS_ERR(ft)) { - mlx5_core_err(dev, "Failed to create last anchor flow table"); + mlx5_core_err(steering->dev, "Failed to create last anchor flow table"); return PTR_ERR(ft); } return 0; } -static int init_root_ns(struct mlx5_core_dev *dev) +static int init_root_ns(struct mlx5_flow_steering *steering) { - dev->priv.root_ns = create_root_ns(dev, FS_FT_NIC_RX); - if (IS_ERR_OR_NULL(dev->priv.root_ns)) + steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); + if (IS_ERR_OR_NULL(steering->root_ns)) goto cleanup; - if (init_root_tree(dev, &root_fs, &dev->priv.root_ns->ns.node)) + if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) goto cleanup; - set_prio_attrs(dev->priv.root_ns); + set_prio_attrs(steering->root_ns); - if (create_anchor_flow_table(dev)) + if (create_anchor_flow_table(steering)) goto cleanup; return 0; cleanup: - mlx5_cleanup_fs(dev); + mlx5_cleanup_fs(steering->dev); return -ENOMEM; } -static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev, - struct mlx5_flow_root_namespace *root_ns) +static void clean_tree(struct fs_node *node) { - struct fs_node *prio; 
- - if (!root_ns) - return; + if (node) { + struct fs_node *iter; + struct fs_node *temp; - if (!list_empty(&root_ns->ns.node.children)) { - prio = list_first_entry(&root_ns->ns.node.children, - struct fs_node, - list); - if (tree_remove_node(prio)) - mlx5_core_warn(dev, - "Flow steering priority wasn't destroyed, refcount > 1\n"); + list_for_each_entry_safe(iter, temp, &node->children, list) + clean_tree(iter); + tree_remove_node(node); } - if (tree_remove_node(&root_ns->ns.node)) - mlx5_core_warn(dev, - "Flow steering namespace wasn't destroyed, refcount > 1\n"); - root_ns = NULL; } -static void destroy_flow_tables(struct fs_prio *prio) +static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns) { - struct mlx5_flow_table *iter; - struct mlx5_flow_table *tmp; - - fs_for_each_ft_safe(iter, tmp, prio) - mlx5_destroy_flow_table(iter); -} - -static void cleanup_root_ns(struct mlx5_core_dev *dev) -{ - struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; - struct fs_prio *iter_prio; - - if (!MLX5_CAP_GEN(dev, nic_flow_table)) - return; - if (!root_ns) return; - /* stage 1 */ - fs_for_each_prio(iter_prio, &root_ns->ns) { - struct fs_node *node; - struct mlx5_flow_namespace *iter_ns; - - fs_for_each_ns_or_ft(node, iter_prio) { - if (node->type == FS_TYPE_FLOW_TABLE) - continue; - fs_get_obj(iter_ns, node); - while (!list_empty(&iter_ns->node.children)) { - struct fs_prio *obj_iter_prio2; - struct fs_node *iter_prio2 = - list_first_entry(&iter_ns->node.children, - struct fs_node, - list); - - fs_get_obj(obj_iter_prio2, iter_prio2); - destroy_flow_tables(obj_iter_prio2); - if (tree_remove_node(iter_prio2)) { - mlx5_core_warn(dev, - "Priority %d wasn't destroyed, refcount > 1\n", - obj_iter_prio2->prio); - return; - } - } - } - } - - /* stage 2 */ - fs_for_each_prio(iter_prio, &root_ns->ns) { - while (!list_empty(&iter_prio->node.children)) { - struct fs_node *iter_ns = - list_first_entry(&iter_prio->node.children, - struct fs_node, - list); - if (tree_remove_node(iter_ns)) { - mlx5_core_warn(dev, - "Namespace wasn't destroyed, refcount > 1\n"); - return; - } - } - } - - /* stage 3 */ - while (!list_empty(&root_ns->ns.node.children)) { - struct fs_prio *obj_prio_node; - struct fs_node *prio_node = - list_first_entry(&root_ns->ns.node.children, - struct fs_node, - list); - - fs_get_obj(obj_prio_node, prio_node); - if (tree_remove_node(prio_node)) { - mlx5_core_warn(dev, - "Priority %d wasn't destroyed, refcount > 1\n", - obj_prio_node->prio); - return; - } - } - - if (tree_remove_node(&root_ns->ns.node)) { - mlx5_core_warn(dev, - "root namespace wasn't destroyed, refcount > 1\n"); - return; - } - - dev->priv.root_ns = NULL; + clean_tree(&root_ns->ns.node); } void mlx5_cleanup_fs(struct mlx5_core_dev *dev) { + struct mlx5_flow_steering *steering = dev->priv.steering; + if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) return; - cleanup_root_ns(dev); - cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); - cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns); - cleanup_single_prio_root_ns(dev, dev->priv.esw_ingress_root_ns); + cleanup_root_ns(steering->root_ns); + cleanup_root_ns(steering->esw_egress_root_ns); + cleanup_root_ns(steering->esw_ingress_root_ns); + cleanup_root_ns(steering->fdb_root_ns); mlx5_cleanup_fc_stats(dev); + kfree(steering); } -static int init_fdb_root_ns(struct mlx5_core_dev *dev) +static int init_fdb_root_ns(struct mlx5_flow_steering *steering) { struct fs_prio *prio; - dev->priv.fdb_root_ns = create_root_ns(dev, FS_FT_FDB); - if 
(!dev->priv.fdb_root_ns) + steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB); + if (!steering->fdb_root_ns) return -ENOMEM; - /* Create single prio */ - prio = fs_create_prio(&dev->priv.fdb_root_ns->ns, 0, 1); - if (IS_ERR(prio)) { - cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); - return PTR_ERR(prio); - } else { - return 0; - } + prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1); + if (IS_ERR(prio)) + goto out_err; + + prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1); + if (IS_ERR(prio)) + goto out_err; + + set_prio_attrs(steering->fdb_root_ns); + return 0; + +out_err: + cleanup_root_ns(steering->fdb_root_ns); + steering->fdb_root_ns = NULL; + return PTR_ERR(prio); } -static int init_egress_acl_root_ns(struct mlx5_core_dev *dev) +static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering) { struct fs_prio *prio; - dev->priv.esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL); - if (!dev->priv.esw_egress_root_ns) + steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL); + if (!steering->esw_egress_root_ns) return -ENOMEM; /* create 1 prio*/ - prio = fs_create_prio(&dev->priv.esw_egress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev)); - if (IS_ERR(prio)) - return PTR_ERR(prio); - else - return 0; + prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0, + MLX5_TOTAL_VPORTS(steering->dev)); + return PTR_ERR_OR_ZERO(prio); } -static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev) +static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering) { struct fs_prio *prio; - dev->priv.esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL); - if (!dev->priv.esw_ingress_root_ns) + steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL); + if (!steering->esw_ingress_root_ns) return -ENOMEM; /* create 1 prio*/ - prio = fs_create_prio(&dev->priv.esw_ingress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev)); - if (IS_ERR(prio)) - return PTR_ERR(prio); - else - return 0; + prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0, + MLX5_TOTAL_VPORTS(steering->dev)); + return PTR_ERR_OR_ZERO(prio); } int mlx5_init_fs(struct mlx5_core_dev *dev) { + struct mlx5_flow_steering *steering; int err = 0; if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) @@ -1838,26 +1769,32 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) if (err) return err; + steering = kzalloc(sizeof(*steering), GFP_KERNEL); + if (!steering) + return -ENOMEM; + steering->dev = dev; + dev->priv.steering = steering; + if (MLX5_CAP_GEN(dev, nic_flow_table) && MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) { - err = init_root_ns(dev); + err = init_root_ns(steering); if (err) goto err; } if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { - err = init_fdb_root_ns(dev); + err = init_fdb_root_ns(steering); if (err) goto err; } if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { - err = init_egress_acl_root_ns(dev); + err = init_egress_acl_root_ns(steering); if (err) goto err; } if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { - err = init_ingress_acl_root_ns(dev); + err = init_ingress_acl_root_ns(steering); if (err) goto err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index aa41a7314..9cffb6aeb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -55,6 +55,14 @@ enum fs_fte_status { FS_FTE_STATUS_EXISTING = 1UL << 0, }; +struct mlx5_flow_steering { 
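+ /* one root namespace per flow table type; replaces the separate root_ns/fdb/esw ACL pointers that previously lived in mlx5_priv */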
+ struct mlx5_core_dev *dev; + struct mlx5_flow_root_namespace *root_ns; + struct mlx5_flow_root_namespace *fdb_root_ns; + struct mlx5_flow_root_namespace *esw_egress_root_ns; + struct mlx5_flow_root_namespace *esw_ingress_root_ns; +}; + struct fs_node { struct list_head list; struct list_head children; @@ -103,6 +111,7 @@ struct mlx5_fc_cache { }; struct mlx5_fc { + struct rb_node node; struct list_head list; /* last{packets,bytes} members are used when calculating the delta since diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 164dc37fd..3a9195b41 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -32,6 +32,7 @@ #include <linux/mlx5/driver.h> #include <linux/mlx5/fs.h> +#include <linux/rbtree.h> #include "mlx5_core.h" #include "fs_core.h" #include "fs_cmd.h" @@ -68,32 +69,117 @@ * elapsed, the thread will actually query the hardware. */ +static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter) +{ + struct rb_node **new = &root->rb_node; + struct rb_node *parent = NULL; + + while (*new) { + struct mlx5_fc *this = container_of(*new, struct mlx5_fc, node); + int result = counter->id - this->id; + + parent = *new; + if (result < 0) + new = &((*new)->rb_left); + else + new = &((*new)->rb_right); + } + + /* Add new node and rebalance tree. */ + rb_link_node(&counter->node, parent, new); + rb_insert_color(&counter->node, root); +} + +static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev, + struct mlx5_fc *first, + u16 last_id) +{ + struct mlx5_cmd_fc_bulk *b; + struct rb_node *node = NULL; + u16 afirst_id; + int num; + int err; + int max_bulk = 1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk); + + /* first id must be aligned to 4 when using bulk query */ + afirst_id = first->id & ~0x3; + + /* number of counters to query inc. 
the last counter */ + num = ALIGN(last_id - afirst_id + 1, 4); + if (num > max_bulk) { + num = max_bulk; + last_id = afirst_id + num - 1; + } + + b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num); + if (!b) { + mlx5_core_err(dev, "Error allocating resources for bulk query\n"); + return NULL; + } + + err = mlx5_cmd_fc_bulk_query(dev, b); + if (err) { + mlx5_core_err(dev, "Error doing bulk query: %d\n", err); + goto out; + } + + for (node = &first->node; node; node = rb_next(node)) { + struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node); + struct mlx5_fc_cache *c = &counter->cache; + u64 packets; + u64 bytes; + + if (counter->id > last_id) + break; + + mlx5_cmd_fc_bulk_get(dev, b, + counter->id, &packets, &bytes); + + if (c->packets == packets) + continue; + + c->packets = packets; + c->bytes = bytes; + c->lastuse = jiffies; + } + +out: + mlx5_cmd_fc_bulk_free(b); + + return node; +} + static void mlx5_fc_stats_work(struct work_struct *work) { struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev, priv.fc_stats.work.work); struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; unsigned long now = jiffies; - struct mlx5_fc *counter; - struct mlx5_fc *tmp; - int err = 0; + struct mlx5_fc *counter = NULL; + struct mlx5_fc *last = NULL; + struct rb_node *node; + LIST_HEAD(tmplist); spin_lock(&fc_stats->addlist_lock); - list_splice_tail_init(&fc_stats->addlist, &fc_stats->list); + list_splice_tail_init(&fc_stats->addlist, &tmplist); - if (!list_empty(&fc_stats->list)) + if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters)) queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD); spin_unlock(&fc_stats->addlist_lock); - list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) { - struct mlx5_fc_cache *c = &counter->cache; - u64 packets; - u64 bytes; + list_for_each_entry(counter, &tmplist, list) + mlx5_fc_stats_insert(&fc_stats->counters, counter); + + node = rb_first(&fc_stats->counters); + while (node) { + counter = rb_entry(node, struct mlx5_fc, node); + + node = rb_next(node); if (counter->deleted) { - list_del(&counter->list); + rb_erase(&counter->node, &fc_stats->counters); mlx5_cmd_fc_free(dev, counter->id); @@ -101,26 +187,20 @@ static void mlx5_fc_stats_work(struct work_struct *work) continue; } - if (time_before(now, fc_stats->next_query)) - continue; + last = counter; + } - err = mlx5_cmd_fc_query(dev, counter->id, &packets, &bytes); - if (err) { - pr_err("Error querying stats for counter id %d\n", - counter->id); - continue; - } + if (time_before(now, fc_stats->next_query) || !last) + return; - if (packets == c->packets) - continue; + node = rb_first(&fc_stats->counters); + while (node) { + counter = rb_entry(node, struct mlx5_fc, node); - c->lastuse = jiffies; - c->packets = packets; - c->bytes = bytes; + node = mlx5_fc_stats_query(dev, counter, last->id); } - if (time_after_eq(now, fc_stats->next_query)) - fc_stats->next_query = now + MLX5_FC_STATS_PERIOD; + fc_stats->next_query = now + MLX5_FC_STATS_PERIOD; } struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) @@ -176,7 +256,7 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev) { struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - INIT_LIST_HEAD(&fc_stats->list); + fc_stats->counters = RB_ROOT; INIT_LIST_HEAD(&fc_stats->addlist); spin_lock_init(&fc_stats->addlist_lock); @@ -194,20 +274,32 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev) struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; struct mlx5_fc *counter; struct mlx5_fc *tmp; + struct 
rb_node *node; cancel_delayed_work_sync(&dev->priv.fc_stats.work); destroy_workqueue(dev->priv.fc_stats.wq); dev->priv.fc_stats.wq = NULL; - list_splice_tail_init(&fc_stats->addlist, &fc_stats->list); - - list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) { + list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) { list_del(&counter->list); mlx5_cmd_fc_free(dev, counter->id); kfree(counter); } + + node = rb_first(&fc_stats->counters); + while (node) { + counter = rb_entry(node, struct mlx5_fc, node); + + node = rb_next(node); + + rb_erase(&counter->node, &fc_stats->counters); + + mlx5_cmd_fc_free(dev, counter->id); + + kfree(counter); + } } void mlx5_fc_query_cached(struct mlx5_fc *counter, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 75c7ae6a5..77fc1aa26 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -151,6 +151,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return err; } + if (MLX5_CAP_GEN(dev, qos)) { + err = mlx5_core_get_caps(dev, MLX5_CAP_QOS); + if (err) + return err; + } + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 96a59463a..1a05fb965 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -272,7 +272,7 @@ static void poll_health(unsigned long data) if (in_fatal(dev) && !health->sick) { health->sick = true; print_health_info(dev); - queue_work(health->wq, &health->work); + schedule_work(&health->work); } } @@ -301,7 +301,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; - destroy_workqueue(health->wq); + flush_work(&health->work); } int mlx5_health_init(struct mlx5_core_dev *dev) @@ -316,10 +316,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev) strcpy(name, "mlx5_health"); strcat(name, dev_name(&dev->pdev->dev)); - health->wq = create_singlethread_workqueue(name); kfree(name); - if (!health->wq) - return -ENOMEM; INIT_WORK(&health->work, health_care); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index e782d0fde..2385bae92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -51,6 +51,7 @@ #ifdef CONFIG_RFS_ACCEL #include <linux/cpu_rmap.h> #endif +#include <net/devlink.h> #include "mlx5_core.h" #include "fs_core.h" #ifdef CONFIG_MLX5_CORE_EN @@ -1144,6 +1145,13 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) dev_err(&pdev->dev, "Failed to init flow steering\n"); goto err_fs; } + + err = mlx5_init_rl_table(dev); + if (err) { + dev_err(&pdev->dev, "Failed to init rate limiting\n"); + goto err_rl; + } + #ifdef CONFIG_MLX5_CORE_EN err = mlx5_eswitch_init(dev); if (err) { @@ -1183,6 +1191,8 @@ err_sriov: mlx5_eswitch_cleanup(dev->priv.eswitch); #endif err_reg_dev: + mlx5_cleanup_rl_table(dev); +err_rl: mlx5_cleanup_fs(dev); err_fs: mlx5_cleanup_mkey_table(dev); @@ -1253,6 +1263,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) mlx5_eswitch_cleanup(dev->priv.eswitch); #endif + mlx5_cleanup_rl_table(dev); mlx5_cleanup_fs(dev); mlx5_cleanup_mkey_table(dev); mlx5_cleanup_srq_table(dev); @@ -1305,19 +1316,28 @@ struct mlx5_core_event_handler { void *data); }; +static const struct devlink_ops mlx5_devlink_ops = { +#ifdef 
CONFIG_MLX5_CORE_EN + .eswitch_mode_set = mlx5_devlink_eswitch_mode_set, + .eswitch_mode_get = mlx5_devlink_eswitch_mode_get, +#endif +}; static int init_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct mlx5_core_dev *dev; + struct devlink *devlink; struct mlx5_priv *priv; int err; - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) { + devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev)); + if (!devlink) { dev_err(&pdev->dev, "kzalloc failed\n"); return -ENOMEM; } + + dev = devlink_priv(devlink); priv = &dev->priv; priv->pci_dev_data = id->driver_data; @@ -1354,15 +1374,21 @@ static int init_one(struct pci_dev *pdev, goto clean_health; } + err = devlink_register(devlink, &pdev->dev); + if (err) + goto clean_load; + return 0; +clean_load: + mlx5_unload_one(dev, priv); clean_health: mlx5_health_cleanup(dev); close_pci: mlx5_pci_close(dev, priv); clean_dev: pci_set_drvdata(pdev, NULL); - kfree(dev); + devlink_free(devlink); return err; } @@ -1370,8 +1396,10 @@ clean_dev: static void remove_one(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + struct devlink *devlink = priv_to_devlink(dev); struct mlx5_priv *priv = &dev->priv; + devlink_unregister(devlink); if (mlx5_unload_one(dev, priv)) { dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n"); mlx5_health_cleanup(dev); @@ -1380,7 +1408,7 @@ static void remove_one(struct pci_dev *pdev) mlx5_health_cleanup(dev); mlx5_pci_close(dev, priv); pci_set_drvdata(pdev, NULL); - kfree(dev); + devlink_free(devlink); } static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 3e35611b1..752c08127 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -202,15 +202,24 @@ int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, } EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper); -int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, - int proto_mask) +int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable, + u32 proto_admin, int proto_mask) { - u32 in[MLX5_ST_SZ_DW(ptys_reg)]; u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + u32 in[MLX5_ST_SZ_DW(ptys_reg)]; + u8 an_disable_admin; + u8 an_disable_cap; + u8 an_status; + + mlx5_query_port_autoneg(dev, proto_mask, &an_status, + &an_disable_cap, &an_disable_admin); + if (!an_disable_cap && an_disable) + return -EPERM; memset(in, 0, sizeof(in)); MLX5_SET(ptys_reg, in, local_port, 1); + MLX5_SET(ptys_reg, in, an_disable_admin, an_disable); MLX5_SET(ptys_reg, in, proto_mask, proto_mask); if (proto_mask == MLX5_PTYS_EN) MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin); @@ -220,7 +229,19 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_PTYS, 0, 1); } -EXPORT_SYMBOL_GPL(mlx5_set_port_proto); +EXPORT_SYMBOL_GPL(mlx5_set_port_ptys); + +/* This function should be used after setting a port register only */ +void mlx5_toggle_port_link(struct mlx5_core_dev *dev) +{ + enum mlx5_port_status ps; + + mlx5_query_port_admin_status(dev, &ps); + mlx5_set_port_admin_status(dev, MLX5_PORT_DOWN); + if (ps == MLX5_PORT_UP) + mlx5_set_port_admin_status(dev, MLX5_PORT_UP); +} +EXPORT_SYMBOL_GPL(mlx5_toggle_port_link); int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status status) @@ -518,6 +539,25 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 
*pfc_en_rx) } EXPORT_SYMBOL_GPL(mlx5_query_port_pfc); +void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask, + u8 *an_status, + u8 *an_disable_cap, u8 *an_disable_admin) +{ + u32 out[MLX5_ST_SZ_DW(ptys_reg)]; + + *an_status = 0; + *an_disable_cap = 0; + *an_disable_admin = 0; + + if (mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1)) + return; + + *an_status = MLX5_GET(ptys_reg, out, an_status); + *an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap); + *an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin); +} +EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg); + int mlx5_max_tc(struct mlx5_core_dev *mdev) { u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c new file mode 100644 index 000000000..c07c28bd3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cmd.h> +#include "mlx5_core.h" + +/* Finds an entry where we can register the given rate + * If the rate already exists, return the entry where it is registered, + * otherwise return the first available entry. 
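+ * (callers serialize on rl_lock and reuse an existing entry by bumping its refcount)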
+ * If the table is full, return NULL + */ +static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table, + u32 rate) +{ + struct mlx5_rl_entry *ret_entry = NULL; + bool empty_found = false; + int i; + + for (i = 0; i < table->max_size; i++) { + if (table->rl_entry[i].rate == rate) + return &table->rl_entry[i]; + if (!empty_found && !table->rl_entry[i].rate) { + empty_found = true; + ret_entry = &table->rl_entry[i]; + } + } + + return ret_entry; +} + +static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev, + u32 rate, u16 index) +{ + u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)]; + u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(set_rate_limit_in, in, opcode, + MLX5_CMD_OP_SET_RATE_LIMIT); + MLX5_SET(set_rate_limit_in, in, rate_limit_index, index); + MLX5_SET(set_rate_limit_in, in, rate_limit, rate); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), + out, sizeof(out)); +} + +bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate) +{ + struct mlx5_rl_table *table = &dev->priv.rl_table; + + return (rate <= table->max_rate && rate >= table->min_rate); +} +EXPORT_SYMBOL(mlx5_rl_is_in_range); + +int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index) +{ + struct mlx5_rl_table *table = &dev->priv.rl_table; + struct mlx5_rl_entry *entry; + int err = 0; + + mutex_lock(&table->rl_lock); + + if (!rate || !mlx5_rl_is_in_range(dev, rate)) { + mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n", + rate, table->min_rate, table->max_rate); + err = -EINVAL; + goto out; + } + + entry = find_rl_entry(table, rate); + if (!entry) { + mlx5_core_err(dev, "Max number of %u rates reached\n", + table->max_size); + err = -ENOSPC; + goto out; + } + if (entry->refcount) { + /* rate already configured */ + entry->refcount++; + } else { + /* new rate limit */ + err = mlx5_set_rate_limit_cmd(dev, rate, entry->index); + if (err) { + mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n", + rate, err); + goto out; + } + entry->rate = rate; + entry->refcount = 1; + } + *index = entry->index; + +out: + mutex_unlock(&table->rl_lock); + return err; +} +EXPORT_SYMBOL(mlx5_rl_add_rate); + +void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate) +{ + struct mlx5_rl_table *table = &dev->priv.rl_table; + struct mlx5_rl_entry *entry = NULL; + + /* 0 is a reserved value for unlimited rate */ + if (rate == 0) + return; + + mutex_lock(&table->rl_lock); + entry = find_rl_entry(table, rate); + if (!entry || !entry->refcount) { + mlx5_core_warn(dev, "Rate %u is not configured\n", rate); + goto out; + } + + entry->refcount--; + if (!entry->refcount) { + /* need to remove rate */ + mlx5_set_rate_limit_cmd(dev, 0, entry->index); + entry->rate = 0; + } + +out: + mutex_unlock(&table->rl_lock); +} +EXPORT_SYMBOL(mlx5_rl_remove_rate); + +int mlx5_init_rl_table(struct mlx5_core_dev *dev) +{ + struct mlx5_rl_table *table = &dev->priv.rl_table; + int i; + + mutex_init(&table->rl_lock); + if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) { + table->max_size = 0; + return 0; + } + + /* First entry is reserved for unlimited rate */ + table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1; + table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate); + table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate); + + table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry), + GFP_KERNEL); + if (!table->rl_entry) + return -ENOMEM; + + /* The index represents the index in HW rate 
limit table + * Index 0 is reserved for unlimited rate + */ + for (i = 0; i < table->max_size; i++) + table->rl_entry[i].index = i + 1; + + /* Index 0 is reserved */ + mlx5_core_info(dev, "Rate limit: %u rates are supported, range: %uMbps to %uMbps\n", + table->max_size, + table->min_rate >> 10, + table->max_rate >> 10); + + return 0; +} + +void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev) +{ + struct mlx5_rl_table *table = &dev->priv.rl_table; + int i; + + /* Clear all configured rates */ + for (i = 0; i < table->max_size; i++) + if (table->rl_entry[i].rate) + mlx5_set_rate_limit_cmd(dev, 0, + table->rl_entry[i].index); + + kfree(dev->priv.rl_table.rl_entry); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index d6a3f412b..b380a6bc1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -167,7 +167,7 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs) mlx5_core_init_vfs(dev, num_vfs); #ifdef CONFIG_MLX5_CORE_EN - mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs); + mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY); #endif return num_vfs; @@ -209,7 +209,8 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev) mlx5_core_init_vfs(dev, cur_vfs); #ifdef CONFIG_MLX5_CORE_EN if (cur_vfs) - mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs); + mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs, + SRIOV_LEGACY); #endif enable_vfs(dev, cur_vfs); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c index 04bc52260..c07f4d01b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -63,12 +63,12 @@ void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type) complete(&srq->free); } -static int get_pas_size(void *srqc) +static int get_pas_size(struct mlx5_srq_attr *in) { - u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12; - u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size); - u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride); - u32 page_offset = MLX5_GET(srqc, srqc, page_offset); + u32 log_page_size = in->log_page_size + 12; + u32 log_srq_size = in->log_size; + u32 log_rq_stride = in->wqe_shift; + u32 page_offset = in->page_offset; u32 po_quanta = 1 << (log_page_size - 6); u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride); u32 page_size = 1 << log_page_size; @@ -78,57 +78,58 @@ static int get_pas_size(void *srqc) return rq_num_pas * sizeof(u64); } -static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc) +static void set_wq(void *wq, struct mlx5_srq_attr *in) { - void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq); - - if (srqc_to_rmpc) { - switch (MLX5_GET(srqc, srqc, state)) { - case MLX5_SRQC_STATE_GOOD: - MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); - break; - case MLX5_SRQC_STATE_ERROR: - MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR); - break; - default: - pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__, - __LINE__, MLX5_GET(srqc, srqc, state)); - MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state)); - } - - MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature)); - MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size)); - MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4); - MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size)); - MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset)); - 
MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm)); - MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd)); - MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, srqc, dbr_addr)); - } else { - switch (MLX5_GET(rmpc, rmpc, state)) { - case MLX5_RMPC_STATE_RDY: - MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD); - break; - case MLX5_RMPC_STATE_ERR: - MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR); - break; - default: - pr_warn("%s: %d: Unknown rmp state = 0x%x\n", - __func__, __LINE__, - MLX5_GET(rmpc, rmpc, state)); - MLX5_SET(srqc, srqc, state, - MLX5_GET(rmpc, rmpc, state)); - } - - MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature)); - MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz)); - MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4); - MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz)); - MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset)); - MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm)); - MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd)); - MLX5_SET64(srqc, srqc, dbr_addr, MLX5_GET64(wq, wq, dbr_addr)); - } + MLX5_SET(wq, wq, wq_signature, !!(in->flags + & MLX5_SRQ_FLAG_WQ_SIG)); + MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size); + MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4); + MLX5_SET(wq, wq, log_wq_sz, in->log_size); + MLX5_SET(wq, wq, page_offset, in->page_offset); + MLX5_SET(wq, wq, lwm, in->lwm); + MLX5_SET(wq, wq, pd, in->pd); + MLX5_SET64(wq, wq, dbr_addr, in->db_record); +} + +static void set_srqc(void *srqc, struct mlx5_srq_attr *in) +{ + MLX5_SET(srqc, srqc, wq_signature, !!(in->flags + & MLX5_SRQ_FLAG_WQ_SIG)); + MLX5_SET(srqc, srqc, log_page_size, in->log_page_size); + MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift); + MLX5_SET(srqc, srqc, log_srq_size, in->log_size); + MLX5_SET(srqc, srqc, page_offset, in->page_offset); + MLX5_SET(srqc, srqc, lwm, in->lwm); + MLX5_SET(srqc, srqc, pd, in->pd); + MLX5_SET64(srqc, srqc, dbr_addr, in->db_record); + MLX5_SET(srqc, srqc, xrcd, in->xrcd); + MLX5_SET(srqc, srqc, cqn, in->cqn); +} + +static void get_wq(void *wq, struct mlx5_srq_attr *in) +{ + if (MLX5_GET(wq, wq, wq_signature)) + in->flags &= MLX5_SRQ_FLAG_WQ_SIG; + in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz); + in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4; + in->log_size = MLX5_GET(wq, wq, log_wq_sz); + in->page_offset = MLX5_GET(wq, wq, page_offset); + in->lwm = MLX5_GET(wq, wq, lwm); + in->pd = MLX5_GET(wq, wq, pd); + in->db_record = MLX5_GET64(wq, wq, dbr_addr); +} + +static void get_srqc(void *srqc, struct mlx5_srq_attr *in) +{ + if (MLX5_GET(srqc, srqc, wq_signature)) + in->flags &= MLX5_SRQ_FLAG_WQ_SIG; + in->log_page_size = MLX5_GET(srqc, srqc, log_page_size); + in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride); + in->log_size = MLX5_GET(srqc, srqc, log_srq_size); + in->page_offset = MLX5_GET(srqc, srqc, page_offset); + in->lwm = MLX5_GET(srqc, srqc, lwm); + in->pd = MLX5_GET(srqc, srqc, pd); + in->db_record = MLX5_GET64(srqc, srqc, dbr_addr); } struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn) @@ -149,19 +150,36 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn) EXPORT_SYMBOL(mlx5_core_get_srq); static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_create_srq_mbox_in *in, int inlen) + struct mlx5_srq_attr *in) { - struct mlx5_create_srq_mbox_out out; + u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0}; + void *create_in; + void *srqc; + void *pas; + int pas_size; + int 
inlen; int err; - memset(&out, 0, sizeof(out)); + pas_size = get_pas_size(in); + inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size; + create_in = mlx5_vzalloc(inlen); + if (!create_in) + return -ENOMEM; + + srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry); + pas = MLX5_ADDR_OF(create_srq_in, create_in, pas); - in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ); + set_srqc(srqc, in); + memcpy(pas, in->pas, pas_size); - err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out), - sizeof(out)); + MLX5_SET(create_srq_in, create_in, opcode, + MLX5_CMD_OP_CREATE_SRQ); - srq->srqn = be32_to_cpu(out.srqn) & 0xffffff; + err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out, + sizeof(create_out)); + kvfree(create_in); + if (!err) + srq->srqn = MLX5_GET(create_srq_out, create_out, srqn); return err; } @@ -169,67 +187,75 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, static int destroy_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) { - struct mlx5_destroy_srq_mbox_in in; - struct mlx5_destroy_srq_mbox_out out; + u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0}; + u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0}; - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ); - in.srqn = cpu_to_be32(srq->srqn); + MLX5_SET(destroy_srq_in, srq_in, opcode, + MLX5_CMD_OP_DESTROY_SRQ); + MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn); - return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), - (u32 *)(&out), sizeof(out)); + return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in), + srq_out, sizeof(srq_out)); } static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm, int is_srq) { - struct mlx5_arm_srq_mbox_in in; - struct mlx5_arm_srq_mbox_out out; - - memset(&in, 0, sizeof(in)); - memset(&out, 0, sizeof(out)); + /* arm_srq structs missing using identical xrc ones */ + u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0}; + u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0}; - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ); - in.hdr.opmod = cpu_to_be16(!!is_srq); - in.srqn = cpu_to_be32(srq->srqn); - in.lwm = cpu_to_be16(lwm); + MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); + MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn); + MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm); - return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), - sizeof(in), (u32 *)(&out), - sizeof(out)); + return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in), + srq_out, sizeof(srq_out)); } static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_query_srq_mbox_out *out) + struct mlx5_srq_attr *out) { - struct mlx5_query_srq_mbox_in in; + u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0}; + u32 *srq_out; + void *srqc; + int err; - memset(&in, 0, sizeof(in)); + srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out)); + if (!srq_out) + return -ENOMEM; - in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ); - in.srqn = cpu_to_be32(srq->srqn); + MLX5_SET(query_srq_in, srq_in, opcode, + MLX5_CMD_OP_QUERY_SRQ); + MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn); + err = mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in), + srq_out, + MLX5_ST_SZ_BYTES(query_srq_out)); + if (err) + goto out; - return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), - (u32 *)out, sizeof(*out)); + srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry); + get_srqc(srqc, out); + if (MLX5_GET(srqc, 
srqc, state) != MLX5_SRQC_STATE_GOOD) + out->flags |= MLX5_SRQ_FLAG_ERR; +out: + kvfree(srq_out); + return err; } static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_create_srq_mbox_in *in, - int srq_inlen) + struct mlx5_srq_attr *in) { u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)]; void *create_in; - void *srqc; void *xrc_srqc; void *pas; int pas_size; int inlen; int err; - srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry); - pas_size = get_pas_size(srqc); + pas_size = get_pas_size(in); inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size; create_in = mlx5_vzalloc(inlen); if (!create_in) @@ -239,7 +265,8 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, xrc_srq_context_entry); pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas); - memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc)); + set_srqc(xrc_srqc, in); + MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index); memcpy(pas, in->pas, pas_size); MLX5_SET(create_xrc_srq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ); @@ -293,11 +320,10 @@ static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev, static int query_xrc_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_query_srq_mbox_out *out) + struct mlx5_srq_attr *out) { u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)]; u32 *xrcsrq_out; - void *srqc; void *xrc_srqc; int err; @@ -317,8 +343,9 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev, xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out, xrc_srq_context_entry); - srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry); - memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc)); + get_srqc(xrc_srqc, out); + if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD) + out->flags |= MLX5_SRQ_FLAG_ERR; out: kvfree(xrcsrq_out); @@ -326,26 +353,27 @@ out: } static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_create_srq_mbox_in *in, int srq_inlen) + struct mlx5_srq_attr *in) { void *create_in; void *rmpc; - void *srqc; + void *wq; int pas_size; int inlen; int err; - srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry); - pas_size = get_pas_size(srqc); + pas_size = get_pas_size(in); inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size; create_in = mlx5_vzalloc(inlen); if (!create_in) return -ENOMEM; rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx); + wq = MLX5_ADDR_OF(rmpc, rmpc, wq); + MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); + set_wq(wq, in); memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size); - rmpc_srqc_reformat(srqc, rmpc, true); err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn); @@ -390,11 +418,10 @@ static int arm_rmp_cmd(struct mlx5_core_dev *dev, } static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_query_srq_mbox_out *out) + struct mlx5_srq_attr *out) { u32 *rmp_out; void *rmpc; - void *srqc; int err; rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out)); @@ -405,9 +432,10 @@ static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, if (err) goto out; - srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry); rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context); - rmpc_srqc_reformat(srqc, rmpc, false); + get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out); + if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY) + out->flags |= MLX5_SRQ_FLAG_ERR; out: kvfree(rmp_out); @@ -416,15 +444,14 @@ out: static int create_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct 
mlx5_create_srq_mbox_in *in, - int inlen, int is_xrc) + struct mlx5_srq_attr *in) { if (!dev->issi) - return create_srq_cmd(dev, srq, in, inlen); + return create_srq_cmd(dev, srq, in); else if (srq->common.res == MLX5_RES_XSRQ) - return create_xrc_srq_cmd(dev, srq, in, inlen); + return create_xrc_srq_cmd(dev, srq, in); else - return create_rmp_cmd(dev, srq, in, inlen); + return create_rmp_cmd(dev, srq, in); } static int destroy_srq_split(struct mlx5_core_dev *dev, @@ -439,15 +466,17 @@ static int destroy_srq_split(struct mlx5_core_dev *dev, } int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_create_srq_mbox_in *in, int inlen, - int is_xrc) + struct mlx5_srq_attr *in) { int err; struct mlx5_srq_table *table = &dev->priv.srq_table; - srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ; + if (in->type == IB_SRQT_XRC) + srq->common.res = MLX5_RES_XSRQ; + else + srq->common.res = MLX5_RES_SRQ; - err = create_srq_split(dev, srq, in, inlen, is_xrc); + err = create_srq_split(dev, srq, in); if (err) return err; @@ -502,7 +531,7 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) EXPORT_SYMBOL(mlx5_core_destroy_srq); int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_query_srq_mbox_out *out) + struct mlx5_srq_attr *out) { if (!dev->issi) return query_srq_cmd(dev, srq, out); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index 03a5093ff..28274a6fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -85,6 +85,7 @@ int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn) return err; } +EXPORT_SYMBOL(mlx5_core_create_rq); int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen) { @@ -110,6 +111,7 @@ void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn) mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); } +EXPORT_SYMBOL(mlx5_core_destroy_rq); int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out) { @@ -430,6 +432,7 @@ int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, return err; } +EXPORT_SYMBOL(mlx5_core_create_rqt); int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, int inlen) @@ -455,3 +458,4 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn) mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); } +EXPORT_SYMBOL(mlx5_core_destroy_rqt); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index 91846dfcb..21365d069 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -135,6 +135,18 @@ static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in, return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out)); } +void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, + u8 *min_inline_mode) +{ + u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0}; + + mlx5_query_nic_vport_context(mdev, 0, out, sizeof(out)); + + *min_inline_mode = MLX5_GET(query_nic_vport_context_out, out, + nic_vport_context.min_wqe_inline_mode); +} +EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline); + int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, u16 vport, u8 *addr) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile 
b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 9b5ebf84c..d20ae1838 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -7,5 +7,6 @@ obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o mlxsw_switchx2-objs := switchx2.o obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ - spectrum_switchdev.o + spectrum_switchdev.o spectrum_router.o \ + spectrum_kvdl.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index cd63b8263..28271bedd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -105,6 +105,7 @@ enum mlxsw_cmd_opcode { MLXSW_CMD_OPCODE_SW2HW_EQ = 0x013, MLXSW_CMD_OPCODE_HW2SW_EQ = 0x014, MLXSW_CMD_OPCODE_QUERY_EQ = 0x015, + MLXSW_CMD_OPCODE_QUERY_RESOURCES = 0x101, }; static inline const char *mlxsw_cmd_opcode_str(u16 opcode) @@ -144,6 +145,8 @@ static inline const char *mlxsw_cmd_opcode_str(u16 opcode) return "HW2SW_EQ"; case MLXSW_CMD_OPCODE_QUERY_EQ: return "QUERY_EQ"; + case MLXSW_CMD_OPCODE_QUERY_RESOURCES: + return "QUERY_RESOURCES"; default: return "*UNKNOWN*"; } @@ -500,6 +503,35 @@ static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core) return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0); } +/* QUERY_RESOURCES - Query chip resources + * -------------------------------------- + * OpMod == 0 (N/A) , INMmod is index + * ---------------------------------- + * The QUERY_RESOURCES command retrieves information related to chip resources + * by resource ID. Every command returns 32 entries. INmod is being use as base. + * for example, index 1 will return entries 32-63. When the tables end and there + * are no more sources in the table, will return resource id 0xFFF to indicate + * it. + */ +static inline int mlxsw_cmd_query_resources(struct mlxsw_core *mlxsw_core, + char *out_mbox, int index) +{ + return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_RESOURCES, + 0, index, false, out_mbox, + MLXSW_CMD_MBOX_SIZE); +} + +/* cmd_mbox_query_resource_id + * The resource id. 0xFFFF indicates table's end. + */ +MLXSW_ITEM32_INDEXED(cmd_mbox, query_resource, id, 0x00, 16, 16, 0x8, 0, false); + +/* cmd_mbox_query_resource_data + * The resource + */ +MLXSW_ITEM64_INDEXED(cmd_mbox, query_resource, data, + 0x00, 0, 40, 0x8, 0, false); + /* CONFIG_PROFILE (Set) - Configure Switch Profile * ------------------------------ * OpMod == 1 (Set), INMmod == 0 (N/A) @@ -607,6 +639,24 @@ MLXSW_ITEM32(cmd_mbox, config_profile, */ MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1); +/* cmd_mbox_config_set_kvd_linear_size + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_linear_size, 0x0C, 24, 1); + +/* cmd_mbox_config_set_kvd_hash_single_size + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1); + +/* cmd_mbox_config_set_kvd_hash_double_size + * Capability bit. Setting a bit to 1 configures the profile + * according to the mailbox contents. 
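
All of the cmd_mbox and reg fields in this patch are declared through the MLXSW_ITEM32/MLXSW_ITEM64 macro family, which generates get/set accessors over the raw big-endian mailbox buffer. A simplified sketch of roughly what the indexed query_resource id getter above amounts to (illustrative example_* name and body, not the driver's actual macro expansion):

static inline u16 example_query_resource_id_get(const char *mbox, int index)
{
        /* Each QUERY_RESOURCES record is 0x8 bytes; the 16-bit resource
         * id sits at bit offset 16 of the record's first 32-bit word.
         */
        __be32 dword = *(const __be32 *)(mbox + index * 0x8);

        return (be32_to_cpu(dword) >> 16) & 0xffff;
}

The real macros additionally emit the matching _set() helper and handle arbitrary bit widths and offsets, which is why the register definitions later in this patch can stay purely declarative.
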
+ */ +MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1); + /* cmd_mbox_config_profile_max_vepa_channels * Maximum number of VEPA channels per port (0 through 16) * 0 - multi-channel VEPA is disabled @@ -733,6 +783,31 @@ MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16); */ MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1); +/* cmd_mbox_config_kvd_linear_size + * KVD Linear Size + * Valid for Spectrum only + * Allowed values are 128*N where N=0 or higher + */ +MLXSW_ITEM32(cmd_mbox, config_profile, kvd_linear_size, 0x54, 0, 24); + +/* cmd_mbox_config_kvd_hash_single_size + * KVD Hash single-entries size + * Valid for Spectrum only + * Allowed values are 128*N where N=0 or higher + * Must be greater or equal to cap_min_kvd_hash_single_size + * Must be smaller or equal to cap_kvd_size - kvd_linear_size + */ +MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_single_size, 0x58, 0, 24); + +/* cmd_mbox_config_kvd_hash_double_size + * KVD Hash double-entries size (units of single-size entries) + * Valid for Spectrum only + * Allowed values are 128*N where N=0 or higher + * Must be either 0 or greater or equal to cap_min_kvd_hash_double_size + * Must be smaller or equal to cap_kvd_size - kvd_linear_size + */ +MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_double_size, 0x5C, 0, 24); + /* cmd_mbox_config_profile_swid_config_mask * Modify Switch Partition Configuration mask. When set, the configu- * ration value for the Switch Partition are taken from the mailbox. diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index b0a0b01bb..068ee65a9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -58,6 +58,7 @@ #include <linux/workqueue.h> #include <asm/byteorder.h> #include <net/devlink.h> +#include <trace/events/devlink.h> #include "core.h" #include "item.h" @@ -110,6 +111,7 @@ struct mlxsw_core { struct { u8 *mapping; /* lag_id+port_index to local_port mapping */ } lag; + struct mlxsw_resources resources; struct mlxsw_hwmon *hwmon; unsigned long driver_priv[0]; /* driver_priv has to be always the last item */ @@ -447,6 +449,10 @@ static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, if (!skb) return -ENOMEM; + trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0, + skb->data + mlxsw_core->driver->txhdr_len, + skb->len - mlxsw_core->driver->txhdr_len); + atomic_set(&trans->active, 1); err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info); if (err) { @@ -529,6 +535,9 @@ static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port, struct mlxsw_core *mlxsw_core = priv; struct mlxsw_reg_trans *trans; + trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0, + skb->data, skb->len); + if (!mlxsw_emad_is_resp(skb)) goto free_skb; @@ -1102,7 +1111,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, } } - err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile); + err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, + &mlxsw_core->resources); if (err) goto err_bus_init; @@ -1110,14 +1120,14 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_emad_init; - err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon); - if (err) - goto err_hwmon_init; - err = devlink_register(devlink, mlxsw_bus_info->dev); if (err) goto err_devlink_register; + err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, 
&mlxsw_core->hwmon); + if (err) + goto err_hwmon_init; + err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info); if (err) goto err_driver_init; @@ -1131,9 +1141,9 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, err_debugfs_init: mlxsw_core->driver->fini(mlxsw_core); err_driver_init: +err_hwmon_init: devlink_unregister(devlink); err_devlink_register: -err_hwmon_init: mlxsw_emad_fini(mlxsw_core); err_emad_init: mlxsw_bus->fini(bus_priv); @@ -1644,6 +1654,12 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear); +struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core) +{ + return &mlxsw_core->resources; +} +EXPORT_SYMBOL(mlxsw_core_resources_get); + int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, struct mlxsw_core_port *mlxsw_core_port, u8 local_port, struct net_device *dev, bool split, u32 split_group) @@ -1736,7 +1752,7 @@ static int __init mlxsw_core_module_init(void) { int err; - mlxsw_wq = create_workqueue(mlxsw_core_driver_name); + mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0); if (!mlxsw_wq) return -ENOMEM; mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 436bc49df..d3476ead9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -190,7 +190,8 @@ struct mlxsw_config_profile { used_max_ib_mc:1, used_max_pkey:1, used_ar_sec:1, - used_adaptive_routing_group_cap:1; + used_adaptive_routing_group_cap:1, + used_kvd_sizes:1; u8 max_vepa_channels; u16 max_lag; u16 max_port_per_lag; @@ -211,6 +212,10 @@ struct mlxsw_config_profile { u8 ar_sec; u16 adaptive_routing_group_cap; u8 arn; + u32 kvd_linear_size; + u32 kvd_hash_single_size; + u32 kvd_hash_double_size; + u8 resource_query_enable; struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT]; }; @@ -262,10 +267,18 @@ struct mlxsw_driver { const struct mlxsw_config_profile *profile; }; +struct mlxsw_resources { + u8 max_span_valid:1; + u8 max_span; +}; + +struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core); + struct mlxsw_bus { const char *kind; int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core, - const struct mlxsw_config_profile *profile); + const struct mlxsw_config_profile *profile, + struct mlxsw_resources *resources); void (*fini)(void *bus_priv); bool (*skb_transmit_busy)(void *bus_priv, const struct mlxsw_tx_info *tx_info); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 7f4173c8e..1d1360c17 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1154,6 +1154,61 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci, mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask); } +#define MLXSW_RESOURCES_TABLE_END_ID 0xffff +#define MLXSW_MAX_SPAN_ID 0x2420 +#define MLXSW_RESOURCES_QUERY_MAX_QUERIES 100 +#define MLXSW_RESOURCES_PER_QUERY 32 + +static void mlxsw_pci_resources_query_parse(int id, u64 val, + struct mlxsw_resources *resources) +{ + switch (id) { + case MLXSW_MAX_SPAN_ID: + resources->max_span = val; + resources->max_span_valid = 1; + break; + default: + break; + } +} + +static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox, + struct mlxsw_resources *resources, + u8 query_enabled) +{ + int index, i; + 
u64 data; + u16 id; + int err; + + /* Not all the versions support resources query */ + if (!query_enabled) + return 0; + + mlxsw_cmd_mbox_zero(mbox); + + for (index = 0; index < MLXSW_RESOURCES_QUERY_MAX_QUERIES; index++) { + err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index); + if (err) + return err; + + for (i = 0; i < MLXSW_RESOURCES_PER_QUERY; i++) { + id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i); + data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i); + + if (id == MLXSW_RESOURCES_TABLE_END_ID) + return 0; + + mlxsw_pci_resources_query_parse(id, data, resources); + } + } + + /* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get + * MLXSW_RESOURCES_TABLE_END_ID, something went bad in the FW. + */ + return -EIO; +} + static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, const struct mlxsw_config_profile *profile) { @@ -1255,6 +1310,20 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set( mbox, profile->adaptive_routing_group_cap); } + if (profile->used_kvd_sizes) { + mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_kvd_linear_size_set( + mbox, profile->kvd_linear_size); + mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set( + mbox, profile->kvd_hash_single_size); + mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set( + mbox, 1); + mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set( + mbox, profile->kvd_hash_double_size); + } for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++) mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i, @@ -1390,7 +1459,8 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci, } static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, - const struct mlxsw_config_profile *profile) + const struct mlxsw_config_profile *profile, + struct mlxsw_resources *resources) { struct mlxsw_pci *mlxsw_pci = bus_priv; struct pci_dev *pdev = mlxsw_pci->pdev; @@ -1449,6 +1519,11 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_boardinfo; + err = mlxsw_pci_resources_query(mlxsw_pci, mbox, resources, + profile->resource_query_enable); + if (err) + goto err_query_resources; + err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile); if (err) goto err_config_profile; @@ -1471,6 +1546,7 @@ err_request_eq_irq: mlxsw_pci_aqs_fini(mlxsw_pci); err_aqs_init: err_config_profile: +err_query_resources: err_boardinfo: mlxsw_pci_fw_area_fini(mlxsw_pci); err_fw_area_init: diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h index f33b997f2..af371a82c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/port.h +++ b/drivers/net/ethernet/mellanox/mlxsw/port.h @@ -56,6 +56,7 @@ #define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1) #define MLXSW_PORT_CPU_PORT 0x0 +#define MLXSW_PORT_ROUTER_PORT (MLXSW_PORT_MAX_PHY_PORTS + 2) #define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 57d48da70..1721098ee 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1,9 +1,10 @@ /* * drivers/net/ethernet/mellanox/mlxsw/reg.h * Copyright (c) 2015 Mellanox Technologies. All rights reserved. 
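
Once the resource query has filled struct mlxsw_resources, drivers are expected to read limits through mlxsw_core_resources_get() instead of hard-coded constants. A hedged sketch of the consumer side (the example_* name is a placeholder; the actual consumer of max_span is the Spectrum mirroring code):

static int example_span_init(struct mlxsw_core *core)
{
        struct mlxsw_resources *res = mlxsw_core_resources_get(core);

        /* Older firmware may not report this resource at all. */
        if (!res->max_span_valid)
                return -EIO;

        /* ... size the analyzer-port table to res->max_span entries ... */
        return 0;
}
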
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com> + * Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com> * Copyright (c) 2015 Elad Raz <eladr@mellanox.com> - * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: @@ -386,7 +387,9 @@ enum mlxsw_reg_sfd_rec_action { /* forward and trap, trap_id is FDB_TRAP */ MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1, /* trap and do not forward, trap_id is FDB_TRAP */ - MLXSW_REG_SFD_REC_ACTION_TRAP = 3, + MLXSW_REG_SFD_REC_ACTION_TRAP = 2, + /* forward to IP router */ + MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER = 3, MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15, }; @@ -2500,6 +2503,7 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2); enum mlxsw_reg_ppcnt_grp { MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0, MLXSW_REG_PPCNT_PRIO_CNT = 0x10, + MLXSW_REG_PPCNT_TC_CNT = 0x11, }; /* reg_ppcnt_grp @@ -2700,6 +2704,23 @@ MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, 0x08 + 0x68, 0, 64); */ MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, 0x08 + 0x70, 0, 64); +/* Ethernet Per Traffic Group Counters */ + +/* reg_ppcnt_tc_transmit_queue + * Contains the transmit queue depth in cells of traffic class + * selected by prio_tc and the port selected by local_port. + * The field cannot be cleared. + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, tc_transmit_queue, 0x08 + 0x00, 0, 64); + +/* reg_ppcnt_tc_no_buffer_discard_uc + * The number of unicast packets dropped due to lack of shared + * buffer resources. + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc, 0x08 + 0x08, 0, 64); + static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port, enum mlxsw_reg_ppcnt_grp grp, u8 prio_tc) @@ -3201,6 +3222,1194 @@ static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id) mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT); } +/* RGCR - Router General Configuration Register + * -------------------------------------------- + * The register is used for setting up the router configuration. + */ +#define MLXSW_REG_RGCR_ID 0x8001 +#define MLXSW_REG_RGCR_LEN 0x28 + +static const struct mlxsw_reg_info mlxsw_reg_rgcr = { + .id = MLXSW_REG_RGCR_ID, + .len = MLXSW_REG_RGCR_LEN, +}; + +/* reg_rgcr_ipv4_en + * IPv4 router enable. + * Access: RW + */ +MLXSW_ITEM32(reg, rgcr, ipv4_en, 0x00, 31, 1); + +/* reg_rgcr_ipv6_en + * IPv6 router enable. + * Access: RW + */ +MLXSW_ITEM32(reg, rgcr, ipv6_en, 0x00, 30, 1); + +/* reg_rgcr_max_router_interfaces + * Defines the maximum number of active router interfaces for all virtual + * routers. + * Access: RW + */ +MLXSW_ITEM32(reg, rgcr, max_router_interfaces, 0x10, 0, 16); + +/* reg_rgcr_usp + * Update switch priority and packet color. + * 0 - Preserve the value of Switch Priority and packet color. + * 1 - Recalculate the value of Switch Priority and packet color. + * Access: RW + * + * Note: Not supported by SwitchX and SwitchX-2. + */ +MLXSW_ITEM32(reg, rgcr, usp, 0x18, 20, 1); + +/* reg_rgcr_pcp_rw + * Indicates how to handle the pcp_rewrite_en value: + * 0 - Preserve the value of pcp_rewrite_en. + * 2 - Disable PCP rewrite. + * 3 - Enable PCP rewrite. + * Access: RW + * + * Note: Not supported by SwitchX and SwitchX-2. 
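
Bringing the IPv4 router up is then a single RGCR write. A minimal sketch using the pack helper above together with mlxsw_reg_write(), the core's register-write primitive; the interface count chosen here is purely illustrative:

static int example_router_enable(struct mlxsw_core *core, bool enable)
{
        char rgcr_pl[MLXSW_REG_RGCR_LEN];

        mlxsw_reg_rgcr_pack(rgcr_pl, enable);
        /* Generated setter from the max_router_interfaces item above. */
        mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, 100);
        return mlxsw_reg_write(core, MLXSW_REG(rgcr), rgcr_pl);
}
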
+ */ +MLXSW_ITEM32(reg, rgcr, pcp_rw, 0x18, 16, 2); + +/* reg_rgcr_activity_dis + * Activity disable: + * 0 - Activity will be set when an entry is hit (default). + * 1 - Activity will not be set when an entry is hit. + * + * Bit 0 - Disable activity bit in Router Algorithmic LPM Unicast Entry + * (RALUE). + * Bit 1 - Disable activity bit in Router Algorithmic LPM Unicast Host + * Entry (RAUHT). + * Bits 2:7 are reserved. + * Access: RW + * + * Note: Not supported by SwitchX, SwitchX-2 and Switch-IB. + */ +MLXSW_ITEM32(reg, rgcr, activity_dis, 0x20, 0, 8); + +static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en) +{ + MLXSW_REG_ZERO(rgcr, payload); + mlxsw_reg_rgcr_ipv4_en_set(payload, ipv4_en); +} + +/* RITR - Router Interface Table Register + * -------------------------------------- + * The register is used to configure the router interface table. + */ +#define MLXSW_REG_RITR_ID 0x8002 +#define MLXSW_REG_RITR_LEN 0x40 + +static const struct mlxsw_reg_info mlxsw_reg_ritr = { + .id = MLXSW_REG_RITR_ID, + .len = MLXSW_REG_RITR_LEN, +}; + +/* reg_ritr_enable + * Enables routing on the router interface. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, enable, 0x00, 31, 1); + +/* reg_ritr_ipv4 + * IPv4 routing enable. Enables routing of IPv4 traffic on the router + * interface. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ipv4, 0x00, 29, 1); + +/* reg_ritr_ipv6 + * IPv6 routing enable. Enables routing of IPv6 traffic on the router + * interface. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1); + +enum mlxsw_reg_ritr_if_type { + MLXSW_REG_RITR_VLAN_IF, + MLXSW_REG_RITR_FID_IF, + MLXSW_REG_RITR_SP_IF, +}; + +/* reg_ritr_type + * Router interface type. + * 0 - VLAN interface. + * 1 - FID interface. + * 2 - Sub-port interface. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, type, 0x00, 23, 3); + +enum { + MLXSW_REG_RITR_RIF_CREATE, + MLXSW_REG_RITR_RIF_DEL, +}; + +/* reg_ritr_op + * Opcode: + * 0 - Create or edit RIF. + * 1 - Delete RIF. + * Reserved for SwitchX-2. For Spectrum, editing of interface properties + * is not supported. An interface must be deleted and re-created in order + * to update properties. + * Access: WO + */ +MLXSW_ITEM32(reg, ritr, op, 0x00, 20, 2); + +/* reg_ritr_rif + * Router interface index. A pointer to the Router Interface Table. + * Access: Index + */ +MLXSW_ITEM32(reg, ritr, rif, 0x00, 0, 16); + +/* reg_ritr_ipv4_fe + * IPv4 Forwarding Enable. + * Enables routing of IPv4 traffic on the router interface. When disabled, + * forwarding is blocked but local traffic (traps and IP2ME) will be enabled. + * Not supported in SwitchX-2. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1); + +/* reg_ritr_ipv6_fe + * IPv6 Forwarding Enable. + * Enables routing of IPv6 traffic on the router interface. When disabled, + * forwarding is blocked but local traffic (traps and IP2ME) will be enabled. + * Not supported in SwitchX-2. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1); + +/* reg_ritr_lb_en + * Loop-back filter enable for unicast packets. + * If the flag is set then loop-back filter for unicast packets is + * implemented on the RIF. Multicast packets are always subject to + * loop-back filtering. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1); + +/* reg_ritr_virtual_router + * Virtual router ID associated with the router interface. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, virtual_router, 0x04, 0, 16); + +/* reg_ritr_mtu + * Router interface MTU. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, ritr, mtu, 0x34, 0, 16); + +/* reg_ritr_if_swid + * Switch partition ID. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8); + +/* reg_ritr_if_mac + * Router interface MAC address. + * In Spectrum, all MAC addresses must have the same 38 MSBits. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6); + +/* VLAN Interface */ + +/* reg_ritr_vlan_if_vid + * VLAN ID. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, vlan_if_vid, 0x08, 0, 12); + +/* FID Interface */ + +/* reg_ritr_fid_if_fid + * Filtering ID. Used to connect a bridge to the router. Only FIDs from + * the vFID range are supported. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, fid_if_fid, 0x08, 0, 16); + +static inline void mlxsw_reg_ritr_fid_set(char *payload, + enum mlxsw_reg_ritr_if_type rif_type, + u16 fid) +{ + if (rif_type == MLXSW_REG_RITR_FID_IF) + mlxsw_reg_ritr_fid_if_fid_set(payload, fid); + else + mlxsw_reg_ritr_vlan_if_vid_set(payload, fid); +} + +/* Sub-port Interface */ + +/* reg_ritr_sp_if_lag + * LAG indication. When this bit is set the system_port field holds the + * LAG identifier. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, sp_if_lag, 0x08, 24, 1); + +/* reg_ritr_sp_system_port + * Port unique indentifier. When lag bit is set, this field holds the + * lag_id in bits 0:9. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16); + +/* reg_ritr_sp_if_vid + * VLAN ID. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, sp_if_vid, 0x18, 0, 12); + +static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif) +{ + MLXSW_REG_ZERO(ritr, payload); + mlxsw_reg_ritr_rif_set(payload, rif); +} + +static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag, + u16 system_port, u16 vid) +{ + mlxsw_reg_ritr_sp_if_lag_set(payload, lag); + mlxsw_reg_ritr_sp_if_system_port_set(payload, system_port); + mlxsw_reg_ritr_sp_if_vid_set(payload, vid); +} + +static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, + enum mlxsw_reg_ritr_if_type type, + u16 rif, u16 mtu, const char *mac) +{ + bool op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL; + + MLXSW_REG_ZERO(ritr, payload); + mlxsw_reg_ritr_enable_set(payload, enable); + mlxsw_reg_ritr_ipv4_set(payload, 1); + mlxsw_reg_ritr_type_set(payload, type); + mlxsw_reg_ritr_op_set(payload, op); + mlxsw_reg_ritr_rif_set(payload, rif); + mlxsw_reg_ritr_ipv4_fe_set(payload, 1); + mlxsw_reg_ritr_lb_en_set(payload, 1); + mlxsw_reg_ritr_mtu_set(payload, mtu); + mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac); +} + +/* RATR - Router Adjacency Table Register + * -------------------------------------- + * The RATR register is used to configure the Router Adjacency (next-hop) + * Table. + */ +#define MLXSW_REG_RATR_ID 0x8008 +#define MLXSW_REG_RATR_LEN 0x2C + +static const struct mlxsw_reg_info mlxsw_reg_ratr = { + .id = MLXSW_REG_RATR_ID, + .len = MLXSW_REG_RATR_LEN, +}; + +enum mlxsw_reg_ratr_op { + /* Read */ + MLXSW_REG_RATR_OP_QUERY_READ = 0, + /* Read and clear activity */ + MLXSW_REG_RATR_OP_QUERY_READ_CLEAR = 2, + /* Write Adjacency entry */ + MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY = 1, + /* Write Adjacency entry only if the activity is cleared. + * The write may not succeed if the activity is set. There is not + * direct feedback if the write has succeeded or not, however + * the get will reveal the actual entry (SW can compare the get + * response to the set command). 
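
With the RITR packing helpers above, creating a router interface is one register write. A hedged sketch for a FID-based RIF (rif, fid, mtu and mac are caller-supplied placeholders):

static int example_rif_create(struct mlxsw_core *core, u16 rif, u16 fid,
                              u16 mtu, const char *mac)
{
        char ritr_pl[MLXSW_REG_RITR_LEN];

        mlxsw_reg_ritr_pack(ritr_pl, true, MLXSW_REG_RITR_FID_IF,
                            rif, mtu, mac);
        mlxsw_reg_ritr_fid_set(ritr_pl, MLXSW_REG_RITR_FID_IF, fid);
        return mlxsw_reg_write(core, MLXSW_REG(ritr), ritr_pl);
}

Deleting the RIF would repeat the call with enable == false, since mlxsw_reg_ritr_pack() derives the RIF_CREATE/RIF_DEL opcode from the enable flag.
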
+ */ + MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY = 3, +}; + +/* reg_ratr_op + * Note that Write operation may also be used for updating + * counter_set_type and counter_index. In this case all other + * fields must not be updated. + * Access: OP + */ +MLXSW_ITEM32(reg, ratr, op, 0x00, 28, 4); + +/* reg_ratr_v + * Valid bit. Indicates if the adjacency entry is valid. + * Note: the device may need some time before reusing an invalidated + * entry. During this time the entry can not be reused. It is + * recommended to use another entry before reusing an invalidated + * entry (e.g. software can put it at the end of the list for + * reusing). Trying to access an invalidated entry not yet cleared + * by the device results with failure indicating "Try Again" status. + * When valid is '0' then egress_router_interface,trap_action, + * adjacency_parameters and counters are reserved + * Access: RW + */ +MLXSW_ITEM32(reg, ratr, v, 0x00, 24, 1); + +/* reg_ratr_a + * Activity. Set for new entries. Set if a packet lookup has hit on + * the specific entry. To clear the a bit, use "clear activity". + * Access: RO + */ +MLXSW_ITEM32(reg, ratr, a, 0x00, 16, 1); + +/* reg_ratr_adjacency_index_low + * Bits 15:0 of index into the adjacency table. + * For SwitchX and SwitchX-2, the adjacency table is linear and + * used for adjacency entries only. + * For Spectrum, the index is to the KVD linear. + * Access: Index + */ +MLXSW_ITEM32(reg, ratr, adjacency_index_low, 0x04, 0, 16); + +/* reg_ratr_egress_router_interface + * Range is 0 .. cap_max_router_interfaces - 1 + * Access: RW + */ +MLXSW_ITEM32(reg, ratr, egress_router_interface, 0x08, 0, 16); + +enum mlxsw_reg_ratr_trap_action { + MLXSW_REG_RATR_TRAP_ACTION_NOP, + MLXSW_REG_RATR_TRAP_ACTION_TRAP, + MLXSW_REG_RATR_TRAP_ACTION_MIRROR_TO_CPU, + MLXSW_REG_RATR_TRAP_ACTION_MIRROR, + MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS, +}; + +/* reg_ratr_trap_action + * see mlxsw_reg_ratr_trap_action + * Access: RW + */ +MLXSW_ITEM32(reg, ratr, trap_action, 0x0C, 28, 4); + +enum mlxsw_reg_ratr_trap_id { + MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS0 = 0, + MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS1 = 1, +}; + +/* reg_ratr_adjacency_index_high + * Bits 23:16 of the adjacency_index. + * Access: Index + */ +MLXSW_ITEM32(reg, ratr, adjacency_index_high, 0x0C, 16, 8); + +/* reg_ratr_trap_id + * Trap ID to be reported to CPU. + * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1. + * For trap_action of NOP, MIRROR and DISCARD_ERROR + * Access: RW + */ +MLXSW_ITEM32(reg, ratr, trap_id, 0x0C, 0, 8); + +/* reg_ratr_eth_destination_mac + * MAC address of the destination next-hop. + * Access: RW + */ +MLXSW_ITEM_BUF(reg, ratr, eth_destination_mac, 0x12, 6); + +static inline void +mlxsw_reg_ratr_pack(char *payload, + enum mlxsw_reg_ratr_op op, bool valid, + u32 adjacency_index, u16 egress_rif) +{ + MLXSW_REG_ZERO(ratr, payload); + mlxsw_reg_ratr_op_set(payload, op); + mlxsw_reg_ratr_v_set(payload, valid); + mlxsw_reg_ratr_adjacency_index_low_set(payload, adjacency_index); + mlxsw_reg_ratr_adjacency_index_high_set(payload, adjacency_index >> 16); + mlxsw_reg_ratr_egress_router_interface_set(payload, egress_rif); +} + +static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload, + const char *dest_mac) +{ + mlxsw_reg_ratr_eth_destination_mac_memcpy_to(payload, dest_mac); +} + +/* RALTA - Router Algorithmic LPM Tree Allocation Register + * ------------------------------------------------------- + * RALTA is used to allocate the LPM trees of the SHSPM method. 
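
A next hop is programmed by writing one adjacency entry through RATR; the LPM route entry (RALUE, further below) then refers to it by index. Hedged sketch chaining the two pack helpers above:

static int example_adjacency_write(struct mlxsw_core *core, u32 adj_index,
                                   u16 egress_rif, const char *dmac)
{
        char ratr_pl[MLXSW_REG_RATR_LEN];

        mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
                            true, adj_index, egress_rif);
        mlxsw_reg_ratr_eth_entry_pack(ratr_pl, dmac);
        return mlxsw_reg_write(core, MLXSW_REG(ratr), ratr_pl);
}
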
+ */ +#define MLXSW_REG_RALTA_ID 0x8010 +#define MLXSW_REG_RALTA_LEN 0x04 + +static const struct mlxsw_reg_info mlxsw_reg_ralta = { + .id = MLXSW_REG_RALTA_ID, + .len = MLXSW_REG_RALTA_LEN, +}; + +/* reg_ralta_op + * opcode (valid for Write, must be 0 on Read) + * 0 - allocate a tree + * 1 - deallocate a tree + * Access: OP + */ +MLXSW_ITEM32(reg, ralta, op, 0x00, 28, 2); + +enum mlxsw_reg_ralxx_protocol { + MLXSW_REG_RALXX_PROTOCOL_IPV4, + MLXSW_REG_RALXX_PROTOCOL_IPV6, +}; + +/* reg_ralta_protocol + * Protocol. + * Deallocation opcode: Reserved. + * Access: RW + */ +MLXSW_ITEM32(reg, ralta, protocol, 0x00, 24, 4); + +/* reg_ralta_tree_id + * An identifier (numbered from 1..cap_shspm_max_trees-1) representing + * the tree identifier (managed by software). + * Note that tree_id 0 is allocated for a default-route tree. + * Access: Index + */ +MLXSW_ITEM32(reg, ralta, tree_id, 0x00, 0, 8); + +static inline void mlxsw_reg_ralta_pack(char *payload, bool alloc, + enum mlxsw_reg_ralxx_protocol protocol, + u8 tree_id) +{ + MLXSW_REG_ZERO(ralta, payload); + mlxsw_reg_ralta_op_set(payload, !alloc); + mlxsw_reg_ralta_protocol_set(payload, protocol); + mlxsw_reg_ralta_tree_id_set(payload, tree_id); +} + +/* RALST - Router Algorithmic LPM Structure Tree Register + * ------------------------------------------------------ + * RALST is used to set and query the structure of an LPM tree. + * The structure of the tree must be sorted as a sorted binary tree, while + * each node is a bin that is tagged as the length of the prefixes the lookup + * will refer to. Therefore, bin X refers to a set of entries with prefixes + * of X bits to match with the destination address. The bin 0 indicates + * the default action, when there is no match of any prefix. + */ +#define MLXSW_REG_RALST_ID 0x8011 +#define MLXSW_REG_RALST_LEN 0x104 + +static const struct mlxsw_reg_info mlxsw_reg_ralst = { + .id = MLXSW_REG_RALST_ID, + .len = MLXSW_REG_RALST_LEN, +}; + +/* reg_ralst_root_bin + * The bin number of the root bin. + * 0<root_bin=<(length of IP address) + * For a default-route tree configure 0xff + * Access: RW + */ +MLXSW_ITEM32(reg, ralst, root_bin, 0x00, 16, 8); + +/* reg_ralst_tree_id + * Tree identifier numbered from 1..(cap_shspm_max_trees-1). + * Access: Index + */ +MLXSW_ITEM32(reg, ralst, tree_id, 0x00, 0, 8); + +#define MLXSW_REG_RALST_BIN_NO_CHILD 0xff +#define MLXSW_REG_RALST_BIN_OFFSET 0x04 +#define MLXSW_REG_RALST_BIN_COUNT 128 + +/* reg_ralst_left_child_bin + * Holding the children of the bin according to the stored tree's structure. + * For trees composed of less than 4 blocks, the bins in excess are reserved. + * Note that tree_id 0 is allocated for a default-route tree, bins are 0xff + * Access: RW + */ +MLXSW_ITEM16_INDEXED(reg, ralst, left_child_bin, 0x04, 8, 8, 0x02, 0x00, false); + +/* reg_ralst_right_child_bin + * Holding the children of the bin according to the stored tree's structure. + * For trees composed of less than 4 blocks, the bins in excess are reserved. 
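
Tree allocation itself is a one-dword RALTA write; deallocation is the same call with alloc == false, since the pack helper encodes the opcode as !alloc. A minimal sketch:

static int example_lpm_tree_alloc(struct mlxsw_core *core, u8 tree_id)
{
        char ralta_pl[MLXSW_REG_RALTA_LEN];

        mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
                             tree_id);
        return mlxsw_reg_write(core, MLXSW_REG(ralta), ralta_pl);
}
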
+ * Note that tree_id 0 is allocated for a default-route tree, bins are 0xff + * Access: RW + */ +MLXSW_ITEM16_INDEXED(reg, ralst, right_child_bin, 0x04, 0, 8, 0x02, 0x00, + false); + +static inline void mlxsw_reg_ralst_pack(char *payload, u8 root_bin, u8 tree_id) +{ + MLXSW_REG_ZERO(ralst, payload); + + /* Initialize all bins to have no left or right child */ + memset(payload + MLXSW_REG_RALST_BIN_OFFSET, + MLXSW_REG_RALST_BIN_NO_CHILD, MLXSW_REG_RALST_BIN_COUNT * 2); + + mlxsw_reg_ralst_root_bin_set(payload, root_bin); + mlxsw_reg_ralst_tree_id_set(payload, tree_id); +} + +static inline void mlxsw_reg_ralst_bin_pack(char *payload, u8 bin_number, + u8 left_child_bin, + u8 right_child_bin) +{ + int bin_index = bin_number - 1; + + mlxsw_reg_ralst_left_child_bin_set(payload, bin_index, left_child_bin); + mlxsw_reg_ralst_right_child_bin_set(payload, bin_index, + right_child_bin); +} + +/* RALTB - Router Algorithmic LPM Tree Binding Register + * ---------------------------------------------------- + * RALTB is used to bind virtual router and protocol to an allocated LPM tree. + */ +#define MLXSW_REG_RALTB_ID 0x8012 +#define MLXSW_REG_RALTB_LEN 0x04 + +static const struct mlxsw_reg_info mlxsw_reg_raltb = { + .id = MLXSW_REG_RALTB_ID, + .len = MLXSW_REG_RALTB_LEN, +}; + +/* reg_raltb_virtual_router + * Virtual Router ID + * Range is 0..cap_max_virtual_routers-1 + * Access: Index + */ +MLXSW_ITEM32(reg, raltb, virtual_router, 0x00, 16, 16); + +/* reg_raltb_protocol + * Protocol. + * Access: Index + */ +MLXSW_ITEM32(reg, raltb, protocol, 0x00, 12, 4); + +/* reg_raltb_tree_id + * Tree to be used for the {virtual_router, protocol} + * Tree identifier numbered from 1..(cap_shspm_max_trees-1). + * By default, all Unicast IPv4 and IPv6 are bound to tree_id 0. + * Access: RW + */ +MLXSW_ITEM32(reg, raltb, tree_id, 0x00, 0, 8); + +static inline void mlxsw_reg_raltb_pack(char *payload, u16 virtual_router, + enum mlxsw_reg_ralxx_protocol protocol, + u8 tree_id) +{ + MLXSW_REG_ZERO(raltb, payload); + mlxsw_reg_raltb_virtual_router_set(payload, virtual_router); + mlxsw_reg_raltb_protocol_set(payload, protocol); + mlxsw_reg_raltb_tree_id_set(payload, tree_id); +} + +/* RALUE - Router Algorithmic LPM Unicast Entry Register + * ----------------------------------------------------- + * RALUE is used to configure and query LPM entries that serve + * the Unicast protocols. + */ +#define MLXSW_REG_RALUE_ID 0x8013 +#define MLXSW_REG_RALUE_LEN 0x38 + +static const struct mlxsw_reg_info mlxsw_reg_ralue = { + .id = MLXSW_REG_RALUE_ID, + .len = MLXSW_REG_RALUE_LEN, +}; + +/* reg_ralue_protocol + * Protocol. + * Access: Index + */ +MLXSW_ITEM32(reg, ralue, protocol, 0x00, 24, 4); + +enum mlxsw_reg_ralue_op { + /* Read operation. If entry doesn't exist, the operation fails. */ + MLXSW_REG_RALUE_OP_QUERY_READ = 0, + /* Clear on read operation. Used to read entry and + * clear Activity bit. + */ + MLXSW_REG_RALUE_OP_QUERY_CLEAR = 1, + /* Write operation. Used to write a new entry to the table. All RW + * fields are written for new entry. Activity bit is set + * for new entries. + */ + MLXSW_REG_RALUE_OP_WRITE_WRITE = 0, + /* Update operation. Used to update an existing route entry and + * only update the RW fields that are detailed in the field + * op_u_mask. If entry doesn't exist, the operation fails. + */ + MLXSW_REG_RALUE_OP_WRITE_UPDATE = 1, + /* Clear activity. The Activity bit (the field a) is cleared + * for the entry. + */ + MLXSW_REG_RALUE_OP_WRITE_CLEAR = 2, + /* Delete operation. 
Used to delete an existing entry. If entry + * doesn't exist, the operation fails. + */ + MLXSW_REG_RALUE_OP_WRITE_DELETE = 3, +}; + +/* reg_ralue_op + * Operation. + * Access: OP + */ +MLXSW_ITEM32(reg, ralue, op, 0x00, 20, 3); + +/* reg_ralue_a + * Activity. Set for new entries. Set if a packet lookup has hit on the + * specific entry, only if the entry is a route. To clear the a bit, use + * "clear activity" op. + * Enabled by activity_dis in RGCR + * Access: RO + */ +MLXSW_ITEM32(reg, ralue, a, 0x00, 16, 1); + +/* reg_ralue_virtual_router + * Virtual Router ID + * Range is 0..cap_max_virtual_routers-1 + * Access: Index + */ +MLXSW_ITEM32(reg, ralue, virtual_router, 0x04, 16, 16); + +#define MLXSW_REG_RALUE_OP_U_MASK_ENTRY_TYPE BIT(0) +#define MLXSW_REG_RALUE_OP_U_MASK_BMP_LEN BIT(1) +#define MLXSW_REG_RALUE_OP_U_MASK_ACTION BIT(2) + +/* reg_ralue_op_u_mask + * opcode update mask. + * On read operation, this field is reserved. + * This field is valid for update opcode, otherwise - reserved. + * This field is a bitmask of the fields that should be updated. + * Access: WO + */ +MLXSW_ITEM32(reg, ralue, op_u_mask, 0x04, 8, 3); + +/* reg_ralue_prefix_len + * Number of bits in the prefix of the LPM route. + * Note that for IPv6 prefixes, if prefix_len>64 the entry consumes + * two entries in the physical HW table. + * Access: Index + */ +MLXSW_ITEM32(reg, ralue, prefix_len, 0x08, 0, 8); + +/* reg_ralue_dip* + * The prefix of the route or of the marker that the object of the LPM + * is compared with. The most significant bits of the dip are the prefix. + * The list significant bits must be '0' if the prefix_len is smaller + * than 128 for IPv6 or smaller than 32 for IPv4. + * IPv4 address uses bits dip[31:0] and bits dip[127:32] are reserved. + * Access: Index + */ +MLXSW_ITEM32(reg, ralue, dip4, 0x18, 0, 32); + +enum mlxsw_reg_ralue_entry_type { + MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_ENTRY = 1, + MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY = 2, + MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_AND_ROUTE_ENTRY = 3, +}; + +/* reg_ralue_entry_type + * Entry type. + * Note - for Marker entries, the action_type and action fields are reserved. + * Access: RW + */ +MLXSW_ITEM32(reg, ralue, entry_type, 0x1C, 30, 2); + +/* reg_ralue_bmp_len + * The best match prefix length in the case that there is no match for + * longer prefixes. + * If (entry_type != MARKER_ENTRY), bmp_len must be equal to prefix_len + * Note for any update operation with entry_type modification this + * field must be set. + * Access: RW + */ +MLXSW_ITEM32(reg, ralue, bmp_len, 0x1C, 16, 8); + +enum mlxsw_reg_ralue_action_type { + MLXSW_REG_RALUE_ACTION_TYPE_REMOTE, + MLXSW_REG_RALUE_ACTION_TYPE_LOCAL, + MLXSW_REG_RALUE_ACTION_TYPE_IP2ME, +}; + +/* reg_ralue_action_type + * Action Type + * Indicates how the IP address is connected. + * It can be connected to a local subnet through local_erif or can be + * on a remote subnet connected through a next-hop router, + * or transmitted to the CPU. + * Reserved when entry_type = MARKER_ENTRY + * Access: RW + */ +MLXSW_ITEM32(reg, ralue, action_type, 0x1C, 0, 2); + +enum mlxsw_reg_ralue_trap_action { + MLXSW_REG_RALUE_TRAP_ACTION_NOP, + MLXSW_REG_RALUE_TRAP_ACTION_TRAP, + MLXSW_REG_RALUE_TRAP_ACTION_MIRROR_TO_CPU, + MLXSW_REG_RALUE_TRAP_ACTION_MIRROR, + MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR, +}; + +/* reg_ralue_trap_action + * Trap action. + * For IP2ME action, only NOP and MIRROR are possible. 
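
Before any RALUE route can be written, the allocated tree needs a structure (RALST) and a binding to a virtual router (RALTB). A hedged sketch chaining the two pack helpers defined above, for the degenerate tree that holds only /32 host routes (root bin 32, no children):

static int example_lpm_tree_setup(struct mlxsw_core *core, u8 tree_id,
                                  u16 vr_id)
{
        char ralst_pl[MLXSW_REG_RALST_LEN];
        char raltb_pl[MLXSW_REG_RALTB_LEN];
        int err;

        mlxsw_reg_ralst_pack(ralst_pl, 32, tree_id);
        err = mlxsw_reg_write(core, MLXSW_REG(ralst), ralst_pl);
        if (err)
                return err;

        mlxsw_reg_raltb_pack(raltb_pl, vr_id, MLXSW_REG_RALXX_PROTOCOL_IPV4,
                             tree_id);
        return mlxsw_reg_write(core, MLXSW_REG(raltb), raltb_pl);
}

A realistic tree would add mlxsw_reg_ralst_bin_pack() calls to link bins for the other prefix lengths before issuing the RALST write.
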
+ * Access: RW + */ +MLXSW_ITEM32(reg, ralue, trap_action, 0x20, 28, 4); + +/* reg_ralue_trap_id + * Trap ID to be reported to CPU. + * Trap ID is RTR_INGRESS0 or RTR_INGRESS1. + * For trap_action of NOP, MIRROR and DISCARD_ERROR, trap_id is reserved. + * Access: RW + */ +MLXSW_ITEM32(reg, ralue, trap_id, 0x20, 0, 9); + +/* reg_ralue_adjacency_index + * Points to the first entry of the group-based ECMP. + * Only relevant in case of REMOTE action. + * Access: RW + */ +MLXSW_ITEM32(reg, ralue, adjacency_index, 0x24, 0, 24); + +/* reg_ralue_ecmp_size + * Amount of sequential entries starting + * from the adjacency_index (the number of ECMPs). + * The valid range is 1-64, 512, 1024, 2048 and 4096. + * Reserved when trap_action is TRAP or DISCARD_ERROR. + * Only relevant in case of REMOTE action. + * Access: RW + */ +MLXSW_ITEM32(reg, ralue, ecmp_size, 0x28, 0, 13); + +/* reg_ralue_local_erif + * Egress Router Interface. + * Only relevant in case of LOCAL action. + * Access: RW + */ +MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16); + +/* reg_ralue_v + * Valid bit for the tunnel_ptr field. + * If valid = 0 then trap to CPU as IP2ME trap ID. + * If valid = 1 and the packet format allows NVE or IPinIP tunnel + * decapsulation then tunnel decapsulation is done. + * If valid = 1 and packet format does not allow NVE or IPinIP tunnel + * decapsulation then trap as IP2ME trap ID. + * Only relevant in case of IP2ME action. + * Access: RW + */ +MLXSW_ITEM32(reg, ralue, v, 0x24, 31, 1); + +/* reg_ralue_tunnel_ptr + * Tunnel Pointer for NVE or IPinIP tunnel decapsulation. + * For Spectrum, pointer to KVD Linear. + * Only relevant in case of IP2ME action. + * Access: RW + */ +MLXSW_ITEM32(reg, ralue, tunnel_ptr, 0x24, 0, 24); + +static inline void mlxsw_reg_ralue_pack(char *payload, + enum mlxsw_reg_ralxx_protocol protocol, + enum mlxsw_reg_ralue_op op, + u16 virtual_router, u8 prefix_len) +{ + MLXSW_REG_ZERO(ralue, payload); + mlxsw_reg_ralue_protocol_set(payload, protocol); + mlxsw_reg_ralue_op_set(payload, op); + mlxsw_reg_ralue_virtual_router_set(payload, virtual_router); + mlxsw_reg_ralue_prefix_len_set(payload, prefix_len); + mlxsw_reg_ralue_entry_type_set(payload, + MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY); + mlxsw_reg_ralue_bmp_len_set(payload, prefix_len); +} + +static inline void mlxsw_reg_ralue_pack4(char *payload, + enum mlxsw_reg_ralxx_protocol protocol, + enum mlxsw_reg_ralue_op op, + u16 virtual_router, u8 prefix_len, + u32 dip) +{ + mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len); + mlxsw_reg_ralue_dip4_set(payload, dip); +} + +static inline void +mlxsw_reg_ralue_act_remote_pack(char *payload, + enum mlxsw_reg_ralue_trap_action trap_action, + u16 trap_id, u32 adjacency_index, u16 ecmp_size) +{ + mlxsw_reg_ralue_action_type_set(payload, + MLXSW_REG_RALUE_ACTION_TYPE_REMOTE); + mlxsw_reg_ralue_trap_action_set(payload, trap_action); + mlxsw_reg_ralue_trap_id_set(payload, trap_id); + mlxsw_reg_ralue_adjacency_index_set(payload, adjacency_index); + mlxsw_reg_ralue_ecmp_size_set(payload, ecmp_size); +} + +static inline void +mlxsw_reg_ralue_act_local_pack(char *payload, + enum mlxsw_reg_ralue_trap_action trap_action, + u16 trap_id, u16 local_erif) +{ + mlxsw_reg_ralue_action_type_set(payload, + MLXSW_REG_RALUE_ACTION_TYPE_LOCAL); + mlxsw_reg_ralue_trap_action_set(payload, trap_action); + mlxsw_reg_ralue_trap_id_set(payload, trap_id); + mlxsw_reg_ralue_local_erif_set(payload, local_erif); +} + +static inline void +mlxsw_reg_ralue_act_ip2me_pack(char *payload) +{ + 
mlxsw_reg_ralue_action_type_set(payload, + MLXSW_REG_RALUE_ACTION_TYPE_IP2ME); +} + +/* RAUHT - Router Algorithmic LPM Unicast Host Table Register + * ---------------------------------------------------------- + * The RAUHT register is used to configure and query the Unicast Host table in + * devices that implement the Algorithmic LPM. + */ +#define MLXSW_REG_RAUHT_ID 0x8014 +#define MLXSW_REG_RAUHT_LEN 0x74 + +static const struct mlxsw_reg_info mlxsw_reg_rauht = { + .id = MLXSW_REG_RAUHT_ID, + .len = MLXSW_REG_RAUHT_LEN, +}; + +enum mlxsw_reg_rauht_type { + MLXSW_REG_RAUHT_TYPE_IPV4, + MLXSW_REG_RAUHT_TYPE_IPV6, +}; + +/* reg_rauht_type + * Access: Index + */ +MLXSW_ITEM32(reg, rauht, type, 0x00, 24, 2); + +enum mlxsw_reg_rauht_op { + MLXSW_REG_RAUHT_OP_QUERY_READ = 0, + /* Read operation */ + MLXSW_REG_RAUHT_OP_QUERY_CLEAR_ON_READ = 1, + /* Clear on read operation. Used to read entry and clear + * activity bit. + */ + MLXSW_REG_RAUHT_OP_WRITE_ADD = 0, + /* Add. Used to write a new entry to the table. All R/W fields are + * relevant for new entry. Activity bit is set for new entries. + */ + MLXSW_REG_RAUHT_OP_WRITE_UPDATE = 1, + /* Update action. Used to update an existing route entry and + * only update the following fields: + * trap_action, trap_id, mac, counter_set_type, counter_index + */ + MLXSW_REG_RAUHT_OP_WRITE_CLEAR_ACTIVITY = 2, + /* Clear activity. A bit is cleared for the entry. */ + MLXSW_REG_RAUHT_OP_WRITE_DELETE = 3, + /* Delete entry */ + MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL = 4, + /* Delete all host entries on a RIF. In this command, dip + * field is reserved. + */ +}; + +/* reg_rauht_op + * Access: OP + */ +MLXSW_ITEM32(reg, rauht, op, 0x00, 20, 3); + +/* reg_rauht_a + * Activity. Set for new entries. Set if a packet lookup has hit on + * the specific entry. + * To clear the a bit, use "clear activity" op. + * Enabled by activity_dis in RGCR + * Access: RO + */ +MLXSW_ITEM32(reg, rauht, a, 0x00, 16, 1); + +/* reg_rauht_rif + * Router Interface + * Access: Index + */ +MLXSW_ITEM32(reg, rauht, rif, 0x00, 0, 16); + +/* reg_rauht_dip* + * Destination address. + * Access: Index + */ +MLXSW_ITEM32(reg, rauht, dip4, 0x1C, 0x0, 32); + +enum mlxsw_reg_rauht_trap_action { + MLXSW_REG_RAUHT_TRAP_ACTION_NOP, + MLXSW_REG_RAUHT_TRAP_ACTION_TRAP, + MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR_TO_CPU, + MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR, + MLXSW_REG_RAUHT_TRAP_ACTION_DISCARD_ERRORS, +}; + +/* reg_rauht_trap_action + * Access: RW + */ +MLXSW_ITEM32(reg, rauht, trap_action, 0x60, 28, 4); + +enum mlxsw_reg_rauht_trap_id { + MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS0, + MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS1, +}; + +/* reg_rauht_trap_id + * Trap ID to be reported to CPU. + * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1. + * For trap_action of NOP, MIRROR and DISCARD_ERROR, + * trap_id is reserved. + * Access: RW + */ +MLXSW_ITEM32(reg, rauht, trap_id, 0x60, 0, 9); + +/* reg_rauht_counter_set_type + * Counter set type for flow counters + * Access: RW + */ +MLXSW_ITEM32(reg, rauht, counter_set_type, 0x68, 24, 8); + +/* reg_rauht_counter_index + * Counter index for flow counters + * Access: RW + */ +MLXSW_ITEM32(reg, rauht, counter_index, 0x68, 0, 24); + +/* reg_rauht_mac + * MAC address. 
+ * Access: RW + */ +MLXSW_ITEM_BUF(reg, rauht, mac, 0x6E, 6); + +static inline void mlxsw_reg_rauht_pack(char *payload, + enum mlxsw_reg_rauht_op op, u16 rif, + const char *mac) +{ + MLXSW_REG_ZERO(rauht, payload); + mlxsw_reg_rauht_op_set(payload, op); + mlxsw_reg_rauht_rif_set(payload, rif); + mlxsw_reg_rauht_mac_memcpy_to(payload, mac); +} + +static inline void mlxsw_reg_rauht_pack4(char *payload, + enum mlxsw_reg_rauht_op op, u16 rif, + const char *mac, u32 dip) +{ + mlxsw_reg_rauht_pack(payload, op, rif, mac); + mlxsw_reg_rauht_dip4_set(payload, dip); +} + +/* RALEU - Router Algorithmic LPM ECMP Update Register + * --------------------------------------------------- + * The register enables updating the ECMP section in the action for multiple + * LPM Unicast entries in a single operation. The update is executed to + * all entries of a {virtual router, protocol} tuple using the same ECMP group. + */ +#define MLXSW_REG_RALEU_ID 0x8015 +#define MLXSW_REG_RALEU_LEN 0x28 + +static const struct mlxsw_reg_info mlxsw_reg_raleu = { + .id = MLXSW_REG_RALEU_ID, + .len = MLXSW_REG_RALEU_LEN, +}; + +/* reg_raleu_protocol + * Protocol. + * Access: Index + */ +MLXSW_ITEM32(reg, raleu, protocol, 0x00, 24, 4); + +/* reg_raleu_virtual_router + * Virtual Router ID + * Range is 0..cap_max_virtual_routers-1 + * Access: Index + */ +MLXSW_ITEM32(reg, raleu, virtual_router, 0x00, 0, 16); + +/* reg_raleu_adjacency_index + * Adjacency Index used for matching on the existing entries. + * Access: Index + */ +MLXSW_ITEM32(reg, raleu, adjacency_index, 0x10, 0, 24); + +/* reg_raleu_ecmp_size + * ECMP Size used for matching on the existing entries. + * Access: Index + */ +MLXSW_ITEM32(reg, raleu, ecmp_size, 0x14, 0, 13); + +/* reg_raleu_new_adjacency_index + * New Adjacency Index. + * Access: WO + */ +MLXSW_ITEM32(reg, raleu, new_adjacency_index, 0x20, 0, 24); + +/* reg_raleu_new_ecmp_size + * New ECMP Size. + * Access: WO + */ +MLXSW_ITEM32(reg, raleu, new_ecmp_size, 0x24, 0, 13); + +static inline void mlxsw_reg_raleu_pack(char *payload, + enum mlxsw_reg_ralxx_protocol protocol, + u16 virtual_router, + u32 adjacency_index, u16 ecmp_size, + u32 new_adjacency_index, + u16 new_ecmp_size) +{ + MLXSW_REG_ZERO(raleu, payload); + mlxsw_reg_raleu_protocol_set(payload, protocol); + mlxsw_reg_raleu_virtual_router_set(payload, virtual_router); + mlxsw_reg_raleu_adjacency_index_set(payload, adjacency_index); + mlxsw_reg_raleu_ecmp_size_set(payload, ecmp_size); + mlxsw_reg_raleu_new_adjacency_index_set(payload, new_adjacency_index); + mlxsw_reg_raleu_new_ecmp_size_set(payload, new_ecmp_size); +} + +/* RAUHTD - Router Algorithmic LPM Unicast Host Table Dump Register + * ---------------------------------------------------------------- + * The RAUHTD register allows dumping entries from the Router Unicast Host + * Table. For a given session an entry is dumped no more than one time. The + * first RAUHTD access after reset is a new session. A session ends when the + * num_rec response is smaller than num_rec request or for IPv4 when the + * num_entries is smaller than 4. The clear activity affect the current session + * or the last session if a new session has not started. 
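
Host (neighbour) entries are written through RAUHT with the pack helpers above; update and delete reuse the same layout with a different opcode. Hedged sketch for adding a resolved IPv4 neighbour (rif, mac and dip are caller-supplied placeholders):

static int example_neigh4_add(struct mlxsw_core *core, u16 rif,
                              const char *mac, u32 dip)
{
        char rauht_pl[MLXSW_REG_RAUHT_LEN];

        mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD,
                              rif, mac, dip);
        return mlxsw_reg_write(core, MLXSW_REG(rauht), rauht_pl);
}
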
+#define MLXSW_REG_RAUHTD_ID 0x8018 +#define MLXSW_REG_RAUHTD_BASE_LEN 0x20 +#define MLXSW_REG_RAUHTD_REC_LEN 0x20 +#define MLXSW_REG_RAUHTD_REC_MAX_NUM 32 +#define MLXSW_REG_RAUHTD_LEN (MLXSW_REG_RAUHTD_BASE_LEN + \ + MLXSW_REG_RAUHTD_REC_MAX_NUM * MLXSW_REG_RAUHTD_REC_LEN) +#define MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC 4 + +static const struct mlxsw_reg_info mlxsw_reg_rauhtd = { + .id = MLXSW_REG_RAUHTD_ID, + .len = MLXSW_REG_RAUHTD_LEN, +}; + +#define MLXSW_REG_RAUHTD_FILTER_A BIT(0) +#define MLXSW_REG_RAUHTD_FILTER_RIF BIT(3) + +/* reg_rauhtd_filter_fields + * if a bit is '0' then the relevant field is ignored and dump is done + * regardless of the field value + * Bit0 - filter by activity: entry_a + * Bit3 - filter by entry rif: entry_rif + * Access: Index + */ +MLXSW_ITEM32(reg, rauhtd, filter_fields, 0x00, 0, 8); + +enum mlxsw_reg_rauhtd_op { + MLXSW_REG_RAUHTD_OP_DUMP, + MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR, +}; + +/* reg_rauhtd_op + * Access: OP + */ +MLXSW_ITEM32(reg, rauhtd, op, 0x04, 24, 2); + +/* reg_rauhtd_num_rec + * At request: number of records requested + * At response: number of records dumped + * For IPv4, each record has 4 entries at request and up to 4 entries + * at response + * Range is 0..MLXSW_REG_RAUHTD_REC_MAX_NUM + * Access: Index + */ +MLXSW_ITEM32(reg, rauhtd, num_rec, 0x04, 0, 8); + +/* reg_rauhtd_entry_a + * Dump only if activity has value of entry_a + * Reserved if filter_fields bit0 is '0' + * Access: Index + */ +MLXSW_ITEM32(reg, rauhtd, entry_a, 0x08, 16, 1); + +enum mlxsw_reg_rauhtd_type { + MLXSW_REG_RAUHTD_TYPE_IPV4, + MLXSW_REG_RAUHTD_TYPE_IPV6, +}; + +/* reg_rauhtd_type + * Dump only if record type is: + * 0 - IPv4 + * 1 - IPv6 + * Access: Index + */ +MLXSW_ITEM32(reg, rauhtd, type, 0x08, 0, 4); + +/* reg_rauhtd_entry_rif + * Dump only if RIF has value of entry_rif + * Reserved if filter_fields bit3 is '0' + * Access: Index + */ +MLXSW_ITEM32(reg, rauhtd, entry_rif, 0x0C, 0, 16); + +static inline void mlxsw_reg_rauhtd_pack(char *payload, + enum mlxsw_reg_rauhtd_type type) +{ + MLXSW_REG_ZERO(rauhtd, payload); + mlxsw_reg_rauhtd_filter_fields_set(payload, MLXSW_REG_RAUHTD_FILTER_A); + mlxsw_reg_rauhtd_op_set(payload, MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR); + mlxsw_reg_rauhtd_num_rec_set(payload, MLXSW_REG_RAUHTD_REC_MAX_NUM); + mlxsw_reg_rauhtd_entry_a_set(payload, 1); + mlxsw_reg_rauhtd_type_set(payload, type); +} + +/* reg_rauhtd_ipv4_rec_num_entries + * Number of valid entries in this record: + * 0 - 1 valid entry + * 1 - 2 valid entries + * 2 - 3 valid entries + * 3 - 4 valid entries + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_rec_num_entries, + MLXSW_REG_RAUHTD_BASE_LEN, 28, 2, + MLXSW_REG_RAUHTD_REC_LEN, 0x00, false); + +/* reg_rauhtd_rec_type + * Record type. + * 0 - IPv4 + * 1 - IPv6 + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, rauhtd, rec_type, MLXSW_REG_RAUHTD_BASE_LEN, 24, 2, + MLXSW_REG_RAUHTD_REC_LEN, 0x00, false); + +#define MLXSW_REG_RAUHTD_IPV4_ENT_LEN 0x8 + +/* reg_rauhtd_ipv4_ent_a + * Activity. Set for new entries. Set if a packet lookup has hit on the + * specific entry. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_a, MLXSW_REG_RAUHTD_BASE_LEN, 16, 1, + MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false); + +/* reg_rauhtd_ipv4_ent_rif + * Router interface. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_rif, MLXSW_REG_RAUHTD_BASE_LEN, 0, + 16, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false); + +/* reg_rauhtd_ipv4_ent_dip + * Destination IPv4 address.
+ * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_dip, MLXSW_REG_RAUHTD_BASE_LEN, 0, + 32, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x04, false); + +static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload, + int ent_index, u16 *p_rif, + u32 *p_dip) +{ + *p_rif = mlxsw_reg_rauhtd_ipv4_ent_rif_get(payload, ent_index); + *p_dip = mlxsw_reg_rauhtd_ipv4_ent_dip_get(payload, ent_index); +} + /* MFCR - Management Fan Control Register * -------------------------------------- * This register controls the settings of the Fan Speed PWM mechanism. @@ -3435,6 +4644,123 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp, mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name); } +/* MPAT - Monitoring Port Analyzer Table + * ------------------------------------- + * MPAT Register is used to query and configure the Switch Port Analyzer Table. + * For an enabled analyzer, all fields except e (enable) cannot be modified. + */ +#define MLXSW_REG_MPAT_ID 0x901A +#define MLXSW_REG_MPAT_LEN 0x78 + +static const struct mlxsw_reg_info mlxsw_reg_mpat = { + .id = MLXSW_REG_MPAT_ID, + .len = MLXSW_REG_MPAT_LEN, +}; + +/* reg_mpat_pa_id + * Port Analyzer ID. + * Access: Index + */ +MLXSW_ITEM32(reg, mpat, pa_id, 0x00, 28, 4); + +/* reg_mpat_system_port + * A unique port identifier for the final destination of the packet. + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, system_port, 0x00, 0, 16); + +/* reg_mpat_e + * Enable. Indicating the Port Analyzer is enabled. + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, e, 0x04, 31, 1); + +/* reg_mpat_qos + * Quality Of Service Mode. + * 0: CONFIGURED - QoS parameters (Switch Priority, and encapsulation + * PCP, DEI, DSCP or VL) are configured. + * 1: MAINTAIN - QoS parameters (Switch Priority, Color) are the + * same as in the original packet that has triggered the mirroring. For + * SPAN also the pcp,dei are maintained. + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, qos, 0x04, 26, 1); + +/* reg_mpat_be + * Best effort mode. Indicates mirroring traffic should not cause packet + * drop or back pressure, but will discard the mirrored packets. Mirrored + * packets will be forwarded in a best effort manner. + * 0: Do not discard mirrored packets + * 1: Discard mirrored packets if causing congestion + * Access: RW + */ +MLXSW_ITEM32(reg, mpat, be, 0x04, 25, 1); + +static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id, + u16 system_port, bool e) +{ + MLXSW_REG_ZERO(mpat, payload); + mlxsw_reg_mpat_pa_id_set(payload, pa_id); + mlxsw_reg_mpat_system_port_set(payload, system_port); + mlxsw_reg_mpat_e_set(payload, e); + mlxsw_reg_mpat_qos_set(payload, 1); + mlxsw_reg_mpat_be_set(payload, 1); +} + +/* MPAR - Monitoring Port Analyzer Register + * ---------------------------------------- + * MPAR register is used to query and configure the port analyzer port mirroring + * properties. + */ +#define MLXSW_REG_MPAR_ID 0x901B +#define MLXSW_REG_MPAR_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_mpar = { + .id = MLXSW_REG_MPAR_ID, + .len = MLXSW_REG_MPAR_LEN, +}; + +/* reg_mpar_local_port + * The local port to mirror the packets from. + * Access: Index + */ +MLXSW_ITEM32(reg, mpar, local_port, 0x00, 16, 8); + +enum mlxsw_reg_mpar_i_e { + MLXSW_REG_MPAR_TYPE_EGRESS, + MLXSW_REG_MPAR_TYPE_INGRESS, +}; + +/* reg_mpar_i_e + * Ingress/Egress + * Access: Index + */ +MLXSW_ITEM32(reg, mpar, i_e, 0x00, 0, 4); + +/* reg_mpar_enable + * Enable mirroring + * By default, port mirroring is disabled for all ports.
+ * Access: RW + */ +MLXSW_ITEM32(reg, mpar, enable, 0x04, 31, 1); + +/* reg_mpar_pa_id + * Port Analyzer ID. + * Access: RW + */ +MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4); + +static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port, + enum mlxsw_reg_mpar_i_e i_e, + bool enable, u8 pa_id) +{ + MLXSW_REG_ZERO(mpar, payload); + mlxsw_reg_mpar_local_port_set(payload, local_port); + mlxsw_reg_mpar_enable_set(payload, enable); + mlxsw_reg_mpar_i_e_set(payload, i_e); + mlxsw_reg_mpar_pa_id_set(payload, pa_id); +} + /* MLCR - Management LED Control Register * -------------------------------------- * Controls the system LEDs. @@ -3864,6 +5190,45 @@ static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index, mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index); } +/* SBIB - Shared Buffer Internal Buffer Register + * --------------------------------------------- + * The SBIB register configures per port buffers for internal use. The internal + * buffers consume memory on the port buffers (note that the port buffers are + * used also by PBMC). + * + * For Spectrum this is used for egress mirroring. + */ +#define MLXSW_REG_SBIB_ID 0xB006 +#define MLXSW_REG_SBIB_LEN 0x10 + +static const struct mlxsw_reg_info mlxsw_reg_sbib = { + .id = MLXSW_REG_SBIB_ID, + .len = MLXSW_REG_SBIB_LEN, +}; + +/* reg_sbib_local_port + * Local port number + * Not supported for CPU port and router port + * Access: Index + */ +MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8); + +/* reg_sbib_buff_size + * Units represented in cells + * Allowed range is 0 to (cap_max_headroom_size - 1) + * Default is 0 + * Access: RW + */ +MLXSW_ITEM32(reg, sbib, buff_size, 0x08, 0, 24); + +static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port, + u32 buff_size) +{ + MLXSW_REG_ZERO(sbib, payload); + mlxsw_reg_sbib_local_port_set(payload, local_port); + mlxsw_reg_sbib_buff_size_set(payload, buff_size); +} + static inline const char *mlxsw_reg_id_str(u16 reg_id) { switch (reg_id) { @@ -3939,6 +5304,26 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "HTGT"; case MLXSW_REG_HPKT_ID: return "HPKT"; + case MLXSW_REG_RGCR_ID: + return "RGCR"; + case MLXSW_REG_RITR_ID: + return "RITR"; + case MLXSW_REG_RATR_ID: + return "RATR"; + case MLXSW_REG_RALTA_ID: + return "RALTA"; + case MLXSW_REG_RALST_ID: + return "RALST"; + case MLXSW_REG_RALTB_ID: + return "RALTB"; + case MLXSW_REG_RALUE_ID: + return "RALUE"; + case MLXSW_REG_RAUHT_ID: + return "RAUHT"; + case MLXSW_REG_RALEU_ID: + return "RALEU"; + case MLXSW_REG_RAUHTD_ID: + return "RAUHTD"; case MLXSW_REG_MFCR_ID: return "MFCR"; case MLXSW_REG_MFSC_ID: @@ -3947,6 +5332,10 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "MFSM"; case MLXSW_REG_MTCAP_ID: return "MTCAP"; + case MLXSW_REG_MPAT_ID: + return "MPAT"; + case MLXSW_REG_MPAR_ID: + return "MPAR"; case MLXSW_REG_MTMP_ID: return "MTMP"; case MLXSW_REG_MLCR_ID: @@ -3961,6 +5350,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SBMM"; case MLXSW_REG_SBSR_ID: return "SBSR"; + case MLXSW_REG_SBIB_ID: + return "SBIB"; default: return "*UNKNOWN*"; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 374080027..d48873bcb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -49,9 +49,14 @@ #include <linux/jiffies.h> #include <linux/bitops.h> #include <linux/list.h> +#include <linux/notifier.h> #include <linux/dcbnl.h> +#include 
<linux/inetdevice.h> #include <net/switchdev.h> #include <generated/utsrelease.h> +#include <net/pkt_cls.h> +#include <net/tc_act/tc_mirred.h> +#include <net/netevent.h> #include "spectrum.h" #include "core.h" @@ -131,6 +136,8 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16); */ MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4); +static bool mlxsw_sp_port_dev_check(const struct net_device *dev); + static void mlxsw_sp_txhdr_construct(struct sk_buff *skb, const struct mlxsw_tx_info *tx_info) { @@ -159,6 +166,303 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp) return 0; } +static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_resources *resources; + int i; + + resources = mlxsw_core_resources_get(mlxsw_sp->core); + if (!resources->max_span_valid) + return -EIO; + + mlxsw_sp->span.entries_count = resources->max_span; + mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count, + sizeof(struct mlxsw_sp_span_entry), + GFP_KERNEL); + if (!mlxsw_sp->span.entries) + return -ENOMEM; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) + INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list); + + return 0; +} + +static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp) +{ + int i; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + + WARN_ON_ONCE(!list_empty(&curr->bound_ports_list)); + } + kfree(mlxsw_sp->span.entries); +} + +static struct mlxsw_sp_span_entry * +mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port) +{ + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + struct mlxsw_sp_span_entry *span_entry; + char mpat_pl[MLXSW_REG_MPAT_LEN]; + u8 local_port = port->local_port; + int index; + int i; + int err; + + /* find a free entry to use */ + index = -1; + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + if (!mlxsw_sp->span.entries[i].used) { + index = i; + span_entry = &mlxsw_sp->span.entries[i]; + break; + } + } + if (index < 0) + return NULL; + + /* create a new port analyzer entry for local_port */ + mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); + if (err) + return NULL; + + span_entry->used = true; + span_entry->id = index; + span_entry->ref_count = 0; + span_entry->local_port = local_port; + return span_entry; +} + +static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_span_entry *span_entry) +{ + u8 local_port = span_entry->local_port; + char mpat_pl[MLXSW_REG_MPAT_LEN]; + int pa_id = span_entry->id; + + mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl); + span_entry->used = false; +} + +struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port) +{ + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + int i; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + + if (curr->used && curr->local_port == port->local_port) + return curr; + } + return NULL; + } + +struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port) +{ + struct mlxsw_sp_span_entry *span_entry; + + span_entry = mlxsw_sp_span_entry_find(port); + if (span_entry) { + span_entry->ref_count++; + return span_entry; + } + + return mlxsw_sp_span_entry_create(port); +} + +static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_span_entry *span_entry) +{ + if (--span_entry->ref_count == 0) + mlxsw_sp_span_entry_destroy(mlxsw_sp,
span_entry); + return 0; +} + +static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port) +{ + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + struct mlxsw_sp_span_inspected_port *p; + int i; + + for (i = 0; i < mlxsw_sp->span.entries_count; i++) { + struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i]; + + list_for_each_entry(p, &curr->bound_ports_list, list) + if (p->local_port == port->local_port && + p->type == MLXSW_SP_SPAN_EGRESS) + return true; + } + + return false; +} + +static int mlxsw_sp_span_mtu_to_buffsize(int mtu) +{ + return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1; +} + +static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu) +{ + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + char sbib_pl[MLXSW_REG_SBIB_LEN]; + int err; + + /* If port is egress mirrored, the shared buffer size should be + * updated according to the mtu value + */ + if (mlxsw_sp_span_is_egress_mirror(port)) { + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, + mlxsw_sp_span_mtu_to_buffsize(mtu)); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + if (err) { + netdev_err(port->dev, "Could not update shared buffer for mirroring\n"); + return err; + } + } + + return 0; +} + +static struct mlxsw_sp_span_inspected_port * +mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port, + struct mlxsw_sp_span_entry *span_entry) +{ + struct mlxsw_sp_span_inspected_port *p; + + list_for_each_entry(p, &span_entry->bound_ports_list, list) + if (port->local_port == p->local_port) + return p; + return NULL; +} + +static int +mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port, + struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_sp_span_type type) +{ + struct mlxsw_sp_span_inspected_port *inspected_port; + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + char mpar_pl[MLXSW_REG_MPAR_LEN]; + char sbib_pl[MLXSW_REG_SBIB_LEN]; + int pa_id = span_entry->id; + int err; + + /* if it is an egress SPAN, bind a shared buffer to it */ + if (type == MLXSW_SP_SPAN_EGRESS) { + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, + mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu)); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + if (err) { + netdev_err(port->dev, "Could not create shared buffer for mirroring\n"); + return err; + } + } + + /* bind the port to the SPAN entry */ + mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, true, pa_id); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl); + if (err) + goto err_mpar_reg_write; + + inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL); + if (!inspected_port) { + err = -ENOMEM; + goto err_inspected_port_alloc; + } + inspected_port->local_port = port->local_port; + inspected_port->type = type; + list_add_tail(&inspected_port->list, &span_entry->bound_ports_list); + + return 0; + +err_mpar_reg_write: +err_inspected_port_alloc: + if (type == MLXSW_SP_SPAN_EGRESS) { + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + } + return err; +} + +static void +mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port, + struct mlxsw_sp_span_entry *span_entry, + enum mlxsw_sp_span_type type) +{ + struct mlxsw_sp_span_inspected_port *inspected_port; + struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; + char mpar_pl[MLXSW_REG_MPAR_LEN]; + char sbib_pl[MLXSW_REG_SBIB_LEN]; + int pa_id = span_entry->id; + + inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry); + if (!inspected_port) + return; + + /* remove the inspected 
port */ + mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, false, pa_id); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl); + + /* remove the SBIB buffer if it was egress SPAN */ + if (type == MLXSW_SP_SPAN_EGRESS) { + mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl); + } + + mlxsw_sp_span_entry_put(mlxsw_sp, span_entry); + + list_del(&inspected_port->list); + kfree(inspected_port); +} + +static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from, + struct mlxsw_sp_port *to, + enum mlxsw_sp_span_type type) +{ + struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp; + struct mlxsw_sp_span_entry *span_entry; + int err; + + span_entry = mlxsw_sp_span_entry_get(to); + if (!span_entry) + return -ENOENT; + + netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n", + span_entry->id); + + err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type); + if (err) + goto err_port_bind; + + return 0; + +err_port_bind: + mlxsw_sp_span_entry_put(mlxsw_sp, span_entry); + return err; +} + +static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from, + struct mlxsw_sp_port *to, + enum mlxsw_sp_span_type type) +{ + struct mlxsw_sp_span_entry *span_entry; + + span_entry = mlxsw_sp_span_entry_find(to); + if (!span_entry) { + netdev_err(from->dev, "no span entry found\n"); + return; + } + + netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n", + span_entry->id); + mlxsw_sp_span_inspected_port_unbind(from, span_entry, type); +} + static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port, bool is_up) { @@ -192,23 +496,6 @@ static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr); } -static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, - u16 vid, enum mlxsw_reg_spms_state state) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char *spms_pl; - int err; - - spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); - if (!spms_pl) - return -ENOMEM; - mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port); - mlxsw_reg_spms_vid_pack(spms_pl, vid, state); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl); - kfree(spms_pl); - return err; -} - static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; @@ -508,6 +795,9 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); if (err) return err; + err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu); + if (err) + goto err_span_port_mtu_update; err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); if (err) goto err_port_mtu_set; @@ -515,6 +805,8 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) return 0; err_port_mtu_set: + mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu); +err_span_port_mtu_update: mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); return err; } @@ -619,94 +911,8 @@ static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) return 0; } -static struct mlxsw_sp_vfid * -mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid) -{ - struct mlxsw_sp_vfid *vfid; - - list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) { - if (vfid->vid == vid) - return vfid; - } - - return NULL; -} - -static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp) -{ - return 
find_first_zero_bit(mlxsw_sp->port_vfids.mapped, - MLXSW_SP_VFID_PORT_MAX); -} - -static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid) -{ - u16 fid = mlxsw_sp_vfid_to_fid(vfid); - char sfmr_pl[MLXSW_REG_SFMR_LEN]; - - mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); -} - -static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid) -{ - u16 fid = mlxsw_sp_vfid_to_fid(vfid); - char sfmr_pl[MLXSW_REG_SFMR_LEN]; - - mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); -} - -static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, - u16 vid) -{ - struct device *dev = mlxsw_sp->bus_info->dev; - struct mlxsw_sp_vfid *vfid; - u16 n_vfid; - int err; - - n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp); - if (n_vfid == MLXSW_SP_VFID_PORT_MAX) { - dev_err(dev, "No available vFIDs\n"); - return ERR_PTR(-ERANGE); - } - - err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); - if (err) { - dev_err(dev, "Failed to create vFID=%d\n", n_vfid); - return ERR_PTR(err); - } - - vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); - if (!vfid) - goto err_allocate_vfid; - - vfid->vfid = n_vfid; - vfid->vid = vid; - - list_add(&vfid->list, &mlxsw_sp->port_vfids.list); - set_bit(n_vfid, mlxsw_sp->port_vfids.mapped); - - return vfid; - -err_allocate_vfid: - __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); - return ERR_PTR(-ENOMEM); -} - -static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_vfid *vfid) -{ - clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped); - list_del(&vfid->list); - - __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); - - kfree(vfid); -} - static struct mlxsw_sp_port * -mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_vfid *vfid) +mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_vport; @@ -724,8 +930,7 @@ mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING; mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged; mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id; - mlxsw_sp_vport->vport.vfid = vfid; - mlxsw_sp_vport->vport.vid = vfid->vid; + mlxsw_sp_vport->vport.vid = vid; list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list); @@ -738,13 +943,12 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport) kfree(mlxsw_sp_vport); } -int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, - u16 vid) +static int mlxsw_sp_port_add_vid(struct net_device *dev, + __be16 __always_unused proto, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_port *mlxsw_sp_vport; - struct mlxsw_sp_vfid *vfid; + bool untagged = vid == 1; int err; /* VLAN 0 is added to HW filter when device goes up, but it is @@ -753,37 +957,12 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, if (!vid) return 0; - if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) { - netdev_warn(dev, "VID=%d already configured\n", vid); + if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) return 0; - } - - vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); - if (!vfid) { - vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); - if (IS_ERR(vfid)) { - netdev_err(dev, "Failed to create vFID for VID=%d\n", - vid); - return PTR_ERR(vfid); - } - } - - mlxsw_sp_vport 
= mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid); - if (!mlxsw_sp_vport) { - netdev_err(dev, "Failed to create vPort for VID=%d\n", vid); - err = -ENOMEM; - goto err_port_vport_create; - } - if (!vfid->nr_vports) { - err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, - true, false); - if (err) { - netdev_err(dev, "Failed to setup flooding for vFID=%d\n", - vfid->vfid); - goto err_vport_flood_set; - } - } + mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid); + if (!mlxsw_sp_vport) + return -ENOMEM; /* When adding the first VLAN interface on a bridged port we need to * transition all the active 802.1Q bridge VLANs to use explicit @@ -791,77 +970,36 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, */ if (list_is_singular(&mlxsw_sp_port->vports_list)) { err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); - if (err) { - netdev_err(dev, "Failed to set to Virtual mode\n"); + if (err) goto err_port_vp_mode_trans; - } - } - - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n", - vid, vfid->vfid); - goto err_port_vid_to_fid_set; } err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); - if (err) { - netdev_err(dev, "Failed to disable learning for VID=%d\n", vid); + if (err) goto err_port_vid_learning_set; - } - err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false); - if (err) { - netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", - vid); + err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged); + if (err) goto err_port_add_vid; - } - - err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, - MLXSW_REG_SPMS_STATE_FORWARDING); - if (err) { - netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); - goto err_port_stp_state_set; - } - - vfid->nr_vports++; return 0; -err_port_stp_state_set: - mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); err_port_add_vid: mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); err_port_vid_learning_set: - mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, - mlxsw_sp_vfid_to_fid(vfid->vfid), vid); -err_port_vid_to_fid_set: if (list_is_singular(&mlxsw_sp_port->vports_list)) mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); err_port_vp_mode_trans: - if (!vfid->nr_vports) - mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, - false); -err_vport_flood_set: mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); -err_port_vport_create: - if (!vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp, vfid); return err; } -int mlxsw_sp_port_kill_vid(struct net_device *dev, - __be16 __always_unused proto, u16 vid) +static int mlxsw_sp_port_kill_vid(struct net_device *dev, + __be16 __always_unused proto, u16 vid) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp_port *mlxsw_sp_vport; - struct mlxsw_sp_vfid *vfid; - int err; + struct mlxsw_sp_fid *f; /* VLAN 0 is removed from HW filter when device goes down, but * it is reserved in our case, so simply return. 
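The reworked add_vid path above also illustrates the error-handling convention used throughout this driver: each step that succeeded gets its own label, and failures unwind in exact reverse order of setup. A minimal standalone sketch of that shape, with hypothetical step_*/undo_* placeholders rather than driver functions:

/* Hypothetical placeholders standing in for real setup/teardown steps */
static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return 0; }
static void undo_a(void) { }
static void undo_b(void) { }

static int example_setup(void)
{
	int err;

	err = step_a();		/* e.g. transition port to Virtual mode */
	if (err)
		return err;
	err = step_b();		/* e.g. disable learning for the VID */
	if (err)
		goto err_step_b;
	err = step_c();		/* e.g. set VLAN membership */
	if (err)
		goto err_step_c;
	return 0;

err_step_c:			/* step_c failed: undo b, then a */
	undo_b();
err_step_b:			/* step_b failed: only a needs undoing */
	undo_a();
	return err;
}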
@@ -870,63 +1008,29 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, return 0; mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); - if (!mlxsw_sp_vport) { - netdev_warn(dev, "VID=%d does not exist\n", vid); + if (WARN_ON(!mlxsw_sp_vport)) return 0; - } - - vfid = mlxsw_sp_vport->vport.vfid; - - err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, - MLXSW_REG_SPMS_STATE_DISCARDING); - if (err) { - netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); - return err; - } - err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); - if (err) { - netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", - vid); - return err; - } + mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false); - err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); - if (err) { - netdev_err(dev, "Failed to enable learning for VID=%d\n", vid); - return err; - } + mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - false, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n", - vid, vfid->vfid); - return err; - } + /* Drop FID reference. If this was the last reference the + * resources will be freed. + */ + f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + if (f && !WARN_ON(!f->leave)) + f->leave(mlxsw_sp_vport); /* When removing the last VLAN interface on a bridged port we need to * transition all active 802.1Q bridge VLANs to use VID to FID * mappings and set port's mode to VLAN mode. */ - if (list_is_singular(&mlxsw_sp_port->vports_list)) { - err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); - if (err) { - netdev_err(dev, "Failed to set to VLAN mode\n"); - return err; - } - } + if (list_is_singular(&mlxsw_sp_port->vports_list)) + mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); - vfid->nr_vports--; mlxsw_sp_port_vport_destroy(mlxsw_sp_vport); - /* Destroy the vFID if no vPorts are assigned to it anymore. 
*/ - if (!vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid); - return 0; } @@ -951,16 +1055,164 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name, return 0; } +static struct mlxsw_sp_port_mall_tc_entry * +mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port, + unsigned long cookie) { + struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; + + list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list) + if (mall_tc_entry->cookie == cookie) + return mall_tc_entry; + + return NULL; +} + +static int +mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_cls_matchall_offload *cls, + const struct tc_action *a, + bool ingress) +{ + struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; + struct net *net = dev_net(mlxsw_sp_port->dev); + enum mlxsw_sp_span_type span_type; + struct mlxsw_sp_port *to_port; + struct net_device *to_dev; + int ifindex; + int err; + + ifindex = tcf_mirred_ifindex(a); + to_dev = __dev_get_by_index(net, ifindex); + if (!to_dev) { + netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n"); + return -EINVAL; + } + + if (!mlxsw_sp_port_dev_check(to_dev)) { + netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port"); + return -ENOTSUPP; + } + to_port = netdev_priv(to_dev); + + mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); + if (!mall_tc_entry) + return -ENOMEM; + + mall_tc_entry->cookie = cls->cookie; + mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR; + mall_tc_entry->mirror.to_local_port = to_port->local_port; + mall_tc_entry->mirror.ingress = ingress; + list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list); + + span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; + err = mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type); + if (err) + goto err_mirror_add; + return 0; + +err_mirror_add: + list_del(&mall_tc_entry->list); + kfree(mall_tc_entry); + return err; +} + +static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, + __be16 protocol, + struct tc_cls_matchall_offload *cls, + bool ingress) +{ + const struct tc_action *a; + LIST_HEAD(actions); + int err; + + if (!tc_single_action(cls->exts)) { + netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n"); + return -ENOTSUPP; + } + + tcf_exts_to_list(cls->exts, &actions); + list_for_each_entry(a, &actions, list) { + if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL)) + return -ENOTSUPP; + + err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls, + a, ingress); + if (err) + return err; + } + + return 0; +} + +static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_cls_matchall_offload *cls) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; + enum mlxsw_sp_span_type span_type; + struct mlxsw_sp_port *to_port; + + mall_tc_entry = mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port, + cls->cookie); + if (!mall_tc_entry) { + netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n"); + return; + } + + switch (mall_tc_entry->type) { + case MLXSW_SP_PORT_MALL_MIRROR: + to_port = mlxsw_sp->ports[mall_tc_entry->mirror.to_local_port]; + span_type = mall_tc_entry->mirror.ingress ? 
+ MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS; + + mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type); + break; + default: + WARN_ON(1); + } + + list_del(&mall_tc_entry->list); + kfree(mall_tc_entry); +} + +static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, + __be16 proto, struct tc_to_netdev *tc) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS); + + if (tc->type == TC_SETUP_MATCHALL) { + switch (tc->cls_mall->command) { + case TC_CLSMATCHALL_REPLACE: + return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, + proto, + tc->cls_mall, + ingress); + case TC_CLSMATCHALL_DESTROY: + mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, + tc->cls_mall); + return 0; + default: + return -EINVAL; + } + } + + return -ENOTSUPP; +} + static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_open = mlxsw_sp_port_open, .ndo_stop = mlxsw_sp_port_stop, .ndo_start_xmit = mlxsw_sp_port_xmit, + .ndo_setup_tc = mlxsw_sp_setup_tc, .ndo_set_rx_mode = mlxsw_sp_set_rx_mode, .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, .ndo_change_mtu = mlxsw_sp_port_change_mtu, .ndo_get_stats64 = mlxsw_sp_port_get_stats64, .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, + .ndo_neigh_construct = mlxsw_sp_router_neigh_construct, + .ndo_neigh_destroy = mlxsw_sp_router_neigh_destroy, .ndo_fdb_add = switchdev_port_fdb_add, .ndo_fdb_del = switchdev_port_fdb_del, .ndo_fdb_dump = switchdev_port_fdb_dump, @@ -1055,7 +1307,7 @@ struct mlxsw_sp_port_hw_stats { u64 (*getter)(char *payload); }; -static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { { .str = "a_frames_transmitted_ok", .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, @@ -1136,6 +1388,90 @@ static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { + { + .str = "rx_octets_prio", + .getter = mlxsw_reg_ppcnt_rx_octets_get, + }, + { + .str = "rx_frames_prio", + .getter = mlxsw_reg_ppcnt_rx_frames_get, + }, + { + .str = "tx_octets_prio", + .getter = mlxsw_reg_ppcnt_tx_octets_get, + }, + { + .str = "tx_frames_prio", + .getter = mlxsw_reg_ppcnt_tx_frames_get, + }, + { + .str = "rx_pause_prio", + .getter = mlxsw_reg_ppcnt_rx_pause_get, + }, + { + .str = "rx_pause_duration_prio", + .getter = mlxsw_reg_ppcnt_rx_pause_duration_get, + }, + { + .str = "tx_pause_prio", + .getter = mlxsw_reg_ppcnt_tx_pause_get, + }, + { + .str = "tx_pause_duration_prio", + .getter = mlxsw_reg_ppcnt_tx_pause_duration_get, + }, +}; + +#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats) + +static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(char *ppcnt_pl) +{ + u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); + + return MLXSW_SP_CELLS_TO_BYTES(transmit_queue); +} + +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { + { + .str = "tc_transmit_queue_tc", + .getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get, + }, + { + .str = "tc_no_buffer_discard_uc_tc", + .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get, + }, +}; + +#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) + +#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ + (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \ + MLXSW_SP_PORT_HW_TC_STATS_LEN) * \ + 
IEEE_8021QAZ_MAX_TCS) + +static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio) +{ + int i; + + for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) { + snprintf(*p, ETH_GSTRING_LEN, "%s_%d", + mlxsw_sp_port_hw_prio_stats[i].str, prio); + *p += ETH_GSTRING_LEN; + } +} + +static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc) +{ + int i; + + for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) { + snprintf(*p, ETH_GSTRING_LEN, "%s_%d", + mlxsw_sp_port_hw_tc_stats[i].str, tc); + *p += ETH_GSTRING_LEN; + } +} + static void mlxsw_sp_port_get_strings(struct net_device *dev, u32 stringset, u8 *data) { @@ -1149,6 +1485,13 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + mlxsw_sp_port_get_prio_strings(&p, i); + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + mlxsw_sp_port_get_tc_strings(&p, i); + break; } } @@ -1176,27 +1519,80 @@ static int mlxsw_sp_port_set_phys_id(struct net_device *dev, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl); } -static void mlxsw_sp_port_get_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) +static int +mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, + int *p_len, enum mlxsw_reg_ppcnt_grp grp) +{ + switch (grp) { + case MLXSW_REG_PPCNT_IEEE_8023_CNT: + *p_hw_stats = mlxsw_sp_port_hw_stats; + *p_len = MLXSW_SP_PORT_HW_STATS_LEN; + break; + case MLXSW_REG_PPCNT_PRIO_CNT: + *p_hw_stats = mlxsw_sp_port_hw_prio_stats; + *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; + break; + case MLXSW_REG_PPCNT_TC_CNT: + *p_hw_stats = mlxsw_sp_port_hw_tc_stats; + *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN; + break; + default: + WARN_ON(1); + return -ENOTSUPP; + } + return 0; +} + +static void __mlxsw_sp_port_get_stats(struct net_device *dev, + enum mlxsw_reg_ppcnt_grp grp, int prio, + u64 *data, int data_index) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_port_hw_stats *hw_stats; char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; - int i; + int i, len; int err; - mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, - MLXSW_REG_PPCNT_IEEE_8023_CNT, 0); + err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); + if (err) + return; + mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); - for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) - data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0; + for (i = 0; i < len; i++) + data[data_index + i] = !err ? 
hw_stats[i].getter(ppcnt_pl) : 0; +} + +static void mlxsw_sp_port_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + int i, data_index = 0; + + /* IEEE 802.3 Counters */ + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0, + data, data_index); + data_index = MLXSW_SP_PORT_HW_STATS_LEN; + + /* Per-Priority Counters */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, + data, data_index); + data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN; + } + + /* Per-TC Counters */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i, + data, data_index); + data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN; + } } static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: - return MLXSW_SP_PORT_HW_STATS_LEN; + return MLXSW_SP_PORT_ETHTOOL_STATS_LEN; default: return -EOPNOTSUPP; } @@ -1655,6 +2051,18 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) return 0; } +static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_port->pvid = 1; + + return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1); +} + +static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port) +{ + return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); +} + static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, bool split, u8 module, u8 width, u8 lane) { @@ -1686,6 +2094,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_port_untagged_vlans_alloc; } INIT_LIST_HEAD(&mlxsw_sp_port->vports_list); + INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list); mlxsw_sp_port->pcpu_stats = netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats); @@ -1697,6 +2106,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, dev->netdev_ops = &mlxsw_sp_port_netdev_ops; dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops; + err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", + mlxsw_sp_port->local_port); + goto err_port_swid_set; + } + err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n", @@ -1707,7 +2123,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, netif_carrier_off(dev); dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; + dev->hw_features |= NETIF_F_HW_TC; /* Each packet needs to have a Tx header (metadata) on top of all other * headers.
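For reference, the flat ethtool data[] array filled in by mlxsw_sp_port_get_stats() above is laid out as the IEEE 802.3 block, then eight per-priority blocks, then eight per-TC blocks, in the same order the strings are emitted. A sketch of the index arithmetic (not driver code; only the _LEN macros defined earlier are assumed):

/* Sketch only: where counter 'stat_index' of a given group and
 * priority/TC lands in the flat ethtool data[] array.
 */
static int example_stat_offset(enum mlxsw_reg_ppcnt_grp grp,
			       int prio_or_tc, int stat_index)
{
	int off = 0;

	if (grp == MLXSW_REG_PPCNT_IEEE_8023_CNT)
		return stat_index;
	off += MLXSW_SP_PORT_HW_STATS_LEN;
	if (grp == MLXSW_REG_PPCNT_PRIO_CNT)
		return off + prio_or_tc * MLXSW_SP_PORT_HW_PRIO_STATS_LEN +
		       stat_index;
	/* MLXSW_REG_PPCNT_TC_CNT: skip all eight per-priority blocks */
	off += IEEE_8021QAZ_MAX_TCS * MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
	return off + prio_or_tc * MLXSW_SP_PORT_HW_TC_STATS_LEN + stat_index;
}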
@@ -1721,13 +2138,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_port_system_port_mapping_set; } - err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n", - mlxsw_sp_port->local_port); - goto err_port_swid_set; - } - err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", @@ -1768,7 +2178,15 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_port_dcb_init; } + err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n", + mlxsw_sp_port->local_port); + goto err_port_pvid_vport_create; + } + mlxsw_sp_port_switchdev_init(mlxsw_sp_port); + mlxsw_sp->ports[local_port] = mlxsw_sp_port; err = register_netdev(dev); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n", @@ -1785,27 +2203,26 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_core_port_init; } - err = mlxsw_sp_port_vlan_init(mlxsw_sp_port); - if (err) - goto err_port_vlan_init; - - mlxsw_sp->ports[local_port] = mlxsw_sp_port; return 0; -err_port_vlan_init: - mlxsw_core_port_fini(&mlxsw_sp_port->core_port); err_core_port_init: unregister_netdev(dev); err_register_netdev: + mlxsw_sp->ports[local_port] = NULL; + mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); + mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); +err_port_pvid_vport_create: + mlxsw_sp_port_dcb_fini(mlxsw_sp_port); err_port_dcb_init: err_port_ets_init: err_port_buffers_init: err_port_admin_status_set: err_port_mtu_set: err_port_speed_by_width_set: -err_port_swid_set: err_port_system_port_mapping_set: err_dev_addr_init: + mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); +err_port_swid_set: free_percpu(mlxsw_sp_port->pcpu_stats); err_alloc_stats: kfree(mlxsw_sp_port->untagged_vlans); @@ -1816,40 +2233,24 @@ err_port_active_vlans_alloc: return err; } -static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) -{ - struct net_device *dev = mlxsw_sp_port->dev; - struct mlxsw_sp_port *mlxsw_sp_vport, *tmp; - - list_for_each_entry_safe(mlxsw_sp_vport, tmp, - &mlxsw_sp_port->vports_list, vport.list) { - u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); - - /* vPorts created for VLAN devices should already be gone - * by now, since we unregistered the port netdev. 
- */ - WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev)); - mlxsw_sp_port_kill_vid(dev, 0, vid); - } -} - static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; if (!mlxsw_sp_port) return; - mlxsw_sp->ports[local_port] = NULL; mlxsw_core_port_fini(&mlxsw_sp_port->core_port); unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ - mlxsw_sp_port_dcb_fini(mlxsw_sp_port); - mlxsw_sp_port_vports_fini(mlxsw_sp_port); + mlxsw_sp->ports[local_port] = NULL; mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); + mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port); + mlxsw_sp_port_dcb_fini(mlxsw_sp_port); mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); free_percpu(mlxsw_sp_port->pcpu_stats); kfree(mlxsw_sp_port->untagged_vlans); kfree(mlxsw_sp_port->active_vlans); + WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list)); free_netdev(mlxsw_sp_port->dev); } @@ -2086,11 +2487,8 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, local_port = mlxsw_reg_pude_local_port_get(pude_pl); mlxsw_sp_port = mlxsw_sp->ports[local_port]; - if (!mlxsw_sp_port) { - dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n", - local_port); + if (!mlxsw_sp_port) return; - } status = mlxsw_reg_pude_oper_status_get(pude_pl); if (status == MLXSW_PORT_OPER_STATUS_UP) { @@ -2245,6 +2643,51 @@ static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = { .local_port = MLXSW_PORT_DONT_CARE, .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT, }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_ARPBC, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_ARPUC, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_MTUERROR, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_TTLERROR, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_LBERROR, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_OSPF, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IP2ME, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4, + }, }; static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) @@ -2285,7 +2728,7 @@ err_rx_trap_set: mlxsw_sp); err_rx_listener_register: for (i--; i >= 0; i--) { - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, mlxsw_sp_rx_listener[i].trap_id); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); @@ -2302,7 +2745,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) int i; for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, mlxsw_sp_rx_listener[i].trap_id); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); @@ -2381,8 +2824,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, 
mlxsw_sp->core = mlxsw_core; mlxsw_sp->bus_info = mlxsw_bus_info; - INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list); - INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list); + INIT_LIST_HEAD(&mlxsw_sp->fids); + INIT_LIST_HEAD(&mlxsw_sp->vfids.list); INIT_LIST_HEAD(&mlxsw_sp->br_mids.list); err = mlxsw_sp_base_mac_get(mlxsw_sp); @@ -2391,16 +2834,10 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, return err; } - err = mlxsw_sp_ports_create(mlxsw_sp); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); - return err; - } - err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n"); - goto err_event_register; + return err; } err = mlxsw_sp_traps_init(mlxsw_sp); @@ -2433,8 +2870,32 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_switchdev_init; } + err = mlxsw_sp_router_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); + goto err_router_init; + } + + err = mlxsw_sp_span_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); + goto err_span_init; + } + + err = mlxsw_sp_ports_create(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); + goto err_ports_create; + } + return 0; +err_ports_create: + mlxsw_sp_span_fini(mlxsw_sp); +err_span_init: + mlxsw_sp_router_fini(mlxsw_sp); +err_router_init: + mlxsw_sp_switchdev_fini(mlxsw_sp); err_switchdev_init: err_lag_init: mlxsw_sp_buffers_fini(mlxsw_sp); @@ -2443,20 +2904,25 @@ err_flood_init: mlxsw_sp_traps_fini(mlxsw_sp); err_rx_listener_register: mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); -err_event_register: - mlxsw_sp_ports_remove(mlxsw_sp); return err; } static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + int i; + mlxsw_sp_ports_remove(mlxsw_sp); + mlxsw_sp_span_fini(mlxsw_sp); + mlxsw_sp_router_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); mlxsw_sp_buffers_fini(mlxsw_sp); mlxsw_sp_traps_fini(mlxsw_sp); mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); - mlxsw_sp_ports_remove(mlxsw_sp); + WARN_ON(!list_empty(&mlxsw_sp->vfids.list)); + WARN_ON(!list_empty(&mlxsw_sp->fids)); + for (i = 0; i < MLXSW_SP_RIF_MAX; i++) + WARN_ON_ONCE(mlxsw_sp->rifs[i]); } static struct mlxsw_config_profile mlxsw_sp_config_profile = { @@ -2487,12 +2953,17 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = { .max_ib_mc = 0, .used_max_pkey = 1, .max_pkey = 0, + .used_kvd_sizes = 1, + .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, + .kvd_hash_single_size = MLXSW_SP_KVD_HASH_SINGLE_SIZE, + .kvd_hash_double_size = MLXSW_SP_KVD_HASH_DOUBLE_SIZE, .swid_config = { { .used_type = 1, .type = MLXSW_PORT_SWID_TYPE_ETH, } }, + .resource_query_enable = 1, }; static struct mlxsw_driver mlxsw_sp_driver = { @@ -2518,16 +2989,631 @@ static struct mlxsw_driver mlxsw_sp_driver = { .profile = &mlxsw_sp_config_profile, }; -static int -mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port) +static bool mlxsw_sp_port_dev_check(const struct net_device *dev) +{ + return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; +} + +static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev) +{ + struct net_device *lower_dev; + struct list_head *iter; + + if (mlxsw_sp_port_dev_check(dev)) + return netdev_priv(dev); + + netdev_for_each_all_lower_dev(dev, lower_dev, iter) { + if (mlxsw_sp_port_dev_check(lower_dev)) + 
return netdev_priv(lower_dev); + } + return NULL; +} + +static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + + mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev); + return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL; +} + +static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev) +{ + struct net_device *lower_dev; + struct list_head *iter; + + if (mlxsw_sp_port_dev_check(dev)) + return netdev_priv(dev); + + netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) { + if (mlxsw_sp_port_dev_check(lower_dev)) + return netdev_priv(lower_dev); + } + return NULL; +} + +struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + + rcu_read_lock(); + mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev); + if (mlxsw_sp_port) + dev_hold(mlxsw_sp_port->dev); + rcu_read_unlock(); + return mlxsw_sp_port; +} + +void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) +{ + dev_put(mlxsw_sp_port->dev); +} + +static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r, + unsigned long event) +{ + switch (event) { + case NETDEV_UP: + if (!r) + return true; + r->ref_count++; + return false; + case NETDEV_DOWN: + if (r && --r->ref_count == 0) + return true; + /* It is possible we already removed the RIF ourselves + * if it was assigned to a netdev that is now a bridge + * or LAG slave. + */ + return false; + } + + return false; +} + +static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp) +{ + int i; + + for (i = 0; i < MLXSW_SP_RIF_MAX; i++) + if (!mlxsw_sp->rifs[i]) + return i; + + return MLXSW_SP_RIF_MAX; +} + +static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport, + bool *p_lagged, u16 *p_system_port) +{ + u8 local_port = mlxsw_sp_vport->local_port; + + *p_lagged = mlxsw_sp_vport->lagged; + *p_system_port = *p_lagged ? 
mlxsw_sp_vport->lag_id : local_port; +} + +static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *l3_dev, u16 rif, + bool create) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + bool lagged = mlxsw_sp_vport->lagged; + char ritr_pl[MLXSW_REG_RITR_LEN]; + u16 system_port; + + mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif, + l3_dev->mtu, l3_dev->dev_addr); + + mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port); + mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port, + mlxsw_sp_vport_vid_get(mlxsw_sp_vport)); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport); + +static struct mlxsw_sp_fid * +mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev) +{ + struct mlxsw_sp_fid *f; + + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) + return NULL; + + f->leave = mlxsw_sp_vport_rif_sp_leave; + f->ref_count = 0; + f->dev = l3_dev; + f->fid = fid; + + return f; +} + +static struct mlxsw_sp_rif * +mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f) +{ + struct mlxsw_sp_rif *r; + + r = kzalloc(sizeof(*r), GFP_KERNEL); + if (!r) + return NULL; + + ether_addr_copy(r->addr, l3_dev->dev_addr); + r->mtu = l3_dev->mtu; + r->ref_count = 1; + r->dev = l3_dev; + r->rif = rif; + r->f = f; + + return r; +} + +static struct mlxsw_sp_rif * +mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *l3_dev) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + struct mlxsw_sp_fid *f; + struct mlxsw_sp_rif *r; + u16 fid, rif; + int err; + + rif = mlxsw_sp_avail_rif_get(mlxsw_sp); + if (rif == MLXSW_SP_RIF_MAX) + return ERR_PTR(-ERANGE); + + err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true); + if (err) + return ERR_PTR(err); + + fid = mlxsw_sp_rif_sp_to_fid(rif); + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true); + if (err) + goto err_rif_fdb_op; + + f = mlxsw_sp_rfid_alloc(fid, l3_dev); + if (!f) { + err = -ENOMEM; + goto err_rfid_alloc; + } + + r = mlxsw_sp_rif_alloc(rif, l3_dev, f); + if (!r) { + err = -ENOMEM; + goto err_rif_alloc; + } + + f->r = r; + mlxsw_sp->rifs[rif] = r; + + return r; + +err_rif_alloc: + kfree(f); +err_rfid_alloc: + mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); +err_rif_fdb_op: + mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false); + return ERR_PTR(err); +} + +static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport, + struct mlxsw_sp_rif *r) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + struct net_device *l3_dev = r->dev; + struct mlxsw_sp_fid *f = r->f; + u16 fid = f->fid; + u16 rif = r->rif; + + mlxsw_sp->rifs[rif] = NULL; + f->r = NULL; + + kfree(r); + + kfree(f); + + mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false); + + mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false); +} + +static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *l3_dev) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + struct mlxsw_sp_rif *r; + + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); + if (!r) { + r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev); + if (IS_ERR(r)) + return PTR_ERR(r); + } + + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f); + r->f->ref_count++; + + netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid); + + return 0; +} + +static void 
mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport) +{ + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + + netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid); + + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL); + if (--f->ref_count == 0) + mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r); +} + +static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev, + struct net_device *port_dev, + unsigned long event, u16 vid) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev); + struct mlxsw_sp_port *mlxsw_sp_vport; + + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); + if (WARN_ON(!mlxsw_sp_vport)) + return -EINVAL; + + switch (event) { + case NETDEV_UP: + return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev); + case NETDEV_DOWN: + mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport); + break; + } + + return 0; +} + +static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev, + unsigned long event) +{ + if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev)) + return 0; + + return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1); +} + +static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev, + struct net_device *lag_dev, + unsigned long event, u16 vid) +{ + struct net_device *port_dev; + struct list_head *iter; + int err; + + netdev_for_each_lower_dev(lag_dev, port_dev, iter) { + if (mlxsw_sp_port_dev_check(port_dev)) { + err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev, + event, vid); + if (err) + return err; + } + } + + return 0; +} + +static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev, + unsigned long event) +{ + if (netif_is_bridge_port(lag_dev)) + return 0; + + return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1); +} + +static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev) +{ + u16 fid; + + if (is_vlan_dev(l3_dev)) + fid = vlan_dev_vlan_id(l3_dev); + else if (mlxsw_sp->master_bridge.dev == l3_dev) + fid = 1; + else + return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev); + + return mlxsw_sp_fid_find(mlxsw_sp, fid); +} + +static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid) +{ + return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID : + MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; +} + +static u16 mlxsw_sp_flood_table_index_get(u16 fid) +{ + return mlxsw_sp_fid_is_vfid(fid) ? 
mlxsw_sp_fid_to_vfid(fid) : fid; +} + +static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid, + bool set) +{ + enum mlxsw_flood_table_type table_type; + char *sftr_pl; + u16 index; + int err; + + sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); + if (!sftr_pl) + return -ENOMEM; + + table_type = mlxsw_sp_flood_table_type_get(fid); + index = mlxsw_sp_flood_table_index_get(fid); + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type, + 1, MLXSW_PORT_ROUTER_PORT, set); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); + + kfree(sftr_pl); + return err; +} + +static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid) +{ + if (mlxsw_sp_fid_is_vfid(fid)) + return MLXSW_REG_RITR_FID_IF; + else + return MLXSW_REG_RITR_VLAN_IF; +} + +static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev, + u16 fid, u16 rif, + bool create) +{ + enum mlxsw_reg_ritr_if_type rif_type; + char ritr_pl[MLXSW_REG_RITR_LEN]; + + rif_type = mlxsw_sp_rif_type_get(fid); + mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu, + l3_dev->dev_addr); + mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev, + struct mlxsw_sp_fid *f) +{ + struct mlxsw_sp_rif *r; + u16 rif; + int err; + + rif = mlxsw_sp_avail_rif_get(mlxsw_sp); + if (rif == MLXSW_SP_RIF_MAX) + return -ERANGE; + + err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true); + if (err) + return err; + + err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true); + if (err) + goto err_rif_bridge_op; + + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true); + if (err) + goto err_rif_fdb_op; + + r = mlxsw_sp_rif_alloc(rif, l3_dev, f); + if (!r) { + err = -ENOMEM; + goto err_rif_alloc; + } + + f->r = r; + mlxsw_sp->rifs[rif] = r; + + netdev_dbg(l3_dev, "RIF=%d created\n", rif); + + return 0; + +err_rif_alloc: + mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); +err_rif_fdb_op: + mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); +err_rif_bridge_op: + mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false); + return err; +} + +void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *r) +{ + struct net_device *l3_dev = r->dev; + struct mlxsw_sp_fid *f = r->f; + u16 rif = r->rif; + + mlxsw_sp->rifs[rif] = NULL; + f->r = NULL; + + kfree(r); + + mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false); + + mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false); + + mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false); + + netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif); +} + +static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev, + struct net_device *br_dev, + unsigned long event) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); + struct mlxsw_sp_fid *f; + + /* FID can either be an actual FID if the L3 device is the + * VLAN-aware bridge or a VLAN device on top. Otherwise, the + * L3 device is a VLAN-unaware bridge and we get a vFID. 
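The comment above distinguishes the FID spaces that partition the 16-bit FID number: VID-based FIDs for the VLAN-aware bridge, vFIDs for VLAN-unaware bridges, and rFIDs for router interfaces. A standalone userspace sketch of the classification, assuming the constants from spectrum.h (MLXSW_SP_VFID_BASE = VLAN_N_VID = 4096, MLXSW_SP_RFID_BASE = 15360); this is an illustration, not driver code:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define SP_VFID_BASE 4096U	/* VLAN_N_VID: first vFID */
	#define SP_RFID_BASE 15360U	/* first router FID */

	/* Mirrors mlxsw_sp_fid_is_vfid(): everything in
	 * [SP_VFID_BASE, SP_RFID_BASE) classifies as a vFID, even though
	 * only MLXSW_SP_VFID_MAX of those slots are ever allocated. */
	static bool fid_is_vfid(uint16_t fid)
	{
		return fid >= SP_VFID_BASE && fid < SP_RFID_BASE;
	}

	static uint16_t fid_to_vfid(uint16_t fid)
	{
		return fid - SP_VFID_BASE;	/* index into vfids.mapped */
	}

	int main(void)
	{
		assert(!fid_is_vfid(100));	/* VLAN-aware bridge: FID == VID */
		assert(fid_is_vfid(4096) && fid_to_vfid(4096) == 0);
		assert(!fid_is_vfid(15360));	/* rFID space starts here */
		return 0;
	}

The flood-table helpers above then pick the table type and index from the same classification, which is why the vFID case subtracts the base before programming the register.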
+ */ + f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev); + if (WARN_ON(!f)) + return -EINVAL; + + switch (event) { + case NETDEV_UP: + return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f); + case NETDEV_DOWN: + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); + break; + } + + return 0; +} + +static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, + unsigned long event) +{ + struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); + u16 vid = vlan_dev_vlan_id(vlan_dev); + + if (mlxsw_sp_port_dev_check(real_dev)) + return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event, + vid); + else if (netif_is_lag_master(real_dev)) + return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event, + vid); + else if (netif_is_bridge_master(real_dev) && + mlxsw_sp->master_bridge.dev == real_dev) + return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev, + event); + + return 0; +} + +static int mlxsw_sp_inetaddr_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; + struct net_device *dev = ifa->ifa_dev->dev; + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_rif *r; + int err = 0; + + mlxsw_sp = mlxsw_sp_lower_get(dev); + if (!mlxsw_sp) + goto out; + + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!mlxsw_sp_rif_should_config(r, event)) + goto out; + + if (mlxsw_sp_port_dev_check(dev)) + err = mlxsw_sp_inetaddr_port_event(dev, event); + else if (netif_is_lag_master(dev)) + err = mlxsw_sp_inetaddr_lag_event(dev, event); + else if (netif_is_bridge_master(dev)) + err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event); + else if (is_vlan_dev(dev)) + err = mlxsw_sp_inetaddr_vlan_event(dev, event); + +out: + return notifier_from_errno(err); +} + +static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif, + const char *mac, int mtu) +{ + char ritr_pl[MLXSW_REG_RITR_LEN]; + int err; + + mlxsw_reg_ritr_rif_pack(ritr_pl, rif); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); + if (err) + return err; + + mlxsw_reg_ritr_mtu_set(ritr_pl, mtu); + mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac); + mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); +} + +static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev) +{ + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_rif *r; + int err; + + mlxsw_sp = mlxsw_sp_lower_get(dev); + if (!mlxsw_sp) + return 0; + + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!r) + return 0; + + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false); + if (err) + return err; + + err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu); + if (err) + goto err_rif_edit; + + err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true); + if (err) + goto err_rif_fdb_op; + + ether_addr_copy(r->addr, dev->dev_addr); + r->mtu = dev->mtu; + + netdev_dbg(dev, "Updated RIF=%d\n", r->rif); + + return 0; + +err_rif_fdb_op: + mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu); +err_rif_edit: + mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true); + return err; +} + +static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port, + u16 fid) +{ + if (mlxsw_sp_fid_is_vfid(fid)) + return mlxsw_sp_port_vport_find_by_fid(lag_port, fid); + else + return test_bit(fid, lag_port->active_vlans); +} + +static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) { struct mlxsw_sp *mlxsw_sp = 
mlxsw_sp_port->mlxsw_sp; - char sfdf_pl[MLXSW_REG_SFDF_LEN]; + u8 local_port = mlxsw_sp_port->local_port; + u16 lag_id = mlxsw_sp_port->lag_id; + int i, count = 0; - mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT); - mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port); + if (!mlxsw_sp_port->lagged) + return true; - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); + for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) { + struct mlxsw_sp_port *lag_port; + + lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i); + if (!lag_port || lag_port->local_port == local_port) + continue; + if (mlxsw_sp_lag_port_fid_member(lag_port, fid)) + count++; + } + + return !count; } static int @@ -2542,17 +3628,8 @@ mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, mlxsw_sp_port->local_port); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); -} - -static int -mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char sfdf_pl[MLXSW_REG_SFDF_LEN]; - - mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG); - mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); + netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n", + mlxsw_sp_port->local_port, fid); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); } @@ -2568,71 +3645,64 @@ mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); + netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n", + mlxsw_sp_port->lag_id, fid); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); } -static int -__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port) +int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) { - int err, last_err = 0; - u16 vid; - - for (vid = 1; vid < VLAN_N_VID - 1; vid++) { - err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid); - if (err) - last_err = err; - } + if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid)) + return 0; - return last_err; + if (mlxsw_sp_port->lagged) + return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, + fid); + else + return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid); } -static int -__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port) +static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp) { - int err, last_err = 0; - u16 vid; - - for (vid = 1; vid < VLAN_N_VID - 1; vid++) { - err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid); - if (err) - last_err = err; - } + struct mlxsw_sp_fid *f, *tmp; - return last_err; + list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list) + if (--f->ref_count == 0) + mlxsw_sp_fid_destroy(mlxsw_sp, f); + else + WARN_ON_ONCE(1); } -static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port) +static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) { - if (!list_empty(&mlxsw_sp_port->vports_list)) - if (mlxsw_sp_port->lagged) - return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port); - else - return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port); - else - if (mlxsw_sp_port->lagged) - return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port); - else - return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port); + return 
!mlxsw_sp->master_bridge.dev || + mlxsw_sp->master_bridge.dev == br_dev; } -static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport) +static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) { - u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport); - u16 fid = mlxsw_sp_vfid_to_fid(vfid); - - if (mlxsw_sp_vport->lagged) - return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport, - fid); - else - return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid); + mlxsw_sp->master_bridge.dev = br_dev; + mlxsw_sp->master_bridge.ref_count++; } -static bool mlxsw_sp_port_dev_check(const struct net_device *dev) +static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp) { - return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; + if (--mlxsw_sp->master_bridge.ref_count == 0) { + mlxsw_sp->master_bridge.dev = NULL; + /* It's possible upper VLAN devices are still holding + * references to underlying FIDs. Drop the reference + * and release the resources if it was the last one. + * If it wasn't, then something bad happened. + */ + mlxsw_sp_master_bridge_gone_sync(mlxsw_sp); + } } -static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) +static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *br_dev) { struct net_device *dev = mlxsw_sp_port->dev; int err; @@ -2646,6 +3716,8 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) if (err) return err; + mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev); + mlxsw_sp_port->learning = 1; mlxsw_sp_port->learning_sync = 1; mlxsw_sp_port->uc_flood = 1; @@ -2654,16 +3726,14 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) return 0; } -static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, - bool flush_fdb) +static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) { struct net_device *dev = mlxsw_sp_port->dev; - if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port)) - netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); - mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); + mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp); + mlxsw_sp_port->learning = 0; mlxsw_sp_port->learning_sync = 0; mlxsw_sp_port->uc_flood = 0; @@ -2672,28 +3742,7 @@ static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, /* Add implicit VLAN interface in the device, so that untagged * packets will be classified to the default vFID. 
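mlxsw_sp_master_bridge_check/inc/dec above implement a single-slot reference count: the first port to join records the bridge device, later ports are admitted only if they join the same bridge, and the slot is cleared when the last port leaves. A standalone sketch of the pattern with stand-in types (not the driver's structures):

	#include <stdbool.h>
	#include <stddef.h>

	struct upper_ref {
		void *dev;		/* the single tracked upper device */
		unsigned int ref_count;
	};

	/* Admit a new member only if no bridge is tracked yet or it is
	 * the same one; mirrors mlxsw_sp_master_bridge_check(). */
	static bool upper_check(const struct upper_ref *u, const void *dev)
	{
		return !u->dev || u->dev == dev;
	}

	static void upper_inc(struct upper_ref *u, void *dev)
	{
		u->dev = dev;
		u->ref_count++;
	}

	/* Returns true when the last user left and cleanup must run. */
	static bool upper_dec(struct upper_ref *u)
	{
		if (--u->ref_count == 0) {
			u->dev = NULL;
			return true;
		}
		return false;
	}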
*/ - return mlxsw_sp_port_add_vid(dev, 0, 1); -} - -static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) -{ - return !mlxsw_sp->master_bridge.dev || - mlxsw_sp->master_bridge.dev == br_dev; -} - -static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) -{ - mlxsw_sp->master_bridge.dev = br_dev; - mlxsw_sp->master_bridge.ref_count++; -} - -static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) -{ - if (--mlxsw_sp->master_bridge.ref_count == 0) - mlxsw_sp->master_bridge.dev = NULL; + mlxsw_sp_port_add_vid(dev, 0, 1); } static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) @@ -2809,6 +3858,45 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp, return -EBUSY; } +static void +mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, + u16 lag_id) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + struct mlxsw_sp_fid *f; + + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1); + if (WARN_ON(!mlxsw_sp_vport)) + return; + + /* If vPort is assigned a RIF, then leave it since it's no + * longer valid. + */ + f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + if (f) + f->leave(mlxsw_sp_vport); + + mlxsw_sp_vport->lag_id = lag_id; + mlxsw_sp_vport->lagged = 1; +} + +static void +mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp_port *mlxsw_sp_vport; + struct mlxsw_sp_fid *f; + + mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1); + if (WARN_ON(!mlxsw_sp_vport)) + return; + + f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + if (f) + f->leave(mlxsw_sp_vport); + + mlxsw_sp_vport->lagged = 0; +} + static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *lag_dev) { @@ -2844,6 +3932,9 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port->lag_id = lag_id; mlxsw_sp_port->lagged = 1; lag->ref_count++; + + mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id); + return 0; err_col_port_enable: @@ -2854,65 +3945,35 @@ err_col_port_add: return err; } -static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *br_dev, - bool flush_fdb); - -static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, - struct net_device *lag_dev) +static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *lag_dev) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - struct mlxsw_sp_port *mlxsw_sp_vport; - struct mlxsw_sp_upper *lag; u16 lag_id = mlxsw_sp_port->lag_id; - int err; + struct mlxsw_sp_upper *lag; if (!mlxsw_sp_port->lagged) - return 0; + return; lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id); WARN_ON(lag->ref_count == 0); - err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); - if (err) - return err; - err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); - if (err) - return err; - - /* In case we leave a LAG device that has bridges built on top, - * then their teardown sequence is never issued and we need to - * invoke the necessary cleanup routines ourselves. 
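mlxsw_sp_port_pvid_vport_lag_join() and _leave() above do not open-code the FID teardown; they invoke the FID's leave() callback, so the same call site works whether the PVID vPort currently sits in a vFID or is bound to a router interface. A minimal sketch of that dispatch with stand-in types:

	struct vport;

	struct fid {
		void (*leave)(struct vport *v);	/* type-specific teardown */
		unsigned int ref_count;
	};

	struct vport {
		struct fid *f;
	};

	/* The call site stays type-agnostic; each FID flavor (vFID,
	 * rFID) installs its own leave() when the vPort joins it. */
	static void vport_release_fid(struct vport *v)
	{
		if (v->f)
			v->f->leave(v);
	}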
- */ - list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, - vport.list) { - struct net_device *br_dev; - - if (!mlxsw_sp_vport->bridged) - continue; - - br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); - mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false); - } + mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id); + mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); if (mlxsw_sp_port->bridged) { mlxsw_sp_port_active_vlans_del(mlxsw_sp_port); - mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false); - mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL); + mlxsw_sp_port_bridge_leave(mlxsw_sp_port); } - if (lag->ref_count == 1) { - if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port)) - netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); - err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); - if (err) - return err; - } + if (lag->ref_count == 1) + mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id, mlxsw_sp_port->local_port); mlxsw_sp_port->lagged = 0; lag->ref_count--; - return 0; + + mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port); } static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, @@ -2961,42 +4022,25 @@ static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid = vlan_dev_vlan_id(vlan_dev); mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); - if (!mlxsw_sp_vport) { - WARN_ON(!mlxsw_sp_vport); + if (WARN_ON(!mlxsw_sp_vport)) return -EINVAL; - } mlxsw_sp_vport->dev = vlan_dev; return 0; } -static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, - struct net_device *vlan_dev) +static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *vlan_dev) { struct mlxsw_sp_port *mlxsw_sp_vport; u16 vid = vlan_dev_vlan_id(vlan_dev); mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); - if (!mlxsw_sp_vport) { - WARN_ON(!mlxsw_sp_vport); - return -EINVAL; - } - - /* When removing a VLAN device while still bridged we should first - * remove it from the bridge, as we receive the bridge's notification - * when the vPort is already gone. - */ - if (mlxsw_sp_vport->bridged) { - struct net_device *br_dev; - - br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); - mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true); - } + if (WARN_ON(!mlxsw_sp_vport)) + return; mlxsw_sp_vport->dev = mlxsw_sp_port->dev; - - return 0; } static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, @@ -3006,7 +4050,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, struct mlxsw_sp_port *mlxsw_sp_port; struct net_device *upper_dev; struct mlxsw_sp *mlxsw_sp; - int err; + int err = 0; mlxsw_sp_port = netdev_priv(dev); mlxsw_sp = mlxsw_sp_port->mlxsw_sp; @@ -3015,73 +4059,56 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; - if (!info->master || !info->linking) + if (!is_vlan_dev(upper_dev) && + !netif_is_lag_master(upper_dev) && + !netif_is_bridge_master(upper_dev)) + return -EINVAL; + if (!info->linking) break; /* HW limitation forbids to put ports to multiple bridges. 
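The PRECHANGEUPPER handling in this hunk drops the NOTIFY_BAD returns in favor of plain errno values that the outer notifier converts with notifier_from_errno(), which both vetoes the linking and propagates the error to user space. A kernel-style sketch of the veto pattern; example_port_check() and example_upper_supported() are hypothetical helpers:

	static int example_netdevice_event(struct notifier_block *nb,
					   unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
		struct netdev_notifier_changeupper_info *info = ptr;

		if (event != NETDEV_PRECHANGEUPPER || !example_port_check(dev))
			return NOTIFY_DONE;

		/* Failing here aborts the linking before it takes effect,
		 * so an upper configuration the hardware cannot offload
		 * never forms. */
		if (info->linking && !example_upper_supported(info->upper_dev))
			return notifier_from_errno(-EINVAL);

		return NOTIFY_DONE;
	}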
*/ if (netif_is_bridge_master(upper_dev) && !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev)) - return NOTIFY_BAD; + return -EINVAL; if (netif_is_lag_master(upper_dev) && !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, info->upper_info)) - return NOTIFY_BAD; + return -EINVAL; + if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) + return -EINVAL; + if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && + !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) + return -EINVAL; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; if (is_vlan_dev(upper_dev)) { - if (info->linking) { + if (info->linking) err = mlxsw_sp_port_vlan_link(mlxsw_sp_port, upper_dev); - if (err) { - netdev_err(dev, "Failed to link VLAN device\n"); - return NOTIFY_BAD; - } - } else { - err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, - upper_dev); - if (err) { - netdev_err(dev, "Failed to unlink VLAN device\n"); - return NOTIFY_BAD; - } - } + else + mlxsw_sp_port_vlan_unlink(mlxsw_sp_port, + upper_dev); } else if (netif_is_bridge_master(upper_dev)) { - if (info->linking) { - err = mlxsw_sp_port_bridge_join(mlxsw_sp_port); - if (err) { - netdev_err(dev, "Failed to join bridge\n"); - return NOTIFY_BAD; - } - mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev); - } else { - err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port, - true); - mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev); - if (err) { - netdev_err(dev, "Failed to leave bridge\n"); - return NOTIFY_BAD; - } - } + if (info->linking) + err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, + upper_dev); + else + mlxsw_sp_port_bridge_leave(mlxsw_sp_port); } else if (netif_is_lag_master(upper_dev)) { - if (info->linking) { + if (info->linking) err = mlxsw_sp_port_lag_join(mlxsw_sp_port, upper_dev); - if (err) { - netdev_err(dev, "Failed to join link aggregation\n"); - return NOTIFY_BAD; - } - } else { - err = mlxsw_sp_port_lag_leave(mlxsw_sp_port, - upper_dev); - if (err) { - netdev_err(dev, "Failed to leave link aggregation\n"); - return NOTIFY_BAD; - } - } + else + mlxsw_sp_port_lag_leave(mlxsw_sp_port, + upper_dev); + } else { + err = -EINVAL; + WARN_ON(1); } break; } - return NOTIFY_DONE; + return err; } static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, @@ -3105,7 +4132,7 @@ static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev, break; } - return NOTIFY_DONE; + return 0; } static int mlxsw_sp_netdevice_port_event(struct net_device *dev, @@ -3119,7 +4146,7 @@ static int mlxsw_sp_netdevice_port_event(struct net_device *dev, return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr); } - return NOTIFY_DONE; + return 0; } static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, @@ -3132,218 +4159,230 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev, netdev_for_each_lower_dev(lag_dev, dev, iter) { if (mlxsw_sp_port_dev_check(dev)) { ret = mlxsw_sp_netdevice_port_event(dev, event, ptr); - if (ret == NOTIFY_BAD) + if (ret) return ret; } } - return NOTIFY_DONE; + return 0; } -static struct mlxsw_sp_vfid * -mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *br_dev) +static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp, + struct net_device *vlan_dev) { - struct mlxsw_sp_vfid *vfid; + u16 fid = vlan_dev_vlan_id(vlan_dev); + struct mlxsw_sp_fid *f; - list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) { - if (vfid->br_dev == br_dev) - return vfid; + f = mlxsw_sp_fid_find(mlxsw_sp, fid); + if (!f) { + f = mlxsw_sp_fid_create(mlxsw_sp, fid); + if (IS_ERR(f)) 
+ return PTR_ERR(f); } - return NULL; + f->ref_count++; + + return 0; } -static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid) +static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp, + struct net_device *vlan_dev) { - return vfid - MLXSW_SP_VFID_PORT_MAX; + u16 fid = vlan_dev_vlan_id(vlan_dev); + struct mlxsw_sp_fid *f; + + f = mlxsw_sp_fid_find(mlxsw_sp, fid); + if (f && f->r) + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); + if (f && --f->ref_count == 0) + mlxsw_sp_fid_destroy(mlxsw_sp, f); } -static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid) +static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev, + unsigned long event, void *ptr) { - return MLXSW_SP_VFID_PORT_MAX + br_vfid; + struct netdev_notifier_changeupper_info *info; + struct net_device *upper_dev; + struct mlxsw_sp *mlxsw_sp; + int err; + + mlxsw_sp = mlxsw_sp_lower_get(br_dev); + if (!mlxsw_sp) + return 0; + if (br_dev != mlxsw_sp->master_bridge.dev) + return 0; + + info = ptr; + + switch (event) { + case NETDEV_CHANGEUPPER: + upper_dev = info->upper_dev; + if (!is_vlan_dev(upper_dev)) + break; + if (info->linking) { + err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp, + upper_dev); + if (err) + return err; + } else { + mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev); + } + break; + } + + return 0; } -static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp) +static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp) { - return find_first_zero_bit(mlxsw_sp->br_vfids.mapped, - MLXSW_SP_VFID_BR_MAX); + return find_first_zero_bit(mlxsw_sp->vfids.mapped, + MLXSW_SP_VFID_MAX); } -static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp, - struct net_device *br_dev) +static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create) +{ + char sfmr_pl[MLXSW_REG_SFMR_LEN]; + + mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); +} + +static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport); + +static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) { struct device *dev = mlxsw_sp->bus_info->dev; - struct mlxsw_sp_vfid *vfid; - u16 n_vfid; + struct mlxsw_sp_fid *f; + u16 vfid, fid; int err; - n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp)); - if (n_vfid == MLXSW_SP_VFID_MAX) { + vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp); + if (vfid == MLXSW_SP_VFID_MAX) { dev_err(dev, "No available vFIDs\n"); return ERR_PTR(-ERANGE); } - err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid); + fid = mlxsw_sp_vfid_to_fid(vfid); + err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true); if (err) { - dev_err(dev, "Failed to create vFID=%d\n", n_vfid); + dev_err(dev, "Failed to create FID=%d\n", fid); return ERR_PTR(err); } - vfid = kzalloc(sizeof(*vfid), GFP_KERNEL); - if (!vfid) + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) goto err_allocate_vfid; - vfid->vfid = n_vfid; - vfid->br_dev = br_dev; + f->leave = mlxsw_sp_vport_vfid_leave; + f->fid = fid; + f->dev = br_dev; - list_add(&vfid->list, &mlxsw_sp->br_vfids.list); - set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped); + list_add(&f->list, &mlxsw_sp->vfids.list); + set_bit(vfid, mlxsw_sp->vfids.mapped); - return vfid; + return f; err_allocate_vfid: - __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid); + mlxsw_sp_vfid_op(mlxsw_sp, fid, false); return ERR_PTR(-ENOMEM); } -static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_vfid *vfid) +static void 
mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fid *f) { - u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid); + u16 vfid = mlxsw_sp_fid_to_vfid(f->fid); + u16 fid = f->fid; - clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped); - list_del(&vfid->list); + clear_bit(vfid, mlxsw_sp->vfids.mapped); + list_del(&f->list); - __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid); + if (f->r) + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); - kfree(vfid); + kfree(f); + + mlxsw_sp_vfid_op(mlxsw_sp, fid, false); } -static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *br_dev, - bool flush_fdb) +static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, + bool valid) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); - struct net_device *dev = mlxsw_sp_vport->dev; - struct mlxsw_sp_vfid *vfid, *new_vfid; - int err; - - vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); - if (!vfid) { - WARN_ON(!vfid); - return -EINVAL; - } - /* We need a vFID to go back to after leaving the bridge's vFID. */ - new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid); - if (!new_vfid) { - new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid); - if (IS_ERR(new_vfid)) { - netdev_err(dev, "Failed to create vFID for VID=%d\n", - vid); - return PTR_ERR(new_vfid); - } - } - - /* Invalidate existing {Port, VID} to vFID mapping and create a new - * one for the new vFID. - */ - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - false, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", - vfid->vfid); - goto err_port_vid_to_fid_invalidate; - } + return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid, + vid); +} - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, - mlxsw_sp_vfid_to_fid(new_vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", - new_vfid->vfid); - goto err_port_vid_to_fid_validate; - } +static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *br_dev) +{ + struct mlxsw_sp_fid *f; + int err; - err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); - if (err) { - netdev_err(dev, "Failed to disable learning\n"); - goto err_port_vid_learning_set; + f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev); + if (!f) { + f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev); + if (IS_ERR(f)) + return PTR_ERR(f); } - err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, - false); - if (err) { - netdev_err(dev, "Failed clear to clear flooding\n"); + err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true); + if (err) goto err_vport_flood_set; - } - err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid, - MLXSW_REG_SPMS_STATE_FORWARDING); - if (err) { - netdev_err(dev, "Failed to set STP state\n"); - goto err_port_stp_state_set; - } - - if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport)) - netdev_err(dev, "Failed to flush FDB\n"); + err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true); + if (err) + goto err_vport_fid_map; - /* Switch between the vFIDs and destroy the old one if needed. 
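mlxsw_sp_vport_vfid_join() above follows the standard kernel unwind idiom: each configured step has a rollback label, executed in reverse order when a later step fails. A compressed standalone sketch; flood_set() and fid_map() are stubs standing in for the real register writes:

	#include <stdbool.h>

	static int flood_set(bool enable) { (void)enable; return 0; }
	static int fid_map(bool valid) { (void)valid; return 0; }

	static int vfid_join_sketch(void)
	{
		int err;

		err = flood_set(true);		/* step 1: flood tables */
		if (err)
			return err;		/* nothing to unwind yet */

		err = fid_map(true);		/* step 2: {Port, VID} -> FID */
		if (err)
			goto err_fid_map;

		return 0;

	err_fid_map:
		flood_set(false);		/* undo step 1, reverse order */
		return err;
	}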
*/ - new_vfid->nr_vports++; - mlxsw_sp_vport->vport.vfid = new_vfid; - vfid->nr_vports--; - if (!vfid->nr_vports) - mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f); + f->ref_count++; - mlxsw_sp_vport->learning = 0; - mlxsw_sp_vport->learning_sync = 0; - mlxsw_sp_vport->uc_flood = 0; - mlxsw_sp_vport->bridged = 0; + netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid); return 0; -err_port_stp_state_set: +err_vport_fid_map: + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false); err_vport_flood_set: -err_port_vid_learning_set: -err_port_vid_to_fid_validate: -err_port_vid_to_fid_invalidate: - /* Rollback vFID only if new. */ - if (!new_vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid); + if (!f->ref_count) + mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f); return err; } +static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport) +{ + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + + netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid); + + mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false); + + mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false); + + mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid); + + mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL); + if (--f->ref_count == 0) + mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f); +} + static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, struct net_device *br_dev) { - struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid; - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); struct net_device *dev = mlxsw_sp_vport->dev; - struct mlxsw_sp_vfid *vfid; int err; - vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev); - if (!vfid) { - vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev); - if (IS_ERR(vfid)) { - netdev_err(dev, "Failed to create bridge vFID\n"); - return PTR_ERR(vfid); - } - } + if (f && !WARN_ON(!f->leave)) + f->leave(mlxsw_sp_vport); - err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false); + err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev); if (err) { - netdev_err(dev, "Failed to setup flooding for vFID=%d\n", - vfid->vfid); - goto err_port_flood_set; + netdev_err(dev, "Failed to join vFID\n"); + return err; } err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true); @@ -3352,38 +4391,6 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, goto err_port_vid_learning_set; } - /* We need to invalidate existing {Port, VID} to vFID mapping and - * create a new one for the bridge's vFID. - */ - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - false, - mlxsw_sp_vfid_to_fid(old_vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n", - old_vfid->vfid); - goto err_port_vid_to_fid_invalidate; - } - - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, - true, - mlxsw_sp_vfid_to_fid(vfid->vfid), - vid); - if (err) { - netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n", - vfid->vfid); - goto err_port_vid_to_fid_validate; - } - - /* Switch between the vFIDs and destroy the old one if needed. 
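The vFID allocator consolidated in this hunk (mlxsw_sp_avail_vfid_get() above, with set_bit() on create and clear_bit() on destroy) is a first-fit bitmap over MLXSW_SP_VFID_MAX slots. A userspace approximation with find_first_zero_bit() open-coded; the size mirrors the new define:

	#include <limits.h>
	#include <stdbool.h>
	#include <stddef.h>

	#define VFID_MAX 6656U
	#define ULONG_BITS (sizeof(unsigned long) * CHAR_BIT)

	static unsigned long mapped[(VFID_MAX + ULONG_BITS - 1) / ULONG_BITS];

	static bool vfid_test(size_t i)
	{
		return mapped[i / ULONG_BITS] >> (i % ULONG_BITS) & 1UL;
	}

	static void vfid_set(size_t i)
	{
		mapped[i / ULONG_BITS] |= 1UL << (i % ULONG_BITS);
	}

	static void vfid_clear(size_t i)
	{
		mapped[i / ULONG_BITS] &= ~(1UL << (i % ULONG_BITS));
	}

	/* First-fit scan; returns VFID_MAX when the space is exhausted,
	 * matching the driver's failure check. */
	static size_t avail_vfid_get(void)
	{
		size_t i;

		for (i = 0; i < VFID_MAX; i++)
			if (!vfid_test(i))
				return i;
		return VFID_MAX;
	}

Usage follows the driver shape: allocate with idx = avail_vfid_get(), mark with vfid_set(idx) on successful create, and release with vfid_clear(idx) on destroy.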
*/ - vfid->nr_vports++; - mlxsw_sp_vport->vport.vfid = vfid; - old_vfid->nr_vports--; - if (!old_vfid->nr_vports) - mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid); - mlxsw_sp_vport->learning = 1; mlxsw_sp_vport->learning_sync = 1; mlxsw_sp_vport->uc_flood = 1; @@ -3391,20 +4398,25 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport, return 0; -err_port_vid_to_fid_validate: - mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, - MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, - mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid); -err_port_vid_to_fid_invalidate: - mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); err_port_vid_learning_set: - mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false); -err_port_flood_set: - if (!vfid->nr_vports) - mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid); + mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport); return err; } +static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport) +{ + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + + mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false); + + mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport); + + mlxsw_sp_vport->learning = 0; + mlxsw_sp_vport->learning_sync = 0; + mlxsw_sp_vport->uc_flood = 0; + mlxsw_sp_vport->bridged = 0; +} + static bool mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port, const struct net_device *br_dev) @@ -3413,7 +4425,9 @@ mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port, list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, vport.list) { - if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev) + struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport); + + if (dev && dev == br_dev) return false; } @@ -3428,56 +4442,39 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev, struct netdev_notifier_changeupper_info *info = ptr; struct mlxsw_sp_port *mlxsw_sp_vport; struct net_device *upper_dev; - int err; + int err = 0; mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid); switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; - if (!info->master || !info->linking) - break; if (!netif_is_bridge_master(upper_dev)) - return NOTIFY_BAD; + return -EINVAL; + if (!info->linking) + break; /* We can't have multiple VLAN interfaces configured on * the same port and being members in the same bridge. */ if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port, upper_dev)) - return NOTIFY_BAD; + return -EINVAL; break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; - if (!info->master) - break; if (info->linking) { - if (!mlxsw_sp_vport) { - WARN_ON(!mlxsw_sp_vport); - return NOTIFY_BAD; - } + if (WARN_ON(!mlxsw_sp_vport)) + return -EINVAL; err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport, upper_dev); - if (err) { - netdev_err(dev, "Failed to join bridge\n"); - return NOTIFY_BAD; - } } else { - /* We ignore bridge's unlinking notifications if vPort - * is gone, since we already left the bridge when the - * VLAN device was unlinked from the real device. 
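mlxsw_sp_port_master_bridge_check() above rejects the link when another vPort of the same physical port already has the target bridge as its upper, since two VLAN uppers of one port in one bridge would collide in the hardware's {port, FID} mapping. A sketch of the predicate over a stand-in vPort list:

	#include <stdbool.h>

	struct vport_node {
		struct vport_node *next;
		const void *upper_dev;	/* bridge this vPort belongs to */
	};

	/* Refuse if any existing vPort of the port already joined br_dev. */
	static bool port_can_join_bridge(const struct vport_node *vports,
					 const void *br_dev)
	{
		const struct vport_node *v;

		for (v = vports; v; v = v->next)
			if (v->upper_dev == br_dev)
				return false;
		return true;
	}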
- */ if (!mlxsw_sp_vport) - return NOTIFY_DONE; - err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, - upper_dev, true); - if (err) { - netdev_err(dev, "Failed to leave bridge\n"); - return NOTIFY_BAD; - } + return 0; + mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport); } } - return NOTIFY_DONE; + return err; } static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev, @@ -3492,12 +4489,12 @@ static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev, if (mlxsw_sp_port_dev_check(dev)) { ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr, vid); - if (ret == NOTIFY_BAD) + if (ret) return ret; } } - return NOTIFY_DONE; + return 0; } static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, @@ -3513,41 +4510,58 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr, vid); - return NOTIFY_DONE; + return 0; } static int mlxsw_sp_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); + int err = 0; - if (mlxsw_sp_port_dev_check(dev)) - return mlxsw_sp_netdevice_port_event(dev, event, ptr); - - if (netif_is_lag_master(dev)) - return mlxsw_sp_netdevice_lag_event(dev, event, ptr); + if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU) + err = mlxsw_sp_netdevice_router_port_event(dev); + else if (mlxsw_sp_port_dev_check(dev)) + err = mlxsw_sp_netdevice_port_event(dev, event, ptr); + else if (netif_is_lag_master(dev)) + err = mlxsw_sp_netdevice_lag_event(dev, event, ptr); + else if (netif_is_bridge_master(dev)) + err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr); + else if (is_vlan_dev(dev)) + err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr); - if (is_vlan_dev(dev)) - return mlxsw_sp_netdevice_vlan_event(dev, event, ptr); - - return NOTIFY_DONE; + return notifier_from_errno(err); } static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = { .notifier_call = mlxsw_sp_netdevice_event, }; +static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = { + .notifier_call = mlxsw_sp_inetaddr_event, + .priority = 10, /* Must be called before FIB notifier block */ +}; + +static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = { + .notifier_call = mlxsw_sp_router_netevent_event, +}; + static int __init mlxsw_sp_module_init(void) { int err; register_netdevice_notifier(&mlxsw_sp_netdevice_nb); + register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); + register_netevent_notifier(&mlxsw_sp_router_netevent_nb); + err = mlxsw_core_driver_register(&mlxsw_sp_driver); if (err) goto err_core_driver_register; return 0; err_core_driver_register: + unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); + unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); return err; } @@ -3555,6 +4569,8 @@ err_core_driver_register: static void __exit mlxsw_sp_module_exit(void) { mlxsw_core_driver_unregister(&mlxsw_sp_driver); + unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); + unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 13b30eaa1..ac48abebe 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -39,19 +39,22 @@ #include <linux/types.h> #include <linux/netdevice.h> +#include 
<linux/rhashtable.h> #include <linux/bitops.h> #include <linux/if_vlan.h> #include <linux/list.h> #include <linux/dcbnl.h> +#include <linux/in6.h> #include <net/switchdev.h> #include "port.h" #include "core.h" #define MLXSW_SP_VFID_BASE VLAN_N_VID -#define MLXSW_SP_VFID_PORT_MAX 512 /* Non-bridged VLAN interfaces */ -#define MLXSW_SP_VFID_BR_MAX 6144 /* Bridged VLAN interfaces */ -#define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX) +#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */ + +#define MLXSW_SP_RFID_BASE 15360 +#define MLXSW_SP_RIF_MAX 800 #define MLXSW_SP_LAG_MAX 64 #define MLXSW_SP_PORT_PER_LAG_MAX 16 @@ -60,6 +63,12 @@ #define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4 +#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */ +#define MLXSW_SP_LPM_TREE_MAX 22 +#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN) + +#define MLXSW_SP_VIRTUAL_ROUTER_MAX 256 + #define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */ #define MLXSW_SP_BYTES_PER_CELL 96 @@ -67,6 +76,10 @@ #define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL) #define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL) +#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */ +#define MLXSW_SP_KVD_HASH_SINGLE_SIZE 163840 /* entries */ +#define MLXSW_SP_KVD_HASH_DOUBLE_SIZE 32768 /* entries */ + /* Maximum delay buffer needed in case of PAUSE frames, in cells. * Assumes 100m cable and maximum MTU. */ @@ -87,12 +100,22 @@ struct mlxsw_sp_upper { unsigned int ref_count; }; -struct mlxsw_sp_vfid { +struct mlxsw_sp_fid { + void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport); struct list_head list; - u16 nr_vports; - u16 vfid; /* Starting at 0 */ - struct net_device *br_dev; - u16 vid; + unsigned int ref_count; + struct net_device *dev; + struct mlxsw_sp_rif *r; + u16 fid; +}; + +struct mlxsw_sp_rif { + struct net_device *dev; + unsigned int ref_count; + struct mlxsw_sp_fid *f; + unsigned char addr[ETH_ALEN]; + int mtu; + u16 rif; }; struct mlxsw_sp_mid { @@ -115,7 +138,17 @@ static inline u16 mlxsw_sp_fid_to_vfid(u16 fid) static inline bool mlxsw_sp_fid_is_vfid(u16 fid) { - return fid >= MLXSW_SP_VFID_BASE; + return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE; +} + +static inline bool mlxsw_sp_fid_is_rfid(u16 fid) +{ + return fid >= MLXSW_SP_RFID_BASE; +} + +static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif) +{ + return MLXSW_SP_RFID_BASE + rif; } struct mlxsw_sp_sb_pr { @@ -152,20 +185,97 @@ struct mlxsw_sp_sb { } ports[MLXSW_PORT_MAX_PORTS]; }; -struct mlxsw_sp { +#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE) + +struct mlxsw_sp_prefix_usage { + DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT); +}; + +enum mlxsw_sp_l3proto { + MLXSW_SP_L3_PROTO_IPV4, + MLXSW_SP_L3_PROTO_IPV6, +}; + +struct mlxsw_sp_lpm_tree { + u8 id; /* tree ID */ + unsigned int ref_count; + enum mlxsw_sp_l3proto proto; + struct mlxsw_sp_prefix_usage prefix_usage; +}; + +struct mlxsw_sp_fib; + +struct mlxsw_sp_vr { + u16 id; /* virtual router ID */ + bool used; + enum mlxsw_sp_l3proto proto; + u32 tb_id; /* kernel fib table id */ + struct mlxsw_sp_lpm_tree *lpm_tree; + struct mlxsw_sp_fib *fib; +}; + +enum mlxsw_sp_span_type { + MLXSW_SP_SPAN_EGRESS, + MLXSW_SP_SPAN_INGRESS +}; + +struct mlxsw_sp_span_inspected_port { + struct list_head list; + enum mlxsw_sp_span_type type; + u8 local_port; +}; + +struct mlxsw_sp_span_entry { + u8 local_port; + bool used; + struct list_head bound_ports_list; + int ref_count; + int id; +}; + +enum 
mlxsw_sp_port_mall_action_type { + MLXSW_SP_PORT_MALL_MIRROR, +}; + +struct mlxsw_sp_port_mall_mirror_tc_entry { + u8 to_local_port; + bool ingress; +}; + +struct mlxsw_sp_port_mall_tc_entry { + struct list_head list; + unsigned long cookie; + enum mlxsw_sp_port_mall_action_type type; + union { + struct mlxsw_sp_port_mall_mirror_tc_entry mirror; + }; +}; + +struct mlxsw_sp_router { + struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT]; + struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX]; + struct rhashtable neigh_ht; struct { - struct list_head list; - unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_PORT_MAX)]; - } port_vfids; + struct delayed_work dw; + unsigned long interval; /* ms */ + } neighs_update; + struct delayed_work nexthop_probe_dw; +#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */ + struct list_head nexthop_group_list; + struct list_head nexthop_neighs_list; +}; + +struct mlxsw_sp { struct { struct list_head list; - unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_BR_MAX)]; - } br_vfids; + DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX); + } vfids; struct { struct list_head list; - unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_MID_MAX)]; + DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX); } br_mids; - unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)]; + struct list_head fids; /* VLAN-aware bridge FIDs */ + struct mlxsw_sp_rif *rifs[MLXSW_SP_RIF_MAX]; struct mlxsw_sp_port **ports; struct mlxsw_core *core; const struct mlxsw_bus_info *bus_info; @@ -183,6 +293,15 @@ struct mlxsw_sp { struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX]; u8 port_to_module[MLXSW_PORT_MAX_PORTS]; struct mlxsw_sp_sb sb; + struct mlxsw_sp_router router; + struct { + DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE); + } kvdl; + + struct { + struct mlxsw_sp_span_entry *entries; + int entries_count; + } span; }; static inline struct mlxsw_sp_upper * @@ -217,7 +336,7 @@ struct mlxsw_sp_port { u16 lag_id; struct { struct list_head list; - struct mlxsw_sp_vfid *vfid; + struct mlxsw_sp_fid *f; u16 vid; } vport; struct { @@ -239,8 +358,13 @@ struct mlxsw_sp_port { unsigned long *untagged_vlans; /* VLAN interfaces */ struct list_head vports_list; + /* TC handles */ + struct list_head mall_tc_list; }; +struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev); +void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port); + static inline bool mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port) { @@ -259,28 +383,38 @@ mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index) return mlxsw_sp_port && mlxsw_sp_port->lagged ? 
mlxsw_sp_port : NULL; } +static inline u16 +mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport) +{ + return mlxsw_sp_vport->vport.vid; +} + static inline bool mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port) { - return mlxsw_sp_port->vport.vfid; + u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); + + return vid != 0; } -static inline struct net_device * -mlxsw_sp_vport_br_get(const struct mlxsw_sp_port *mlxsw_sp_vport) +static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport, + struct mlxsw_sp_fid *f) { - return mlxsw_sp_vport->vport.vfid->br_dev; + mlxsw_sp_vport->vport.f = f; } -static inline u16 -mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport) +static inline struct mlxsw_sp_fid * +mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport) { - return mlxsw_sp_vport->vport.vid; + return mlxsw_sp_vport->vport.f; } -static inline u16 -mlxsw_sp_vport_vfid_get(const struct mlxsw_sp_port *mlxsw_sp_vport) +static inline struct net_device * +mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport) { - return mlxsw_sp_vport->vport.vfid->vfid; + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + + return f ? f->dev : NULL; } static inline struct mlxsw_sp_port * @@ -298,20 +432,60 @@ mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) } static inline struct mlxsw_sp_port * -mlxsw_sp_port_vport_find_by_vfid(const struct mlxsw_sp_port *mlxsw_sp_port, - u16 vfid) +mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) { struct mlxsw_sp_port *mlxsw_sp_vport; list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, vport.list) { - if (mlxsw_sp_vport_vfid_get(mlxsw_sp_vport) == vfid) + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport); + + if (f && f->fid == fid) return mlxsw_sp_vport; } return NULL; } +static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp, + u16 fid) +{ + struct mlxsw_sp_fid *f; + + list_for_each_entry(f, &mlxsw_sp->fids, list) + if (f->fid == fid) + return f; + + return NULL; +} + +static inline struct mlxsw_sp_fid * +mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev) +{ + struct mlxsw_sp_fid *f; + + list_for_each_entry(f, &mlxsw_sp->vfids.list, list) + if (f->dev == br_dev) + return f; + + return NULL; +} + +static inline struct mlxsw_sp_rif * +mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev) +{ + int i; + + for (i = 0; i < MLXSW_SP_RIF_MAX; i++) + if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev) + return mlxsw_sp->rifs[i]; + + return NULL; +} + enum mlxsw_sp_flood_table { MLXSW_SP_FLOOD_TABLE_UC, MLXSW_SP_FLOOD_TABLE_BM, @@ -362,14 +536,17 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end, bool is_member, bool untagged); -int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, - u16 vid); -int mlxsw_sp_port_kill_vid(struct net_device *dev, - __be16 __always_unused proto, u16 vid); -int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, - bool set, bool only_uc); +int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, + bool set); void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); +int 
mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid); +int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, + bool adding); +struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid); +void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f); +void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *r); int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, bool dwrr, u8 dwrr_weight); @@ -399,4 +576,21 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) #endif +int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); +int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_ipv4_fib *fib4, + struct switchdev_trans *trans); +int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_ipv4_fib *fib4); +int mlxsw_sp_router_neigh_construct(struct net_device *dev, + struct neighbour *n); +void mlxsw_sp_router_neigh_destroy(struct net_device *dev, + struct neighbour *n); +int mlxsw_sp_router_netevent_event(struct notifier_block *unused, + unsigned long event, void *ptr); + +int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count); +void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index 074cdda7b..953b214f3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -330,7 +330,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, - MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0), MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, MLXSW_SP_CPU_PORT_SB_CM, @@ -717,22 +717,18 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, u8 local_port = mlxsw_sp_port->local_port; u8 pg_buff = tc_index; enum mlxsw_reg_sbxx_dir dir = pool_type; - u8 pool = pool_index; + u8 pool = pool_get(pool_index); u32 max_buff; int err; + if (dir != dir_get(pool_index)) + return -EINVAL; + err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, threshold, &max_buff); if (err) return err; - if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) { - if (pool < MLXSW_SP_SB_POOL_COUNT) - return -EINVAL; - pool -= MLXSW_SP_SB_POOL_COUNT; - } else if (pool >= MLXSW_SP_SB_POOL_COUNT) { - return -EINVAL; - } return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir, 0, max_buff, pool); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c index 01cfb7512..b6ed7f7c5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -341,6 +341,8 @@ static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port, char pfcc_pl[MLXSW_REG_PFCC_LEN]; mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); + mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause); + mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause); mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en); return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), @@ -351,17 +353,17 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct 
net_device *dev, struct ieee_pfc *pfc) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); int err; - if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) && - pfc->pfc_en) { + if (pause_en && pfc->pfc_en) { netdev_err(dev, "PAUSE frames already enabled on port\n"); return -EINVAL; } err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, mlxsw_sp_port->dcb.ets->prio_tc, - false, pfc); + pause_en, pfc); if (err) { netdev_err(dev, "Failed to configure port's headroom for PFC\n"); return err; @@ -380,7 +382,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev, err_port_pfc_set: __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, - mlxsw_sp_port->dcb.ets->prio_tc, false, + mlxsw_sp_port->dcb.ets->prio_tc, pause_en, mlxsw_sp_port->dcb.pfc); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c new file mode 100644 index 000000000..ac321e8e5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c @@ -0,0 +1,91 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <linux/kernel.h> +#include <linux/bitops.h> + +#include "spectrum.h" + +#define MLXSW_SP_KVDL_SINGLE_BASE 0 +#define MLXSW_SP_KVDL_SINGLE_SIZE 16384 +#define MLXSW_SP_KVDL_CHUNKS_BASE \ + (MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE) +#define MLXSW_SP_KVDL_CHUNKS_SIZE \ + (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_CHUNKS_BASE) +#define MLXSW_SP_CHUNK_MAX 32 + +int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count) +{ + int entry_index; + int size; + int type_base; + int type_size; + int type_entries; + + if (entry_count == 0 || entry_count > MLXSW_SP_CHUNK_MAX) { + return -EINVAL; + } else if (entry_count == 1) { + type_base = MLXSW_SP_KVDL_SINGLE_BASE; + type_size = MLXSW_SP_KVDL_SINGLE_SIZE; + type_entries = 1; + } else { + type_base = MLXSW_SP_KVDL_CHUNKS_BASE; + type_size = MLXSW_SP_KVDL_CHUNKS_SIZE; + type_entries = MLXSW_SP_CHUNK_MAX; + } + + entry_index = type_base; + size = type_base + type_size; + for_each_clear_bit_from(entry_index, mlxsw_sp->kvdl.usage, size) { + int i; + + for (i = 0; i < type_entries; i++) + set_bit(entry_index + i, mlxsw_sp->kvdl.usage); + return entry_index; + } + return -ENOBUFS; +} + +void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index) +{ + int type_entries; + int i; + + if (entry_index < MLXSW_SP_KVDL_CHUNKS_BASE) + type_entries = 1; + else + type_entries = MLXSW_SP_CHUNK_MAX; + for (i = 0; i < type_entries; i++) + clear_bit(entry_index + i, mlxsw_sp->kvdl.usage); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c new file mode 100644 index 000000000..3f5c51da6 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -0,0 +1,1863 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> + * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com> + * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/rhashtable.h> +#include <linux/bitops.h> +#include <linux/in6.h> +#include <linux/notifier.h> +#include <net/netevent.h> +#include <net/neighbour.h> +#include <net/arp.h> + +#include "spectrum.h" +#include "core.h" +#include "reg.h" + +#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \ + for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT) + +static bool +mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1, + struct mlxsw_sp_prefix_usage *prefix_usage2) +{ + unsigned char prefix; + + mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) { + if (!test_bit(prefix, prefix_usage2->b)) + return false; + } + return true; +} + +static bool +mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1, + struct mlxsw_sp_prefix_usage *prefix_usage2) +{ + return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1)); +} + +static bool +mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage) +{ + struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } }; + + return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none); +} + +static void +mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1, + struct mlxsw_sp_prefix_usage *prefix_usage2) +{ + memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1)); +} + +static void +mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage) +{ + memset(prefix_usage, 0, sizeof(*prefix_usage)); +} + +static void +mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage, + unsigned char prefix_len) +{ + set_bit(prefix_len, prefix_usage->b); +} + +static void +mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage, + unsigned char prefix_len) +{ + clear_bit(prefix_len, prefix_usage->b); +} + +struct mlxsw_sp_fib_key { + struct net_device *dev; + unsigned char addr[sizeof(struct in6_addr)]; + unsigned char prefix_len; +}; + +enum mlxsw_sp_fib_entry_type { + MLXSW_SP_FIB_ENTRY_TYPE_REMOTE, + MLXSW_SP_FIB_ENTRY_TYPE_LOCAL, + MLXSW_SP_FIB_ENTRY_TYPE_TRAP, +}; + +struct mlxsw_sp_nexthop_group; + +struct mlxsw_sp_fib_entry { + struct rhash_head ht_node; + struct mlxsw_sp_fib_key key; + enum mlxsw_sp_fib_entry_type type; + unsigned int ref_count; + u16 rif; /* used for action local */ + struct mlxsw_sp_vr *vr; + struct list_head nexthop_group_node; + struct mlxsw_sp_nexthop_group *nh_group; +}; + +struct mlxsw_sp_fib { + struct rhashtable ht; + unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT]; + struct mlxsw_sp_prefix_usage prefix_usage; +}; + +static const struct rhashtable_params mlxsw_sp_fib_ht_params = { + .key_offset = offsetof(struct mlxsw_sp_fib_entry, key), + .head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node), + .key_len = sizeof(struct mlxsw_sp_fib_key), + .automatic_shrinking = true, +}; + +static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib, + struct mlxsw_sp_fib_entry *fib_entry) +{ + unsigned 
char prefix_len = fib_entry->key.prefix_len; + int err; + + err = rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node, + mlxsw_sp_fib_ht_params); + if (err) + return err; + if (fib->prefix_ref_count[prefix_len]++ == 0) + mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len); + return 0; +} + +static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib, + struct mlxsw_sp_fib_entry *fib_entry) +{ + unsigned char prefix_len = fib_entry->key.prefix_len; + + if (--fib->prefix_ref_count[prefix_len] == 0) + mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len); + rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node, + mlxsw_sp_fib_ht_params); +} + +static struct mlxsw_sp_fib_entry * +mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr, + size_t addr_len, unsigned char prefix_len, + struct net_device *dev) +{ + struct mlxsw_sp_fib_entry *fib_entry; + + fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL); + if (!fib_entry) + return NULL; + fib_entry->key.dev = dev; + memcpy(fib_entry->key.addr, addr, addr_len); + fib_entry->key.prefix_len = prefix_len; + return fib_entry; +} + +static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry) +{ + kfree(fib_entry); +} + +static struct mlxsw_sp_fib_entry * +mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr, + size_t addr_len, unsigned char prefix_len, + struct net_device *dev) +{ + struct mlxsw_sp_fib_key key; + + memset(&key, 0, sizeof(key)); + key.dev = dev; + memcpy(key.addr, addr, addr_len); + key.prefix_len = prefix_len; + return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params); +} + +static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void) +{ + struct mlxsw_sp_fib *fib; + int err; + + fib = kzalloc(sizeof(*fib), GFP_KERNEL); + if (!fib) + return ERR_PTR(-ENOMEM); + err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params); + if (err) + goto err_rhashtable_init; + return fib; + +err_rhashtable_init: + kfree(fib); + return ERR_PTR(err); +} + +static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib) +{ + rhashtable_destroy(&fib->ht); + kfree(fib); +} + +static struct mlxsw_sp_lpm_tree * +mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved) +{ + struct mlxsw_sp_lpm_tree *lpm_tree; + int i; + + for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) { + lpm_tree = &mlxsw_sp->router.lpm_trees[i]; + if (lpm_tree->ref_count == 0) { + if (one_reserved) + one_reserved = false; + else + return lpm_tree; + } + } + return NULL; +} + +static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_lpm_tree *lpm_tree) +{ + char ralta_pl[MLXSW_REG_RALTA_LEN]; + + mlxsw_reg_ralta_pack(ralta_pl, true, lpm_tree->proto, lpm_tree->id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl); +} + +static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_lpm_tree *lpm_tree) +{ + char ralta_pl[MLXSW_REG_RALTA_LEN]; + + mlxsw_reg_ralta_pack(ralta_pl, false, lpm_tree->proto, lpm_tree->id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl); +} + +static int +mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_prefix_usage *prefix_usage, + struct mlxsw_sp_lpm_tree *lpm_tree) +{ + char ralst_pl[MLXSW_REG_RALST_LEN]; + u8 root_bin = 0; + u8 prefix; + u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD; + + mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) + root_bin = prefix; + + mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id); + mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
{ + if (prefix == 0) + continue; + mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix, + MLXSW_REG_RALST_BIN_NO_CHILD); + last_prefix = prefix; + } + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl); +} + +static struct mlxsw_sp_lpm_tree * +mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_prefix_usage *prefix_usage, + enum mlxsw_sp_l3proto proto, bool one_reserved) +{ + struct mlxsw_sp_lpm_tree *lpm_tree; + int err; + + lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved); + if (!lpm_tree) + return ERR_PTR(-EBUSY); + lpm_tree->proto = proto; + err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree); + if (err) + return ERR_PTR(err); + + err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage, + lpm_tree); + if (err) + goto err_left_struct_set; + return lpm_tree; + +err_left_struct_set: + mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree); + return ERR_PTR(err); +} + +static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_lpm_tree *lpm_tree) +{ + return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree); +} + +static struct mlxsw_sp_lpm_tree * +mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_prefix_usage *prefix_usage, + enum mlxsw_sp_l3proto proto, bool one_reserved) +{ + struct mlxsw_sp_lpm_tree *lpm_tree; + int i; + + for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) { + lpm_tree = &mlxsw_sp->router.lpm_trees[i]; + if (lpm_tree->proto == proto && + mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage, + prefix_usage)) + goto inc_ref_count; + } + lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, + proto, one_reserved); + if (IS_ERR(lpm_tree)) + return lpm_tree; + +inc_ref_count: + lpm_tree->ref_count++; + return lpm_tree; +} + +static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_lpm_tree *lpm_tree) +{ + if (--lpm_tree->ref_count == 0) + return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree); + return 0; +} + +static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_lpm_tree *lpm_tree; + int i; + + for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) { + lpm_tree = &mlxsw_sp->router.lpm_trees[i]; + lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN; + } +} + +static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_vr *vr; + int i; + + for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) { + vr = &mlxsw_sp->router.vrs[i]; + if (!vr->used) + return vr; + } + return NULL; +} + +static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_vr *vr) +{ + char raltb_pl[MLXSW_REG_RALTB_LEN]; + + mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, vr->lpm_tree->id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); +} + +static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_vr *vr) +{ + char raltb_pl[MLXSW_REG_RALTB_LEN]; + + /* Bind to tree 0 which is default */ + mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, 0); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); +} + +static u32 mlxsw_sp_fix_tb_id(u32 tb_id) +{ + /* For our purpose, squash main and local table into one */ + if (tb_id == RT_TABLE_LOCAL) + tb_id = RT_TABLE_MAIN; + return tb_id; +} + +static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp, + u32 tb_id, + enum mlxsw_sp_l3proto proto) +{ + struct mlxsw_sp_vr *vr; + int i; + + tb_id = mlxsw_sp_fix_tb_id(tb_id); + for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) { + vr = &mlxsw_sp->router.vrs[i]; + if (vr->used && vr->proto == 
proto && vr->tb_id == tb_id) + return vr; + } + return NULL; +} + +static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, + unsigned char prefix_len, + u32 tb_id, + enum mlxsw_sp_l3proto proto) +{ + struct mlxsw_sp_prefix_usage req_prefix_usage; + struct mlxsw_sp_lpm_tree *lpm_tree; + struct mlxsw_sp_vr *vr; + int err; + + vr = mlxsw_sp_vr_find_unused(mlxsw_sp); + if (!vr) + return ERR_PTR(-EBUSY); + vr->fib = mlxsw_sp_fib_create(); + if (IS_ERR(vr->fib)) + return ERR_CAST(vr->fib); + + vr->proto = proto; + vr->tb_id = tb_id; + mlxsw_sp_prefix_usage_zero(&req_prefix_usage); + mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len); + lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage, + proto, true); + if (IS_ERR(lpm_tree)) { + err = PTR_ERR(lpm_tree); + goto err_tree_get; + } + vr->lpm_tree = lpm_tree; + err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr); + if (err) + goto err_tree_bind; + + vr->used = true; + return vr; + +err_tree_bind: + mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree); +err_tree_get: + mlxsw_sp_fib_destroy(vr->fib); + + return ERR_PTR(err); +} + +static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_vr *vr) +{ + mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr); + mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree); + mlxsw_sp_fib_destroy(vr->fib); + vr->used = false; +} + +static int +mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr, + struct mlxsw_sp_prefix_usage *req_prefix_usage) +{ + struct mlxsw_sp_lpm_tree *lpm_tree; + + if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, + &vr->lpm_tree->prefix_usage)) + return 0; + + lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage, + vr->proto, false); + if (IS_ERR(lpm_tree)) { + /* We failed to get a tree according to the required + * prefix usage. However, the current tree might still be good + * for us if our requirement is a subset of the prefixes used + * in the tree. + */ + if (mlxsw_sp_prefix_usage_subset(req_prefix_usage, + &vr->lpm_tree->prefix_usage)) + return 0; + return PTR_ERR(lpm_tree); + } + + mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr); + mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree); + vr->lpm_tree = lpm_tree; + return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr); +} + +static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, + unsigned char prefix_len, + u32 tb_id, + enum mlxsw_sp_l3proto proto) +{ + struct mlxsw_sp_vr *vr; + int err; + + tb_id = mlxsw_sp_fix_tb_id(tb_id); + vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto); + if (!vr) { + vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto); + if (IS_ERR(vr)) + return vr; + } else { + struct mlxsw_sp_prefix_usage req_prefix_usage; + + mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, + &vr->fib->prefix_usage); + mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len); + /* Need to replace the LPM tree in case a new prefix is + * required. + */ + err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr, + &req_prefix_usage); + if (err) + return ERR_PTR(err); + } + return vr; +} + +static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr) +{ + /* Destroy the virtual router entity in case the associated FIB is + * empty and allow it to be reused for other tables in the future. + * Otherwise, check whether some prefix usage disappeared and change + * the tree if that is the case. Note that in case a new, smaller tree + * cannot be allocated, the original one keeps being used.
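+ * For example, once the last /24 route in a table is removed, the + * FIB's prefix usage shrinks and the check below may migrate the + * virtual router to a tree that no longer carries the /24 bin.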
+ */ + if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage)) + mlxsw_sp_vr_destroy(mlxsw_sp, vr); + else + mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr, + &vr->fib->prefix_usage); +} + +static void mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_vr *vr; + int i; + + for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) { + vr = &mlxsw_sp->router.vrs[i]; + vr->id = i; + } +} + +struct mlxsw_sp_neigh_key { + unsigned char addr[sizeof(struct in6_addr)]; + struct net_device *dev; +}; + +struct mlxsw_sp_neigh_entry { + struct rhash_head ht_node; + struct mlxsw_sp_neigh_key key; + u16 rif; + struct neighbour *n; + bool offloaded; + struct delayed_work dw; + struct mlxsw_sp_port *mlxsw_sp_port; + unsigned char ha[ETH_ALEN]; + struct list_head nexthop_list; /* list of nexthops using + * this neigh entry + */ + struct list_head nexthop_neighs_list_node; +}; + +static const struct rhashtable_params mlxsw_sp_neigh_ht_params = { + .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key), + .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node), + .key_len = sizeof(struct mlxsw_sp_neigh_key), +}; + +static int +mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry) +{ + return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht, + &neigh_entry->ht_node, + mlxsw_sp_neigh_ht_params); +} + +static void +mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry) +{ + rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht, + &neigh_entry->ht_node, + mlxsw_sp_neigh_ht_params); +} + +static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work); + +static struct mlxsw_sp_neigh_entry * +mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len, + struct net_device *dev, u16 rif, + struct neighbour *n) +{ + struct mlxsw_sp_neigh_entry *neigh_entry; + + neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC); + if (!neigh_entry) + return NULL; + memcpy(neigh_entry->key.addr, addr, addr_len); + neigh_entry->key.dev = dev; + neigh_entry->rif = rif; + neigh_entry->n = n; + INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw); + INIT_LIST_HEAD(&neigh_entry->nexthop_list); + return neigh_entry; +} + +static void +mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry) +{ + kfree(neigh_entry); +} + +static struct mlxsw_sp_neigh_entry * +mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr, + size_t addr_len, struct net_device *dev) +{ + struct mlxsw_sp_neigh_key key = {{ 0 } }; + + memcpy(key.addr, addr, addr_len); + key.dev = dev; + return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht, + &key, mlxsw_sp_neigh_ht_params); +} + +int mlxsw_sp_router_neigh_construct(struct net_device *dev, + struct neighbour *n) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_neigh_entry *neigh_entry; + struct mlxsw_sp_rif *r; + u32 dip; + int err; + + if (n->tbl != &arp_tbl) + return 0; + + dip = ntohl(*((__be32 *) n->primary_key)); + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip), + n->dev); + if (neigh_entry) { + WARN_ON(neigh_entry->n != n); + return 0; + } + + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); + if (WARN_ON(!r)) + return -EINVAL; + + neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev, + r->rif, n); + if (!neigh_entry) + return -ENOMEM; + err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); + if (err) + goto err_neigh_entry_insert; + 
return 0; + +err_neigh_entry_insert: + mlxsw_sp_neigh_entry_destroy(neigh_entry); + return err; +} + +void mlxsw_sp_router_neigh_destroy(struct net_device *dev, + struct neighbour *n) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_neigh_entry *neigh_entry; + u32 dip; + + if (n->tbl != &arp_tbl) + return; + + dip = ntohl(*((__be32 *) n->primary_key)); + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip), + n->dev); + if (!neigh_entry) + return; + mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); + mlxsw_sp_neigh_entry_destroy(neigh_entry); +} + +static void +mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp) +{ + unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME); + + mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval); +} + +static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, + int ent_index) +{ + struct net_device *dev; + struct neighbour *n; + __be32 dipn; + u32 dip; + u16 rif; + + mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip); + + if (!mlxsw_sp->rifs[rif]) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n"); + return; + } + + dipn = htonl(dip); + dev = mlxsw_sp->rifs[rif]->dev; + n = neigh_lookup(&arp_tbl, &dipn, dev); + if (!n) { + netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n", + &dip); + return; + } + + netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip); + neigh_event_send(n, NULL); + neigh_release(n); +} + +static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, + int rec_index) +{ + u8 num_entries; + int i; + + num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl, + rec_index); + /* Hardware starts counting at 0, so add 1. */ + num_entries++; + + /* Each record consists of several neighbour entries. */ + for (i = 0; i < num_entries; i++) { + int ent_index; + + ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i; + mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl, + ent_index); + } + +} + +static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, int rec_index) +{ + switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) { + case MLXSW_REG_RAUHTD_TYPE_IPV4: + mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl, + rec_index); + break; + case MLXSW_REG_RAUHTD_TYPE_IPV6: + WARN_ON_ONCE(1); + break; + } +} + +static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) +{ + char *rauhtd_pl; + u8 num_rec; + int i, err; + + rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL); + if (!rauhtd_pl) + return -ENOMEM; + + /* Make sure the neighbour's netdev isn't removed in the + * process. 
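+ * Holding RTNL should also keep the RIF-to-netdev mapping used by + * mlxsw_sp_router_neigh_ent_ipv4_process() stable while the dumped + * records are processed.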
+ */ + rtnl_lock(); + do { + mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd), + rauhtd_pl); + if (err) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n"); + break; + } + num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl); + for (i = 0; i < num_rec; i++) + mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl, + i); + } while (num_rec); + rtnl_unlock(); + + kfree(rauhtd_pl); + return err; +} + +static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_neigh_entry *neigh_entry; + + /* Take RTNL mutex here to prevent the lists from changing */ + rtnl_lock(); + list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list, + nexthop_neighs_list_node) { + /* If this neigh has nexthops, make the kernel think this neigh + * is active regardless of the traffic. + */ + if (!list_empty(&neigh_entry->nexthop_list)) + neigh_event_send(neigh_entry->n, NULL); + } + rtnl_unlock(); +} + +static void +mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp) +{ + unsigned long interval = mlxsw_sp->router.neighs_update.interval; + + mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, + msecs_to_jiffies(interval)); +} + +static void mlxsw_sp_router_neighs_update_work(struct work_struct *work) +{ + struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp, + router.neighs_update.dw.work); + int err; + + err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp); + if (err) + dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n"); + + mlxsw_sp_router_neighs_update_nh(mlxsw_sp); + + mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp); +} + +static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work) +{ + struct mlxsw_sp_neigh_entry *neigh_entry; + struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp, + router.nexthop_probe_dw.work); + + /* Iterate over nexthop neighbours, find those that are unresolved and + * send ARP on them. This solves the chicken-and-egg problem: a nexthop + * won't get offloaded until the neighbour is resolved, but it would + * never get resolved if traffic is flowing in HW using a different + * nexthop. + * + * Take RTNL mutex here to prevent the lists from changing.
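+ * The nud_state test below is done without taking the neigh lock; + * a racy read is harmless here since it can at most cost one + * redundant probe.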
+ */ + rtnl_lock(); + list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list, + nexthop_neighs_list_node) { + if (!(neigh_entry->n->nud_state & NUD_VALID) && + !list_empty(&neigh_entry->nexthop_list)) + neigh_event_send(neigh_entry->n, NULL); + } + rtnl_unlock(); + + mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, + MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL); +} + +static void +mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry, + bool removing); + +static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work) +{ + struct mlxsw_sp_neigh_entry *neigh_entry = + container_of(work, struct mlxsw_sp_neigh_entry, dw.work); + struct neighbour *n = neigh_entry->n; + struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port; + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char rauht_pl[MLXSW_REG_RAUHT_LEN]; + struct net_device *dev; + bool entry_connected; + u8 nud_state; + bool updating; + bool removing; + bool adding; + u32 dip; + int err; + + read_lock_bh(&n->lock); + dip = ntohl(*((__be32 *) n->primary_key)); + memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha)); + nud_state = n->nud_state; + dev = n->dev; + read_unlock_bh(&n->lock); + + entry_connected = nud_state & NUD_VALID; + adding = (!neigh_entry->offloaded) && entry_connected; + updating = neigh_entry->offloaded && entry_connected; + removing = neigh_entry->offloaded && !entry_connected; + + if (adding || updating) { + mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD, + neigh_entry->rif, + neigh_entry->ha, dip); + err = mlxsw_reg_write(mlxsw_sp->core, + MLXSW_REG(rauht), rauht_pl); + if (err) { + netdev_err(dev, "Could not add neigh %pI4h\n", &dip); + neigh_entry->offloaded = false; + } else { + neigh_entry->offloaded = true; + } + mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, false); + } else if (removing) { + mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE, + neigh_entry->rif, + neigh_entry->ha, dip); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), + rauht_pl); + if (err) { + netdev_err(dev, "Could not delete neigh %pI4h\n", &dip); + neigh_entry->offloaded = true; + } else { + neigh_entry->offloaded = false; + } + mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, true); + } + + neigh_release(n); + mlxsw_sp_port_dev_put(mlxsw_sp_port); +} + +int mlxsw_sp_router_netevent_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct mlxsw_sp_neigh_entry *neigh_entry; + struct mlxsw_sp_port *mlxsw_sp_port; + struct mlxsw_sp *mlxsw_sp; + unsigned long interval; + struct net_device *dev; + struct neigh_parms *p; + struct neighbour *n; + u32 dip; + + switch (event) { + case NETEVENT_DELAY_PROBE_TIME_UPDATE: + p = ptr; + + /* We don't care about changes in the default table. */ + if (!p->dev || p->tbl != &arp_tbl) + return NOTIFY_DONE; + + /* We are in atomic context and can't take RTNL mutex, + * so use RCU variant to walk the device chain. 
+ */ + mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev); + if (!mlxsw_sp_port) + return NOTIFY_DONE; + + mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME)); + mlxsw_sp->router.neighs_update.interval = interval; + + mlxsw_sp_port_dev_put(mlxsw_sp_port); + break; + case NETEVENT_NEIGH_UPDATE: + n = ptr; + dev = n->dev; + + if (n->tbl != &arp_tbl) + return NOTIFY_DONE; + + mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev); + if (!mlxsw_sp_port) + return NOTIFY_DONE; + + mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + dip = ntohl(*((__be32 *) n->primary_key)); + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, + &dip, + sizeof(__be32), + dev); + if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) { + mlxsw_sp_port_dev_put(mlxsw_sp_port); + return NOTIFY_DONE; + } + neigh_entry->mlxsw_sp_port = mlxsw_sp_port; + + /* Take a reference to ensure the neighbour won't be + * destructed until we drop the reference in delayed + * work. + */ + neigh_clone(n); + if (!mlxsw_core_schedule_dw(&neigh_entry->dw, 0)) { + neigh_release(n); + mlxsw_sp_port_dev_put(mlxsw_sp_port); + } + break; + } + + return NOTIFY_DONE; +} + +static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp) +{ + int err; + + err = rhashtable_init(&mlxsw_sp->router.neigh_ht, + &mlxsw_sp_neigh_ht_params); + if (err) + return err; + + /* Initialize the polling interval according to the default + * table. + */ + mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp); + + /* Create the delayed works for the activity_update */ + INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw, + mlxsw_sp_router_neighs_update_work); + INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw, + mlxsw_sp_router_probe_unresolved_nexthops); + mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0); + mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0); + return 0; +} + +static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp) +{ + cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw); + cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw); + rhashtable_destroy(&mlxsw_sp->router.neigh_ht); +} + +struct mlxsw_sp_nexthop { + struct list_head neigh_list_node; /* member of neigh entry list */ + struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group + * this belongs to + */ + u8 should_offload:1, /* set indicates this neigh is connected and + * should be put to KVD linear area of this group. + */ + offloaded:1, /* set in case the neigh is actually put into + * KVD linear area of this group. 
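+ * should_offload and offloaded may disagree transiently after a + * netevent; mlxsw_sp_nexthop_group_refresh() reconciles the two.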
+ */ + update:1; /* set indicates that MAC of this neigh should be + * updated in HW + */ + struct mlxsw_sp_neigh_entry *neigh_entry; +}; + +struct mlxsw_sp_nexthop_group { + struct list_head list; /* node in mlxsw->router.nexthop_group_list */ + struct list_head fib_list; /* list of fib entries that use this group */ + u8 adj_index_valid:1; + u32 adj_index; + u16 ecmp_size; + u16 count; + struct mlxsw_sp_nexthop nexthops[0]; +}; + +static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_vr *vr, + u32 adj_index, u16 ecmp_size, + u32 new_adj_index, + u16 new_ecmp_size) +{ + char raleu_pl[MLXSW_REG_RALEU_LEN]; + + mlxsw_reg_raleu_pack(raleu_pl, vr->proto, vr->id, + adj_index, ecmp_size, + new_adj_index, new_ecmp_size); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl); +} + +static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp, + u32 old_adj_index, u16 old_ecmp_size) +{ + struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_vr *vr = NULL; + int err; + + list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { + if (vr == fib_entry->vr) + continue; + vr = fib_entry->vr; + err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr, + old_adj_index, + old_ecmp_size, + nh_grp->adj_index, + nh_grp->ecmp_size); + if (err) + return err; + } + return 0; +} + +static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_nexthop *nh) +{ + struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; + char ratr_pl[MLXSW_REG_RATR_LEN]; + + mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, + true, adj_index, neigh_entry->rif); + mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); +} + +static int +mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + u32 adj_index = nh_grp->adj_index; /* base */ + struct mlxsw_sp_nexthop *nh; + int i; + int err; + + for (i = 0; i < nh_grp->count; i++) { + nh = &nh_grp->nexthops[i]; + + if (!nh->should_offload) { + nh->offloaded = 0; + continue; + } + + if (nh->update) { + err = mlxsw_sp_nexthop_mac_update(mlxsw_sp, + adj_index, nh); + if (err) + return err; + nh->update = 0; + nh->offloaded = 1; + } + adj_index++; + } + return 0; +} + +static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry); + +static int +mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + struct mlxsw_sp_fib_entry *fib_entry; + int err; + + list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { + err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); + if (err) + return err; + } + return 0; +} + +static void +mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + struct mlxsw_sp_nexthop *nh; + bool offload_change = false; + u32 adj_index; + u16 ecmp_size = 0; + bool old_adj_index_valid; + u32 old_adj_index; + u16 old_ecmp_size; + int ret; + int i; + int err; + + for (i = 0; i < nh_grp->count; i++) { + nh = &nh_grp->nexthops[i]; + + if (nh->should_offload ^ nh->offloaded) { + offload_change = true; + if (nh->should_offload) + nh->update = 1; + } + if (nh->should_offload) + ecmp_size++; + } + if (!offload_change) { + /* Nothing was added or removed, so no need to reallocate. Just + * update MAC on existing adjacency indexes. 
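+ * Offloaded nexthops occupy consecutive KVD linear entries starting + * at the group's base adj_index; e.g. a group with ecmp_size 2 uses + * entries adj_index and adj_index + 1.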
+ */ + err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp); + if (err) { + dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); + goto set_trap; + } + return; + } + if (!ecmp_size) + /* No neigh of this group is connected, so we just set + * the trap and let everything flow through the kernel. + */ + goto set_trap; + + ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size); + if (ret < 0) { + /* We ran out of KVD linear space, just set the + * trap and let everything flow through the kernel. + */ + dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n"); + goto set_trap; + } + adj_index = ret; + old_adj_index_valid = nh_grp->adj_index_valid; + old_adj_index = nh_grp->adj_index; + old_ecmp_size = nh_grp->ecmp_size; + nh_grp->adj_index_valid = 1; + nh_grp->adj_index = adj_index; + nh_grp->ecmp_size = ecmp_size; + err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp); + if (err) { + dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); + goto set_trap; + } + + if (!old_adj_index_valid) { + /* The trap was set for fib entries, so we have to call + * fib entry update to unset it and use the adjacency index. + */ + err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp); + if (err) { + dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n"); + goto set_trap; + } + return; + } + + err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp, + old_adj_index, old_ecmp_size); + mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index); + if (err) { + dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n"); + goto set_trap; + } + return; + +set_trap: + old_adj_index_valid = nh_grp->adj_index_valid; + nh_grp->adj_index_valid = 0; + for (i = 0; i < nh_grp->count; i++) { + nh = &nh_grp->nexthops[i]; + nh->offloaded = 0; + } + err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp); + if (err) + dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n"); + if (old_adj_index_valid) + mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index); +} + +static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, + bool removing) +{ + if (!removing && !nh->should_offload) + nh->should_offload = 1; + else if (removing && nh->offloaded) + nh->should_offload = 0; + nh->update = 1; +} + +static void +mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry, + bool removing) +{ + struct mlxsw_sp_nexthop *nh; + + /* Take RTNL mutex here to prevent the lists from changing */ + rtnl_lock(); + list_for_each_entry(nh, &neigh_entry->nexthop_list, + neigh_list_node) { + __mlxsw_sp_nexthop_neigh_update(nh, removing); + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); + } + rtnl_unlock(); +} + +static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp, + struct mlxsw_sp_nexthop *nh, + struct fib_nh *fib_nh) +{ + struct mlxsw_sp_neigh_entry *neigh_entry; + u32 gwip = ntohl(fib_nh->nh_gw); + struct net_device *dev = fib_nh->nh_dev; + struct neighbour *n; + u8 nud_state; + + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip, + sizeof(gwip), dev); + if (!neigh_entry) { + __be32 gwipn = htonl(gwip); + + n = neigh_create(&arp_tbl, &gwipn, dev); + if (IS_ERR(n)) + return PTR_ERR(n); + neigh_event_send(n, NULL); + neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip, + sizeof(gwip), dev); + if (!neigh_entry) { + neigh_release(n); + return -EINVAL; + } + } else { + /* Take
a reference on the neigh here, ensuring that it won't be + * destroyed before the nexthop entry is finished. The other + * branch takes its reference in neigh_create(). + */ + n = neigh_entry->n; + neigh_clone(n); + } + + /* If that is the first nexthop connected to that neigh, add to + * nexthop_neighs_list + */ + if (list_empty(&neigh_entry->nexthop_list)) + list_add_tail(&neigh_entry->nexthop_neighs_list_node, + &mlxsw_sp->router.nexthop_neighs_list); + + nh->nh_grp = nh_grp; + nh->neigh_entry = neigh_entry; + list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list); + read_lock_bh(&n->lock); + nud_state = n->nud_state; + read_unlock_bh(&n->lock); + __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID)); + + return 0; +} + +static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; + + list_del(&nh->neigh_list_node); + + /* If that is the last nexthop connected to that neigh, remove from + * nexthop_neighs_list + */ + if (list_empty(&nh->neigh_entry->nexthop_list)) + list_del(&nh->neigh_entry->nexthop_neighs_list_node); + + neigh_release(neigh_entry->n); +} + +static struct mlxsw_sp_nexthop_group * +mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) +{ + struct mlxsw_sp_nexthop_group *nh_grp; + struct mlxsw_sp_nexthop *nh; + struct fib_nh *fib_nh; + size_t alloc_size; + int i; + int err; + + alloc_size = sizeof(*nh_grp) + + fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop); + nh_grp = kzalloc(alloc_size, GFP_KERNEL); + if (!nh_grp) + return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&nh_grp->fib_list); + nh_grp->count = fi->fib_nhs; + for (i = 0; i < nh_grp->count; i++) { + nh = &nh_grp->nexthops[i]; + fib_nh = &fi->fib_nh[i]; + err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh); + if (err) + goto err_nexthop_init; + } + list_add_tail(&nh_grp->list, &mlxsw_sp->router.nexthop_group_list); + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); + return nh_grp; + +err_nexthop_init: + for (i--; i >= 0; i--) + mlxsw_sp_nexthop_fini(mlxsw_sp, &nh_grp->nexthops[i]); + kfree(nh_grp); + return ERR_PTR(err); +} + +static void +mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + struct mlxsw_sp_nexthop *nh; + int i; + + list_del(&nh_grp->list); + for (i = 0; i < nh_grp->count; i++) { + nh = &nh_grp->nexthops[i]; + mlxsw_sp_nexthop_fini(mlxsw_sp, nh); + } + kfree(nh_grp); +} + +static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh, + struct fib_info *fi) +{ + int i; + + for (i = 0; i < fi->fib_nhs; i++) { + struct fib_nh *fib_nh = &fi->fib_nh[i]; + u32 gwip = ntohl(fib_nh->nh_gw); + + if (memcmp(nh->neigh_entry->key.addr, + &gwip, sizeof(u32)) == 0 && + nh->neigh_entry->key.dev == fib_nh->nh_dev) + return true; + } + return false; +} + +static bool mlxsw_sp_nexthop_group_match(struct mlxsw_sp_nexthop_group *nh_grp, + struct fib_info *fi) +{ + int i; + + if (nh_grp->count != fi->fib_nhs) + return false; + for (i = 0; i < nh_grp->count; i++) { + struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; + + if (!mlxsw_sp_nexthop_match(nh, fi)) + return false; + } + return true; +} + +static struct mlxsw_sp_nexthop_group * +mlxsw_sp_nexthop_group_find(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) +{ + struct mlxsw_sp_nexthop_group *nh_grp; + + list_for_each_entry(nh_grp, &mlxsw_sp->router.nexthop_group_list, + list) { + if (mlxsw_sp_nexthop_group_match(nh_grp, fi)) + return nh_grp; + } + return NULL; +} + +static int
mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + struct fib_info *fi) +{ + struct mlxsw_sp_nexthop_group *nh_grp; + + nh_grp = mlxsw_sp_nexthop_group_find(mlxsw_sp, fi); + if (!nh_grp) { + nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi); + if (IS_ERR(nh_grp)) + return PTR_ERR(nh_grp); + } + list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list); + fib_entry->nh_group = nh_grp; + return 0; +} + +static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; + + list_del(&fib_entry->nexthop_group_node); + if (!list_empty(&nh_grp->fib_list)) + return; + mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp); +} + +static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) +{ + char rgcr_pl[MLXSW_REG_RGCR_LEN]; + + mlxsw_reg_rgcr_pack(rgcr_pl, true); + mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, MLXSW_SP_RIF_MAX); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); +} + +static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) +{ + char rgcr_pl[MLXSW_REG_RGCR_LEN]; + + mlxsw_reg_rgcr_pack(rgcr_pl, false); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); +} + +int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) +{ + int err; + + INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list); + INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list); + err = __mlxsw_sp_router_init(mlxsw_sp); + if (err) + return err; + mlxsw_sp_lpm_init(mlxsw_sp); + mlxsw_sp_vrs_init(mlxsw_sp); + err = mlxsw_sp_neigh_init(mlxsw_sp); + if (err) + goto err_neigh_init; + return 0; + +err_neigh_init: + __mlxsw_sp_router_fini(mlxsw_sp); + return err; +} + +void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp_neigh_fini(mlxsw_sp); + __mlxsw_sp_router_fini(mlxsw_sp); +} + +static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + char ralue_pl[MLXSW_REG_RALUE_LEN]; + u32 *p_dip = (u32 *) fib_entry->key.addr; + struct mlxsw_sp_vr *vr = fib_entry->vr; + enum mlxsw_reg_ralue_trap_action trap_action; + u16 trap_id = 0; + u32 adjacency_index = 0; + u16 ecmp_size = 0; + + /* In case the nexthop group adjacency index is valid, use it + * with provided ECMP size. Otherwise, setup trap and pass + * traffic to kernel. 
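+ * Trapped packets are delivered to the CPU and forwarded in + * software by the kernel until the group becomes offloadable.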
+ */ + if (fib_entry->nh_group->adj_index_valid) { + trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP; + adjacency_index = fib_entry->nh_group->adj_index; + ecmp_size = fib_entry->nh_group->ecmp_size; + } else { + trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP; + trap_id = MLXSW_TRAP_ID_RTR_INGRESS0; + } + + mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id, + fib_entry->key.prefix_len, *p_dip); + mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id, + adjacency_index, ecmp_size); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); +} + +static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + char ralue_pl[MLXSW_REG_RALUE_LEN]; + u32 *p_dip = (u32 *) fib_entry->key.addr; + struct mlxsw_sp_vr *vr = fib_entry->vr; + + mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id, + fib_entry->key.prefix_len, *p_dip); + mlxsw_reg_ralue_act_local_pack(ralue_pl, + MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0, + fib_entry->rif); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); +} + +static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + char ralue_pl[MLXSW_REG_RALUE_LEN]; + u32 *p_dip = (u32 *) fib_entry->key.addr; + struct mlxsw_sp_vr *vr = fib_entry->vr; + + mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id, + fib_entry->key.prefix_len, *p_dip); + mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); +} + +static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + switch (fib_entry->type) { + case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE: + return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op); + case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL: + return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op); + case MLXSW_SP_FIB_ENTRY_TYPE_TRAP: + return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op); + } + return -EINVAL; +} + +static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + switch (fib_entry->vr->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + return mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op); + case MLXSW_SP_L3_PROTO_IPV6: + return -EINVAL; + } + return -EINVAL; +} + +static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, + MLXSW_REG_RALUE_OP_WRITE_WRITE); +} + +static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, + MLXSW_REG_RALUE_OP_WRITE_DELETE); +} + +struct mlxsw_sp_router_fib4_add_info { + struct switchdev_trans_item tritem; + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_fib_entry *fib_entry; +}; + +static void mlxsw_sp_router_fib4_add_info_destroy(void const *data) +{ + const struct mlxsw_sp_router_fib4_add_info *info = data; + struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry; + struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp; + struct mlxsw_sp_vr *vr = fib_entry->vr; + + mlxsw_sp_fib_entry_destroy(fib_entry); + mlxsw_sp_vr_put(mlxsw_sp, vr); + kfree(info); +} + +static int +mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp, + const struct switchdev_obj_ipv4_fib *fib4, + struct mlxsw_sp_fib_entry *fib_entry) +{ + struct fib_info *fi = fib4->fi; + + if 
(fib4->type == RTN_LOCAL || fib4->type == RTN_BROADCAST) { + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; + return 0; + } + if (fib4->type != RTN_UNICAST) + return -EINVAL; + + if (fi->fib_scope != RT_SCOPE_UNIVERSE) { + struct mlxsw_sp_rif *r; + + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; + r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fi->fib_dev); + if (!r) + return -EINVAL; + fib_entry->rif = r->rif; + return 0; + } + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; + return mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi); +} + +static void +mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_REMOTE) + return; + mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry); +} + +static struct mlxsw_sp_fib_entry * +mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp, + const struct switchdev_obj_ipv4_fib *fib4) +{ + struct mlxsw_sp_fib_entry *fib_entry; + struct fib_info *fi = fib4->fi; + struct mlxsw_sp_vr *vr; + int err; + + vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id, + MLXSW_SP_L3_PROTO_IPV4); + if (IS_ERR(vr)) + return ERR_CAST(vr); + + fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst, + sizeof(fib4->dst), + fib4->dst_len, fi->fib_dev); + if (fib_entry) { + /* Already exists, just take a reference */ + fib_entry->ref_count++; + return fib_entry; + } + fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst, + sizeof(fib4->dst), + fib4->dst_len, fi->fib_dev); + if (!fib_entry) { + err = -ENOMEM; + goto err_fib_entry_create; + } + fib_entry->vr = vr; + fib_entry->ref_count = 1; + + err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry); + if (err) + goto err_fib4_entry_init; + + return fib_entry; + +err_fib4_entry_init: + mlxsw_sp_fib_entry_destroy(fib_entry); +err_fib_entry_create: + mlxsw_sp_vr_put(mlxsw_sp, vr); + + return ERR_PTR(err); +} + +static struct mlxsw_sp_fib_entry * +mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp, + const struct switchdev_obj_ipv4_fib *fib4) +{ + struct mlxsw_sp_vr *vr; + + vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4); + if (!vr) + return NULL; + + return mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst, + sizeof(fib4->dst), fib4->dst_len, + fib4->fi->fib_dev); +} + +void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_vr *vr = fib_entry->vr; + + if (--fib_entry->ref_count == 0) { + mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry); + mlxsw_sp_fib_entry_destroy(fib_entry); + } + mlxsw_sp_vr_put(mlxsw_sp, vr); +} + +static int +mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_ipv4_fib *fib4, + struct switchdev_trans *trans) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_router_fib4_add_info *info; + struct mlxsw_sp_fib_entry *fib_entry; + int err; + + fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4); + if (IS_ERR(fib_entry)) + return PTR_ERR(fib_entry); + + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + err = -ENOMEM; + goto err_alloc_info; + } + info->mlxsw_sp = mlxsw_sp; + info->fib_entry = fib_entry; + switchdev_trans_item_enqueue(trans, info, + mlxsw_sp_router_fib4_add_info_destroy, + &info->tritem); + return 0; + +err_alloc_info: + mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); + return err; +} + +static int +mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_ipv4_fib *fib4, + struct switchdev_trans 
*trans) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_router_fib4_add_info *info; + struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_vr *vr; + int err; + + info = switchdev_trans_item_dequeue(trans); + fib_entry = info->fib_entry; + kfree(info); + + if (fib_entry->ref_count != 1) + return 0; + + vr = fib_entry->vr; + err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry); + if (err) + goto err_fib_entry_insert; + err = mlxsw_sp_fib_entry_update(mlxsw_sp_port->mlxsw_sp, fib_entry); + if (err) + goto err_fib_entry_add; + return 0; + +err_fib_entry_add: + mlxsw_sp_fib_entry_remove(vr->fib, fib_entry); +err_fib_entry_insert: + mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); + return err; +} + +int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_ipv4_fib *fib4, + struct switchdev_trans *trans) +{ + if (switchdev_trans_ph_prepare(trans)) + return mlxsw_sp_router_fib4_add_prepare(mlxsw_sp_port, + fib4, trans); + return mlxsw_sp_router_fib4_add_commit(mlxsw_sp_port, + fib4, trans); +} + +int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_ipv4_fib *fib4) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_fib_entry *fib_entry; + + fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4); + if (!fib_entry) { + dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n"); + return -ENOENT; + } + + if (fib_entry->ref_count == 1) { + mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry); + mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry); + } + + mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry); + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 3710f19ed..7b654c517 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -55,13 +55,10 @@ static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) { + struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port); u16 fid = vid; - if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { - u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); - - fid = mlxsw_sp_vfid_to_fid(vfid); - } + fid = f ? 
f->fid : fid; if (!fid) fid = mlxsw_sp_port->pvid; @@ -169,14 +166,9 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state); } -static bool mlxsw_sp_vfid_is_vport_br(u16 vfid) -{ - return vfid >= MLXSW_SP_VFID_PORT_MAX; -} - static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, - u16 idx_begin, u16 idx_end, bool set, - bool only_uc) + u16 idx_begin, u16 idx_end, bool uc_set, + bool bm_set) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; u16 local_port = mlxsw_sp_port->local_port; @@ -185,43 +177,32 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, char *sftr_pl; int err; - if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { + if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; - if (mlxsw_sp_vfid_is_vport_br(idx_begin)) - local_port = mlxsw_sp_port->local_port; - else - local_port = MLXSW_PORT_CPU_PORT; - } else { + else table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; - } sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); if (!sftr_pl) return -ENOMEM; mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, - table_type, range, local_port, set); + table_type, range, local_port, uc_set); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); if (err) goto buffer_out; - /* Flooding control allows one to decide whether a given port will - * flood unicast traffic for which there is no FDB entry. - */ - if (only_uc) - goto buffer_out; - mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin, - table_type, range, local_port, set); + table_type, range, local_port, bm_set); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); if (err) goto err_flood_bm_set; - else - goto buffer_out; + + goto buffer_out; err_flood_bm_set: mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, - table_type, range, local_port, !set); + table_type, range, local_port, !uc_set); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); buffer_out: kfree(sftr_pl); @@ -236,7 +217,8 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, int err; if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { - u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); + u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid; + u16 vfid = mlxsw_sp_fid_to_vfid(fid); return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid, set, true); @@ -260,14 +242,16 @@ err_port_flood_set: return err; } -int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, - bool set, bool only_uc) +int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid, + bool set) { + u16 vfid; + /* In case of vFIDs, index into the flooding table is relative to * the start of the vFIDs range. 
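+ * (mlxsw_sp_fid_to_vfid() subtracts the base of the vFID range, + * assumed here to sit right above the 4K VLAN FIDs, so the first + * vFID maps to flood table offset 0.)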
*/ - return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, - only_uc); + vfid = mlxsw_sp_fid_to_vfid(fid); + return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set); } static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, @@ -383,6 +367,192 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, return err; } +static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create) +{ + char sfmr_pl[MLXSW_REG_SFMR_LEN]; + + mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); +} + +static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid) +{ + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID; + char svfa_pl[MLXSW_REG_SVFA_LEN]; + + mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl); +} + +static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid) +{ + struct mlxsw_sp_fid *f; + + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) + return NULL; + + f->fid = fid; + + return f; +} + +struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid) +{ + struct mlxsw_sp_fid *f; + int err; + + err = mlxsw_sp_fid_op(mlxsw_sp, fid, true); + if (err) + return ERR_PTR(err); + + /* Although all the ports that are members of the FID might be using a + * {Port, VID} to FID mapping, we create a global VID-to-FID + * mapping. This allows a port to transition to VLAN mode, + * knowing the global mapping exists. + */ + err = mlxsw_sp_fid_map(mlxsw_sp, fid, true); + if (err) + goto err_fid_map; + + f = mlxsw_sp_fid_alloc(fid); + if (!f) { + err = -ENOMEM; + goto err_allocate_fid; + } + + list_add(&f->list, &mlxsw_sp->fids); + + return f; + +err_allocate_fid: + mlxsw_sp_fid_map(mlxsw_sp, fid, false); +err_fid_map: + mlxsw_sp_fid_op(mlxsw_sp, fid, false); + return ERR_PTR(err); +} + +void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f) +{ + u16 fid = f->fid; + + list_del(&f->list); + + if (f->r) + mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r); + + kfree(f); + + mlxsw_sp_fid_map(mlxsw_sp, fid, false); + + mlxsw_sp_fid_op(mlxsw_sp, fid, false); +} + +static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) +{ + struct mlxsw_sp_fid *f; + + if (test_bit(fid, mlxsw_sp_port->active_vlans)) + return 0; + + f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid); + if (!f) { + f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid); + if (IS_ERR(f)) + return PTR_ERR(f); + } + + f->ref_count++; + + netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid); + + return 0; +} + +static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) +{ + struct mlxsw_sp_fid *f; + + f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid); + if (WARN_ON(!f)) + return; + + netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid); + + mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid); + + if (--f->ref_count == 0) + mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f); +} + +static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid, + bool valid) +{ + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; + + /* If the port doesn't have vPorts, then it can use the global + * VID-to-FID mapping.
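+ * A port that does have vPorts operates in virtual mode, where + * each of its VIDs must be mapped to a FID explicitly, which is + * what the per-port mapping below provides.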
+ */ + if (list_empty(&mlxsw_sp_port->vports_list)) + return 0; + + return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid); +} + +static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid_begin, u16 fid_end) +{ + int fid, err; + + for (fid = fid_begin; fid <= fid_end; fid++) { + err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid); + if (err) + goto err_port_fid_join; + } + + err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, + mlxsw_sp_port->uc_flood, true); + if (err) + goto err_port_flood_set; + + for (fid = fid_begin; fid <= fid_end; fid++) { + err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true); + if (err) + goto err_port_fid_map; + } + + return 0; + +err_port_fid_map: + for (fid--; fid >= fid_begin; fid--) + mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false); + __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false, + false); +err_port_flood_set: + fid = fid_end; +err_port_fid_join: + for (fid--; fid >= fid_begin; fid--) + __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid); + return err; +} + +static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid_begin, u16 fid_end) +{ + int fid; + + for (fid = fid_begin; fid <= fid_end; fid++) + mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false); + + __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false, + false); + + for (fid = fid_begin; fid <= fid_end; fid++) + __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid); +} + static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) { @@ -440,74 +610,6 @@ err_port_allow_untagged_set: return err; } -static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid) -{ - char sfmr_pl[MLXSW_REG_SFMR_LEN]; - int err; - - mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); - - if (err) - return err; - - set_bit(fid, mlxsw_sp->active_fids); - return 0; -} - -static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid) -{ - char sfmr_pl[MLXSW_REG_SFMR_LEN]; - - clear_bit(fid, mlxsw_sp->active_fids); - - mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, - fid, fid); - mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); -} - -static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) -{ - enum mlxsw_reg_svfa_mt mt; - - if (!list_empty(&mlxsw_sp_port->vports_list)) - mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; - else - mt = MLXSW_REG_SVFA_MT_VID_TO_FID; - - return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid); -} - -static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) -{ - enum mlxsw_reg_svfa_mt mt; - - if (list_empty(&mlxsw_sp_port->vports_list)) - return 0; - - mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; - return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid); -} - -static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin, - u16 vid_end) -{ - u16 vid; - int err; - - for (vid = vid_begin; vid <= vid_end; vid++) { - err = mlxsw_sp_port_add_vid(dev, 0, vid); - if (err) - goto err_port_add_vid; - } - return 0; - -err_port_add_vid: - for (vid--; vid >= vid_begin; vid--) - mlxsw_sp_port_kill_vid(dev, 0, vid); - return err; -} - static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end, bool is_member, bool untagged) @@ -533,57 +635,17 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end, bool flag_untagged, bool 
flag_pvid) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct net_device *dev = mlxsw_sp_port->dev; - u16 vid, last_visited_vid, old_pvid; - enum mlxsw_reg_svfa_mt mt; + u16 vid, old_pvid; int err; - /* In case this is invoked with BRIDGE_FLAGS_SELF and port is - * not bridged, then packets ingressing through the port with - * the specified VIDs will be directed to CPU. - */ if (!mlxsw_sp_port->bridged) - return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end); - - for (vid = vid_begin; vid <= vid_end; vid++) { - if (!test_bit(vid, mlxsw_sp->active_fids)) { - err = mlxsw_sp_fid_create(mlxsw_sp, vid); - if (err) { - netdev_err(dev, "Failed to create FID=%d\n", - vid); - return err; - } - - /* When creating a FID, we set a VID to FID mapping - * regardless of the port's mode. - */ - mt = MLXSW_REG_SVFA_MT_VID_TO_FID; - err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, - true, vid, vid); - if (err) { - netdev_err(dev, "Failed to create FID=VID=%d mapping\n", - vid); - goto err_port_vid_to_fid_set; - } - } - } - - /* Set FID mapping according to port's mode */ - for (vid = vid_begin; vid <= vid_end; vid++) { - err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid); - if (err) { - netdev_err(dev, "Failed to map FID=%d", vid); - last_visited_vid = --vid; - goto err_port_fid_map; - } - } + return -EINVAL; - err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, - true, false); + err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end); if (err) { - netdev_err(dev, "Failed to configure flooding\n"); - goto err_port_flood_set; + netdev_err(dev, "Failed to join FIDs\n"); + return err; } err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, @@ -628,10 +690,6 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, return 0; -err_port_vid_to_fid_set: - mlxsw_sp_fid_destroy(mlxsw_sp, vid); - return err; - err_port_stp_state_set: for (vid = vid_begin; vid <= vid_end; vid++) clear_bit(vid, mlxsw_sp_port->active_vlans); @@ -641,13 +699,7 @@ err_port_pvid_set: __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false, false); err_port_vlans_set: - __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false, - false); -err_port_flood_set: - last_visited_vid = vid_end; -err_port_fid_map: - for (vid = last_visited_vid; vid >= vid_begin; vid--) - mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid); + mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); return err; } @@ -678,9 +730,10 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding) MLXSW_REG_SFD_OP_WRITE_REMOVE; } -static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, - const char *mac, u16 fid, bool adding, - bool dynamic) +static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, + const char *mac, u16 fid, bool adding, + enum mlxsw_reg_sfd_rec_action action, + bool dynamic) { char *sfd_pl; int err; @@ -691,14 +744,29 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic), - mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP, - local_port); + mac, fid, action, local_port); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); kfree(sfd_pl); return err; } +static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port, + const char *mac, u16 fid, bool adding, + bool dynamic) +{ + return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding, + 
MLXSW_REG_SFD_REC_ACTION_NOP, dynamic); +} + +int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid, + bool adding) +{ + return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding, + MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER, + false); +} + static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id, const char *mac, u16 fid, u16 lag_vid, bool adding, bool dynamic) @@ -903,6 +971,11 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, SWITCHDEV_OBJ_PORT_VLAN(obj), trans); break; + case SWITCHDEV_OBJ_ID_IPV4_FIB: + err = mlxsw_sp_router_fib4_add(mlxsw_sp_port, + SWITCHDEV_OBJ_IPV4_FIB(obj), + trans); + break; case SWITCHDEV_OBJ_ID_PORT_FDB: err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port, SWITCHDEV_OBJ_PORT_FDB(obj), @@ -921,34 +994,15 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, return err; } -static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin, - u16 vid_end) -{ - u16 vid; - int err; - - for (vid = vid_begin; vid <= vid_end; vid++) { - err = mlxsw_sp_port_kill_vid(dev, 0, vid); - if (err) - return err; - } - - return 0; -} - static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, - u16 vid_begin, u16 vid_end, bool init) + u16 vid_begin, u16 vid_end) { struct net_device *dev = mlxsw_sp_port->dev; u16 vid, pvid; int err; - /* In case this is invoked with BRIDGE_FLAGS_SELF and port is - * not bridged, then prevent packets ingressing through the - * port with the specified VIDs from being trapped to CPU. - */ - if (!init && !mlxsw_sp_port->bridged) - return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end); + if (!mlxsw_sp_port->bridged) + return -EINVAL; err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false, false); @@ -958,9 +1012,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, return err; } - if (init) - goto out; - pvid = mlxsw_sp_port->pvid; if (pvid >= vid_begin && pvid <= vid_end) { err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0); @@ -970,23 +1021,8 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, } } - err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, - false, false); - if (err) { - netdev_err(dev, "Failed to clear flooding\n"); - return err; - } + mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end); - for (vid = vid_begin; vid <= vid_end; vid++) { - /* Remove FID mapping in case of Virtual mode */ - err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid); - if (err) { - netdev_err(dev, "Failed to unmap FID=%d", vid); - return err; - } - } - -out: /* Changing activity bits only if HW operation succeeded */ for (vid = vid_begin; vid <= vid_end; vid++) clear_bit(vid, mlxsw_sp_port->active_vlans); @@ -997,8 +1033,8 @@ out: static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_vlan *vlan) { - return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, - vlan->vid_begin, vlan->vid_end, false); + return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin, + vlan->vid_end); } void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) @@ -1006,7 +1042,7 @@ void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) u16 vid; for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) - __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false); + __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid); } static int @@ -1081,6 +1117,10 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev, err = 
mlxsw_sp_port_vlans_del(mlxsw_sp_port, SWITCHDEV_OBJ_PORT_VLAN(obj)); break; + case SWITCHDEV_OBJ_ID_IPV4_FIB: + err = mlxsw_sp_router_fib4_del(mlxsw_sp_port, + SWITCHDEV_OBJ_IPV4_FIB(obj)); + break; case SWITCHDEV_OBJ_ID_PORT_FDB: err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port, SWITCHDEV_OBJ_PORT_FDB(obj)); @@ -1118,7 +1158,8 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_port *tmp; - u16 vport_fid = 0; + struct mlxsw_sp_fid *f; + u16 vport_fid; char *sfd_pl; char mac[ETH_ALEN]; u16 fid; @@ -1133,12 +1174,8 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, if (!sfd_pl) return -ENOMEM; - if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { - u16 tmp; - - tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); - vport_fid = mlxsw_sp_vfid_to_fid(tmp); - } + f = mlxsw_sp_vport_fid_get(mlxsw_sp_port); + vport_fid = f ? f->fid : 0; mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0); do { @@ -1310,11 +1347,10 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, } if (mlxsw_sp_fid_is_vfid(fid)) { - u16 vfid = mlxsw_sp_fid_to_vfid(fid); struct mlxsw_sp_port *mlxsw_sp_vport; - mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port, - vfid); + mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port, + fid); if (!mlxsw_sp_vport) { netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); goto just_remove; @@ -1370,11 +1406,10 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, } if (mlxsw_sp_fid_is_vfid(fid)) { - u16 vfid = mlxsw_sp_fid_to_vfid(fid); struct mlxsw_sp_port *mlxsw_sp_vport; - mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port, - vfid); + mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port, + fid); if (!mlxsw_sp_vport) { netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); goto just_remove; @@ -1495,14 +1530,6 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw); } -static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp) -{ - u16 fid; - - for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID) - mlxsw_sp_fid_destroy(mlxsw_sp, fid); -} - int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) { return mlxsw_sp_fdb_init(mlxsw_sp); @@ -1511,33 +1538,6 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) { mlxsw_sp_fdb_fini(mlxsw_sp); - mlxsw_sp_fids_fini(mlxsw_sp); -} - -int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port) -{ - struct net_device *dev = mlxsw_sp_port->dev; - int err; - - /* Allow only untagged packets to ingress and tag them internally - * with VID 1. - */ - mlxsw_sp_port->pvid = 1; - err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1, - true); - if (err) { - netdev_err(dev, "Unable to init VLANs\n"); - return err; - } - - /* Add implicit VLAN interface in the device, so that untagged - * packets will be classified to the default vFID. 
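The FID join/leave helpers introduced in this file follow a lookup-or-create pattern with plain reference counting: the first port joining a FID instantiates it, and the last one leaving tears it down. A simplified userspace model of that lifecycle; the list handling and names here are illustrative, not the driver's API:

#include <stdio.h>
#include <stdlib.h>

struct fid_entry {
	unsigned int fid;
	unsigned int ref_count;
	struct fid_entry *next;
};

static struct fid_entry *fid_list;

static struct fid_entry *fid_find(unsigned int fid)
{
	struct fid_entry *f;

	for (f = fid_list; f; f = f->next)
		if (f->fid == fid)
			return f;
	return NULL;
}

/* Join: create the FID on first use, otherwise just take a reference. */
static struct fid_entry *fid_join(unsigned int fid)
{
	struct fid_entry *f = fid_find(fid);

	if (!f) {
		f = calloc(1, sizeof(*f));
		if (!f)
			return NULL;
		f->fid = fid;
		f->next = fid_list;
		fid_list = f;
	}
	f->ref_count++;
	return f;
}

/* Leave: destroy the FID once the last user is gone. */
static void fid_leave(unsigned int fid)
{
	struct fid_entry *f = fid_find(fid), **p;

	if (!f || --f->ref_count)
		return;
	for (p = &fid_list; *p != f; p = &(*p)->next)
		;
	*p = f->next;
	free(f);
}

int main(void)
{
	fid_join(100);
	fid_join(100);
	fid_leave(100);
	printf("after first leave: %s\n", fid_find(100) ? "alive" : "gone");
	fid_leave(100);
	printf("after last leave: %s\n", fid_find(100) ? "alive" : "gone");
	return 0;
}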
- */ - err = mlxsw_sp_port_add_vid(dev, 0, 1); - if (err) - netdev_err(dev, "Failed to configure default vFID\n"); - - return err; } void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 25f658b38..377daa4d5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -1541,6 +1541,7 @@ static struct mlxsw_config_profile mlxsw_sx_config_profile = { .type = MLXSW_PORT_SWID_TYPE_ETH, } }, + .resource_query_enable = 0, }; static struct mlxsw_driver mlxsw_sx_driver = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 53a9550be..ed8e30186 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -54,6 +54,15 @@ enum { MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32, MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33, MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34, + MLXSW_TRAP_ID_ARPBC = 0x50, + MLXSW_TRAP_ID_ARPUC = 0x51, + MLXSW_TRAP_ID_MTUERROR = 0x52, + MLXSW_TRAP_ID_TTLERROR = 0x53, + MLXSW_TRAP_ID_LBERROR = 0x54, + MLXSW_TRAP_ID_OSPF = 0x55, + MLXSW_TRAP_ID_IP2ME = 0x5F, + MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, + MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, MLXSW_TRAP_ID_MAX = 0x1FF }; diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 2874dffe7..eaa37c079 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -7412,7 +7412,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) || - (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) && + (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) && (dev->features & NETIF_F_RXCSUM)) { l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index e744acc18..690635660 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -63,7 +63,7 @@ #define NFP_NET_POLL_TIMEOUT 5 /* Bar allocation */ -#define NFP_NET_CRTL_BAR 0 +#define NFP_NET_CTRL_BAR 0 #define NFP_NET_Q0_BAR 2 #define NFP_NET_Q1_BAR 4 /* OBSOLETE */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index ba26bb356..39dadfca8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -41,7 +41,6 @@ * Chris Telfer <chris.telfer@netronome.com> */ -#include <linux/version.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> @@ -1441,10 +1440,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) nfp_net_set_hash(nn->netdev, skb, rxd); - /* Pad small frames to minimum */ - if (skb_put_padto(skb, 60)) - break; - /* Stats update */ u64_stats_update_begin(&r_vec->rx_sync); r_vec->rx_pkts++; @@ -1845,13 +1840,14 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn) } /** - * nfp_net_write_mac_addr() - Write mac address to device registers + * nfp_net_write_mac_addr() - Write mac address to the device control BAR * @nn: NFP Net device to reconfigure - * @mac: Six-byte MAC address to be written * - * We do a bit of byte swapping dance because firmware is LE. 
+ * Writes the MAC address from the netdev to the device control BAR. Does not + * perform the required reconfig. We do a bit of byte swapping dance because + * firmware is LE. */ -static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac) +static void nfp_net_write_mac_addr(struct nfp_net *nn) { nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(nn->netdev->dev_addr)); @@ -1952,7 +1948,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn) nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ? 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1); - nfp_net_write_mac_addr(nn, nn->netdev->dev_addr); + nfp_net_write_mac_addr(nn); nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu); nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz); @@ -1979,7 +1975,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn) if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) { memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports)); memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt)); - vxlan_get_rx_port(nn->netdev); + udp_tunnel_get_rx_info(nn->netdev); } return err; @@ -2048,12 +2044,16 @@ static int nfp_net_netdev_open(struct net_device *netdev) nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings), GFP_KERNEL); - if (!nn->rx_rings) + if (!nn->rx_rings) { + err = -ENOMEM; goto err_free_lsc; + } nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings), GFP_KERNEL); - if (!nn->tx_rings) + if (!nn->tx_rings) { + err = -ENOMEM; goto err_free_rx_rings; + } for (r = 0; r < nn->num_r_vecs; r++) { err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); @@ -2551,27 +2551,33 @@ static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port) } static void nfp_net_add_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct nfp_net *nn = netdev_priv(netdev); int idx; - idx = nfp_net_find_vxlan_idx(nn, port); + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + idx = nfp_net_find_vxlan_idx(nn, ti->port); if (idx == -ENOSPC) return; if (!nn->vxlan_usecnt[idx]++) - nfp_net_set_vxlan_port(nn, idx, port); + nfp_net_set_vxlan_port(nn, idx, ti->port); } static void nfp_net_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct nfp_net *nn = netdev_priv(netdev); int idx; - idx = nfp_net_find_vxlan_idx(nn, port); - if (!nn->vxlan_usecnt[idx] || idx == -ENOSPC) + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + idx = nfp_net_find_vxlan_idx(nn, ti->port); + if (idx == -ENOSPC || !nn->vxlan_usecnt[idx]) return; if (!--nn->vxlan_usecnt[idx]) @@ -2589,8 +2595,8 @@ static const struct net_device_ops nfp_net_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_set_features = nfp_net_set_features, .ndo_features_check = nfp_net_features_check, - .ndo_add_vxlan_port = nfp_net_add_vxlan_port, - .ndo_del_vxlan_port = nfp_net_del_vxlan_port, + .ndo_udp_tunnel_add = nfp_net_add_vxlan_port, + .ndo_udp_tunnel_del = nfp_net_del_vxlan_port, }; /** @@ -2733,7 +2739,7 @@ int nfp_net_netdev_init(struct net_device *netdev) nn->cap = nn_readl(nn, NFP_NET_CFG_CAP); nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU); - nfp_net_write_mac_addr(nn, nn->netdev->dev_addr); + nfp_net_write_mac_addr(nn); /* Set default MTU and Freelist buffer size */ if (nn->max_mtu < NFP_NET_DEFAULT_MTU) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index ccfef1f17..4c9897220 100644 --- 
a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -40,7 +40,6 @@ * Brad Petrus <brad.petrus@netronome.com> */ -#include <linux/version.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> @@ -605,6 +604,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev, static const struct ethtool_ops nfp_net_ethtool_ops = { .get_drvinfo = nfp_net_get_drvinfo, + .get_link = ethtool_op_get_link, .get_ringparam = nfp_net_get_ringparam, .set_ringparam = nfp_net_set_ringparam, .get_strings = nfp_net_get_strings, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index e2b22b8a2..f7062cb64 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -38,7 +38,6 @@ * Rolf Neugebauer <rolf.neugebauer@netronome.com> */ -#include <linux/version.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> @@ -124,17 +123,17 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, * first NFP_NET_CFG_BAR_SZ of the BAR. This keeps the code * identical for PF and VF drivers. */ - ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CRTL_BAR), + ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR), NFP_NET_CFG_BAR_SZ); if (!ctrl_bar) { dev_err(&pdev->dev, - "Failed to map resource %d\n", NFP_NET_CRTL_BAR); + "Failed to map resource %d\n", NFP_NET_CTRL_BAR); err = -EIO; goto err_pci_regions; } nfp_net_get_fw_version(&fw_ver, ctrl_bar); - if (fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { + if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n", fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor); err = -EINVAL; @@ -142,9 +141,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, } /* Determine stride */ - if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 0) || - nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1) || - nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0x12, 0x48)) { + if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) { stride = 2; tx_bar_no = NFP_NET_Q0_BAR; rx_bar_no = NFP_NET_Q1_BAR; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index b1ce7aaa8..8e13ec84c 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -425,7 +425,6 @@ struct netdata_local { unsigned int last_tx_idx; unsigned int num_used_tx_buffs; struct mii_bus *mii_bus; - struct phy_device *phy_dev; struct clk *clk; dma_addr_t dma_buff_base_p; void *dma_buff_base_v; @@ -476,14 +475,6 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac) mac[5] = tmp >> 8; } -static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable) -{ - if (enable) - clk_prepare_enable(pldat->clk); - else - clk_disable_unprepare(pldat->clk); -} - static void __lpc_params_setup(struct netdata_local *pldat) { u32 tmp; @@ -750,7 +741,7 @@ static int lpc_mdio_reset(struct mii_bus *bus) static void lpc_handle_link_change(struct net_device *ndev) { struct netdata_local *pldat = netdev_priv(ndev); - struct phy_device *phydev = pldat->phy_dev; + struct phy_device *phydev = ndev->phydev; unsigned long flags; bool status_change = false; @@ -814,7 +805,6 @@ static int lpc_mii_probe(struct net_device *ndev) pldat->link = 0; pldat->speed = 0; pldat->duplex = -1; - pldat->phy_dev = phydev; phy_attached_info(phydev); @@ -1048,8 +1038,8 @@ static int 
lpc_eth_close(struct net_device *ndev) napi_disable(&pldat->napi); netif_stop_queue(ndev); - if (pldat->phy_dev) - phy_stop(pldat->phy_dev); + if (ndev->phydev) + phy_stop(ndev->phydev); spin_lock_irqsave(&pldat->lock, flags); __lpc_eth_reset(pldat); @@ -1058,7 +1048,7 @@ static int lpc_eth_close(struct net_device *ndev) writel(0, LPC_ENET_MAC2(pldat->net_base)); spin_unlock_irqrestore(&pldat->lock, flags); - __lpc_eth_clock_enable(pldat, false); + clk_disable_unprepare(pldat->clk); return 0; } @@ -1185,8 +1175,7 @@ static void lpc_eth_set_multicast_list(struct net_device *ndev) static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) { - struct netdata_local *pldat = netdev_priv(ndev); - struct phy_device *phydev = pldat->phy_dev; + struct phy_device *phydev = ndev->phydev; if (!netif_running(ndev)) return -EINVAL; @@ -1200,21 +1189,24 @@ static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) static int lpc_eth_open(struct net_device *ndev) { struct netdata_local *pldat = netdev_priv(ndev); + int ret; if (netif_msg_ifup(pldat)) dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name); - __lpc_eth_clock_enable(pldat, true); + ret = clk_prepare_enable(pldat->clk); + if (ret) + return ret; /* Suspended PHY makes LPC ethernet core block, so resume now */ - phy_resume(pldat->phy_dev); + phy_resume(ndev->phydev); /* Reset and initialize */ __lpc_eth_reset(pldat); __lpc_eth_init(pldat); /* schedule a link state check */ - phy_start(pldat->phy_dev); + phy_start(ndev->phydev); netif_start_queue(ndev); napi_enable(&pldat->napi); @@ -1247,37 +1239,13 @@ static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level) pldat->msg_enable = level; } -static int lpc_eth_ethtool_getsettings(struct net_device *ndev, - struct ethtool_cmd *cmd) -{ - struct netdata_local *pldat = netdev_priv(ndev); - struct phy_device *phydev = pldat->phy_dev; - - if (!phydev) - return -EOPNOTSUPP; - - return phy_ethtool_gset(phydev, cmd); -} - -static int lpc_eth_ethtool_setsettings(struct net_device *ndev, - struct ethtool_cmd *cmd) -{ - struct netdata_local *pldat = netdev_priv(ndev); - struct phy_device *phydev = pldat->phy_dev; - - if (!phydev) - return -EOPNOTSUPP; - - return phy_ethtool_sset(phydev, cmd); -} - static const struct ethtool_ops lpc_eth_ethtool_ops = { .get_drvinfo = lpc_eth_ethtool_getdrvinfo, - .get_settings = lpc_eth_ethtool_getsettings, - .set_settings = lpc_eth_ethtool_setsettings, .get_msglevel = lpc_eth_ethtool_getmsglevel, .set_msglevel = lpc_eth_ethtool_setmsglevel, .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct net_device_ops lpc_netdev_ops = { @@ -1347,7 +1315,9 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) } /* Enable network clock */ - __lpc_eth_clock_enable(pldat, true); + ret = clk_prepare_enable(pldat->clk); + if (ret) + goto err_out_clk_put; /* Map IO space */ pldat->net_base = ioremap(res->start, resource_size(res)); @@ -1460,7 +1430,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev) netdev_info(ndev, "LPC mac at 0x%08x irq %d\n", res->start, ndev->irq); - phydev = pldat->phy_dev; + phydev = ndev->phydev; device_init_wakeup(&pdev->dev, 1); device_set_wakeup_enable(&pdev->dev, 0); @@ -1481,6 +1451,7 @@ err_out_iounmap: iounmap(pldat->net_base); err_out_disable_clocks: clk_disable_unprepare(pldat->clk); +err_out_clk_put: clk_put(pldat->clk); err_out_free_dev: free_netdev(ndev); diff --git 
a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index af54df52a..2f4a837f0 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -989,7 +989,7 @@ static void pasemi_adjust_link(struct net_device *dev) unsigned int flags; unsigned int new_flags; - if (!mac->phydev->link) { + if (!dev->phydev->link) { /* If no link, MAC speed settings don't matter. Just report * link down and return. */ @@ -1010,10 +1010,10 @@ static void pasemi_adjust_link(struct net_device *dev) new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M | PAS_MAC_CFG_PCFG_TSR_M); - if (!mac->phydev->duplex) + if (!dev->phydev->duplex) new_flags |= PAS_MAC_CFG_PCFG_HD; - switch (mac->phydev->speed) { + switch (dev->phydev->speed) { case 1000: new_flags |= PAS_MAC_CFG_PCFG_SPD_1G | PAS_MAC_CFG_PCFG_TSR_1G; @@ -1027,15 +1027,15 @@ static void pasemi_adjust_link(struct net_device *dev) PAS_MAC_CFG_PCFG_TSR_10M; break; default: - printk("Unsupported speed %d\n", mac->phydev->speed); + printk("Unsupported speed %d\n", dev->phydev->speed); } /* Print on link or speed/duplex change */ - msg = mac->link != mac->phydev->link || flags != new_flags; + msg = mac->link != dev->phydev->link || flags != new_flags; - mac->duplex = mac->phydev->duplex; - mac->speed = mac->phydev->speed; - mac->link = mac->phydev->link; + mac->duplex = dev->phydev->duplex; + mac->speed = dev->phydev->speed; + mac->link = dev->phydev->link; if (new_flags != flags) write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags); @@ -1067,8 +1067,6 @@ static int pasemi_mac_phy_init(struct net_device *dev) return -ENODEV; } - mac->phydev = phydev; - return 0; } @@ -1198,8 +1196,8 @@ static int pasemi_mac_open(struct net_device *dev) goto out_rx_int; } - if (mac->phydev) - phy_start(mac->phydev); + if (dev->phydev) + phy_start(dev->phydev); setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer, (unsigned long)mac->tx); @@ -1293,9 +1291,9 @@ static int pasemi_mac_close(struct net_device *dev) rxch = rx_ring(mac)->chan.chno; txch = tx_ring(mac)->chan.chno; - if (mac->phydev) { - phy_stop(mac->phydev); - phy_disconnect(mac->phydev); + if (dev->phydev) { + phy_stop(dev->phydev); + phy_disconnect(dev->phydev); } del_timer_sync(&mac->tx->clean_timer); diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h index 161c99a98..7c47e263b 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.h +++ b/drivers/net/ethernet/pasemi/pasemi_mac.h @@ -70,7 +70,6 @@ struct pasemi_mac { struct pci_dev *pdev; struct pci_dev *dma_pdev; struct pci_dev *iob_pdev; - struct phy_device *phydev; struct napi_struct napi; int bufsz; /* RX ring buffer size */ diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c index f046bfc18..d0afc2b8f 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c @@ -62,32 +62,6 @@ static struct { { "tx-1024-1518-byte-packets" }, }; -static int -pasemi_mac_ethtool_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) -{ - struct pasemi_mac *mac = netdev_priv(netdev); - struct phy_device *phydev = mac->phydev; - - if (!phydev) - return -EOPNOTSUPP; - - return phy_ethtool_gset(phydev, cmd); -} - -static int -pasemi_mac_ethtool_set_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) -{ - struct pasemi_mac *mac = netdev_priv(netdev); - struct phy_device *phydev = mac->phydev; - - if (!phydev) - return 
-EOPNOTSUPP; - - return phy_ethtool_sset(phydev, cmd); -} - static u32 pasemi_mac_ethtool_get_msglevel(struct net_device *netdev) { @@ -145,8 +119,6 @@ static void pasemi_mac_get_strings(struct net_device *netdev, u32 stringset, } const struct ethtool_ops pasemi_mac_ethtool_ops = { - .get_settings = pasemi_mac_ethtool_get_settings, - .set_settings = pasemi_mac_ethtool_set_settings, .get_msglevel = pasemi_mac_ethtool_get_msglevel, .set_msglevel = pasemi_mac_ethtool_set_msglevel, .get_link = ethtool_op_get_link, @@ -154,5 +126,7 @@ const struct ethtool_ops pasemi_mac_ethtool_ops = { .get_strings = pasemi_mac_get_strings, .get_sset_count = pasemi_mac_get_sset_count, .get_ethtool_stats = pasemi_mac_get_ethtool_stats, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index 680d8c736..6ba484068 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -54,16 +54,6 @@ config QLCNIC_DCB mode of DCB is supported. PG and PFC values are related only to Tx. -config QLCNIC_VXLAN - bool "Virtual eXtensible Local Area Network (VXLAN) offload support" - default n - depends on QLCNIC && VXLAN && !(QLCNIC=y && VXLAN=m) - ---help--- - This enables hardware offload support for VXLAN protocol over QLogic's - 84XX series adapters. - Say Y here if you want to enable hardware offload support for - Virtual eXtensible Local Area Network (VXLAN) in the driver. - config QLCNIC_HWMON bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support" depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m) @@ -114,24 +104,4 @@ config QEDE ---help--- This enables the support for ... -config QEDE_VXLAN - bool "Virtual eXtensible Local Area Network support" - default n - depends on QEDE && VXLAN && !(QEDE=y && VXLAN=m) - ---help--- - This enables hardware offload support for VXLAN protocol over - qede module. Say Y here if you want to enable hardware offload - support for Virtual eXtensible Local Area Network (VXLAN) - in the driver. - -config QEDE_GENEVE - bool "Generic Network Virtualization Encapsulation (GENEVE) support" - depends on QEDE && GENEVE && !(QEDE=y && GENEVE=m) - ---help--- - This allows one to create GENEVE virtual interfaces that provide - Layer 2 Networks over Layer 3 Networks. GENEVE is often used - to tunnel virtual network infrastructure in virtualized environments. - Say Y here if you want to enable hardware offload support for - Generic Network Virtualization Encapsulation (GENEVE) in the driver. 
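The Kconfig knobs removed above became unnecessary once UDP tunnel offload moved to the generic ndo_udp_tunnel_add/del callbacks (see the nfp conversion earlier in this diff): a driver receives every tunnel port through one callback and filters on the tunnel type at runtime instead of at build time. A standalone sketch of that dispatch shape, with simplified stand-ins for the kernel's udp_tunnel_info types; the names here are illustrative:

#include <stdio.h>

/* Simplified stand-ins for the kernel's enum udp_tunnel_type and
 * struct udp_tunnel_info; names here are illustrative. */
enum tunnel_type { TUNNEL_VXLAN, TUNNEL_GENEVE };

struct tunnel_info {
	enum tunnel_type type;
	unsigned int port;	/* network byte order in the kernel */
};

/* One runtime type check replaces the old per-protocol build options. */
static void sketch_udp_tunnel_add(const struct tunnel_info *ti)
{
	switch (ti->type) {
	case TUNNEL_VXLAN:
		printf("program VXLAN port %u into the NIC\n", ti->port);
		break;
	case TUNNEL_GENEVE:
		printf("program GENEVE port %u into the NIC\n", ti->port);
		break;
	default:
		/* Ports for unsupported tunnel types are simply ignored. */
		break;
	}
}

int main(void)
{
	struct tunnel_info vxlan = { TUNNEL_VXLAN, 4789 };
	struct tunnel_info geneve = { TUNNEL_GENEVE, 6081 };

	sketch_udp_tunnel_add(&vxlan);
	sketch_udp_tunnel_add(&geneve);
	return 0;
}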
- endif # NET_VENDOR_QLOGIC diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 1042f2af8..45ab74676 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -127,6 +127,8 @@ struct qed_tunn_update_params { */ enum qed_pci_personality { QED_PCI_ETH, + QED_PCI_ISCSI, + QED_PCI_ETH_ROCE, QED_PCI_DEFAULT /* default in shmem */ }; @@ -170,6 +172,8 @@ enum QED_PORT_MODE { enum qed_dev_cap { QED_DEV_CAP_ETH, + QED_DEV_CAP_ISCSI, + QED_DEV_CAP_ROCE, }; struct qed_hw_info { @@ -183,6 +187,8 @@ struct qed_hw_info { #define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc]) #define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc]) +#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \ + RESC_NUM(_p_hwfn, resc)) #define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc]) u8 num_tc; @@ -255,6 +261,7 @@ struct qed_qm_info { u8 pure_lb_pq; u8 offload_pq; u8 pure_ack_pq; + u8 ooo_pq; u8 vf_queues_offset; u16 num_pqs; u16 num_vf_pqs; @@ -267,6 +274,7 @@ struct qed_qm_info { u8 pf_wfq; u32 pf_rl; struct qed_wfq_data *wfq_data; + u8 num_pf_rls; }; struct storm_stats { @@ -312,6 +320,7 @@ struct qed_hwfn { bool hw_init_done; u8 num_funcs_on_engine; + u8 enabled_func_idx; /* BAR access */ void __iomem *regview; @@ -350,6 +359,9 @@ struct qed_hwfn { /* Protocol related */ struct qed_pf_params pf_params; + bool b_rdma_enabled_in_prs; + u32 rdma_prs_search_reg; + /* Array of sb_info of all status blocks */ struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD]; u16 num_sbs; @@ -477,8 +489,8 @@ struct qed_dev { u32 int_mode; enum qed_coalescing_mode int_coalescing_mode; - u8 rx_coalesce_usecs; - u8 tx_coalesce_usecs; + u16 rx_coalesce_usecs; + u16 tx_coalesce_usecs; /* Start Bar offset of first hwfn */ void __iomem *regview; @@ -549,12 +561,22 @@ struct qed_dev { static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, u32 concrete_fid) { + u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID); u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID); + u8 vf_valid = GET_FIELD(concrete_fid, + PXP_CONCRETE_FID_VFVALID); + u8 sw_fid; + + if (vf_valid) + sw_fid = vfid + MAX_NUM_PFS; + else + sw_fid = pfid; - return pfid; + return sw_fid; } #define PURE_LB_TC 8 +#define OOO_LB_TC 9 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate); void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate); diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index ac284c58d..1c35f3761 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -39,6 +39,14 @@ #define DQ_RANGE_SHIFT 4 #define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT) +/* Searcher constants */ +#define SRC_MIN_NUM_ELEMS 256 + +/* Timers constants */ +#define TM_SHIFT 7 +#define TM_ALIGN BIT(TM_SHIFT) +#define TM_ELEM_SIZE 4 + /* ILT constants */ #define ILT_DEFAULT_HW_P_SIZE 3 #define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) @@ -56,26 +64,71 @@ union conn_context { struct core_conn_context core_ctx; struct eth_conn_context eth_ctx; + struct iscsi_conn_context iscsi_ctx; + struct roce_conn_context roce_ctx; +}; + +/* TYPE-0 task context - iSCSI */ +union type0_task_context { + struct iscsi_task_context iscsi_ctx; }; +/* TYPE-1 task context - ROCE */ +union type1_task_context { + struct rdma_task_context roce_ctx; +}; + +struct src_ent { + u8 opaque[56]; + u64 next; +}; + +#define CDUT_SEG_ALIGNMET 3 /* 
in 4k chunks */ +#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12)) + #define CONN_CXT_SIZE(p_hwfn) \ ALIGNED_TYPE_SIZE(union conn_context, p_hwfn) +#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context)) + +#define TYPE0_TASK_CXT_SIZE(p_hwfn) \ + ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn) + +/* Alignment is inherent to the type1_task_context structure */ +#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context) + /* PF per protocol configuration object */ +#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS) +#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS) + +struct qed_tid_seg { + u32 count; + u8 type; + bool has_fl_mem; +}; + struct qed_conn_type_cfg { u32 cid_count; u32 cid_start; u32 cids_per_vf; + struct qed_tid_seg tid_seg[TASK_SEGMENTS]; }; /* ILT Client configuration, Per connection type (protocol) resources. */ #define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2) #define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2) #define CDUC_BLK (0) +#define SRQ_BLK (0) +#define CDUT_SEG_BLK(n) (1 + (u8)(n)) +#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS) enum ilt_clients { ILT_CLI_CDUC, + ILT_CLI_CDUT, ILT_CLI_QM, + ILT_CLI_TM, + ILT_CLI_SRC, + ILT_CLI_TSDM, ILT_CLI_MAX }; @@ -88,6 +141,7 @@ struct qed_ilt_cli_blk { u32 total_size; /* 0 means not active */ u32 real_size_in_page; u32 start_line; + u32 dynamic_line_cnt; }; struct qed_ilt_client_cfg { @@ -131,18 +185,44 @@ struct qed_cxt_mngr { /* computed ILT structure */ struct qed_ilt_client_cfg clients[ILT_CLI_MAX]; + /* Task type sizes */ + u32 task_type_size[NUM_TASK_TYPES]; + /* total number of VFs for this hwfn - * ALL VFs are symmetric in terms of HW resources */ u32 vf_count; + /* total number of SRQ's for this hwfn */ + u32 srq_count; + /* Acquired CIDs */ struct qed_cid_acquired_map acquired[MAX_CONN_TYPES]; /* ILT shadow table */ struct qed_dma_mem *ilt_shadow; u32 pf_start_line; + + /* Mutex for a dynamic ILT allocation */ + struct mutex mutex; + + /* SRC T2 */ + struct qed_dma_mem *t2; + u32 t2_num_pages; + u64 first_free; + u64 last_free; }; +static bool src_proto(enum protocol_type type) +{ + return type == PROTOCOLID_ISCSI || + type == PROTOCOLID_ROCE; +} + +static bool tm_cid_proto(enum protocol_type type) +{ + return type == PROTOCOLID_ISCSI || + type == PROTOCOLID_ROCE; +} /* counts the iids for the CDU/CDUC ILT client configuration */ struct qed_cdu_iids { @@ -161,21 +241,120 @@ static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr, } } +/* counts the iids for the Searcher block configuration */ +struct qed_src_iids { + u32 pf_cids; + u32 per_vf_cids; +}; + +static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr, + struct qed_src_iids *iids) +{ + u32 i; + + for (i = 0; i < MAX_CONN_TYPES; i++) { + if (!src_proto(i)) + continue; + + iids->pf_cids += p_mngr->conn_cfg[i].cid_count; + iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf; + } +} + +/* counts the iids for the Timers block configuration */ +struct qed_tm_iids { + u32 pf_cids; + u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */ + u32 pf_tids_total; + u32 per_vf_cids; + u32 per_vf_tids; +}; + +static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr, + struct qed_tm_iids *iids) +{ + u32 i, j; + + for (i = 0; i < MAX_CONN_TYPES; i++) { + struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i]; + + if (tm_cid_proto(i)) { + iids->pf_cids += p_cfg->cid_count; + iids->per_vf_cids += p_cfg->cids_per_vf; + } + } + + iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN); + iids->per_vf_cids = 
roundup(iids->per_vf_cids, TM_ALIGN); + iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN); + + for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) { + iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN); + iids->pf_tids_total += iids->pf_tids[j]; + } +} + static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, struct qed_qm_iids *iids) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; - u32 vf_cids = 0, type; + struct qed_tid_seg *segs; + u32 vf_cids = 0, type, j; + u32 vf_tids = 0; for (type = 0; type < MAX_CONN_TYPES; type++) { iids->cids += p_mngr->conn_cfg[type].cid_count; vf_cids += p_mngr->conn_cfg[type].cids_per_vf; + + segs = p_mngr->conn_cfg[type].tid_seg; + /* for each segment there is at most one + * protocol for which count is not 0. + */ + for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++) + iids->tids += segs[j].count; + + /* The last array element is for the VFs. As for PF + * segments there can be only one protocol for + * which this value is not 0. + */ + vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; } iids->vf_cids += vf_cids * p_mngr->vf_count; + iids->tids += vf_tids * p_mngr->vf_count; + DP_VERBOSE(p_hwfn, QED_MSG_ILT, - "iids: CIDS %08x vf_cids %08x\n", - iids->cids, iids->vf_cids); + "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n", + iids->cids, iids->vf_cids, iids->tids, vf_tids); +} + +static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn, + u32 seg) +{ + struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr; + u32 i; + + /* Find the protocol with tid count > 0 for this segment. + * Note: there can only be one and this is already validated. + */ + for (i = 0; i < MAX_CONN_TYPES; i++) + if (p_cfg->conn_cfg[i].tid_seg[seg].count) + return &p_cfg->conn_cfg[i].tid_seg[seg]; + return NULL; +} + +void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs) +{ + struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; + + p_mgr->srq_count = num_srqs; +} + +u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; + + return p_mgr->srq_count; } /* set the iids count per protocol */ @@ -188,6 +367,14 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn, p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN); p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN); + + if (type == PROTOCOLID_ROCE) { + u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val; + u32 cxt_size = CONN_CXT_SIZE(p_hwfn); + u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; + + p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page); + } } u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, @@ -200,6 +387,37 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; } +u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn, + enum protocol_type type) +{ + return p_hwfn->p_cxt_mngr->acquired[type].start_cid; +} + +u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn, + enum protocol_type type) +{ + u32 cnt = 0; + int i; + + for (i = 0; i < TASK_SEGMENTS; i++) + cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count; + + return cnt; +} + +static void +qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn, + enum protocol_type proto, + u8 seg, u8 seg_type, u32 count, bool has_fl) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg]; + + p_seg->count = count; + p_seg->has_fl_mem = has_fl; + p_seg->type = seg_type; +} + static void qed_ilt_cli_blk_fill(struct 
qed_ilt_client_cfg *p_cli, struct qed_ilt_cli_blk *p_blk, u32 start_line, u32 total_size, @@ -241,17 +459,42 @@ static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn, p_blk->real_size_in_page, p_blk->start_line); } +static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn, + enum ilt_clients ilt_client) +{ + u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count; + struct qed_ilt_client_cfg *p_cli; + u32 lines_to_skip = 0; + u32 cxts_per_p; + + if (ilt_client == ILT_CLI_CDUC) { + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + + cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) / + (u32) CONN_CXT_SIZE(p_hwfn); + + lines_to_skip = cid_count / cxts_per_p; + } + + return lines_to_skip; +} + int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 curr_line, total, i, task_size, line; struct qed_ilt_client_cfg *p_cli; struct qed_ilt_cli_blk *p_blk; struct qed_cdu_iids cdu_iids; + struct qed_src_iids src_iids; struct qed_qm_iids qm_iids; - u32 curr_line, total, i; + struct qed_tm_iids tm_iids; + struct qed_tid_seg *p_seg; memset(&qm_iids, 0, sizeof(qm_iids)); memset(&cdu_iids, 0, sizeof(cdu_iids)); + memset(&src_iids, 0, sizeof(src_iids)); + memset(&tm_iids, 0, sizeof(tm_iids)); p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT); @@ -279,6 +522,9 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); p_cli->pf_total_lines = curr_line - p_blk->start_line; + p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn, + ILT_CLI_CDUC); + /* CDUC VF */ p_blk = &p_cli->vf_blks[CDUC_BLK]; total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn); @@ -293,21 +539,128 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); + /* CDUT PF */ + p_cli = &p_mngr->clients[ILT_CLI_CDUT]; + p_cli->first.val = curr_line; + + /* first the 'working' task memory */ + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + p_seg = qed_cxt_tid_seg_info(p_hwfn, i); + if (!p_seg || p_seg->count == 0) + continue; + + p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)]; + total = p_seg->count * p_mngr->task_type_size[p_seg->type]; + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total, + p_mngr->task_type_size[p_seg->type]); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + + /* next the 'init' task memory (forced load memory) */ + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + p_seg = qed_cxt_tid_seg_info(p_hwfn, i); + if (!p_seg || p_seg->count == 0) + continue; + + p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]; + + if (!p_seg->has_fl_mem) { + /* The segment is active (total size of 'working' + * memory is > 0) but has no FL (forced-load, Init) + * memory. Thus: + * + * 1. The total-size in the corresponding FL block of + * the ILT client is set to 0 - no ILT lines are + * provisioned and no ILT memory allocated. + * + * 2. The start-line of said block is set to the + * start line of the matching working memory + * block in the ILT client. This is later used to + * configure the CDU segment offset registers and + * results in FL commands for TIDs of this + * segment behaving as regular load commands + * (loading TIDs from the working memory). 
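The block sizing in this function reduces to elements-per-page arithmetic: an ILT page holds ILT_PAGE_IN_BYTES(p_size) / elem_size contexts, and a block needs the corresponding ceiling number of lines. A standalone sketch of that computation; the 128-byte element size and the count below are made-up inputs for illustration:

#include <stdio.h>

/* Mirrors ILT_PAGE_IN_BYTES(): 1 << (p_size + 12), i.e. 32K for p_size 3. */
static unsigned int ilt_page_bytes(unsigned int p_size)
{
	return 1u << (p_size + 12);
}

/* Lines needed for 'count' elements that never straddle a page. */
static unsigned int ilt_lines(unsigned int count, unsigned int elem_size,
			      unsigned int p_size)
{
	unsigned int per_page = ilt_page_bytes(p_size) / elem_size;

	return (count + per_page - 1) / per_page;
}

int main(void)
{
	/* 10000 task contexts of 128 bytes on 32K pages: 256 per page. */
	printf("lines = %u\n", ilt_lines(10000, 128, 3));
	return 0;
}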
+ */ + line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line; + + qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0); + continue; + } total = p_seg->count * p_mngr->task_type_size[p_seg->type]; + + qed_ilt_cli_blk_fill(p_cli, p_blk, + curr_line, total, + p_mngr->task_type_size[p_seg->type]); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line; + + /* CDUT VF */ + p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF); + if (p_seg && p_seg->count) { + /* Strictly speaking we need to iterate over all VF + * task segment types, but a VF has only 1 segment + */ + + /* 'working' memory */ + total = p_seg->count * p_mngr->task_type_size[p_seg->type]; + + p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)]; + qed_ilt_cli_blk_fill(p_cli, p_blk, + curr_line, total, + p_mngr->task_type_size[p_seg->type]); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + + /* 'init' memory */ + p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]; + if (!p_seg->has_fl_mem) { + /* see comment above */ + line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line; + qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0); + } else { + task_size = p_mngr->task_type_size[p_seg->type]; + qed_ilt_cli_blk_fill(p_cli, p_blk, + curr_line, total, task_size); + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + p_cli->vf_total_lines = curr_line - + p_cli->vf_blks[0].start_line; + + /* Now for the rest of the VFs */ + for (i = 1; i < p_mngr->vf_count; i++) { + p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)]; + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + + p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]; + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + } + /* QM */ p_cli = &p_mngr->clients[ILT_CLI_QM]; p_blk = &p_cli->pf_blks[0]; qed_cxt_qm_iids(p_hwfn, &qm_iids); total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, - qm_iids.vf_cids, 0, + qm_iids.vf_cids, qm_iids.tids, p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs); DP_VERBOSE(p_hwfn, QED_MSG_ILT, - "QM ILT Info, (cids=%d, vf_cids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n", + "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n", qm_iids.cids, qm_iids.vf_cids, + qm_iids.tids, p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total); qed_ilt_cli_blk_fill(p_cli, p_blk, @@ -317,6 +670,75 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM); p_cli->pf_total_lines = curr_line - p_blk->start_line; + /* SRC */ + p_cli = &p_mngr->clients[ILT_CLI_SRC]; + qed_cxt_src_iids(p_mngr, &src_iids); + + /* Both the PF and VFs searcher connections are stored in the per PF + * database. Thus sum the PF searcher cids and all the VFs searcher + * cids. 
+ */ + total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; + if (total) { + u32 local_max = max_t(u32, total, + SRC_MIN_NUM_ELEMS); + + total = roundup_pow_of_two(local_max); + + p_blk = &p_cli->pf_blks[0]; + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * sizeof(struct src_ent), + sizeof(struct src_ent)); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_SRC); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + } + + /* TM PF */ + p_cli = &p_mngr->clients[ILT_CLI_TM]; + qed_cxt_tm_iids(p_mngr, &tm_iids); + total = tm_iids.pf_cids + tm_iids.pf_tids_total; + if (total) { + p_blk = &p_cli->pf_blks[0]; + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * TM_ELEM_SIZE, TM_ELEM_SIZE); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TM); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + } + + /* TM VF */ + total = tm_iids.per_vf_cids + tm_iids.per_vf_tids; + if (total) { + p_blk = &p_cli->vf_blks[0]; + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * TM_ELEM_SIZE, TM_ELEM_SIZE); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TM); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + + for (i = 1; i < p_mngr->vf_count; i++) + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TM); + } + + /* TSDM (SRQ CONTEXT) */ + total = qed_cxt_get_srq_count(p_hwfn); + + if (total) { + p_cli = &p_mngr->clients[ILT_CLI_TSDM]; + p_blk = &p_cli->pf_blks[SRQ_BLK]; + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * SRQ_CXT_SIZE, SRQ_CXT_SIZE); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TSDM); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + } + if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line > RESC_NUM(p_hwfn, QED_ILT)) { DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n", @@ -327,8 +749,122 @@ return 0; } +static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 i; + + if (!p_mngr->t2) + return; + + for (i = 0; i < p_mngr->t2_num_pages; i++) + if (p_mngr->t2[i].p_virt) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_mngr->t2[i].size, + p_mngr->t2[i].p_virt, + p_mngr->t2[i].p_phys); + + kfree(p_mngr->t2); + p_mngr->t2 = NULL; +} + +static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 conn_num, total_size, ent_per_page, psz, i; + struct qed_ilt_client_cfg *p_src; + struct qed_src_iids src_iids; + struct qed_dma_mem *p_t2; + int rc; + + memset(&src_iids, 0, sizeof(src_iids)); + + /* if the SRC ILT client is inactive - there are no connections + * requiring the searcher, leave. 
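The T2 sizing that follows can be sanity-checked with plain arithmetic: the entry count is floored at SRC_MIN_NUM_ELEMS, rounded up to a power of two, and spread over pages of the SRC ILT client's size. A standalone sketch using the struct src_ent layout from this patch (56 opaque bytes plus a u64 next pointer, 64 bytes total); the connection count and page size are made-up inputs:

#include <stdio.h>

#define SRC_MIN_NUM_ELEMS 256u

/* Smallest power of two >= v, for v > 0. */
static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int conn_num = 1000;	/* PF cids + per-VF cids * vf_count */
	unsigned int ent_size = 64;	/* sizeof(struct src_ent) */
	unsigned int psz = 32 * 1024;	/* SRC ILT client page size */
	unsigned int total, pages;

	if (conn_num < SRC_MIN_NUM_ELEMS)
		conn_num = SRC_MIN_NUM_ELEMS;
	total = roundup_pow2(conn_num);			/* 1000 -> 1024 entries */
	pages = (total * ent_size + psz - 1) / psz;	/* -> 2 pages */
	printf("t2: %u entries across %u pages\n", total, pages);
	return 0;
}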
+ */ + p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC]; + if (!p_src->active) + return 0; + + qed_cxt_src_iids(p_mngr, &src_iids); + conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; + total_size = conn_num * sizeof(struct src_ent); + + /* use the same page size as the SRC ILT client */ + psz = ILT_PAGE_IN_BYTES(p_src->p_size.val); + p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz); + + /* allocate t2 */ + p_mngr->t2 = kzalloc(p_mngr->t2_num_pages * sizeof(struct qed_dma_mem), + GFP_KERNEL); + if (!p_mngr->t2) { + DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n"); + rc = -ENOMEM; + goto t2_fail; + } + + /* allocate t2 pages */ + for (i = 0; i < p_mngr->t2_num_pages; i++) { + u32 size = min_t(u32, total_size, psz); + void **p_virt = &p_mngr->t2[i].p_virt; + + *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + size, + &p_mngr->t2[i].p_phys, GFP_KERNEL); + if (!p_mngr->t2[i].p_virt) { + rc = -ENOMEM; + goto t2_fail; + } + memset(*p_virt, 0, size); + p_mngr->t2[i].size = size; + total_size -= size; + } + + /* Set the t2 pointers */ + + /* entries per page - must be a power of two */ + ent_per_page = psz / sizeof(struct src_ent); + + p_mngr->first_free = (u64) p_mngr->t2[0].p_phys; + + p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page]; + p_mngr->last_free = (u64) p_t2->p_phys + + ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent); + + for (i = 0; i < p_mngr->t2_num_pages; i++) { + u32 ent_num = min_t(u32, + ent_per_page, + conn_num); + struct src_ent *entries = p_mngr->t2[i].p_virt; + u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val; + u32 j; + + for (j = 0; j < ent_num - 1; j++) { + val = p_ent_phys + (j + 1) * sizeof(struct src_ent); + entries[j].next = cpu_to_be64(val); + } + + if (i < p_mngr->t2_num_pages - 1) + val = (u64) p_mngr->t2[i + 1].p_phys; + else + val = 0; + entries[j].next = cpu_to_be64(val); + + conn_num -= ent_num; + } + + return 0; + +t2_fail: + qed_cxt_src_t2_free(p_hwfn); + return rc; +} + #define for_each_ilt_valid_client(pos, clients) \ - for (pos = 0; pos < ILT_CLI_MAX; pos++) + for (pos = 0; pos < ILT_CLI_MAX; pos++) \ + if (!clients[pos].active) { \ + continue; \ + } else \ /* Total number of ILT lines used by this PF */ static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients) @@ -336,12 +872,8 @@ static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients) u32 size = 0; u32 i; - for_each_ilt_valid_client(i, ilt_clients) { - if (!ilt_clients[i].active) - continue; - size += (ilt_clients[i].last.val - - ilt_clients[i].first.val + 1); - } + for_each_ilt_valid_client(i, ilt_clients) + size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1); return size; } @@ -372,15 +904,22 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, u32 start_line_offset) { struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow; - u32 lines, line, sz_left; + u32 lines, line, sz_left, lines_to_skip = 0; + + /* Special handling for RoCE that supports dynamic allocation */ + if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) && + ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM)) + return 0; + + lines_to_skip = p_blk->dynamic_line_cnt; if (!p_blk->total_size) return 0; sz_left = p_blk->total_size; - lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page); + lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip; line = p_blk->start_line + start_line_offset - - p_hwfn->p_cxt_mngr->pf_start_line; + p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip; for (; lines; lines--) 
{
 		dma_addr_t p_phys;
@@ -434,8 +973,6 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
 		   (u32)(size * sizeof(struct qed_dma_mem)));
 
 	for_each_ilt_valid_client(i, clients) {
-		if (!clients[i].active)
-			continue;
 		for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
 			p_blk = &clients[i].pf_blks[j];
 			rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
@@ -514,6 +1051,7 @@ cid_map_fail:
 
 int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
 {
+	struct qed_ilt_client_cfg *clients;
 	struct qed_cxt_mngr *p_mngr;
 	u32 i;
 
@@ -524,20 +1062,42 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
 	}
 
 	/* Initialize ILT client registers */
-	p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
-	p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
-	p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
-
-	p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
-	p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
-	p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
-
+	clients = p_mngr->clients;
+	clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+	clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+	clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+	clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+	clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+	clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+
+	clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
+	clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
+	clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
+
+	clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
+	clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
+	clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
+
+	clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
+	clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
+	clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
+
+	clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
+	clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
+	clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
+
 	/* default ILT page size for all clients is 32K */
 	for (i = 0; i < ILT_CLI_MAX; i++)
 		p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
 
+	/* Initialize task sizes */
+	p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
+	p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
+
 	if (p_hwfn->cdev->p_iov_info)
 		p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
+
+	/* Initialize the dynamic ILT allocation mutex */
+	mutex_init(&p_mngr->mutex);
 
 	/* Set the cxt mngr pointer prior to further allocations */
 	p_hwfn->p_cxt_mngr = p_mngr;
@@ -556,6 +1116,13 @@ int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
 		goto tables_alloc_fail;
 	}
 
+	/* Allocate the T2 table */
+	rc = qed_cxt_src_t2_alloc(p_hwfn);
+	if (rc) {
+		DP_NOTICE(p_hwfn, "Failed to allocate T2 memory\n");
+		goto tables_alloc_fail;
+	}
+
 	/* Allocate and initialize the acquired cids bitmaps */
 	rc = qed_cid_map_alloc(p_hwfn);
 	if (rc) {
@@ -576,6 +1143,7 @@ void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
 		return;
 
 	qed_cid_map_free(p_hwfn);
+	qed_cxt_src_t2_free(p_hwfn);
 	qed_ilt_shadow_free(p_hwfn);
 	kfree(p_hwfn->p_cxt_mngr);
 
@@ -620,6 +1188,48 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
 #define CDUC_NCIB_MASK \
 	(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
 
+#define CDUT_TYPE0_CXT_SIZE_SHIFT \
+	CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
+
+#define CDUT_TYPE0_CXT_SIZE_MASK \
+	(CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
+	 CDUT_TYPE0_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
+	CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE0_BLOCK_WASTE_MASK \
+	(CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
+	 CDUT_TYPE0_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE0_NCIB_SHIFT \
+	CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE0_NCIB_MASK \
+	(CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
+	 CDUT_TYPE0_NCIB_SHIFT)
+
+#define CDUT_TYPE1_CXT_SIZE_SHIFT \
+	CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
+
+#define CDUT_TYPE1_CXT_SIZE_MASK \
+	(CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
+	 CDUT_TYPE1_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
+	CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE1_BLOCK_WASTE_MASK \
+	(CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
+	 CDUT_TYPE1_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE1_NCIB_SHIFT \
+	CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE1_NCIB_MASK \
+	(CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
+	 CDUT_TYPE1_NCIB_SHIFT)
+
 static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
 {
 	u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
@@ -634,6 +1244,92 @@ static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
 	SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
 	SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
 	STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
+
+	/* CDUT - type-0 tasks configuration */
+	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
+	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
+	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+	/* cxt size and block-waste are multiples of 8 */
+	cdu_params = 0;
+	SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
+	SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
+	SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
+	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
+
+	/* CDUT - type-1 tasks configuration */
+	cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
+	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+	/* cxt size and block-waste are multiples of 8 */
+	cdu_params = 0;
+	SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
+	SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
+	SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
+	STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
+}
+
+/* CDU PF */
+#define CDU_SEG_REG_TYPE_SHIFT		CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
+#define CDU_SEG_REG_TYPE_MASK		0x1
+#define CDU_SEG_REG_OFFSET_SHIFT	0
+#define CDU_SEG_REG_OFFSET_MASK		CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
+
+static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ilt_client_cfg *p_cli;
+	struct qed_tid_seg *p_seg;
+	u32 cdu_seg_params, offset;
+	int i;
+
+	static const u32 rt_type_offset_arr[] = {
+		CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
+		CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
+		CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
+		CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
+	};
+
+	static const u32 rt_type_offset_fl_arr[] = {
+		CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
+		CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
+		CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
+		CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
+	};
+
+	p_cli =
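/*
 * Worked example for the CDUT parameter math in qed_cdu_init_common()
 * above (the numbers are illustrative, not taken from real hardware):
 * with a 32K ILT page and a type-0 task context of 80 bytes,
 *
 *	elems_per_page = 32768 / 80 = 409
 *	block_waste    = 32768 - 409 * 80 = 48
 *
 * Both cxt_size and block_waste are programmed in units of 8 bytes, which
 * is why the values are shifted right by 3 before SET_FIELD().
 */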
&p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + + /* There are initializations only for CDUT during pf Phase */ + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + /* Segment 0 */ + p_seg = qed_cxt_tid_seg_info(p_hwfn, i); + if (!p_seg) + continue; + + /* Note: start_line is already adjusted for the CDU + * segment register granularity, so we just need to + * divide. Adjustment is implicit as we assume ILT + * Page size is larger than 32K! + */ + offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) * + (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line - + p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES; + + cdu_seg_params = 0; + SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type); + SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset); + STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params); + + offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) * + (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line - + p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES; + + cdu_seg_params = 0; + SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type); + SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset); + STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params); + } } void qed_qm_init_pf(struct qed_hwfn *p_hwfn) @@ -742,14 +1438,11 @@ static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn) ilt_clients = p_hwfn->p_cxt_mngr->clients; for_each_ilt_valid_client(i, ilt_clients) { - if (!ilt_clients[i].active) - continue; STORE_RT_REG(p_hwfn, ilt_clients[i].first.reg, ilt_clients[i].first.val); STORE_RT_REG(p_hwfn, - ilt_clients[i].last.reg, - ilt_clients[i].last.val); + ilt_clients[i].last.reg, ilt_clients[i].last.val); STORE_RT_REG(p_hwfn, ilt_clients[i].p_size.reg, ilt_clients[i].p_size.val); @@ -786,6 +1479,33 @@ static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn) PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET, p_cli->vf_total_lines); } + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); + if (p_cli->active) { + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET, + blk_factor); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET, + p_cli->pf_total_lines); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET, + p_cli->vf_total_lines); + } + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM]; + blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); + if (p_cli->active) { + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET, + p_cli->pf_total_lines); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET, + p_cli->vf_total_lines); + } } /* ILT (PSWRQ2) PF */ @@ -804,9 +1524,6 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn) clients = p_hwfn->p_cxt_mngr->clients; for_each_ilt_valid_client(i, clients) { - if (!clients[i].active) - continue; - /** Client's 1st val and RT array are absolute, ILT shadows' * lines are relative. 
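 * As an illustrative example: with pf_start_line 1000 and a client whose
 * first.val is 1010, the RT registers are programmed with the absolute
 * line 1010, while the backing page for that line lives at ilt_shadow[10].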
 	 */
@@ -837,6 +1554,137 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
 	}
 }
 
+/* SRC (Searcher) PF */
+static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
+{
+	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	u32 rounded_conn_num, conn_num, conn_max;
+	struct qed_src_iids src_iids;
+
+	memset(&src_iids, 0, sizeof(src_iids));
+	qed_cxt_src_iids(p_mngr, &src_iids);
+	conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+	if (!conn_num)
+		return;
+
+	conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
+	rounded_conn_num = roundup_pow_of_two(conn_max);
+
+	STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
+	STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
+		     ilog2(rounded_conn_num));
+
+	STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
+			 p_hwfn->p_cxt_mngr->first_free);
+	STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
+			 p_hwfn->p_cxt_mngr->last_free);
+}
+
+/* Timers PF */
+#define TM_CFG_NUM_IDS_SHIFT		0
+#define TM_CFG_NUM_IDS_MASK		0xFFFFULL
+#define TM_CFG_PRE_SCAN_OFFSET_SHIFT	16
+#define TM_CFG_PRE_SCAN_OFFSET_MASK	0x1FFULL
+#define TM_CFG_PARENT_PF_SHIFT		25
+#define TM_CFG_PARENT_PF_MASK		0x7ULL
+
+#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT	30
+#define TM_CFG_CID_PRE_SCAN_ROWS_MASK	0x1FFULL
+
+#define TM_CFG_TID_OFFSET_SHIFT		30
+#define TM_CFG_TID_OFFSET_MASK		0x7FFFFULL
+#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT	49
+#define TM_CFG_TID_PRE_SCAN_ROWS_MASK	0x1FFULL
+
+static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
+{
+	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	u32 active_seg_mask = 0, tm_offset, rt_reg;
+	struct qed_tm_iids tm_iids;
+	u64 cfg_word;
+	u8 i;
+
+	memset(&tm_iids, 0, sizeof(tm_iids));
+	qed_cxt_tm_iids(p_mngr, &tm_iids);
+
+	/* @@@TBD No pre-scan for now */
+
+	/* Note: We assume consecutive VFs for a PF */
+	for (i = 0; i < p_mngr->vf_count; i++) {
+		cfg_word = 0;
+		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
+		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+		SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
+		rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+			 (sizeof(cfg_word) / sizeof(u32)) *
+			 (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+	}
+
+	cfg_word = 0;
+	SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
+	SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+	SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);	/* n/a for PF */
+	SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);	/* scan all */
+
+	rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+		 (sizeof(cfg_word) / sizeof(u32)) *
+		 (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
+	STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+
+	/* enable scan */
+	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
+		     tm_iids.pf_cids ? 0x1 : 0x0);
+
+	/* @@@TBD how to enable the scan for the VFs */
+
+	tm_offset = tm_iids.per_vf_cids;
+
+	/* Note: We assume consecutive VFs for a PF */
+	for (i = 0; i < p_mngr->vf_count; i++) {
+		cfg_word = 0;
+		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
+		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+		SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+		SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
+
+		rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+			 (sizeof(cfg_word) / sizeof(u32)) *
+			 (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+	}
+
+	tm_offset = tm_iids.pf_cids;
+	for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+		cfg_word = 0;
+		SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
+		SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+		SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
+		SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+		SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
+
+		rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+			 (sizeof(cfg_word) / sizeof(u32)) *
+			 (NUM_OF_VFS(p_hwfn->cdev) +
+			  p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
+
+		STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+		active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
+
+		tm_offset += tm_iids.pf_tids[i];
+	}
+
+	if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE)
+		active_seg_mask = 0;
+
+	STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
+
+	/* @@@TBD how to enable the scan for the VFs */
+}
+
 void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
 {
 	qed_cdu_init_common(p_hwfn);
@@ -847,7 +1695,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
 	qed_qm_init_pf(p_hwfn);
 	qed_cm_init_pf(p_hwfn);
 	qed_dq_init_pf(p_hwfn);
+	qed_cdu_init_pf(p_hwfn);
 	qed_ilt_init_pf(p_hwfn);
+	qed_src_init_pf(p_hwfn);
+	qed_tm_init_pf(p_hwfn);
 }
 
 int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
@@ -968,17 +1819,439 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
 	return 0;
 }
 
-int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
+			    struct qed_rdma_pf_params *p_params)
 {
-	struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params;
+	u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
+	enum protocol_type proto;
+
+	num_mrs = min_t(u32, RDMA_MAX_TIDS, p_params->num_mrs);
+	num_tasks = num_mrs;	/* each mr uses a single task id */
+	num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
+
+	switch (p_hwfn->hw_info.personality) {
+	case QED_PCI_ETH_ROCE:
+		num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
+		num_cons = num_qps * 2;	/* each QP requires two connections */
+		proto = PROTOCOLID_ROCE;
+		break;
+	default:
+		return;
+	}
+
+	if (num_cons && num_tasks) {
+		qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
+
+		/* Deliberately passing ROCE for the task id. This is because
+		 * iWARP / RoCE share the task id.
+ */ + qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE, + QED_CXT_ROCE_TID_SEG, 1, + num_tasks, false); + qed_cxt_set_srq_count(p_hwfn, num_srqs); + } else { + DP_INFO(p_hwfn->cdev, + "RDMA personality used without setting params!\n"); + } +} +int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn) +{ /* Set the number of required CORE connections */ u32 core_cids = 1; /* SPQ */ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0); - qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, - p_params->num_cons, 1); + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ETH_ROCE: + { + qed_rdma_set_pf_params(p_hwfn, + &p_hwfn-> + pf_params.rdma_pf_params); + /* no need for break since RoCE coexist with Ethernet */ + } + case QED_PCI_ETH: + { + struct qed_eth_pf_params *p_params = + &p_hwfn->pf_params.eth_pf_params; + + qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, + p_params->num_cons, 1); + break; + } + case QED_PCI_ISCSI: + { + struct qed_iscsi_pf_params *p_params; + + p_params = &p_hwfn->pf_params.iscsi_pf_params; + + if (p_params->num_cons && p_params->num_tasks) { + qed_cxt_set_proto_cid_count(p_hwfn, + PROTOCOLID_ISCSI, + p_params->num_cons, + 0); + + qed_cxt_set_proto_tid_count(p_hwfn, + PROTOCOLID_ISCSI, + QED_CXT_ISCSI_TID_SEG, + 0, + p_params->num_tasks, + true); + } else { + DP_INFO(p_hwfn->cdev, + "Iscsi personality used without setting params!\n"); + } + break; + } + default: + return -EINVAL; + } + + return 0; +} + +int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, + struct qed_tid_mem *p_info) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 proto, seg, total_lines, i, shadow_line; + struct qed_ilt_client_cfg *p_cli; + struct qed_ilt_cli_blk *p_fl_seg; + struct qed_tid_seg *p_seg_info; + + /* Verify the personality */ + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ISCSI: + proto = PROTOCOLID_ISCSI; + seg = QED_CXT_ISCSI_TID_SEG; + break; + default: + return -EINVAL; + } + + p_cli = &p_mngr->clients[ILT_CLI_CDUT]; + if (!p_cli->active) + return -EINVAL; + + p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg]; + if (!p_seg_info->has_fl_mem) + return -EINVAL; + + p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)]; + total_lines = DIV_ROUND_UP(p_fl_seg->total_size, + p_fl_seg->real_size_in_page); + + for (i = 0; i < total_lines; i++) { + shadow_line = i + p_fl_seg->start_line - + p_hwfn->p_cxt_mngr->pf_start_line; + p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt; + } + p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) - + p_fl_seg->real_size_in_page; + p_info->tid_size = p_mngr->task_type_size[p_seg_info->type]; + p_info->num_tids_per_block = p_fl_seg->real_size_in_page / + p_info->tid_size; + + return 0; +} + +/* This function is very RoCE oriented, if another protocol in the future + * will want this feature we'll need to modify the function to be more generic + */ +int +qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, + enum qed_cxt_elem_type elem_type, u32 iid) +{ + u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line; + struct qed_ilt_client_cfg *p_cli; + struct qed_ilt_cli_blk *p_blk; + struct qed_ptt *p_ptt; + dma_addr_t p_phys; + u64 ilt_hw_entry; + void *p_virt; + int rc = 0; + + switch (elem_type) { + case QED_ELEM_CXT: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + elem_size = CONN_CXT_SIZE(p_hwfn); + p_blk = &p_cli->pf_blks[CDUC_BLK]; + break; + case QED_ELEM_SRQ: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; + elem_size = SRQ_CXT_SIZE; + p_blk = &p_cli->pf_blks[SRQ_BLK]; + 
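/*
 * Each case of this switch only picks the ILT client, the PF block and
 * the element size; the line lookup that follows is then uniform. A
 * condensed sketch, using the names from this function:
 *
 *	elems_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) / elem_size;
 *	line        = p_blk->start_line + (iid / elems_per_p);
 *	shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
 */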
break; + case QED_ELEM_TASK: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn); + p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)]; + break; + default: + DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type); + return -EINVAL; + } + + /* Calculate line in ilt */ + hw_p_size = p_cli->p_size.val; + elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size; + line = p_blk->start_line + (iid / elems_per_p); + shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line; + + /* If line is already allocated, do nothing, otherwise allocate it and + * write it to the PSWRQ2 registers. + * This section can be run in parallel from different contexts and thus + * a mutex protection is needed. + */ + + mutex_lock(&p_hwfn->p_cxt_mngr->mutex); + + if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt) + goto out0; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_NOTICE(p_hwfn, + "QED_TIME_OUT on ptt acquire - dynamic allocation"); + rc = -EBUSY; + goto out0; + } + + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_blk->real_size_in_page, + &p_phys, GFP_KERNEL); + if (!p_virt) { + rc = -ENOMEM; + goto out1; + } + memset(p_virt, 0, p_blk->real_size_in_page); + + /* configuration of refTagMask to 0xF is required for RoCE DIF MR only, + * to compensate for a HW bug, but it is configured even if DIF is not + * enabled. This is harmless and allows us to avoid a dedicated API. We + * configure the field for all of the contexts on the newly allocated + * page. + */ + if (elem_type == QED_ELEM_TASK) { + u32 elem_i; + u8 *elem_start = (u8 *)p_virt; + union type1_task_context *elem; + + for (elem_i = 0; elem_i < elems_per_p; elem_i++) { + elem = (union type1_task_context *)elem_start; + SET_FIELD(elem->roce_ctx.tdif_context.flags1, + TDIF_TASK_CONTEXT_REFTAGMASK, 0xf); + elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn); + } + } + + p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt; + p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys; + p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size = + p_blk->real_size_in_page; + + /* compute absolute offset */ + reg_offset = PSWRQ2_REG_ILT_MEMORY + + (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS); + + ilt_hw_entry = 0; + SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL); + SET_FIELD(ilt_hw_entry, + ILT_ENTRY_PHY_ADDR, + (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12)); + + /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */ + qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry, + reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), 0); + + if (elem_type == QED_ELEM_CXT) { + u32 last_cid_allocated = (1 + (iid / elems_per_p)) * + elems_per_p; + + /* Update the relevant register in the parser */ + qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, + last_cid_allocated - 1); + + if (!p_hwfn->b_rdma_enabled_in_prs) { + /* Enable RoCE search */ + qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1); + p_hwfn->b_rdma_enabled_in_prs = true; + } + } + +out1: + qed_ptt_release(p_hwfn, p_ptt); +out0: + mutex_unlock(&p_hwfn->p_cxt_mngr->mutex); + + return rc; +} + +/* This function is very RoCE oriented, if another protocol in the future + * will want this feature we'll need to modify the function to be more generic + */ +static int +qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn, + enum qed_cxt_elem_type elem_type, + u32 start_iid, u32 count) +{ + u32 start_line, end_line, shadow_start_line, shadow_end_line; + u32 reg_offset, elem_size, hw_p_size, elems_per_p; + 
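/*
 * For reference, the ILT entry that qed_cxt_dynamic_ilt_alloc() above
 * writes packs a valid bit and the 4K-aligned page address into one
 * 64-bit value (condensed from the code above):
 *
 *	ilt_hw_entry = 0;
 *	SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
 *	SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, p_phys >> 12);
 *
 * The free path below reuses the same layout with an all-zero entry,
 * i.e. the valid bit clear, to unmap a line.
 */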
struct qed_ilt_client_cfg *p_cli; + struct qed_ilt_cli_blk *p_blk; + u32 end_iid = start_iid + count; + struct qed_ptt *p_ptt; + u64 ilt_hw_entry = 0; + u32 i; + + switch (elem_type) { + case QED_ELEM_CXT: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + elem_size = CONN_CXT_SIZE(p_hwfn); + p_blk = &p_cli->pf_blks[CDUC_BLK]; + break; + case QED_ELEM_SRQ: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; + elem_size = SRQ_CXT_SIZE; + p_blk = &p_cli->pf_blks[SRQ_BLK]; + break; + case QED_ELEM_TASK: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn); + p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)]; + break; + default: + DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type); + return -EINVAL; + } + + /* Calculate line in ilt */ + hw_p_size = p_cli->p_size.val; + elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size; + start_line = p_blk->start_line + (start_iid / elems_per_p); + end_line = p_blk->start_line + (end_iid / elems_per_p); + if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p)) + end_line--; + + shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line; + shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_NOTICE(p_hwfn, + "QED_TIME_OUT on ptt acquire - dynamic allocation"); + return -EBUSY; + } + + for (i = shadow_start_line; i < shadow_end_line; i++) { + if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt) + continue; + + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_hwfn->p_cxt_mngr->ilt_shadow[i].size, + p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt, + p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys); + + p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL; + p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0; + p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0; + + /* compute absolute offset */ + reg_offset = PSWRQ2_REG_ILT_MEMORY + + ((start_line++) * ILT_REG_SIZE_IN_BYTES * + ILT_ENTRY_IN_REGS); + + /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a + * wide-bus. 
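 * A single ILT line spans ILT_ENTRY_IN_REGS consecutive 32-bit GRC
 * registers (see the reg_offset computation above), so per-register
 * writes could momentarily expose a half-updated entry; one DMAE
 * transaction updates the whole line at once.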
+ */ + qed_dmae_host2grc(p_hwfn, p_ptt, + (u64) (uintptr_t) &ilt_hw_entry, + reg_offset, + sizeof(ilt_hw_entry) / sizeof(u32), + 0); + } + + qed_ptt_release(p_hwfn, p_ptt); + + return 0; +} + +int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto) +{ + int rc; + u32 cid; + + /* Free Connection CXT */ + rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT, + qed_cxt_get_proto_cid_start(p_hwfn, + proto), + qed_cxt_get_proto_cid_count(p_hwfn, + proto, &cid)); + + if (rc) + return rc; + + /* Free Task CXT */ + rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0, + qed_cxt_get_proto_tid_count(p_hwfn, proto)); + if (rc) + return rc; + + /* Free TSDM CXT */ + rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0, + qed_cxt_get_srq_count(p_hwfn)); + + return rc; +} + +int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn, + u32 tid, u8 ctx_type, void **pp_task_ctx) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_ilt_client_cfg *p_cli; + struct qed_ilt_cli_blk *p_seg; + struct qed_tid_seg *p_seg_info; + u32 proto, seg; + u32 total_lines; + u32 tid_size, ilt_idx; + u32 num_tids_per_block; + + /* Verify the personality */ + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ISCSI: + proto = PROTOCOLID_ISCSI; + seg = QED_CXT_ISCSI_TID_SEG; + break; + default: + return -EINVAL; + } + + p_cli = &p_mngr->clients[ILT_CLI_CDUT]; + if (!p_cli->active) + return -EINVAL; + + p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg]; + + if (ctx_type == QED_CTX_WORKING_MEM) { + p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)]; + } else if (ctx_type == QED_CTX_FL_MEM) { + if (!p_seg_info->has_fl_mem) + return -EINVAL; + p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)]; + } else { + return -EINVAL; + } + total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page); + tid_size = p_mngr->task_type_size[p_seg_info->type]; + num_tids_per_block = p_seg->real_size_in_page / tid_size; + + if (total_lines < tid / num_tids_per_block) + return -EINVAL; + + ilt_idx = tid / num_tids_per_block + p_seg->start_line - + p_mngr->pf_start_line; + *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt + + (tid % num_tids_per_block) * tid_size; return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 234c0fa8d..c6f6f2e81 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -21,6 +21,14 @@ struct qed_cxt_info { enum protocol_type type; }; +#define MAX_TID_BLOCKS 512 +struct qed_tid_mem { + u32 tid_size; + u32 num_tids_per_block; + u32 waste; + u8 *blocks[MAX_TID_BLOCKS]; /* 4K */ +}; + /** * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type * @@ -46,8 +54,22 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info); +/** + * @brief qed_cxt_get_tid_mem_info + * + * @param p_hwfn + * @param p_info + * + * @return int + */ +int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, + struct qed_tid_mem *p_info); + +#define QED_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI +#define QED_CXT_ROCE_TID_SEG PROTOCOLID_ROCE enum qed_cxt_elem_type { QED_ELEM_CXT, + QED_ELEM_SRQ, QED_ELEM_TASK }; @@ -149,4 +171,6 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid); +#define QED_CTX_WORKING_MEM 0 +#define QED_CTX_FL_MEM 1 #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 
21ec1c2df..3656d2fd6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -9,6 +9,7 @@ #include <linux/types.h> #include <asm/byteorder.h> #include <linux/bitops.h> +#include <linux/dcbnl.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/slab.h> @@ -18,6 +19,10 @@ #include "qed_dcbx.h" #include "qed_hsi.h" #include "qed_sp.h" +#include "qed_sriov.h" +#ifdef CONFIG_DCB +#include <linux/qed/qed_eth_if.h> +#endif #define QED_DCBX_MAX_MIB_READ_TRY (100) #define QED_ETH_TYPE_DEFAULT (0) @@ -48,40 +53,94 @@ static bool qed_dcbx_app_ethtype(u32 app_info_bitmap) DCBX_APP_SF_ETHTYPE); } +static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap) +{ + u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); + + /* Old MFW */ + if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) + return qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE); +} + static bool qed_dcbx_app_port(u32 app_info_bitmap) { return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == DCBX_APP_SF_PORT); } -static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type) +{ + u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); + + /* Old MFW */ + if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) + return qed_dcbx_app_port(app_info_bitmap); + + return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT); +} + +static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_ethtype(app_info_bitmap) && - proto_id == QED_ETH_TYPE_DEFAULT); + bool ethtype; + + if (ieee) + ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); + else + ethtype = qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT)); } -static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_port(app_info_bitmap) && - proto_id == QED_TCP_PORT_ISCSI); + bool port; + + if (ieee) + port = qed_dcbx_ieee_app_port(app_info_bitmap, + DCBX_APP_SF_IEEE_TCP_PORT); + else + port = qed_dcbx_app_port(app_info_bitmap); + + return !!(port && (proto_id == QED_TCP_PORT_ISCSI)); } -static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_ethtype(app_info_bitmap) && - proto_id == QED_ETH_TYPE_FCOE); + bool ethtype; + + if (ieee) + ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); + else + ethtype = qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE)); } -static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_ethtype(app_info_bitmap) && - proto_id == QED_ETH_TYPE_ROCE); + bool ethtype; + + if (ieee) + ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); + else + ethtype = qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE)); } -static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id) +static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) { - return !!(qed_dcbx_app_port(app_info_bitmap) && - proto_id == QED_UDP_PORT_TYPE_ROCE_V2); + bool port; + + if (ieee) + port = qed_dcbx_ieee_app_port(app_info_bitmap, + DCBX_APP_SF_IEEE_UDP_PORT); + else + port 
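/*
 * The *_tlv() helpers in this block all share one pattern. Under IEEE
 * DCBX the 3-bit DCBX_APP_SF_IEEE field distinguishes ethtype, TCP-port,
 * UDP-port and TCP+UDP-port matches; under CEE only the single
 * DCBX_APP_SF bit (ethtype vs. port) exists. DCBX_APP_SF_IEEE_RESERVED
 * marks an old MFW that never fills the IEEE field, in which case the
 * helpers fall back to the CEE selector:
 *
 *	if (QED_MFW_GET_FIELD(bitmap, DCBX_APP_SF_IEEE) ==
 *	    DCBX_APP_SF_IEEE_RESERVED)
 *		return qed_dcbx_app_port(bitmap);
 */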
= qed_dcbx_app_port(app_info_bitmap); + + return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2)); } static void @@ -160,17 +219,17 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, static bool qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, u32 app_prio_bitmap, - u16 id, enum dcbx_protocol_type *type) + u16 id, enum dcbx_protocol_type *type, bool ieee) { - if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) { + if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_FCOE; - } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) { + } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ROCE; - } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) { + } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ISCSI; - } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) { + } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ETH; - } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) { + } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) { *type = DCBX_PROTOCOL_ROCE_V2; } else { *type = DCBX_MAX_PROTOCOL_TYPE; @@ -190,17 +249,18 @@ static int qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data, struct dcbx_app_priority_entry *p_tbl, - u32 pri_tc_tbl, int count, bool dcbx_enabled) + u32 pri_tc_tbl, int count, u8 dcbx_version) { u8 tc, priority_map; enum dcbx_protocol_type type; + bool enable, ieee; u16 protocol_id; int priority; - bool enable; int i; DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); + ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE); /* Parse APP TLV */ for (i = 0; i < count; i++) { protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, @@ -215,7 +275,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, - protocol_id, &type)) { + protocol_id, &type, ieee)) { /* ETH always have the enable bit reset, as it gets * vlan information per packet. For other protocols, * should be set according to the dcbx_enabled @@ -252,7 +312,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, if (p_data->arr[type].update) continue; - enable = (type == DCBX_PROTOCOL_ETH) ? 
false : dcbx_enabled; + enable = !(type == DCBX_PROTOCOL_ETH); qed_dcbx_update_app_info(p_data, p_hwfn, enable, true, priority, tc, type); } @@ -271,15 +331,12 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) struct dcbx_ets_feature *p_ets; struct qed_hw_info *p_info; u32 pri_tc_tbl, flags; - bool dcbx_enabled; + u8 dcbx_version; int num_entries; int rc = 0; - /* If DCBx version is non zero, then negotiation was - * successfuly performed - */ flags = p_hwfn->p_dcbx_info->operational.flags; - dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); + dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); p_app = &p_hwfn->p_dcbx_info->operational.features.app; p_tbl = p_app->app_pri_tbl; @@ -291,13 +348,13 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, - num_entries, dcbx_enabled); + num_entries, dcbx_version); if (rc) return rc; p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); data.pf_id = p_hwfn->rel_pf_id; - data.dcbx_enabled = dcbx_enabled; + data.dcbx_enabled = !!dcbx_version; qed_dcbx_dp_protocol(p_hwfn, &data); @@ -351,6 +408,325 @@ qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn, return rc; } +#ifdef CONFIG_DCB +static void +qed_dcbx_get_priority_info(struct qed_hwfn *p_hwfn, + struct qed_dcbx_app_prio *p_prio, + struct qed_dcbx_results *p_results) +{ + u8 val; + + p_prio->roce = QED_DCBX_INVALID_PRIORITY; + p_prio->roce_v2 = QED_DCBX_INVALID_PRIORITY; + p_prio->iscsi = QED_DCBX_INVALID_PRIORITY; + p_prio->fcoe = QED_DCBX_INVALID_PRIORITY; + + if (p_results->arr[DCBX_PROTOCOL_ROCE].update && + p_results->arr[DCBX_PROTOCOL_ROCE].enable) + p_prio->roce = p_results->arr[DCBX_PROTOCOL_ROCE].priority; + + if (p_results->arr[DCBX_PROTOCOL_ROCE_V2].update && + p_results->arr[DCBX_PROTOCOL_ROCE_V2].enable) { + val = p_results->arr[DCBX_PROTOCOL_ROCE_V2].priority; + p_prio->roce_v2 = val; + } + + if (p_results->arr[DCBX_PROTOCOL_ISCSI].update && + p_results->arr[DCBX_PROTOCOL_ISCSI].enable) + p_prio->iscsi = p_results->arr[DCBX_PROTOCOL_ISCSI].priority; + + if (p_results->arr[DCBX_PROTOCOL_FCOE].update && + p_results->arr[DCBX_PROTOCOL_FCOE].enable) + p_prio->fcoe = p_results->arr[DCBX_PROTOCOL_FCOE].priority; + + if (p_results->arr[DCBX_PROTOCOL_ETH].update && + p_results->arr[DCBX_PROTOCOL_ETH].enable) + p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority; + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "Priorities: iscsi %d, roce %d, roce v2 %d, fcoe %d, eth %d\n", + p_prio->iscsi, p_prio->roce, p_prio->roce_v2, p_prio->fcoe, + p_prio->eth); +} + +static void +qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, + struct dcbx_app_priority_feature *p_app, + struct dcbx_app_priority_entry *p_tbl, + struct qed_dcbx_params *p_params, bool ieee) +{ + struct qed_app_entry *entry; + u8 pri_map; + int i; + + p_params->app_willing = QED_MFW_GET_FIELD(p_app->flags, + DCBX_APP_WILLING); + p_params->app_valid = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ENABLED); + p_params->app_error = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR); + p_params->num_app_entries = QED_MFW_GET_FIELD(p_app->flags, + DCBX_APP_NUM_ENTRIES); + for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { + entry = &p_params->app_entry[i]; + if (ieee) { + u8 sf_ieee; + u32 val; + + sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_SF_IEEE); + switch (sf_ieee) { + case DCBX_APP_SF_IEEE_RESERVED: + /* Old MFW */ + val = QED_MFW_GET_FIELD(p_tbl[i].entry, + 
DCBX_APP_SF); + entry->sf_ieee = val ? + QED_DCBX_SF_IEEE_TCP_UDP_PORT : + QED_DCBX_SF_IEEE_ETHTYPE; + break; + case DCBX_APP_SF_IEEE_ETHTYPE: + entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE; + break; + case DCBX_APP_SF_IEEE_TCP_PORT: + entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT; + break; + case DCBX_APP_SF_IEEE_UDP_PORT: + entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT; + break; + case DCBX_APP_SF_IEEE_TCP_UDP_PORT: + entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT; + break; + } + } else { + entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_SF)); + } + + pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); + entry->prio = ffs(pri_map) - 1; + entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_PROTOCOL_ID); + qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, + entry->proto_id, + &entry->proto_type, ieee); + } + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "APP params: willing %d, valid %d error = %d\n", + p_params->app_willing, p_params->app_valid, + p_params->app_error); +} + +static void +qed_dcbx_get_pfc_data(struct qed_hwfn *p_hwfn, + u32 pfc, struct qed_dcbx_params *p_params) +{ + u8 pfc_map; + + p_params->pfc.willing = QED_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING); + p_params->pfc.max_tc = QED_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS); + p_params->pfc.enabled = QED_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED); + pfc_map = QED_MFW_GET_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP); + p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0); + p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1); + p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2); + p_params->pfc.prio[3] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_3); + p_params->pfc.prio[4] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_4); + p_params->pfc.prio[5] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_5); + p_params->pfc.prio[6] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_6); + p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7); + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "PFC params: willing %d, pfc_bitmap %d\n", + p_params->pfc.willing, pfc_map); +} + +static void +qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn, + struct dcbx_ets_feature *p_ets, + struct qed_dcbx_params *p_params) +{ + u32 bw_map[2], tsa_map[2], pri_map; + int i; + + p_params->ets_willing = QED_MFW_GET_FIELD(p_ets->flags, + DCBX_ETS_WILLING); + p_params->ets_enabled = QED_MFW_GET_FIELD(p_ets->flags, + DCBX_ETS_ENABLED); + p_params->ets_cbs = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_CBS); + p_params->max_ets_tc = QED_MFW_GET_FIELD(p_ets->flags, + DCBX_ETS_MAX_TCS); + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "ETS params: willing %d, ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n", + p_params->ets_willing, + p_params->ets_cbs, + p_ets->pri_tc_tbl[0], p_params->max_ets_tc); + + /* 8 bit tsa and bw data corresponding to each of the 8 TC's are + * encoded in a type u32 array of size 2. 
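 * After the be32_to_cpu() conversion below, ((u8 *)bw_map)[i] indexes the
 * per-TC bytes in host memory order, so TCs 0-3 are served from the first
 * word and TCs 4-7 from the second.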
+	 */
+	bw_map[0] = be32_to_cpu(p_ets->tc_bw_tbl[0]);
+	bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]);
+	tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]);
+	tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]);
+	pri_map = p_ets->pri_tc_tbl[0];
+	for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+		p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
+		p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
+		p_params->ets_pri_tc_tbl[i] = QED_DCBX_PRIO2TC(pri_map, i);
+		DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+			   "elem %d bw_tbl %x tsa_tbl %x\n",
+			   i, p_params->ets_tc_bw_tbl[i],
+			   p_params->ets_tc_tsa_tbl[i]);
+	}
+}
+
+static void
+qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
+			   struct dcbx_app_priority_feature *p_app,
+			   struct dcbx_app_priority_entry *p_tbl,
+			   struct dcbx_ets_feature *p_ets,
+			   u32 pfc, struct qed_dcbx_params *p_params, bool ieee)
+{
+	qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
+	qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
+	qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
+}
+
+static void
+qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
+			  struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+{
+	struct dcbx_features *p_feat;
+
+	p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
+	qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+				   p_feat->app.app_pri_tbl, &p_feat->ets,
+				   p_feat->pfc, &params->local.params, false);
+	params->local.valid = true;
+}
+
+static void
+qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
+			   struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+{
+	struct dcbx_features *p_feat;
+
+	p_feat = &p_hwfn->p_dcbx_info->remote.features;
+	qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+				   p_feat->app.app_pri_tbl, &p_feat->ets,
+				   p_feat->pfc, &params->remote.params, false);
+	params->remote.valid = true;
+}
+
+static void
+qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
+				struct qed_ptt *p_ptt,
+				struct qed_dcbx_get *params)
+{
+	struct qed_dcbx_operational_params *p_operational;
+	struct qed_dcbx_results *p_results;
+	struct dcbx_features *p_feat;
+	bool enabled, err;
+	u32 flags;
+	bool val;
+
+	flags = p_hwfn->p_dcbx_info->operational.flags;
+
+	/* If DCBx version is non zero, then negotiation
+	 * was successfully performed
+	 */
+	p_operational = &params->operational;
+	enabled = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) !=
+		     DCBX_CONFIG_VERSION_DISABLED);
+	if (!enabled) {
+		p_operational->enabled = enabled;
+		p_operational->valid = false;
+		return;
+	}
+
+	p_feat = &p_hwfn->p_dcbx_info->operational.features;
+	p_results = &p_hwfn->p_dcbx_info->results;
+
+	val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+		 DCBX_CONFIG_VERSION_IEEE);
+	p_operational->ieee = val;
+	val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+		 DCBX_CONFIG_VERSION_CEE);
+	p_operational->cee = val;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Version support: ieee %d, cee %d\n",
+		   p_operational->ieee, p_operational->cee);
+
+	qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+				   p_feat->app.app_pri_tbl, &p_feat->ets,
+				   p_feat->pfc, &params->operational.params,
+				   p_operational->ieee);
+	qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results);
+	err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
+	p_operational->err = err;
+	p_operational->enabled = enabled;
+	p_operational->valid = true;
+}
+
+static void
+qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
+			       struct qed_ptt *p_ptt,
+			       struct qed_dcbx_get *params)
+{
+	struct lldp_config_params_s *p_local;
+
+	p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
+
+
memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id, + ARRAY_SIZE(p_local->local_chassis_id)); + memcpy(params->lldp_local.local_port_id, p_local->local_port_id, + ARRAY_SIZE(p_local->local_port_id)); +} + +static void +qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_dcbx_get *params) +{ + struct lldp_status_params_s *p_remote; + + p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE]; + + memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id, + ARRAY_SIZE(p_remote->peer_chassis_id)); + memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id, + ARRAY_SIZE(p_remote->peer_port_id)); +} + +static int +qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_dcbx_get *p_params, + enum qed_mib_read_type type) +{ + switch (type) { + case QED_DCBX_REMOTE_MIB: + qed_dcbx_get_remote_params(p_hwfn, p_ptt, p_params); + break; + case QED_DCBX_LOCAL_MIB: + qed_dcbx_get_local_params(p_hwfn, p_ptt, p_params); + break; + case QED_DCBX_OPERATIONAL_MIB: + qed_dcbx_get_operational_params(p_hwfn, p_ptt, p_params); + break; + case QED_DCBX_REMOTE_LLDP_MIB: + qed_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params); + break; + case QED_DCBX_LOCAL_LLDP_MIB: + qed_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params); + break; + default: + DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type); + return -EINVAL; + } + + return 0; +} +#endif + static int qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -561,3 +937,1377 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src, p_dcb_data = &p_dest->eth_dcb_data; qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH); } + +#ifdef CONFIG_DCB +static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn, + struct qed_dcbx_get *p_get, + enum qed_mib_read_type type) +{ + struct qed_ptt *p_ptt; + int rc; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type); + if (rc) + goto out; + + rc = qed_dcbx_get_params(p_hwfn, p_ptt, p_get, type); + +out: + qed_ptt_release(p_hwfn, p_ptt); + return rc; +} + +static void +qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn, + u32 *pfc, struct qed_dcbx_params *p_params) +{ + u8 pfc_map = 0; + int i; + + if (p_params->pfc.willing) + *pfc |= DCBX_PFC_WILLING_MASK; + else + *pfc &= ~DCBX_PFC_WILLING_MASK; + + if (p_params->pfc.enabled) + *pfc |= DCBX_PFC_ENABLED_MASK; + else + *pfc &= ~DCBX_PFC_ENABLED_MASK; + + *pfc &= ~DCBX_PFC_CAPS_MASK; + *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_SHIFT; + + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) + if (p_params->pfc.prio[i]) + pfc_map |= BIT(i); + + *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK; + *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT); + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc); +} + +static void +qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, + struct dcbx_ets_feature *p_ets, + struct qed_dcbx_params *p_params) +{ + u8 *bw_map, *tsa_map; + u32 val; + int i; + + if (p_params->ets_willing) + p_ets->flags |= DCBX_ETS_WILLING_MASK; + else + p_ets->flags &= ~DCBX_ETS_WILLING_MASK; + + if (p_params->ets_cbs) + p_ets->flags |= DCBX_ETS_CBS_MASK; + else + p_ets->flags &= ~DCBX_ETS_CBS_MASK; + + if (p_params->ets_enabled) + p_ets->flags |= DCBX_ETS_ENABLED_MASK; + else + p_ets->flags &= ~DCBX_ETS_ENABLED_MASK; + + p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK; + p_ets->flags |= (u32)p_params->max_ets_tc 
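/*
 * Every field update in these qed_dcbx_set_*_data() helpers uses the same
 * clear-then-set idiom so that neighbouring fields of the shared MFW word
 * are preserved, e.g. for the PFC capability field above:
 *
 *	*pfc &= ~DCBX_PFC_CAPS_MASK;
 *	*pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_SHIFT;
 */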
<< DCBX_ETS_MAX_TCS_SHIFT;
+
+	bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
+	tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
+	p_ets->pri_tc_tbl[0] = 0;
+	for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+		bw_map[i] = p_params->ets_tc_bw_tbl[i];
+		tsa_map[i] = p_params->ets_tc_tsa_tbl[i];
+		/* Copy the priority value to the corresponding 4 bits in the
+		 * traffic class table.
+		 */
+		val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
+		p_ets->pri_tc_tbl[0] |= val;
+	}
+	for (i = 0; i < 2; i++) {
+		p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]);
+		p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]);
+	}
+}
+
+static void
+qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
+		      struct dcbx_app_priority_feature *p_app,
+		      struct qed_dcbx_params *p_params, bool ieee)
+{
+	u32 *entry;
+	int i;
+
+	if (p_params->app_willing)
+		p_app->flags |= DCBX_APP_WILLING_MASK;
+	else
+		p_app->flags &= ~DCBX_APP_WILLING_MASK;
+
+	if (p_params->app_valid)
+		p_app->flags |= DCBX_APP_ENABLED_MASK;
+	else
+		p_app->flags &= ~DCBX_APP_ENABLED_MASK;
+
+	p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK;
+	p_app->flags |= (u32)p_params->num_app_entries <<
+			DCBX_APP_NUM_ENTRIES_SHIFT;
+
+	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+		entry = &p_app->app_pri_tbl[i].entry;
+		*entry = 0;
+		if (ieee) {
+			*entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
+			switch (p_params->app_entry[i].sf_ieee) {
+			case QED_DCBX_SF_IEEE_ETHTYPE:
+				*entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
+					   DCBX_APP_SF_IEEE_SHIFT);
+				*entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+					   DCBX_APP_SF_SHIFT);
+				break;
+			case QED_DCBX_SF_IEEE_TCP_PORT:
+				*entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
+					   DCBX_APP_SF_IEEE_SHIFT);
+				*entry |= ((u32)DCBX_APP_SF_PORT <<
+					   DCBX_APP_SF_SHIFT);
+				break;
+			case QED_DCBX_SF_IEEE_UDP_PORT:
+				*entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
+					   DCBX_APP_SF_IEEE_SHIFT);
+				*entry |= ((u32)DCBX_APP_SF_PORT <<
+					   DCBX_APP_SF_SHIFT);
+				break;
+			case QED_DCBX_SF_IEEE_TCP_UDP_PORT:
+				*entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
+					   DCBX_APP_SF_IEEE_SHIFT);
+				*entry |= ((u32)DCBX_APP_SF_PORT <<
+					   DCBX_APP_SF_SHIFT);
+				break;
+			}
+		} else {
+			*entry &= ~DCBX_APP_SF_MASK;
+			if (p_params->app_entry[i].ethtype)
+				*entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+					   DCBX_APP_SF_SHIFT);
+			else
+				*entry |= ((u32)DCBX_APP_SF_PORT <<
+					   DCBX_APP_SF_SHIFT);
+		}
+
+		*entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
+		*entry |= ((u32)p_params->app_entry[i].proto_id <<
+			   DCBX_APP_PROTOCOL_ID_SHIFT);
+		*entry &= ~DCBX_APP_PRI_MAP_MASK;
+		*entry |= ((u32)(p_params->app_entry[i].prio) <<
+			   DCBX_APP_PRI_MAP_SHIFT);
+	}
+}
+
+static void
+qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
+			  struct dcbx_local_params *local_admin,
+			  struct qed_dcbx_set *params)
+{
+	bool ieee = false;
+
+	local_admin->flags = 0;
+	memcpy(&local_admin->features,
+	       &p_hwfn->p_dcbx_info->operational.features,
+	       sizeof(local_admin->features));
+
+	if (params->enabled) {
+		local_admin->config = params->ver_num;
+		ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE);
+	} else {
+		local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
+	}
+
+	if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
+		qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
+				      &params->config.params);
+
+	if (params->override_flags & QED_DCBX_OVERRIDE_ETS_CFG)
+		qed_dcbx_set_ets_data(p_hwfn, &local_admin->features.ets,
+				      &params->config.params);
+
+	if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG)
+		qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
+				      &params->config.params, ieee);
+}
+
+int qed_dcbx_config_params(struct qed_hwfn
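/*
 * On the nibble packing in qed_dcbx_set_ets_data() above: pri_tc_tbl[0]
 * keeps one 4-bit TC per priority, priority 0 in the topmost nibble, via
 * val = tc << ((7 - i) * 4). With illustrative values, mapping priority 0
 * to TC 3 and priority 7 to TC 1 gives
 *
 *	pri_tc_tbl[0] = (3 << 28) | (1 << 0) = 0x30000001
 */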
*p_hwfn, struct qed_ptt *p_ptt,
+			   struct qed_dcbx_set *params, bool hw_commit)
+{
+	struct dcbx_local_params local_admin;
+	struct qed_dcbx_mib_meta_data data;
+	u32 resp = 0, param = 0;
+	int rc = 0;
+
+	if (!hw_commit) {
+		memcpy(&p_hwfn->p_dcbx_info->set, params,
+		       sizeof(struct qed_dcbx_set));
+		return 0;
+	}
+
+	/* clear set-params cache */
+	memset(&p_hwfn->p_dcbx_info->set, 0, sizeof(p_hwfn->p_dcbx_info->set));
+
+	memset(&local_admin, 0, sizeof(local_admin));
+	qed_dcbx_set_local_params(p_hwfn, &local_admin, params);
+
+	data.addr = p_hwfn->mcp_info->port_addr +
+		    offsetof(struct public_port, local_admin_dcbx_mib);
+	data.local_admin = &local_admin;
+	data.size = sizeof(struct dcbx_local_params);
+	qed_memcpy_to(p_hwfn, p_ptt, data.addr, data.local_admin, data.size);
+
+	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX,
+			 1 << DRV_MB_PARAM_LLDP_SEND_SHIFT, &resp, &param);
+	if (rc)
+		DP_NOTICE(p_hwfn, "Failed to send DCBX update request\n");
+
+	return rc;
+}
+
+int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
+			       struct qed_dcbx_set *params)
+{
+	struct qed_dcbx_get *dcbx_info;
+	int rc;
+
+	if (p_hwfn->p_dcbx_info->set.config.valid) {
+		memcpy(params, &p_hwfn->p_dcbx_info->set,
+		       sizeof(struct qed_dcbx_set));
+		return 0;
+	}
+
+	dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
+	if (!dcbx_info) {
+		DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n");
+		return -ENOMEM;
+	}
+
+	rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
+	if (rc) {
+		kfree(dcbx_info);
+		return rc;
+	}
+
+	p_hwfn->p_dcbx_info->set.override_flags = 0;
+	p_hwfn->p_dcbx_info->set.ver_num = DCBX_CONFIG_VERSION_DISABLED;
+	if (dcbx_info->operational.cee)
+		p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE;
+	if (dcbx_info->operational.ieee)
+		p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+
+	p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+	memcpy(&p_hwfn->p_dcbx_info->set.config.params,
+	       &dcbx_info->operational.params,
+	       sizeof(struct qed_dcbx_admin_params));
+	p_hwfn->p_dcbx_info->set.config.valid = true;
+
+	memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
+
+	kfree(dcbx_info);
+
+	return 0;
+}
+
+static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
+					       enum qed_mib_read_type type)
+{
+	struct qed_dcbx_get *dcbx_info;
+
+	dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
+	if (!dcbx_info) {
+		DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
+		return NULL;
+	}
+
+	if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
+		kfree(dcbx_info);
+		return NULL;
+	}
+
+	if ((type == QED_DCBX_OPERATIONAL_MIB) &&
+	    !dcbx_info->operational.enabled) {
+		DP_INFO(hwfn, "DCBX is not enabled/operational\n");
+		kfree(dcbx_info);
+		return NULL;
+	}
+
+	return dcbx_info;
+}
+
+static u8 qed_dcbnl_getstate(struct qed_dev *cdev)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	struct qed_dcbx_get *dcbx_info;
+	bool enabled;
+
+	dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+	if (!dcbx_info)
+		return 0;
+
+	enabled = dcbx_info->operational.enabled;
+	DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", enabled);
+	kfree(dcbx_info);
+
+	return enabled;
+}
+
+static u8 qed_dcbnl_setstate(struct qed_dev *cdev, u8 state)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	struct qed_dcbx_set dcbx_set;
+	struct qed_ptt *ptt;
+	int rc;
+
+	DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", state);
+
+	memset(&dcbx_set, 0, sizeof(dcbx_set));
+	rc = qed_dcbx_get_config_params(hwfn,
&dcbx_set); + if (rc) + return 1; + + dcbx_set.enabled = !!state; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return 1; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return rc ? 1 : 0; +} + +static void qed_dcbnl_getpgtccfgtx(struct qed_dev *cdev, int tc, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "tc = %d\n", tc); + *prio_type = *pgid = *bw_pct = *up_map = 0; + if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid tc %d\n", tc); + return; + } + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return; + + *pgid = dcbx_info->operational.params.ets_pri_tc_tbl[tc]; + kfree(dcbx_info); +} + +static void qed_dcbnl_getpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 *bw_pct) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + + *bw_pct = 0; + DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d\n", pgid); + if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid pgid %d\n", pgid); + return; + } + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return; + + *bw_pct = dcbx_info->operational.params.ets_tc_bw_tbl[pgid]; + DP_VERBOSE(hwfn, QED_MSG_DCB, "bw_pct = %d\n", *bw_pct); + kfree(dcbx_info); +} + +static void qed_dcbnl_getpgtccfgrx(struct qed_dev *cdev, int tc, u8 *prio, + u8 *bwg_id, u8 *bw_pct, u8 *up_map) +{ + DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n"); + *prio = *bwg_id = *bw_pct = *up_map = 0; +} + +static void qed_dcbnl_getpgbwgcfgrx(struct qed_dev *cdev, + int bwg_id, u8 *bw_pct) +{ + DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n"); + *bw_pct = 0; +} + +static void qed_dcbnl_getpfccfg(struct qed_dev *cdev, + int priority, u8 *setting) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d\n", priority); + if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid priority %d\n", priority); + return; + } + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return; + + *setting = dcbx_info->operational.params.pfc.prio[priority]; + DP_VERBOSE(hwfn, QED_MSG_DCB, "setting = %d\n", *setting); + kfree(dcbx_info); +} + +static void qed_dcbnl_setpfccfg(struct qed_dev *cdev, int priority, u8 setting) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d setting = %d\n", + priority, setting); + if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid priority %d\n", priority); + return; + } + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; + dcbx_set.config.params.pfc.prio[priority] = !!setting; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); +} + +static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + int rc = 0; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "capid = %d\n", capid); + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return 1; + + 
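/*
 * Note the dcbnl return conventions in these callbacks: the CEE ops
 * (getstate/setstate, getcap, ...) report failure as a non-zero u8 rather
 * than a negative errno, hence the "return 1" paths above, while
 * getnumtcs() below returns -EINVAL like an ordinary kernel API.
 */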
switch (capid) { + case DCB_CAP_ATTR_PG: + case DCB_CAP_ATTR_PFC: + case DCB_CAP_ATTR_UP2TC: + case DCB_CAP_ATTR_GSP: + *cap = true; + break; + case DCB_CAP_ATTR_PG_TCS: + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_DCBX: + *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE | + DCB_CAP_DCBX_VER_IEEE); + break; + default: + *cap = false; + rc = 1; + } + + DP_VERBOSE(hwfn, QED_MSG_DCB, "id = %d caps = %d\n", capid, *cap); + kfree(dcbx_info); + + return rc; +} + +static int qed_dcbnl_getnumtcs(struct qed_dev *cdev, int tcid, u8 *num) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + int rc = 0; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d\n", tcid); + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + switch (tcid) { + case DCB_NUMTCS_ATTR_PG: + *num = dcbx_info->operational.params.max_ets_tc; + break; + case DCB_NUMTCS_ATTR_PFC: + *num = dcbx_info->operational.params.pfc.max_tc; + break; + default: + rc = -EINVAL; + } + + kfree(dcbx_info); + DP_VERBOSE(hwfn, QED_MSG_DCB, "numtcs = %d\n", *num); + + return rc; +} + +static u8 qed_dcbnl_getpfcstate(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + bool enabled; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return 0; + + enabled = dcbx_info->operational.params.pfc.enabled; + DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d\n", enabled); + kfree(dcbx_info); + + return enabled; +} + +static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + u8 mode = 0; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return 0; + + if (dcbx_info->operational.enabled) + mode |= DCB_CAP_DCBX_LLD_MANAGED; + if (dcbx_info->operational.ieee) + mode |= DCB_CAP_DCBX_VER_IEEE; + if (dcbx_info->operational.cee) + mode |= DCB_CAP_DCBX_VER_CEE; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "dcb mode = %d\n", mode); + kfree(dcbx_info); + + return mode; +} + +static void qed_dcbnl_setpgtccfgtx(struct qed_dev *cdev, + int tc, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, + "tc = %d pri_type = %d pgid = %d bw_pct = %d up_map = %d\n", + tc, pri_type, pgid, bw_pct, up_map); + + if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid tc %d\n", tc); + return; + } + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; + dcbx_set.config.params.ets_pri_tc_tbl[tc] = pgid; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); +} + +static void qed_dcbnl_setpgtccfgrx(struct qed_dev *cdev, int prio, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map) +{ + DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n"); +} + +static void qed_dcbnl_setpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 bw_pct) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d bw_pct = %d\n", pgid, bw_pct); + if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid pgid %d\n", pgid); + return; + } + 
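/* The set-callbacks below all share a read-modify-write sequence: fetch
 * the current admin config, flag which block is being overridden, tweak
 * the field, then commit under a ptt. Roughly:
 *
 *	qed_dcbx_get_config_params(hwfn, &dcbx_set);
 *	dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
 *	dcbx_set.config.params.ets_tc_bw_tbl[pgid] = bw_pct;
 *	ptt = qed_ptt_acquire(hwfn);
 *	qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
 *	qed_ptt_release(hwfn, ptt);
 */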
+ memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; + dcbx_set.config.params.ets_tc_bw_tbl[pgid] = bw_pct; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); +} + +static void qed_dcbnl_setpgbwgcfgrx(struct qed_dev *cdev, int pgid, u8 bw_pct) +{ + DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n"); +} + +static u8 qed_dcbnl_setall(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return 1; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return 1; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 1); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static int qed_dcbnl_setnumtcs(struct qed_dev *cdev, int tcid, u8 num) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d num = %d\n", tcid, num); + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return 1; + + switch (tcid) { + case DCB_NUMTCS_ATTR_PG: + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; + dcbx_set.config.params.max_ets_tc = num; + break; + case DCB_NUMTCS_ATTR_PFC: + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; + dcbx_set.config.params.pfc.max_tc = num; + break; + default: + DP_INFO(hwfn, "Invalid tcid %d\n", tcid); + return -EINVAL; + } + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EINVAL; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return 0; +} + +static void qed_dcbnl_setpfcstate(struct qed_dev *cdev, u8 state) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "new state = %d\n", state); + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; + dcbx_set.config.params.pfc.enabled = !!state; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); +} + +static int qed_dcbnl_getapp(struct qed_dev *cdev, u8 idtype, u16 idval) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_app_entry *entry; + bool ethtype; + u8 prio = 0; + int i; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE); + for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { + entry = &dcbx_info->operational.params.app_entry[i]; + if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) { + prio = entry->prio; + break; + } + } + + if (i == QED_DCBX_MAX_APP_PROTOCOL) { + DP_ERR(cdev, "App entry (%d, %d) not found\n", idtype, idval); + kfree(dcbx_info); + return -EINVAL; + } + + kfree(dcbx_info); + + return prio; +} + +static int qed_dcbnl_setapp(struct qed_dev *cdev, + u8 idtype, u16 idval, u8 pri_map) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_app_entry *entry; + struct qed_ptt *ptt; + bool ethtype; + 
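/* App-table lookup below: an existing (ethtype, proto_id) match is
 * updated in place; otherwise the first slot with proto_id == 0 is
 * treated as free and num_app_entries is bumped; a full table yields
 * -EBUSY. */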
int rc, i; + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return -EINVAL; + + ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE); + for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { + entry = &dcbx_set.config.params.app_entry[i]; + if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) + break; + /* First empty slot */ + if (!entry->proto_id) { + dcbx_set.config.params.num_app_entries++; + break; + } + } + + if (i == QED_DCBX_MAX_APP_PROTOCOL) { + DP_ERR(cdev, "App table is full\n"); + return -EBUSY; + } + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG; + dcbx_set.config.params.app_entry[i].ethtype = ethtype; + dcbx_set.config.params.app_entry[i].proto_id = idval; + dcbx_set.config.params.app_entry[i].prio = pri_map; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EBUSY; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "new mode = %x\n", mode); + + if (!(mode & DCB_CAP_DCBX_VER_IEEE) && !(mode & DCB_CAP_DCBX_VER_CEE)) { + DP_INFO(hwfn, "Allowed mode is cee, ieee or both\n"); + return 1; + } + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return 1; + + dcbx_set.ver_num = 0; + if (mode & DCB_CAP_DCBX_VER_CEE) { + dcbx_set.ver_num |= DCBX_CONFIG_VERSION_CEE; + dcbx_set.enabled = true; + } + + if (mode & DCB_CAP_DCBX_VER_IEEE) { + dcbx_set.ver_num |= DCBX_CONFIG_VERSION_IEEE; + dcbx_set.enabled = true; + } + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return 1; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return 0; +} + +static u8 qed_dcbnl_getfeatcfg(struct qed_dev *cdev, int featid, u8 *flags) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "Feature id = %d\n", featid); + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return 1; + + *flags = 0; + switch (featid) { + case DCB_FEATCFG_ATTR_PG: + if (dcbx_info->operational.params.ets_enabled) + *flags = DCB_FEATCFG_ENABLE; + else + *flags = DCB_FEATCFG_ERROR; + break; + case DCB_FEATCFG_ATTR_PFC: + if (dcbx_info->operational.params.pfc.enabled) + *flags = DCB_FEATCFG_ENABLE; + else + *flags = DCB_FEATCFG_ERROR; + break; + case DCB_FEATCFG_ATTR_APP: + if (dcbx_info->operational.params.app_valid) + *flags = DCB_FEATCFG_ENABLE; + else + *flags = DCB_FEATCFG_ERROR; + break; + default: + DP_INFO(hwfn, "Invalid feature-ID %d\n", featid); + kfree(dcbx_info); + return 1; + } + + DP_VERBOSE(hwfn, QED_MSG_DCB, "flags = %d\n", *flags); + kfree(dcbx_info); + + return 0; +} + +static u8 qed_dcbnl_setfeatcfg(struct qed_dev *cdev, int featid, u8 flags) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + bool enabled, willing; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "featid = %d flags = %d\n", + featid, flags); + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return 1; + + enabled = !!(flags & DCB_FEATCFG_ENABLE); + willing = !!(flags & DCB_FEATCFG_WILLING); + switch (featid) { + case DCB_FEATCFG_ATTR_PG: + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; + 
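/* Only the DCB_FEATCFG_ENABLE and DCB_FEATCFG_WILLING bits of 'flags'
 * are consumed here (decoded into 'enabled'/'willing' before the
 * switch); other dcbnl featcfg flags are not honoured by this
 * implementation. */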
dcbx_set.config.params.ets_enabled = enabled;
+		dcbx_set.config.params.ets_willing = willing;
+		break;
+	case DCB_FEATCFG_ATTR_PFC:
+		dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+		dcbx_set.config.params.pfc.enabled = enabled;
+		dcbx_set.config.params.pfc.willing = willing;
+		break;
+	case DCB_FEATCFG_ATTR_APP:
+		dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+		dcbx_set.config.params.app_willing = willing;
+		break;
+	default:
+		DP_INFO(hwfn, "Invalid feature-ID %d\n", featid);
+		return 1;
+	}
+
+	ptt = qed_ptt_acquire(hwfn);
+	if (!ptt)
+		return 1;
+
+	rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+	qed_ptt_release(hwfn, ptt);
+
+	return 0;
+}
+
+static int qed_dcbnl_peer_getappinfo(struct qed_dev *cdev,
+				     struct dcb_peer_app_info *info,
+				     u16 *app_count)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	struct qed_dcbx_get *dcbx_info;
+
+	dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+	if (!dcbx_info)
+		return -EINVAL;
+
+	info->willing = dcbx_info->remote.params.app_willing;
+	info->error = dcbx_info->remote.params.app_error;
+	*app_count = dcbx_info->remote.params.num_app_entries;
+	kfree(dcbx_info);
+
+	return 0;
+}
+
+static int qed_dcbnl_peer_getapptable(struct qed_dev *cdev,
+				      struct dcb_app *table)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	struct qed_dcbx_get *dcbx_info;
+	int i;
+
+	dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+	if (!dcbx_info)
+		return -EINVAL;
+
+	for (i = 0; i < dcbx_info->remote.params.num_app_entries; i++) {
+		if (dcbx_info->remote.params.app_entry[i].ethtype)
+			table[i].selector = DCB_APP_IDTYPE_ETHTYPE;
+		else
+			table[i].selector = DCB_APP_IDTYPE_PORTNUM;
+		table[i].priority = dcbx_info->remote.params.app_entry[i].prio;
+		table[i].protocol =
+		    dcbx_info->remote.params.app_entry[i].proto_id;
+	}
+
+	kfree(dcbx_info);
+
+	return 0;
+}
+
+static int qed_dcbnl_cee_peer_getpfc(struct qed_dev *cdev, struct cee_pfc *pfc)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	struct qed_dcbx_get *dcbx_info;
+	int i;
+
+	dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+	if (!dcbx_info)
+		return -EINVAL;
+
+	for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+		if (dcbx_info->remote.params.pfc.prio[i])
+			pfc->pfc_en |= BIT(i);
+
+	pfc->tcs_supported = dcbx_info->remote.params.pfc.max_tc;
+	DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d tcs_supported = %d\n",
+		   pfc->pfc_en, pfc->tcs_supported);
+	kfree(dcbx_info);
+
+	return 0;
+}
+
+static int qed_dcbnl_cee_peer_getpg(struct qed_dev *cdev, struct cee_pg *pg)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	struct qed_dcbx_get *dcbx_info;
+	int i;
+
+	dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+	if (!dcbx_info)
+		return -EINVAL;
+
+	pg->willing = dcbx_info->remote.params.ets_willing;
+	for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+		pg->pg_bw[i] = dcbx_info->remote.params.ets_tc_bw_tbl[i];
+		pg->prio_pg[i] = dcbx_info->remote.params.ets_pri_tc_tbl[i];
+	}
+
+	DP_VERBOSE(hwfn, QED_MSG_DCB, "willing = %d", pg->willing);
+	kfree(dcbx_info);
+
+	return 0;
+}
+
+static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev,
+				  struct ieee_pfc *pfc, bool remote)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	struct qed_dcbx_params *params;
+	struct qed_dcbx_get *dcbx_info;
+	int rc, i;
+
+	dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+	if (!dcbx_info)
+		return -EINVAL;
+
+	if (!dcbx_info->operational.ieee) {
+		DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+		kfree(dcbx_info);
+		return -EINVAL;
+	}
+
+	if 
(remote) { + memset(dcbx_info, 0, sizeof(*dcbx_info)); + rc = qed_dcbx_query_params(hwfn, dcbx_info, + QED_DCBX_REMOTE_MIB); + if (rc) { + kfree(dcbx_info); + return -EINVAL; + } + + params = &dcbx_info->remote.params; + } else { + params = &dcbx_info->operational.params; + } + + pfc->pfc_cap = params->pfc.max_tc; + pfc->pfc_en = 0; + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) + if (params->pfc.prio[i]) + pfc->pfc_en |= BIT(i); + + kfree(dcbx_info); + + return 0; +} + +static int qed_dcbnl_ieee_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc) +{ + return qed_dcbnl_get_ieee_pfc(cdev, pfc, false); +} + +static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc, i; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + if (!dcbx_info->operational.ieee) { + DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); + return -EINVAL; + } + + kfree(dcbx_info); + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return -EINVAL; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) + dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i)); + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EINVAL; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static int qed_dcbnl_get_ieee_ets(struct qed_dev *cdev, + struct ieee_ets *ets, bool remote) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_dcbx_params *params; + int rc; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + if (!dcbx_info->operational.ieee) { + DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); + return -EINVAL; + } + + if (remote) { + memset(dcbx_info, 0, sizeof(*dcbx_info)); + rc = qed_dcbx_query_params(hwfn, dcbx_info, + QED_DCBX_REMOTE_MIB); + if (rc) { + kfree(dcbx_info); + return -EINVAL; + } + + params = &dcbx_info->remote.params; + } else { + params = &dcbx_info->operational.params; + } + + ets->ets_cap = params->max_ets_tc; + ets->willing = params->ets_willing; + ets->cbs = params->ets_cbs; + memcpy(ets->tc_tx_bw, params->ets_tc_bw_tbl, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_tsa, params->ets_tc_tsa_tbl, sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, params->ets_pri_tc_tbl, sizeof(ets->prio_tc)); + kfree(dcbx_info); + + return 0; +} + +static int qed_dcbnl_ieee_getets(struct qed_dev *cdev, struct ieee_ets *ets) +{ + return qed_dcbnl_get_ieee_ets(cdev, ets, false); +} + +static int qed_dcbnl_ieee_setets(struct qed_dev *cdev, struct ieee_ets *ets) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + if (!dcbx_info->operational.ieee) { + DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); + return -EINVAL; + } + + kfree(dcbx_info); + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return -EINVAL; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; + 
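/* The dcbnl-side sizeof()s drive the memcpy lengths below, which
 * presumes the qed ets tables hold at least as many entries as the
 * ieee_ets arrays (IEEE_8021QAZ_MAX_TCS). */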
dcbx_set.config.params.max_ets_tc = ets->ets_cap; + dcbx_set.config.params.ets_willing = ets->willing; + dcbx_set.config.params.ets_cbs = ets->cbs; + memcpy(dcbx_set.config.params.ets_tc_bw_tbl, ets->tc_tx_bw, + sizeof(ets->tc_tx_bw)); + memcpy(dcbx_set.config.params.ets_tc_tsa_tbl, ets->tc_tsa, + sizeof(ets->tc_tsa)); + memcpy(dcbx_set.config.params.ets_pri_tc_tbl, ets->prio_tc, + sizeof(ets->prio_tc)); + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EINVAL; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +int qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets) +{ + return qed_dcbnl_get_ieee_ets(cdev, ets, true); +} + +int qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc) +{ + return qed_dcbnl_get_ieee_pfc(cdev, pfc, true); +} + +int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_app_entry *entry; + bool ethtype; + u8 prio = 0; + int i; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + if (!dcbx_info->operational.ieee) { + DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); + return -EINVAL; + } + + /* ieee defines the selector field value for ethertype to be 1 */ + ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE); + for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { + entry = &dcbx_info->operational.params.app_entry[i]; + if ((entry->ethtype == ethtype) && + (entry->proto_id == app->protocol)) { + prio = entry->prio; + break; + } + } + + if (i == QED_DCBX_MAX_APP_PROTOCOL) { + DP_ERR(cdev, "App entry (%d, %d) not found\n", app->selector, + app->protocol); + kfree(dcbx_info); + return -EINVAL; + } + + app->priority = ffs(prio) - 1; + + kfree(dcbx_info); + + return 0; +} + +int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_dcbx_set dcbx_set; + struct qed_app_entry *entry; + struct qed_ptt *ptt; + bool ethtype; + int rc, i; + + if (app->priority < 0 || app->priority >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid priority %d\n", app->priority); + return -EINVAL; + } + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + if (!dcbx_info->operational.ieee) { + DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); + return -EINVAL; + } + + kfree(dcbx_info); + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return -EINVAL; + + /* ieee defines the selector field value for ethertype to be 1 */ + ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE); + for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { + entry = &dcbx_set.config.params.app_entry[i]; + if ((entry->ethtype == ethtype) && + (entry->proto_id == app->protocol)) + break; + /* First empty slot */ + if (!entry->proto_id) { + dcbx_set.config.params.num_app_entries++; + break; + } + } + + if (i == QED_DCBX_MAX_APP_PROTOCOL) { + DP_ERR(cdev, "App table is full\n"); + return -EBUSY; + } + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG; + dcbx_set.config.params.app_entry[i].ethtype = ethtype; + dcbx_set.config.params.app_entry[i].proto_id = app->protocol; + dcbx_set.config.params.app_entry[i].prio = BIT(app->priority); + + ptt = 
qed_ptt_acquire(hwfn); + if (!ptt) + return -EBUSY; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass = { + .getstate = qed_dcbnl_getstate, + .setstate = qed_dcbnl_setstate, + .getpgtccfgtx = qed_dcbnl_getpgtccfgtx, + .getpgbwgcfgtx = qed_dcbnl_getpgbwgcfgtx, + .getpgtccfgrx = qed_dcbnl_getpgtccfgrx, + .getpgbwgcfgrx = qed_dcbnl_getpgbwgcfgrx, + .getpfccfg = qed_dcbnl_getpfccfg, + .setpfccfg = qed_dcbnl_setpfccfg, + .getcap = qed_dcbnl_getcap, + .getnumtcs = qed_dcbnl_getnumtcs, + .getpfcstate = qed_dcbnl_getpfcstate, + .getdcbx = qed_dcbnl_getdcbx, + .setpgtccfgtx = qed_dcbnl_setpgtccfgtx, + .setpgtccfgrx = qed_dcbnl_setpgtccfgrx, + .setpgbwgcfgtx = qed_dcbnl_setpgbwgcfgtx, + .setpgbwgcfgrx = qed_dcbnl_setpgbwgcfgrx, + .setall = qed_dcbnl_setall, + .setnumtcs = qed_dcbnl_setnumtcs, + .setpfcstate = qed_dcbnl_setpfcstate, + .setapp = qed_dcbnl_setapp, + .setdcbx = qed_dcbnl_setdcbx, + .setfeatcfg = qed_dcbnl_setfeatcfg, + .getfeatcfg = qed_dcbnl_getfeatcfg, + .getapp = qed_dcbnl_getapp, + .peer_getappinfo = qed_dcbnl_peer_getappinfo, + .peer_getapptable = qed_dcbnl_peer_getapptable, + .cee_peer_getpfc = qed_dcbnl_cee_peer_getpfc, + .cee_peer_getpg = qed_dcbnl_cee_peer_getpg, + .ieee_getpfc = qed_dcbnl_ieee_getpfc, + .ieee_setpfc = qed_dcbnl_ieee_setpfc, + .ieee_getets = qed_dcbnl_ieee_getets, + .ieee_setets = qed_dcbnl_ieee_setets, + .ieee_peer_getpfc = qed_dcbnl_ieee_peer_getpfc, + .ieee_peer_getets = qed_dcbnl_ieee_peer_getets, + .ieee_getapp = qed_dcbnl_ieee_getapp, + .ieee_setapp = qed_dcbnl_ieee_setapp, +}; + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h index e7f834dbd..9ba681643 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -33,6 +33,24 @@ struct qed_dcbx_app_data { u8 tc; /* Traffic Class */ }; +#ifdef CONFIG_DCB +#define QED_DCBX_VERSION_DISABLED 0 +#define QED_DCBX_VERSION_IEEE 1 +#define QED_DCBX_VERSION_CEE 2 + +struct qed_dcbx_set { +#define QED_DCBX_OVERRIDE_STATE BIT(0) +#define QED_DCBX_OVERRIDE_PFC_CFG BIT(1) +#define QED_DCBX_OVERRIDE_ETS_CFG BIT(2) +#define QED_DCBX_OVERRIDE_APP_CFG BIT(3) +#define QED_DCBX_OVERRIDE_DSCP_CFG BIT(4) + u32 override_flags; + bool enabled; + struct qed_dcbx_admin_params config; + u32 ver_num; +}; +#endif + struct qed_dcbx_results { bool dcbx_enabled; u8 pf_id; @@ -55,6 +73,9 @@ struct qed_dcbx_info { struct qed_dcbx_results results; struct dcbx_mib operational; struct dcbx_mib remote; +#ifdef CONFIG_DCB + struct qed_dcbx_set set; +#endif u8 dcbx_cap; }; @@ -67,6 +88,13 @@ struct qed_dcbx_mib_meta_data { u32 addr; }; +#ifdef CONFIG_DCB +int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *); + +int qed_dcbx_config_params(struct qed_hwfn *, + struct qed_ptt *, struct qed_dcbx_set *, bool); +#endif + /* QED local interface routines */ int qed_dcbx_mib_update_event(struct qed_hwfn *, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 2d89e8c16..0e4f4a930 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -17,6 +17,7 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/string.h> +#include <linux/vmalloc.h> #include <linux/etherdevice.h> #include <linux/qed/qed_chain.h> #include <linux/qed/qed_if.h> @@ -160,9 +161,13 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool 
b_sleepable)
 	u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 	struct init_qm_port_params *p_qm_port;
+	bool init_rdma_offload_pq = false;
+	bool init_pure_ack_pq = false;
+	bool init_ooo_pq = false;
 	u16 num_pqs, multi_cos_tcs = 1;
 	u8 pf_wfq = qm_info->pf_wfq;
 	u32 pf_rl = qm_info->pf_rl;
+	u16 num_pf_rls = 0;
 	u16 num_vfs = 0;
 
 #ifdef CONFIG_QED_SRIOV
@@ -174,6 +179,25 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
 	num_pqs = multi_cos_tcs + num_vfs + 1;	/* The '1' is for pure-LB */
 	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
 
+	if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+		num_pqs++;	/* for RoCE queue */
+		init_rdma_offload_pq = true;
+		/* we subtract num_vfs because each requires a rate limiter,
+		 * and one default rate limiter
+		 */
+		if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn)
+			num_pf_rls = RESC_NUM(p_hwfn, QED_RL) - num_vfs - 1;
+
+		num_pqs += num_pf_rls;
+		qm_info->num_pf_rls = (u8) num_pf_rls;
+	}
+
+	if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+		num_pqs += 2;	/* for iSCSI pure-ACK / OOO queue */
+		init_pure_ack_pq = true;
+		init_ooo_pq = true;
+	}
+
 	/* Sanity checking that setup requires legal number of resources */
 	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
 		DP_ERR(p_hwfn,
@@ -211,12 +235,22 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
 	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
 
+	/* First init rate limited queues */
+	for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
+		qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
+		qm_info->qm_pq_params[curr_queue].tc_id =
+		    p_hwfn->hw_info.non_offload_tc;
+		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+		qm_info->qm_pq_params[curr_queue].rl_valid = 1;
+	}
+
 	/* First init per-TC PQs */
 	for (i = 0; i < multi_cos_tcs; i++) {
 		struct init_qm_pq_params *params =
 		    &qm_info->qm_pq_params[curr_queue++];
 
-		if (p_hwfn->hw_info.personality == QED_PCI_ETH) {
+		if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
+		    p_hwfn->hw_info.personality == QED_PCI_ETH) {
 			params->vport_id = vport_id;
 			params->tc_id = p_hwfn->hw_info.non_offload_tc;
 			params->wrr_group = 1;
@@ -236,6 +270,32 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
 	curr_queue++;
 
 	qm_info->offload_pq = 0;
+	if (init_rdma_offload_pq) {
+		qm_info->offload_pq = curr_queue;
+		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+		qm_info->qm_pq_params[curr_queue].tc_id =
+		    p_hwfn->hw_info.offload_tc;
+		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+		curr_queue++;
+	}
+
+	if (init_pure_ack_pq) {
+		qm_info->pure_ack_pq = curr_queue;
+		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+		qm_info->qm_pq_params[curr_queue].tc_id =
+		    p_hwfn->hw_info.offload_tc;
+		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+		curr_queue++;
+	}
+
+	if (init_ooo_pq) {
+		qm_info->ooo_pq = curr_queue;
+		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+		qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
+		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+		curr_queue++;
+	}
+
 	/* Then init per-VF PQs */
 	vf_offset = curr_queue;
 	for (i = 0; i < num_vfs; i++) {
@@ -244,6 +304,7 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
 		qm_info->qm_pq_params[curr_queue].tc_id =
 		    p_hwfn->hw_info.non_offload_tc;
 		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+		qm_info->qm_pq_params[curr_queue].rl_valid = 1;
 		curr_queue++;
 	}
 
@@ -256,7 +317,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool 
b_sleepable) for (i = 0; i < num_ports; i++) { p_qm_port = &qm_info->qm_port_params[i]; p_qm_port->active = 1; - p_qm_port->num_active_phys_tcs = 4; + if (num_ports == 4) + p_qm_port->active_phys_tcs = 0x7; + else + p_qm_port->active_phys_tcs = 0x9f; p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports; p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; } @@ -366,21 +430,20 @@ int qed_resc_alloc(struct qed_dev *cdev) if (!p_hwfn->p_tx_cids) { DP_NOTICE(p_hwfn, "Failed to allocate memory for Tx Cids\n"); - rc = -ENOMEM; - goto alloc_err; + goto alloc_no_mem; } p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL); if (!p_hwfn->p_rx_cids) { DP_NOTICE(p_hwfn, "Failed to allocate memory for Rx Cids\n"); - rc = -ENOMEM; - goto alloc_err; + goto alloc_no_mem; } } for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + u32 n_eqes, num_cons; /* First allocate the context manager structure */ rc = qed_cxt_mngr_alloc(p_hwfn); @@ -429,18 +492,35 @@ int qed_resc_alloc(struct qed_dev *cdev) goto alloc_err; /* EQ */ - p_eq = qed_eq_alloc(p_hwfn, 256); - if (!p_eq) { - rc = -ENOMEM; + n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain); + if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) { + num_cons = qed_cxt_get_proto_cid_count(p_hwfn, + PROTOCOLID_ROCE, + 0) * 2; + n_eqes += num_cons + 2 * MAX_NUM_VFS_BB; + } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { + num_cons = + qed_cxt_get_proto_cid_count(p_hwfn, + PROTOCOLID_ISCSI, 0); + n_eqes += 2 * num_cons; + } + + if (n_eqes > 0xFFFF) { + DP_ERR(p_hwfn, + "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n", + n_eqes, 0xFFFF); + rc = -EINVAL; goto alloc_err; } + + p_eq = qed_eq_alloc(p_hwfn, (u16) n_eqes); + if (!p_eq) + goto alloc_no_mem; p_hwfn->p_eq = p_eq; p_consq = qed_consq_alloc(p_hwfn); - if (!p_consq) { - rc = -ENOMEM; - goto alloc_err; - } + if (!p_consq) + goto alloc_no_mem; p_hwfn->p_consq = p_consq; /* DMA info initialization */ @@ -469,6 +549,8 @@ int qed_resc_alloc(struct qed_dev *cdev) return 0; +alloc_no_mem: + rc = -ENOMEM; alloc_err: qed_resc_free(cdev); return rc; @@ -634,6 +716,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct qed_qm_common_rt_init_params params; struct qed_dev *cdev = p_hwfn->cdev; + u16 num_pfs, pf_id; u32 concrete_fid; int rc = 0; u8 vf_id; @@ -682,9 +765,16 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); - /* Disable relaxed ordering in the PCI config space */ - qed_wr(p_hwfn, p_ptt, 0x20b4, - qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10); + if (QED_IS_BB(p_hwfn->cdev)) { + num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev); + for (pf_id = 0; pf_id < num_pfs; pf_id++) { + qed_fid_pretend(p_hwfn, p_ptt, pf_id); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); + } + /* pretend to original PF */ + qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); + } for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) { concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); @@ -703,8 +793,31 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn, { int rc = 0; - rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, - hw_mode); + rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode); + if (rc != 0) + return rc; + + if (hw_mode & (1 << MODE_MF_SI)) { + u8 pf_id = 0; + + if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) { + 
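/* MODE_MF_SI is the switch-independent multi-function mode; only the
 * first Ethernet PF on the engine gets to program the default LLH
 * vector below. */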
DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+				   "PF[%08x] is first eth on engine\n",
+				   pf_id);
+
+			/* We should have configured BIT for ppfid, i.e., the
+			 * relative function number in the port. But there's a
+			 * bug in LLH in BB where the ppfid is actually engine
+			 * based, so we need to take this into account.
+			 */
+			qed_wr(p_hwfn, p_ptt,
+			       NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
+		}
+
+		/* Take the protocol-based hit vector if there is a hit,
+		 * otherwise take the other vector.
+		 */
+		qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2);
+	}
 
 	return rc;
 }
@@ -751,7 +864,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
 	}
 
 	/* Protocol Configuration */
-	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
+	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
+		     (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
@@ -773,6 +887,21 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
 	/* Pure runtime initializations - directly to the HW  */
 	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
 
+	if (hw_mode & (1 << MODE_MF_SI)) {
+		u8 pf_id = 0;
+		u32 val = 0;
+
+		if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
+			if (p_hwfn->rel_pf_id == pf_id) {
+				DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+					   "PF[%d] is first ETH on engine\n",
+					   pf_id);
+				val = 1;
+			}
+			qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val);
+		}
+	}
+
 	if (b_hw_start) {
 		/* enable interrupts */
 		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
@@ -1213,8 +1342,9 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
 		   num_features);
 }
 
-static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
 {
+	u8 enabled_func_idx = p_hwfn->enabled_func_idx;
 	u32 *resc_start = p_hwfn->hw_info.resc_start;
 	u8 num_funcs = p_hwfn->num_funcs_on_engine;
 	u32 *resc_num = p_hwfn->hw_info.resc_num;
@@ -1238,14 +1368,22 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
 	resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
 	resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
 	resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
-	resc_num[QED_RL] = 8;
+	resc_num[QED_RL] = min_t(u32, 64, resc_num[QED_VPORT]);
 	resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
 	resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
 			     num_funcs;
-	resc_num[QED_ILT] = 950;
+	resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs;
 
 	for (i = 0; i < QED_MAX_RESC; i++)
-		resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
+		resc_start[i] = resc_num[i] * enabled_func_idx;
+
+	/* Sanity for ILT */
+	if (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB) {
+		DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
+			  RESC_START(p_hwfn, QED_ILT),
+			  RESC_END(p_hwfn, QED_ILT) - 1);
+		return -EINVAL;
+	}
 
 	qed_hw_set_feat(p_hwfn);
 
@@ -1275,6 +1413,8 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
 		   p_hwfn->hw_info.resc_start[QED_VLAN],
 		   p_hwfn->hw_info.resc_num[QED_ILT],
 		   p_hwfn->hw_info.resc_start[QED_ILT]);
+
+	return 0;
 }
 
 static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
@@ -1304,31 +1444,31 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
 	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
 		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
-	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
 		break;
-	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
+	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: 
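/* Note the rename pattern in this hunk: the DE_-prefixed NVM port-mode
 * constants become BB_-prefixed or unprefixed ones, while the
 * QED_PORT_MODE_* values they map to stay unchanged. */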
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G; break; - case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G; break; - case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F; break; - case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E; break; - case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G; break; - case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G; break; - case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G; break; - case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G; break; default: @@ -1373,7 +1513,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: link->speed.forced_speed = 50000; break; - case NVM_CFG1_PORT_DRV_LINK_SPEED_100G: + case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G: link->speed.forced_speed = 100000; break; default: @@ -1429,14 +1569,20 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) __set_bit(QED_DEV_CAP_ETH, &p_hwfn->hw_info.device_capabilities); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) + __set_bit(QED_DEV_CAP_ISCSI, + &p_hwfn->hw_info.device_capabilities); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE) + __set_bit(QED_DEV_CAP_ROCE, + &p_hwfn->hw_info.device_capabilities); return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); } static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - u32 reg_function_hide, tmp, eng_mask; - u8 num_funcs; + u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; + u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; num_funcs = MAX_NUM_PFS_BB; @@ -1466,9 +1612,19 @@ static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) num_funcs++; tmp >>= 0x1; } + + /* Get the PF index within the enabled functions */ + low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1; + tmp = reg_function_hide & eng_mask & low_pfs_mask; + while (tmp) { + if (tmp & 0x1) + enabled_func_idx--; + tmp >>= 0x1; + } } p_hwfn->num_funcs_on_engine = num_funcs; + p_hwfn->enabled_func_idx = enabled_func_idx; DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, @@ -1538,9 +1694,7 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, qed_get_num_funcs(p_hwfn, p_ptt); - qed_hw_get_resc(p_hwfn); - - return rc; + return qed_hw_get_resc(p_hwfn); } static int qed_get_dev_info(struct qed_dev *cdev) @@ -1737,92 +1891,285 @@ void qed_hw_remove(struct qed_dev *cdev) qed_iov_free_hw_info(cdev); } -int qed_chain_alloc(struct qed_dev *cdev, - enum qed_chain_use_mode intended_use, - enum qed_chain_mode mode, - u16 num_elems, - size_t elem_size, - struct qed_chain *p_chain) +static void qed_chain_free_next_ptr(struct qed_dev *cdev, + struct qed_chain *p_chain) +{ + void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL; + dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0; + struct qed_chain_next *p_next; + u32 size, i; + + if (!p_virt) + 
return;
+
+	size = p_chain->elem_size * p_chain->usable_per_page;
+
+	for (i = 0; i < p_chain->page_cnt; i++) {
+		if (!p_virt)
+			break;
+
+		p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
+		p_virt_next = p_next->next_virt;
+		p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
+
+		dma_free_coherent(&cdev->pdev->dev,
+				  QED_CHAIN_PAGE_SIZE, p_virt, p_phys);
+
+		p_virt = p_virt_next;
+		p_phys = p_phys_next;
+	}
+}
+
+static void qed_chain_free_single(struct qed_dev *cdev,
+				  struct qed_chain *p_chain)
 {
-	dma_addr_t p_pbl_phys = 0;
-	void *p_pbl_virt = NULL;
+	if (!p_chain->p_virt_addr)
+		return;
+
+	dma_free_coherent(&cdev->pdev->dev,
+			  QED_CHAIN_PAGE_SIZE,
+			  p_chain->p_virt_addr, p_chain->p_phys_addr);
+}
+
+static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+	void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
+	u32 page_cnt = p_chain->page_cnt, i, pbl_size;
+	u8 *p_pbl_virt = p_chain->pbl.p_virt_table;
+
+	if (!pp_virt_addr_tbl)
+		return;
+
+	if (!p_chain->pbl.p_virt_table)
+		goto out;
+
+	for (i = 0; i < page_cnt; i++) {
+		if (!pp_virt_addr_tbl[i])
+			break;
+
+		dma_free_coherent(&cdev->pdev->dev,
+				  QED_CHAIN_PAGE_SIZE,
+				  pp_virt_addr_tbl[i],
+				  *(dma_addr_t *)p_pbl_virt);
+
+		p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
+	}
+
+	pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+	dma_free_coherent(&cdev->pdev->dev,
+			  pbl_size,
+			  p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table);
+out:
+	vfree(p_chain->pbl.pp_virt_addr_tbl);
+}
+
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+	switch (p_chain->mode) {
+	case QED_CHAIN_MODE_NEXT_PTR:
+		qed_chain_free_next_ptr(cdev, p_chain);
+		break;
+	case QED_CHAIN_MODE_SINGLE:
+		qed_chain_free_single(cdev, p_chain);
+		break;
+	case QED_CHAIN_MODE_PBL:
+		qed_chain_free_pbl(cdev, p_chain);
+		break;
+	}
+}
+
+static int
+qed_chain_alloc_sanity_check(struct qed_dev *cdev,
+			     enum qed_chain_cnt_type cnt_type,
+			     size_t elem_size, u32 page_cnt)
+{
+	u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
+
+	/* The actual chain size can be larger than the maximal possible value
+	 * after rounding up the requested elements number to pages, and after
+	 * taking into account the unusable elements (next-ptr elements).
+	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
+	 * size/capacity fields are of a u32 type.
+	 */
+	if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
+	     chain_size > 0x10000) ||
+	    (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
+	     chain_size > 0x100000000ULL)) {
+		DP_NOTICE(cdev,
+			  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
+			  chain_size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+	void *p_virt = NULL, *p_virt_prev = NULL;
 	dma_addr_t p_phys = 0;
-	void *p_virt = NULL;
-	u16 page_cnt = 0;
-	size_t size;
+	u32 i;
 
-	if (mode == QED_CHAIN_MODE_SINGLE)
-		page_cnt = 1;
-	else
-		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+	for (i = 0; i < p_chain->page_cnt; i++) {
+		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+					    QED_CHAIN_PAGE_SIZE,
+					    &p_phys, GFP_KERNEL);
+		if (!p_virt) {
+			DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+			return -ENOMEM;
+		}
+
+		if (i == 0) {
+			qed_chain_init_mem(p_chain, p_virt, p_phys);
+			qed_chain_reset(p_chain);
+		} else {
+			qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
+						     p_virt, p_phys);
+		}
+
+		p_virt_prev = p_virt;
+	}
+	/* Last page's next element should point to the beginning of the
+	 * chain. 
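	 * (The chain is thus made circular: following next_virt from the
	 * last page wraps back to p_chain->p_virt_addr, as set up just
	 * below.)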
+ */ + qed_chain_init_next_ptr_elem(p_chain, p_virt_prev, + p_chain->p_virt_addr, + p_chain->p_phys_addr); + + return 0; +} + +static int +qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain) +{ + dma_addr_t p_phys = 0; + void *p_virt = NULL; - size = page_cnt * QED_CHAIN_PAGE_SIZE; p_virt = dma_alloc_coherent(&cdev->pdev->dev, - size, &p_phys, GFP_KERNEL); + QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL); if (!p_virt) { - DP_NOTICE(cdev, "Failed to allocate chain mem\n"); - goto nomem; + DP_NOTICE(cdev, "Failed to allocate chain memory\n"); + return -ENOMEM; } - if (mode == QED_CHAIN_MODE_PBL) { - size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; - p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev, - size, &p_pbl_phys, - GFP_KERNEL); - if (!p_pbl_virt) { - DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n"); - goto nomem; - } + qed_chain_init_mem(p_chain, p_virt, p_phys); + qed_chain_reset(p_chain); - qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt, - (u8)elem_size, intended_use, - p_pbl_phys, p_pbl_virt); - } else { - qed_chain_init(p_chain, p_virt, p_phys, page_cnt, - (u8)elem_size, intended_use, mode); + return 0; +} + +static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) +{ + u32 page_cnt = p_chain->page_cnt, size, i; + dma_addr_t p_phys = 0, p_pbl_phys = 0; + void **pp_virt_addr_tbl = NULL; + u8 *p_pbl_virt = NULL; + void *p_virt = NULL; + + size = page_cnt * sizeof(*pp_virt_addr_tbl); + pp_virt_addr_tbl = vmalloc(size); + if (!pp_virt_addr_tbl) { + DP_NOTICE(cdev, + "Failed to allocate memory for the chain virtual addresses table\n"); + return -ENOMEM; } + memset(pp_virt_addr_tbl, 0, size); - return 0; + /* The allocation of the PBL table is done with its full size, since it + * is expected to be successive. + * qed_chain_init_pbl_mem() is called even in a case of an allocation + * failure, since pp_virt_addr_tbl was previously allocated, and it + * should be saved to allow its freeing during the error flow. 
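	 * (The PBL itself is a flat array of dma_addr_t entries, one per
	 * chain page, stepped by QED_CHAIN_PBL_ENTRY_SIZE, while
	 * pp_virt_addr_tbl shadows it with the kernel virtual addresses
	 * needed for dma_free_coherent().)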
+ */ + size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; + p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev, + size, &p_pbl_phys, GFP_KERNEL); + qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, + pp_virt_addr_tbl); + if (!p_pbl_virt) { + DP_NOTICE(cdev, "Failed to allocate chain pbl memory\n"); + return -ENOMEM; + } -nomem: - dma_free_coherent(&cdev->pdev->dev, - page_cnt * QED_CHAIN_PAGE_SIZE, - p_virt, p_phys); - dma_free_coherent(&cdev->pdev->dev, - page_cnt * QED_CHAIN_PBL_ENTRY_SIZE, - p_pbl_virt, p_pbl_phys); + for (i = 0; i < page_cnt; i++) { + p_virt = dma_alloc_coherent(&cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_phys, GFP_KERNEL); + if (!p_virt) { + DP_NOTICE(cdev, "Failed to allocate chain memory\n"); + return -ENOMEM; + } - return -ENOMEM; + if (i == 0) { + qed_chain_init_mem(p_chain, p_virt, p_phys); + qed_chain_reset(p_chain); + } + + /* Fill the PBL table with the physical address of the page */ + *(dma_addr_t *)p_pbl_virt = p_phys; + /* Keep the virtual address of the page */ + p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; + + p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE; + } + + return 0; } -void qed_chain_free(struct qed_dev *cdev, - struct qed_chain *p_chain) +int qed_chain_alloc(struct qed_dev *cdev, + enum qed_chain_use_mode intended_use, + enum qed_chain_mode mode, + enum qed_chain_cnt_type cnt_type, + u32 num_elems, size_t elem_size, struct qed_chain *p_chain) { - size_t size; + u32 page_cnt; + int rc = 0; - if (!p_chain->p_virt_addr) - return; + if (mode == QED_CHAIN_MODE_SINGLE) + page_cnt = 1; + else + page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode); - if (p_chain->mode == QED_CHAIN_MODE_PBL) { - size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; - dma_free_coherent(&cdev->pdev->dev, size, - p_chain->pbl.p_virt_table, - p_chain->pbl.p_phys_table); + rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt); + if (rc) { + DP_NOTICE(cdev, + "Cannot allocate a chain with the given arguments:\n" + "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", + intended_use, mode, cnt_type, num_elems, elem_size); + return rc; } - size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE; - dma_free_coherent(&cdev->pdev->dev, size, - p_chain->p_virt_addr, - p_chain->p_phys_addr); + qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use, + mode, cnt_type); + + switch (mode) { + case QED_CHAIN_MODE_NEXT_PTR: + rc = qed_chain_alloc_next_ptr(cdev, p_chain); + break; + case QED_CHAIN_MODE_SINGLE: + rc = qed_chain_alloc_single(cdev, p_chain); + break; + case QED_CHAIN_MODE_PBL: + rc = qed_chain_alloc_pbl(cdev, p_chain); + break; + } + if (rc) + goto nomem; + + return 0; + +nomem: + qed_chain_free(cdev, p_chain); + return rc; } -int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, - u16 src_id, u16 *dst_id) +int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) { if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { u16 min, max; - min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); + min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE); max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); DP_NOTICE(p_hwfn, "l2_queue id [%d] is not valid, available indices [%d - %d]\n", @@ -1876,6 +2223,110 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, return 0; } +static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u32 hw_addr, void *p_eth_qzone, + size_t eth_qzone_size, u8 timeset) +{ + struct coalescing_timeset *p_coal_timeset; + + if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) { + DP_NOTICE(p_hwfn, "Coalescing configuration not 
enabled\n");
+		return -EINVAL;
+	}
+
+	p_coal_timeset = p_eth_qzone;
+	memset(p_coal_timeset, 0, eth_qzone_size);
+	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
+	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
+	qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
+
+	return 0;
+}
+
+int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+			 u16 coalesce, u8 qid, u16 sb_id)
+{
+	struct ustorm_eth_queue_zone eth_qzone;
+	u8 timeset, timer_res;
+	u16 fw_qid = 0;
+	u32 address;
+	int rc;
+
+	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
+	if (coalesce <= 0x7F) {
+		timer_res = 0;
+	} else if (coalesce <= 0xFF) {
+		timer_res = 1;
+	} else if (coalesce <= 0x1FF) {
+		timer_res = 2;
+	} else {
+		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+		return -EINVAL;
+	}
+	timeset = (u8)(coalesce >> timer_res);
+
+	rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
+	if (rc)
+		return rc;
+
+	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
+	if (rc)
+		goto out;
+
+	address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+
+	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+			      sizeof(struct ustorm_eth_queue_zone), timeset);
+	if (rc)
+		goto out;
+
+	p_hwfn->cdev->rx_coalesce_usecs = coalesce;
+out:
+	return rc;
+}
+
+int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+			 u16 coalesce, u8 qid, u16 sb_id)
+{
+	struct xstorm_eth_queue_zone eth_qzone;
+	u8 timeset, timer_res;
+	u16 fw_qid = 0;
+	u32 address;
+	int rc;
+
+	/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
+	if (coalesce <= 0x7F) {
+		timer_res = 0;
+	} else if (coalesce <= 0xFF) {
+		timer_res = 1;
+	} else if (coalesce <= 0x1FF) {
+		timer_res = 2;
+	} else {
+		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+		return -EINVAL;
+	}
+	timeset = (u8)(coalesce >> timer_res);
+
+	rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
+	if (rc)
+		return rc;
+
+	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
+	if (rc)
+		goto out;
+
+	address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+
+	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+			      sizeof(struct xstorm_eth_queue_zone), timeset);
+	if (rc)
+		goto out;
+
+	p_hwfn->cdev->tx_coalesce_usecs = coalesce;
+out:
+	return rc;
+}
+
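/* A rough sketch of the encoding both coalesce helpers above use
 * (illustrative values, not from the source):
 *
 *	u8 timer_res = (coalesce <= 0x7F) ? 0 :
 *		       (coalesce <= 0xFF) ? 1 : 2;
 *	u8 timeset = (u8)(coalesce >> timer_res);
 *
 * e.g. coalesce = 300us (0x12C) falls in the 0x100-0x1FF bucket, so
 * timer_res = 2 and timeset = 300 >> 2 = 75; the hardware resolves the
 * programmed value back to 75 << 2 = 300us.
 */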
 /* Calculate final WFQ values for all vports and configure them.
  * After this configuration each vport will have
  * approx min rate =  min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
@@ -2089,7 +2540,7 @@ int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
 		rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
 
-		if (!rc) {
+		if (rc) {
 			qed_ptt_release(p_hwfn, p_ptt);
 			return rc;
 		}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index dde364d6f..343bb0344 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -212,6 +212,20 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
 		  u32 size_in_dwords,
 		  u32 flags);
 
+/**
+ * @brief qed_dmae_grc2host - Read data from dmae data offset
+ * to a destination address using the given ptt
+ *
+ * @param p_ptt
+ * @param grc_addr (dmae_data_offset)
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param flags - one of the flags defined above
+ */
+int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+		      u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords,
+		      u32 flags);
+
 /**
  * @brief qed_dmae_host2host - copy data from a source address
  * to a destination address (for SRIOV) using the given ptt
@@ -245,9 +259,8 @@ qed_chain_alloc(struct qed_dev *cdev,
 		enum qed_chain_use_mode intended_use,
 		enum qed_chain_mode mode,
-		u16 num_elems,
-		size_t elem_size,
-		struct qed_chain *p_chain);
+		enum qed_chain_cnt_type cnt_type,
+		u32 num_elems, size_t elem_size, struct qed_chain *p_chain);
 
 /**
  * @brief qed_chain_free - Free chain DMA memory
@@ -255,8 +268,7 @@ qed_chain_alloc(struct qed_dev *cdev,
  * @param p_hwfn
  * @param p_chain
  */
-void qed_chain_free(struct qed_dev *cdev,
-		    struct qed_chain *p_chain);
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain);
 
 /**
  * @brief qed_fw_l2_queue - Get absolute L2 queue ID
@@ -310,4 +322,37 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
 int qed_final_cleanup(struct qed_hwfn *p_hwfn,
 		      struct qed_ptt *p_ptt, u16 id, bool is_vf);
 
+/**
+ * @brief qed_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
+ * Coalescing can be configured up to 511 usec, but with decreasing accuracy
+ * [the bigger the value the less accurate] - the error can reach 3usec for
+ * the highest values.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param coalesce - Coalesce value in micro seconds.
+ * @param qid - Queue index.
+ * @param sb_id - SB Id
+ *
+ * @return int
+ */
+int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+			 u16 coalesce, u8 qid, u16 sb_id);
+
+/**
+ * @brief qed_set_txq_coalesce - Configure coalesce parameters for a Tx queue
+ * While the API allows setting coalescing per-qid, all tx queues sharing a
+ * SB should be in the same range [i.e., either 0-0x7f, 0x80-0xff or
+ * 0x100-0x1ff], otherwise configuration would break.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param coalesce - Coalesce value in micro seconds.
+ * @param qid - Queue index. 
+ * @param qid - SB Id + * + * @return int + */ +int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 coalesce, u8 qid, u16 sb_id); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index e29ed5a69..6f9d3b831 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -17,13 +17,15 @@ #include <linux/list.h> #include <linux/slab.h> #include <linux/qed/common_hsi.h> +#include <linux/qed/storage_common.h> +#include <linux/qed/tcp_common.h> #include <linux/qed/eth_common.h> +#include <linux/qed/iscsi_common.h> +#include <linux/qed/rdma_common.h> +#include <linux/qed/roce_common.h> struct qed_hwfn; struct qed_ptt; -/********************************/ -/* Add include to common target */ -/********************************/ /* opcodes for the event ring */ enum common_event_opcode { @@ -32,9 +34,10 @@ enum common_event_opcode { COMMON_EVENT_VF_START, COMMON_EVENT_VF_STOP, COMMON_EVENT_VF_PF_CHANNEL, - COMMON_EVENT_RESERVED4, - COMMON_EVENT_RESERVED5, - COMMON_EVENT_RESERVED6, + COMMON_EVENT_VF_FLR, + COMMON_EVENT_PF_UPDATE, + COMMON_EVENT_MALICIOUS_VF, + COMMON_EVENT_RL_UPDATE, COMMON_EVENT_EMPTY, MAX_COMMON_EVENT_OPCODE }; @@ -42,11 +45,12 @@ enum common_event_opcode { /* Common Ramrod Command IDs */ enum common_ramrod_cmd_id { COMMON_RAMROD_UNUSED, - COMMON_RAMROD_PF_START /* PF Function Start Ramrod */, - COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */, + COMMON_RAMROD_PF_START, + COMMON_RAMROD_PF_STOP, COMMON_RAMROD_VF_START, COMMON_RAMROD_VF_STOP, COMMON_RAMROD_PF_UPDATE, + COMMON_RAMROD_RL_UPDATE, COMMON_RAMROD_EMPTY, MAX_COMMON_RAMROD_CMD_ID }; @@ -63,448 +67,448 @@ struct pstorm_core_conn_st_ctx { /* Core Slowpath Connection storm context of Xstorm */ struct xstorm_core_conn_st_ctx { - __le32 spq_base_lo /* SPQ Ring Base Address low dword */; - __le32 spq_base_hi /* SPQ Ring Base Address high dword */; - struct regpair consolid_base_addr; - __le16 spq_cons /* SPQ Ring Consumer */; - __le16 consolid_cons /* Consolidation Ring Consumer */; - __le32 reserved0[55] /* Pad to 15 cycles */; + __le32 spq_base_lo; + __le32 spq_base_hi; + struct regpair consolid_base_addr; + __le16 spq_cons; + __le16 consolid_cons; + __le32 reserved0[55]; }; struct xstorm_core_conn_ag_ctx { - u8 reserved0 /* cdu_validation */; - u8 core_state /* state */; - u8 flags0; -#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 reserved0; + u8 core_state; + u8 flags0; +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define 
XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 u8 flags1; -#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */ -#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */ -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ -#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 u8 flags4; -#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ -#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ -#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ -#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */ -#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */ -#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */ -#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */ -#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */ -#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 /* cf16 */ -#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 -#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */ -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */ -#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 u8 flags7; -#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */ -#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */ -#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define 
XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ -#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ -#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ -#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ -#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */ -#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */ -#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */ -#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */ -#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */ -#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 /* cf16en */ -#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 -#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 +#define 
XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 u8 flags10; -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */ -#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */ -#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */ -#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */ -#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 /* cf23en */ -#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */ -#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */ -#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define 
XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */ -#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */ -#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 /* bit16 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 -#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 /* bit17 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 -#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 /* bit18 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 -#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 /* bit19 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 -#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 /* bit20 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 -#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 /* bit21 */ -#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 -#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 /* cf23 */ -#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 - u8 byte2 /* byte2 */; - __le16 physical_q0 /* physical_q0 */; - __le16 consolid_prod /* physical_q1 */; - __le16 reserved16 /* physical_q2 */; - __le16 tx_bd_cons /* word3 */; - __le16 tx_bd_or_spq_prod /* word4 */; - __le16 word5 /* word5 */; - __le16 conn_dpi /* conn_dpi */; - u8 byte3 /* byte3 */; - u8 byte4 /* byte4 */; - u8 byte5 /* byte5 */; - u8 byte6 /* byte6 */; - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le32 reg4 /* reg4 */; - __le32 reg5 /* cf_array0 */; - __le32 reg6 /* cf_array1 */; - __le16 word7 /* word7 */; - __le16 word8 /* word8 */; - __le16 word9 /* word9 */; - __le16 word10 /* word10 */; - __le32 reg7 /* reg7 */; - __le32 reg8 /* reg8 */; - __le32 reg9 /* reg9 */; - u8 byte7 /* byte7 */; - u8 byte8 /* byte8 */; - u8 byte9 /* byte9 */; - u8 byte10 /* byte10 */; - u8 byte11 /* byte11 */; - u8 byte12 /* byte12 */; - u8 byte13 /* byte13 */; - u8 byte14 /* byte14 */; - u8 byte15 /* byte15 */; - u8 byte16 /* byte16 */; - __le16 word11 /* word11 */; - __le32 reg10 /* reg10 */; - __le32 reg11 /* reg11 */; - __le32 reg12 /* reg12 */; - __le32 reg13 /* reg13 */; - __le32 reg14 /* reg14 */; - __le32 reg15 /* reg15 */; - __le32 reg16 /* reg16 */; - __le32 reg17 /* reg17 */; - __le32 reg18 /* reg18 */; - __le32 reg19 /* reg19 */; - __le16 word12 /* word12 */; - __le16 word13 /* word13 */; - __le16 word14 /* word14 */; - __le16 word15 /* word15 */; +#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 consolid_prod; + __le16 reserved16; + __le16 tx_bd_cons; + __le16 tx_bd_or_spq_prod; + __le16 word5; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le16 word7; + __le16 word8; + __le16 word9; + __le16 word10; + __le32 reg7; + __le32 reg8; + 
__le32 reg9; + u8 byte7; + u8 byte8; + u8 byte9; + u8 byte10; + u8 byte11; + u8 byte12; + u8 byte13; + u8 byte14; + u8 byte15; + u8 byte16; + __le16 word11; + __le32 reg10; + __le32 reg11; + __le32 reg12; + __le32 reg13; + __le32 reg14; + __le32 reg15; + __le32 reg16; + __le32 reg17; + __le32 reg18; + __le32 reg19; + __le16 word12; + __le16 word13; + __le16 word14; + __le16 word15; }; struct tstorm_core_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 + u8 byte0; + u8 byte1; + u8 flags0; +#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ -#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ -#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ -#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 
0x3 /* cf9 */ -#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ -#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ -#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ 
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le32 reg4 /* reg4 */; - __le32 reg5 /* reg5 */; - __le32 reg6 /* reg6 */; - __le32 reg7 /* reg7 */; - __le32 reg8 /* reg8 */; - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* word0 */; - u8 byte4 /* byte4 */; - u8 byte5 /* byte5 */; - __le16 word1 /* word1 */; - __le16 word2 /* conn_dpi */; - __le16 word3 /* word3 */; - __le32 reg9 /* reg9 */; - __le32 reg10 /* reg10 */; +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 byte2; + u8 byte3; + __le16 word0; + u8 byte4; + u8 byte5; + __le16 word1; + __le16 word2; + __le16 word3; + __le32 reg9; + __le32 reg10; }; struct ustorm_core_conn_ag_ctx { - u8 reserved /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 reserved; + u8 byte1; + u8 flags0; +#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ -#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define 
USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 u8 flags2; -#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 -#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* conn_dpi */; - __le16 word1 /* word1 */; - __le32 rx_producers /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le16 word2 /* word2 */; - __le16 word3 /* word3 */; +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 rx_producers; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; }; /* The core storm context for the Mstorm */ @@ -519,122 +523,186 @@ struct ustorm_core_conn_st_ctx { /* core connection context */ struct core_conn_context { - struct ystorm_core_conn_st_ctx ystorm_st_context; - struct regpair ystorm_st_padding[2] /* padding */; - struct pstorm_core_conn_st_ctx pstorm_st_context; - struct regpair pstorm_st_padding[2]; - struct xstorm_core_conn_st_ctx xstorm_st_context; - struct xstorm_core_conn_ag_ctx xstorm_ag_context; - struct tstorm_core_conn_ag_ctx tstorm_ag_context; - struct ustorm_core_conn_ag_ctx ustorm_ag_context; - struct mstorm_core_conn_st_ctx mstorm_st_context; - struct ustorm_core_conn_st_ctx ustorm_st_context; - struct regpair ustorm_st_padding[2] /* padding */; + struct ystorm_core_conn_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2]; + struct pstorm_core_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct xstorm_core_conn_st_ctx xstorm_st_context; + struct xstorm_core_conn_ag_ctx xstorm_ag_context; + struct tstorm_core_conn_ag_ctx tstorm_ag_context; + struct ustorm_core_conn_ag_ctx ustorm_ag_context; + struct mstorm_core_conn_st_ctx mstorm_st_context; + struct ustorm_core_conn_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; +}; + +struct eth_mstorm_per_pf_stat { + struct regpair gre_discard_pkts; + struct regpair vxlan_discard_pkts; + struct regpair geneve_discard_pkts; + struct regpair lb_discard_pkts; }; struct eth_mstorm_per_queue_stat { - struct regpair ttl0_discard; - struct regpair packet_too_big_discard; - struct regpair no_buff_discard; - struct regpair not_active_discard; - struct regpair tpa_coalesced_pkts; - struct regpair tpa_coalesced_events; - struct regpair tpa_aborts_num; - struct regpair tpa_coalesced_bytes; + struct regpair ttl0_discard; + struct regpair packet_too_big_discard; + struct regpair no_buff_discard; + struct regpair not_active_discard; + struct regpair tpa_coalesced_pkts; + struct regpair tpa_coalesced_events; + struct regpair tpa_aborts_num; + struct regpair tpa_coalesced_bytes; +}; + +/* Ethernet TX Per PF */ +struct eth_pstorm_per_pf_stat { + struct regpair sent_lb_ucast_bytes; + struct regpair sent_lb_mcast_bytes; + struct regpair sent_lb_bcast_bytes; + struct regpair sent_lb_ucast_pkts; + struct regpair sent_lb_mcast_pkts; + struct regpair sent_lb_bcast_pkts; + struct regpair sent_gre_bytes; + struct regpair sent_vxlan_bytes; + struct regpair sent_geneve_bytes; + struct regpair sent_gre_pkts; + struct regpair sent_vxlan_pkts; + struct regpair sent_geneve_pkts; + struct regpair gre_drop_pkts; + struct regpair vxlan_drop_pkts; + struct regpair geneve_drop_pkts; +}; + +/* Ethernet TX Per Queue Stats */ +struct eth_pstorm_per_queue_stat { + struct regpair sent_ucast_bytes; + struct regpair sent_mcast_bytes; + struct regpair sent_bcast_bytes; + struct regpair sent_ucast_pkts; + struct regpair sent_mcast_pkts; + struct regpair sent_bcast_pkts; + struct regpair error_drop_pkts; +}; + +/* ETH Rx producers data */ +struct eth_rx_rate_limit { + __le16 mult; + __le16 cnst; + u8 add_sub_cnst; + u8 reserved0; + __le16 reserved1; }; -struct eth_pstorm_per_queue_stat { - struct regpair sent_ucast_bytes; - struct 
regpair sent_mcast_bytes; - struct regpair sent_bcast_bytes; - struct regpair sent_ucast_pkts; - struct regpair sent_mcast_pkts; - struct regpair sent_bcast_pkts; - struct regpair error_drop_pkts; +struct eth_ustorm_per_pf_stat { + struct regpair rcv_lb_ucast_bytes; + struct regpair rcv_lb_mcast_bytes; + struct regpair rcv_lb_bcast_bytes; + struct regpair rcv_lb_ucast_pkts; + struct regpair rcv_lb_mcast_pkts; + struct regpair rcv_lb_bcast_pkts; + struct regpair rcv_gre_bytes; + struct regpair rcv_vxlan_bytes; + struct regpair rcv_geneve_bytes; + struct regpair rcv_gre_pkts; + struct regpair rcv_vxlan_pkts; + struct regpair rcv_geneve_pkts; }; struct eth_ustorm_per_queue_stat { - struct regpair rcv_ucast_bytes; - struct regpair rcv_mcast_bytes; - struct regpair rcv_bcast_bytes; - struct regpair rcv_ucast_pkts; - struct regpair rcv_mcast_pkts; - struct regpair rcv_bcast_pkts; + struct regpair rcv_ucast_bytes; + struct regpair rcv_mcast_bytes; + struct regpair rcv_bcast_bytes; + struct regpair rcv_ucast_pkts; + struct regpair rcv_mcast_pkts; + struct regpair rcv_bcast_pkts; }; /* Event Ring Next Page Address */ struct event_ring_next_addr { - struct regpair addr /* Next Page Address */; - __le32 reserved[2] /* Reserved */; + struct regpair addr; + __le32 reserved[2]; }; +/* Event Ring Element */ union event_ring_element { - struct event_ring_entry entry /* Event Ring Entry */; - struct event_ring_next_addr next_addr; + struct event_ring_entry entry; + struct event_ring_next_addr next_addr; +}; + +/* Major and Minor hsi Versions */ +struct hsi_fp_ver_struct { + u8 minor_ver_arr[2]; + u8 major_ver_arr[2]; }; +/* Mstorm non-triggering VF zone */ struct mstorm_non_trigger_vf_zone { struct eth_mstorm_per_queue_stat eth_queue_stat; + struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF]; }; +/* Mstorm VF zone */ struct mstorm_vf_zone { struct mstorm_non_trigger_vf_zone non_trigger; + }; +/* personality per PF */ enum personality_type { BAD_PERSONALITY_TYP, - PERSONALITY_RESERVED, + PERSONALITY_ISCSI, PERSONALITY_RESERVED2, - PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp */, + PERSONALITY_RDMA_AND_ETH, PERSONALITY_RESERVED3, PERSONALITY_CORE, - PERSONALITY_ETH /* Ethernet */, + PERSONALITY_ETH, PERSONALITY_RESERVED4, MAX_PERSONALITY_TYPE }; +/* tunnel configuration */ struct pf_start_tunnel_config { - u8 set_vxlan_udp_port_flg; - u8 set_geneve_udp_port_flg; - u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */; - u8 tx_enable_l2geneve; - u8 tx_enable_ipgeneve; - u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */; - u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */; - u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */; - u8 tunnel_clss_l2geneve; - u8 tunnel_clss_ipgeneve; - u8 tunnel_clss_l2gre; - u8 tunnel_clss_ipgre; - __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */; - __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. 
*/; + u8 set_vxlan_udp_port_flg; + u8 set_geneve_udp_port_flg; + u8 tx_enable_vxlan; + u8 tx_enable_l2geneve; + u8 tx_enable_ipgeneve; + u8 tx_enable_l2gre; + u8 tx_enable_ipgre; + u8 tunnel_clss_vxlan; + u8 tunnel_clss_l2geneve; + u8 tunnel_clss_ipgeneve; + u8 tunnel_clss_l2gre; + u8 tunnel_clss_ipgre; + __le16 vxlan_udp_port; + __le16 geneve_udp_port; }; /* Ramrod data for PF start ramrod */ struct pf_start_ramrod_data { - struct regpair event_ring_pbl_addr; - struct regpair consolid_q_pbl_addr; - struct pf_start_tunnel_config tunnel_config; - __le16 event_ring_sb_id; - u8 base_vf_id; - u8 num_vfs; - u8 event_ring_num_pages; - u8 event_ring_sb_index; - u8 path_id; - u8 warning_as_error; - u8 dont_log_ramrods; - u8 personality; - __le16 log_type_mask; - u8 mf_mode /* Multi function mode */; - u8 integ_phase /* Integration phase */; - u8 allow_npar_tx_switching; - u8 inner_to_outer_pri_map[8]; - u8 pri_map_valid; - u32 outer_tag; - u8 reserved0[4]; -}; - -/* Data for port update ramrod */ + struct regpair event_ring_pbl_addr; + struct regpair consolid_q_pbl_addr; + struct pf_start_tunnel_config tunnel_config; + __le16 event_ring_sb_id; + u8 base_vf_id; + u8 num_vfs; + u8 event_ring_num_pages; + u8 event_ring_sb_index; + u8 path_id; + u8 warning_as_error; + u8 dont_log_ramrods; + u8 personality; + __le16 log_type_mask; + u8 mf_mode; + u8 integ_phase; + u8 allow_npar_tx_switching; + u8 inner_to_outer_pri_map[8]; + u8 pri_map_valid; + __le32 outer_tag; + struct hsi_fp_ver_struct hsi_fp_ver; + +}; + struct protocol_dcb_data { u8 dcb_enable_flag; u8 dcb_priority; @@ -642,25 +710,24 @@ struct protocol_dcb_data { u8 reserved; }; -/* tunnel configuration */ struct pf_update_tunnel_config { - u8 update_rx_pf_clss; - u8 update_tx_pf_clss; - u8 set_vxlan_udp_port_flg; - u8 set_geneve_udp_port_flg; - u8 tx_enable_vxlan; - u8 tx_enable_l2geneve; - u8 tx_enable_ipgeneve; - u8 tx_enable_l2gre; - u8 tx_enable_ipgre; - u8 tunnel_clss_vxlan; - u8 tunnel_clss_l2geneve; - u8 tunnel_clss_ipgeneve; - u8 tunnel_clss_l2gre; - u8 tunnel_clss_ipgre; - __le16 vxlan_udp_port; - __le16 geneve_udp_port; - __le16 reserved[3]; + u8 update_rx_pf_clss; + u8 update_tx_pf_clss; + u8 set_vxlan_udp_port_flg; + u8 set_geneve_udp_port_flg; + u8 tx_enable_vxlan; + u8 tx_enable_l2geneve; + u8 tx_enable_ipgeneve; + u8 tx_enable_l2gre; + u8 tx_enable_ipgre; + u8 tunnel_clss_vxlan; + u8 tunnel_clss_l2geneve; + u8 tunnel_clss_ipgeneve; + u8 tunnel_clss_l2gre; + u8 tunnel_clss_ipgre; + __le16 vxlan_udp_port; + __le16 geneve_udp_port; + __le16 reserved[3]; }; struct pf_update_ramrod_data { @@ -669,38 +736,43 @@ struct pf_update_ramrod_data { u8 update_fcoe_dcb_data_flag; u8 update_iscsi_dcb_data_flag; u8 update_roce_dcb_data_flag; + u8 update_iwarp_dcb_data_flag; u8 update_mf_vlan_flag; - __le16 mf_vlan; + u8 reserved; struct protocol_dcb_data eth_dcb_data; struct protocol_dcb_data fcoe_dcb_data; struct protocol_dcb_data iscsi_dcb_data; struct protocol_dcb_data roce_dcb_data; - struct pf_update_tunnel_config tunnel_config; -}; - -/* Tunnel classification scheme */ -enum tunnel_clss { - TUNNEL_CLSS_MAC_VLAN = 0, - TUNNEL_CLSS_MAC_VNI, - TUNNEL_CLSS_INNER_MAC_VLAN, - TUNNEL_CLSS_INNER_MAC_VNI, - MAX_TUNNEL_CLSS + struct protocol_dcb_data iwarp_dcb_data; + __le16 mf_vlan; + __le16 reserved2; + struct pf_update_tunnel_config tunnel_config; }; +/* Ports mode */ enum ports_mode { - ENGX2_PORTX1 /* 2 engines x 1 port */, - ENGX2_PORTX2 /* 2 engines x 2 ports */, - ENGX1_PORTX1 /* 1 engine x 1 port */, - ENGX1_PORTX2 /* 1 engine x 2 ports */, - 
ENGX1_PORTX4 /* 1 engine x 4 ports */, + ENGX2_PORTX1, + ENGX2_PORTX2, + ENGX1_PORTX1, + ENGX1_PORTX2, + ENGX1_PORTX4, MAX_PORTS_MODE }; +/* used to index in hsi_fp_[major|minor]_ver_arr per protocol */ +enum protocol_version_array_key { + ETH_VER_KEY = 0, + ROCE_VER_KEY, + MAX_PROTOCOL_VERSION_ARRAY_KEY +}; + +/* Pstorm non-triggering VF zone */ struct pstorm_non_trigger_vf_zone { struct eth_pstorm_per_queue_stat eth_queue_stat; struct regpair reserved[2]; }; +/* Pstorm VF zone */ struct pstorm_vf_zone { struct pstorm_non_trigger_vf_zone non_trigger; struct regpair reserved[7]; @@ -708,56 +780,89 @@ struct pstorm_vf_zone { /* Ramrod Header of SPQE */ struct ramrod_header { - __le32 cid /* Slowpath Connection CID */; - u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */; - u8 protocol_id /* Ramrod Protocol ID */; - __le16 echo /* Ramrod echo */; + __le32 cid; + u8 cmd_id; + u8 protocol_id; + __le16 echo; }; /* Slowpath Element (SPQE) */ struct slow_path_element { - struct ramrod_header hdr /* Ramrod Header */; - struct regpair data_ptr; + struct ramrod_header hdr; + struct regpair data_ptr; +}; + +/* Tstorm non-triggering VF zone */ +struct tstorm_non_trigger_vf_zone { + struct regpair reserved[2]; }; struct tstorm_per_port_stat { - struct regpair trunc_error_discard; - struct regpair mac_error_discard; - struct regpair mftag_filter_discard; - struct regpair eth_mac_filter_discard; - struct regpair ll2_mac_filter_discard; - struct regpair ll2_conn_disabled_discard; - struct regpair iscsi_irregular_pkt; - struct regpair fcoe_irregular_pkt; - struct regpair roce_irregular_pkt; - struct regpair eth_irregular_pkt; - struct regpair toe_irregular_pkt; - struct regpair preroce_irregular_pkt; + struct regpair trunc_error_discard; + struct regpair mac_error_discard; + struct regpair mftag_filter_discard; + struct regpair eth_mac_filter_discard; + struct regpair reserved[5]; + struct regpair eth_irregular_pkt; + struct regpair reserved1[2]; + struct regpair eth_gre_tunn_filter_discard; + struct regpair eth_vxlan_tunn_filter_discard; + struct regpair eth_geneve_tunn_filter_discard; +}; + +/* Tstorm VF zone */ +struct tstorm_vf_zone { + struct tstorm_non_trigger_vf_zone non_trigger; +}; + +/* Tunnel classification scheme */ +enum tunnel_clss { + TUNNEL_CLSS_MAC_VLAN = 0, + TUNNEL_CLSS_MAC_VNI, + TUNNEL_CLSS_INNER_MAC_VLAN, + TUNNEL_CLSS_INNER_MAC_VNI, + TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE, + MAX_TUNNEL_CLSS }; +/* Ustorm non-triggering VF zone */ struct ustorm_non_trigger_vf_zone { struct eth_ustorm_per_queue_stat eth_queue_stat; struct regpair vf_pf_msg_addr; }; +/* Ustorm triggering VF zone */ struct ustorm_trigger_vf_zone { u8 vf_pf_msg_valid; u8 reserved[7]; }; +/* Ustorm VF zone */ struct ustorm_vf_zone { struct ustorm_non_trigger_vf_zone non_trigger; struct ustorm_trigger_vf_zone trigger; }; +/* VF-PF channel data */ +struct vf_pf_channel_data { + __le32 ready; + u8 valid; + u8 reserved0; + __le16 reserved1; +}; + +/* Ramrod data for VF start ramrod */ struct vf_start_ramrod_data { u8 vf_id; u8 enable_flr_ack; __le16 opaque_fid; u8 personality; - u8 reserved[3]; + u8 reserved[7]; + struct hsi_fp_ver_struct hsi_fp_ver; + }; +/* Ramrod data for VF stop ramrod */ struct vf_stop_ramrod_data { u8 vf_id; u8 reserved0; @@ -765,94 +870,474 @@ struct vf_stop_ramrod_data { __le32 reserved2; }; +/* Attentions status block */ struct atten_status_block { - __le32 atten_bits; - __le32
atten_ack; + __le16 reserved0; + __le16 sb_index; + __le32 reserved1; +}; + +enum command_type_bit { + IGU_COMMAND_TYPE_NOP = 0, + IGU_COMMAND_TYPE_SET = 1, + MAX_COMMAND_TYPE_BIT +}; + +/* DMAE command */ +struct dmae_cmd { + __le32 opcode; +#define DMAE_CMD_SRC_MASK 0x1 +#define DMAE_CMD_SRC_SHIFT 0 +#define DMAE_CMD_DST_MASK 0x3 +#define DMAE_CMD_DST_SHIFT 1 +#define DMAE_CMD_C_DST_MASK 0x1 +#define DMAE_CMD_C_DST_SHIFT 3 +#define DMAE_CMD_CRC_RESET_MASK 0x1 +#define DMAE_CMD_CRC_RESET_SHIFT 4 +#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1 +#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5 +#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1 +#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6 +#define DMAE_CMD_COMP_FUNC_MASK 0x1 +#define DMAE_CMD_COMP_FUNC_SHIFT 7 +#define DMAE_CMD_COMP_WORD_EN_MASK 0x1 +#define DMAE_CMD_COMP_WORD_EN_SHIFT 8 +#define DMAE_CMD_COMP_CRC_EN_MASK 0x1 +#define DMAE_CMD_COMP_CRC_EN_SHIFT 9 +#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7 +#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10 +#define DMAE_CMD_RESERVED1_MASK 0x1 +#define DMAE_CMD_RESERVED1_SHIFT 13 +#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3 +#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14 +#define DMAE_CMD_ERR_HANDLING_MASK 0x3 +#define DMAE_CMD_ERR_HANDLING_SHIFT 16 +#define DMAE_CMD_PORT_ID_MASK 0x3 +#define DMAE_CMD_PORT_ID_SHIFT 18 +#define DMAE_CMD_SRC_PF_ID_MASK 0xF +#define DMAE_CMD_SRC_PF_ID_SHIFT 20 +#define DMAE_CMD_DST_PF_ID_MASK 0xF +#define DMAE_CMD_DST_PF_ID_SHIFT 24 +#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1 +#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28 +#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1 +#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29 +#define DMAE_CMD_RESERVED2_MASK 0x3 +#define DMAE_CMD_RESERVED2_SHIFT 30 + __le32 src_addr_lo; + __le32 src_addr_hi; + __le32 dst_addr_lo; + __le32 dst_addr_hi; + __le16 length_dw; + __le16 opcode_b; +#define DMAE_CMD_SRC_VF_ID_MASK 0xFF +#define DMAE_CMD_SRC_VF_ID_SHIFT 0 +#define DMAE_CMD_DST_VF_ID_MASK 0xFF +#define DMAE_CMD_DST_VF_ID_SHIFT 8 + __le32 comp_addr_lo; + __le32 comp_addr_hi; + __le32 comp_val; + __le32 crc32; + __le32 crc_32_c; + __le16 crc16; + __le16 crc16_c; + __le16 crc10; + __le16 reserved; + __le16 xsum16; + __le16 xsum8; +}; + +enum dmae_cmd_comp_crc_en_enum { + dmae_cmd_comp_crc_disabled, + dmae_cmd_comp_crc_enabled, + MAX_DMAE_CMD_COMP_CRC_EN_ENUM +}; + +enum dmae_cmd_comp_func_enum { + dmae_cmd_comp_func_to_src, + dmae_cmd_comp_func_to_dst, + MAX_DMAE_CMD_COMP_FUNC_ENUM +}; + +enum dmae_cmd_comp_word_en_enum { + dmae_cmd_comp_word_disabled, + dmae_cmd_comp_word_enabled, + MAX_DMAE_CMD_COMP_WORD_EN_ENUM +}; + +enum dmae_cmd_c_dst_enum { + dmae_cmd_c_dst_pcie, + dmae_cmd_c_dst_grc, + MAX_DMAE_CMD_C_DST_ENUM +}; + +enum dmae_cmd_dst_enum { + dmae_cmd_dst_none_0, + dmae_cmd_dst_pcie, + dmae_cmd_dst_grc, + dmae_cmd_dst_none_3, + MAX_DMAE_CMD_DST_ENUM +}; + +enum dmae_cmd_error_handling_enum { + dmae_cmd_error_handling_send_regular_comp, + dmae_cmd_error_handling_send_comp_with_err, + dmae_cmd_error_handling_dont_send_comp, + MAX_DMAE_CMD_ERROR_HANDLING_ENUM +}; + +enum dmae_cmd_src_enum { + dmae_cmd_src_pcie, + dmae_cmd_src_grc, + MAX_DMAE_CMD_SRC_ENUM +}; + +/* IGU cleanup command */ +struct igu_cleanup { + __le32 sb_id_and_flags; +#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF +#define IGU_CLEANUP_RESERVED0_SHIFT 0 +#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1 +#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27 +#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7 +#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28 +#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1 +#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31 + __le32 reserved1; 
+}; + +/* IGU firmware driver command */ +union igu_command { + struct igu_prod_cons_update prod_cons_update; + struct igu_cleanup cleanup; +}; + +/* IGU firmware driver command */ +struct igu_command_reg_ctrl { + __le16 opaque_fid; + __le16 igu_command_reg_ctrl_fields; +#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF +#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0 +#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7 +#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12 +#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1 +#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15 }; +/* IGU mapping line structure */ +struct igu_mapping_line { + __le32 igu_mapping_line_fields; +#define IGU_MAPPING_LINE_VALID_MASK 0x1 +#define IGU_MAPPING_LINE_VALID_SHIFT 0 +#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF +#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1 +#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF +#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9 +#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 +#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17 +#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F +#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18 +#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF +#define IGU_MAPPING_LINE_RESERVED_SHIFT 24 +}; + +/* IGU MSIX line structure */ +struct igu_msix_vector { + struct regpair address; + __le32 data; + __le32 msix_vector_fields; +#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1 +#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0 +#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF +#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1 +#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF +#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16 +#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF +#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24 +}; + +struct mstorm_core_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +/* per encapsulation type enabling flags */ +struct prs_reg_encapsulation_type_en { + u8 flags; +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1 +#define 
PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2 +#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3 +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5 +#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3 +#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6 +}; + +enum pxp_tph_st_hint { + TPH_ST_HINT_BIDIR, + TPH_ST_HINT_REQUESTER, + TPH_ST_HINT_TARGET, + TPH_ST_HINT_TARGET_PRIO, + MAX_PXP_TPH_ST_HINT +}; + +/* QM hardware structure of enable bypass credit mask */ +struct qm_rf_bypass_mask { + u8 flags; +#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0 +#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1 +#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1 +#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2 +#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3 +#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1 +#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4 +#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1 +#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5 +#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1 +#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6 +#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1 +#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7 +}; + +/* QM hardware structure of opportunistic credit mask */ +struct qm_rf_opportunistic_mask { + __le16 flags; +#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0 +#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1 +#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2 +#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3 +#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4 +#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5 +#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6 +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7 +#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8 +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9 +}; + +/* QM hardware structure of QM map memory */ +struct qm_rf_pq_map { + __le32 reg; +#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0 +#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF +#define QM_RF_PQ_MAP_RL_ID_SHIFT 1 +#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF +#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9 +#define QM_RF_PQ_MAP_VOQ_MASK 0x1F +#define QM_RF_PQ_MAP_VOQ_SHIFT 18 +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23 +#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25 +#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F +#define QM_RF_PQ_MAP_RESERVED_SHIFT 26 +}; + +/* Completion params for aggregated interrupt completion */ +struct sdm_agg_int_comp_params { + __le16 params; +#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F +#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0 +#define 
SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1 +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6 +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7 +}; + +/* SDM operation gen command (generate aggregative interrupt) */ +struct sdm_op_gen { + __le32 command; +#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF +#define SDM_OP_GEN_COMP_PARAM_SHIFT 0 +#define SDM_OP_GEN_COMP_TYPE_MASK 0xF +#define SDM_OP_GEN_COMP_TYPE_SHIFT 16 +#define SDM_OP_GEN_RESERVED_MASK 0xFFF +#define SDM_OP_GEN_RESERVED_SHIFT 20 +}; + +struct ystorm_core_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +/****************************************/ +/* Debug Tools HSI constants and macros */ +/****************************************/ + enum block_addr { - GRCBASE_GRC = 0x50000, - GRCBASE_MISCS = 0x9000, - GRCBASE_MISC = 0x8000, - GRCBASE_DBU = 0xa000, - GRCBASE_PGLUE_B = 0x2a8000, - GRCBASE_CNIG = 0x218000, - GRCBASE_CPMU = 0x30000, - GRCBASE_NCSI = 0x40000, - GRCBASE_OPTE = 0x53000, - GRCBASE_BMB = 0x540000, - GRCBASE_PCIE = 0x54000, - GRCBASE_MCP = 0xe00000, - GRCBASE_MCP2 = 0x52000, - GRCBASE_PSWHST = 0x2a0000, - GRCBASE_PSWHST2 = 0x29e000, - GRCBASE_PSWRD = 0x29c000, - GRCBASE_PSWRD2 = 0x29d000, - GRCBASE_PSWWR = 0x29a000, - GRCBASE_PSWWR2 = 0x29b000, - GRCBASE_PSWRQ = 0x280000, - GRCBASE_PSWRQ2 = 0x240000, - GRCBASE_PGLCS = 0x0, - GRCBASE_PTU = 0x560000, - GRCBASE_DMAE = 0xc000, - GRCBASE_TCM = 0x1180000, - GRCBASE_MCM = 0x1200000, - GRCBASE_UCM = 0x1280000, - GRCBASE_XCM = 0x1000000, - GRCBASE_YCM = 0x1080000, - GRCBASE_PCM = 0x1100000, - GRCBASE_QM = 0x2f0000, - GRCBASE_TM = 0x2c0000, - GRCBASE_DORQ = 0x100000, - GRCBASE_BRB = 0x340000, - GRCBASE_SRC = 0x238000, - GRCBASE_PRS = 0x1f0000, - GRCBASE_TSDM = 0xfb0000, - GRCBASE_MSDM = 0xfc0000, - GRCBASE_USDM = 0xfd0000, - GRCBASE_XSDM = 0xf80000, - GRCBASE_YSDM = 0xf90000, - GRCBASE_PSDM = 0xfa0000, - GRCBASE_TSEM = 0x1700000, - GRCBASE_MSEM = 0x1800000, - GRCBASE_USEM = 0x1900000, - GRCBASE_XSEM = 0x1400000, - GRCBASE_YSEM = 0x1500000, - GRCBASE_PSEM = 0x1600000, - GRCBASE_RSS = 0x238800, - 
GRCBASE_TMLD = 0x4d0000, - GRCBASE_MULD = 0x4e0000, - GRCBASE_YULD = 0x4c8000, - GRCBASE_XYLD = 0x4c0000, - GRCBASE_PRM = 0x230000, - GRCBASE_PBF_PB1 = 0xda0000, - GRCBASE_PBF_PB2 = 0xda4000, - GRCBASE_RPB = 0x23c000, - GRCBASE_BTB = 0xdb0000, - GRCBASE_PBF = 0xd80000, - GRCBASE_RDIF = 0x300000, - GRCBASE_TDIF = 0x310000, - GRCBASE_CDU = 0x580000, - GRCBASE_CCFC = 0x2e0000, - GRCBASE_TCFC = 0x2d0000, - GRCBASE_IGU = 0x180000, - GRCBASE_CAU = 0x1c0000, - GRCBASE_UMAC = 0x51000, - GRCBASE_XMAC = 0x210000, - GRCBASE_DBG = 0x10000, - GRCBASE_NIG = 0x500000, - GRCBASE_WOL = 0x600000, - GRCBASE_BMBN = 0x610000, - GRCBASE_IPC = 0x20000, - GRCBASE_NWM = 0x800000, - GRCBASE_NWS = 0x700000, - GRCBASE_MS = 0x6a0000, - GRCBASE_PHY_PCIE = 0x620000, - GRCBASE_MISC_AEU = 0x8000, - GRCBASE_BAR0_MAP = 0x1c00000, + GRCBASE_GRC = 0x50000, + GRCBASE_MISCS = 0x9000, + GRCBASE_MISC = 0x8000, + GRCBASE_DBU = 0xa000, + GRCBASE_PGLUE_B = 0x2a8000, + GRCBASE_CNIG = 0x218000, + GRCBASE_CPMU = 0x30000, + GRCBASE_NCSI = 0x40000, + GRCBASE_OPTE = 0x53000, + GRCBASE_BMB = 0x540000, + GRCBASE_PCIE = 0x54000, + GRCBASE_MCP = 0xe00000, + GRCBASE_MCP2 = 0x52000, + GRCBASE_PSWHST = 0x2a0000, + GRCBASE_PSWHST2 = 0x29e000, + GRCBASE_PSWRD = 0x29c000, + GRCBASE_PSWRD2 = 0x29d000, + GRCBASE_PSWWR = 0x29a000, + GRCBASE_PSWWR2 = 0x29b000, + GRCBASE_PSWRQ = 0x280000, + GRCBASE_PSWRQ2 = 0x240000, + GRCBASE_PGLCS = 0x0, + GRCBASE_DMAE = 0xc000, + GRCBASE_PTU = 0x560000, + GRCBASE_TCM = 0x1180000, + GRCBASE_MCM = 0x1200000, + GRCBASE_UCM = 0x1280000, + GRCBASE_XCM = 0x1000000, + GRCBASE_YCM = 0x1080000, + GRCBASE_PCM = 0x1100000, + GRCBASE_QM = 0x2f0000, + GRCBASE_TM = 0x2c0000, + GRCBASE_DORQ = 0x100000, + GRCBASE_BRB = 0x340000, + GRCBASE_SRC = 0x238000, + GRCBASE_PRS = 0x1f0000, + GRCBASE_TSDM = 0xfb0000, + GRCBASE_MSDM = 0xfc0000, + GRCBASE_USDM = 0xfd0000, + GRCBASE_XSDM = 0xf80000, + GRCBASE_YSDM = 0xf90000, + GRCBASE_PSDM = 0xfa0000, + GRCBASE_TSEM = 0x1700000, + GRCBASE_MSEM = 0x1800000, + GRCBASE_USEM = 0x1900000, + GRCBASE_XSEM = 0x1400000, + GRCBASE_YSEM = 0x1500000, + GRCBASE_PSEM = 0x1600000, + GRCBASE_RSS = 0x238800, + GRCBASE_TMLD = 0x4d0000, + GRCBASE_MULD = 0x4e0000, + GRCBASE_YULD = 0x4c8000, + GRCBASE_XYLD = 0x4c0000, + GRCBASE_PRM = 0x230000, + GRCBASE_PBF_PB1 = 0xda0000, + GRCBASE_PBF_PB2 = 0xda4000, + GRCBASE_RPB = 0x23c000, + GRCBASE_BTB = 0xdb0000, + GRCBASE_PBF = 0xd80000, + GRCBASE_RDIF = 0x300000, + GRCBASE_TDIF = 0x310000, + GRCBASE_CDU = 0x580000, + GRCBASE_CCFC = 0x2e0000, + GRCBASE_TCFC = 0x2d0000, + GRCBASE_IGU = 0x180000, + GRCBASE_CAU = 0x1c0000, + GRCBASE_UMAC = 0x51000, + GRCBASE_XMAC = 0x210000, + GRCBASE_DBG = 0x10000, + GRCBASE_NIG = 0x500000, + GRCBASE_WOL = 0x600000, + GRCBASE_BMBN = 0x610000, + GRCBASE_IPC = 0x20000, + GRCBASE_NWM = 0x800000, + GRCBASE_NWS = 0x700000, + GRCBASE_MS = 0x6a0000, + GRCBASE_PHY_PCIE = 0x620000, + GRCBASE_LED = 0x6b8000, + GRCBASE_MISC_AEU = 0x8000, + GRCBASE_BAR0_MAP = 0x1c00000, MAX_BLOCK_ADDR }; @@ -879,8 +1364,8 @@ enum block_id { BLOCK_PSWRQ, BLOCK_PSWRQ2, BLOCK_PGLCS, - BLOCK_PTU, BLOCK_DMAE, + BLOCK_PTU, BLOCK_TCM, BLOCK_MCM, BLOCK_UCM, @@ -934,141 +1419,216 @@ enum block_id { BLOCK_NWS, BLOCK_MS, BLOCK_PHY_PCIE, + BLOCK_LED, BLOCK_MISC_AEU, BLOCK_BAR0_MAP, MAX_BLOCK_ID }; -enum command_type_bit { - IGU_COMMAND_TYPE_NOP = 0, - IGU_COMMAND_TYPE_SET = 1, - MAX_COMMAND_TYPE_BIT +/* binary debug buffer types */ +enum bin_dbg_buffer_type { + BIN_BUF_DBG_MODE_TREE, + BIN_BUF_DBG_DUMP_REG, + BIN_BUF_DBG_DUMP_MEM, + BIN_BUF_DBG_IDLE_CHK_REGS, + 
BIN_BUF_DBG_IDLE_CHK_IMMS, + BIN_BUF_DBG_IDLE_CHK_RULES, + BIN_BUF_DBG_IDLE_CHK_PARSING_DATA, + BIN_BUF_DBG_ATTN_BLOCKS, + BIN_BUF_DBG_ATTN_REGS, + BIN_BUF_DBG_ATTN_INDEXES, + BIN_BUF_DBG_ATTN_NAME_OFFSETS, + BIN_BUF_DBG_PARSING_STRINGS, + MAX_BIN_DBG_BUFFER_TYPE }; -struct dmae_cmd { - __le32 opcode; -#define DMAE_CMD_SRC_MASK 0x1 -#define DMAE_CMD_SRC_SHIFT 0 -#define DMAE_CMD_DST_MASK 0x3 -#define DMAE_CMD_DST_SHIFT 1 -#define DMAE_CMD_C_DST_MASK 0x1 -#define DMAE_CMD_C_DST_SHIFT 3 -#define DMAE_CMD_CRC_RESET_MASK 0x1 -#define DMAE_CMD_CRC_RESET_SHIFT 4 -#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1 -#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5 -#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1 -#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6 -#define DMAE_CMD_COMP_FUNC_MASK 0x1 -#define DMAE_CMD_COMP_FUNC_SHIFT 7 -#define DMAE_CMD_COMP_WORD_EN_MASK 0x1 -#define DMAE_CMD_COMP_WORD_EN_SHIFT 8 -#define DMAE_CMD_COMP_CRC_EN_MASK 0x1 -#define DMAE_CMD_COMP_CRC_EN_SHIFT 9 -#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7 -#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10 -#define DMAE_CMD_RESERVED1_MASK 0x1 -#define DMAE_CMD_RESERVED1_SHIFT 13 -#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3 -#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14 -#define DMAE_CMD_ERR_HANDLING_MASK 0x3 -#define DMAE_CMD_ERR_HANDLING_SHIFT 16 -#define DMAE_CMD_PORT_ID_MASK 0x3 -#define DMAE_CMD_PORT_ID_SHIFT 18 -#define DMAE_CMD_SRC_PF_ID_MASK 0xF -#define DMAE_CMD_SRC_PF_ID_SHIFT 20 -#define DMAE_CMD_DST_PF_ID_MASK 0xF -#define DMAE_CMD_DST_PF_ID_SHIFT 24 -#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1 -#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28 -#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1 -#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29 -#define DMAE_CMD_RESERVED2_MASK 0x3 -#define DMAE_CMD_RESERVED2_SHIFT 30 - __le32 src_addr_lo; - __le32 src_addr_hi; - __le32 dst_addr_lo; - __le32 dst_addr_hi; - __le16 length /* Length in DW */; - __le16 opcode_b; -#define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */ -#define DMAE_CMD_SRC_VF_ID_SHIFT 0 -#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */ -#define DMAE_CMD_DST_VF_ID_SHIFT 8 - __le32 comp_addr_lo /* PCIe completion address low or grc address */; - __le32 comp_addr_hi; - __le32 comp_val /* Value to write to copmletion address */; - __le32 crc32 /* crc16 result */; - __le32 crc_32_c /* crc32_c result */; - __le16 crc16 /* crc16 result */; - __le16 crc16_c /* crc16_c result */; - __le16 crc10 /* crc_t10 result */; - __le16 reserved; - __le16 xsum16 /* checksum16 result */; - __le16 xsum8 /* checksum8 result */; +/* Chip IDs */ +enum chip_ids { + CHIP_RESERVED, + CHIP_BB_B0, + CHIP_RESERVED2, + MAX_CHIP_IDS }; -struct igu_cleanup { - __le32 sb_id_and_flags; -#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF -#define IGU_CLEANUP_RESERVED0_SHIFT 0 -#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1 /* cleanup clear - 0, set - 1 */ -#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27 -#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7 -#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28 -#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1 -#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31 - __le32 reserved1; +/* Attention bit mapping */ +struct dbg_attn_bit_mapping { + __le16 data; +#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF +#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0 +#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1 +#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15 }; -union igu_command { - struct igu_prod_cons_update prod_cons_update; - struct igu_cleanup cleanup; +/* Attention block per-type data */ +struct dbg_attn_block_type_data { + __le16 names_offset; + __le16 
reserved1; + u8 num_regs; + u8 reserved2; + __le16 regs_offset; }; -struct igu_command_reg_ctrl { - __le16 opaque_fid; - __le16 igu_command_reg_ctrl_fields; -#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF -#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0 -#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7 -#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12 -#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1 -#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15 +/* Block attentions */ +struct dbg_attn_block { + struct dbg_attn_block_type_data per_type_data[2]; }; -struct igu_mapping_line { - __le32 igu_mapping_line_fields; -#define IGU_MAPPING_LINE_VALID_MASK 0x1 -#define IGU_MAPPING_LINE_VALID_SHIFT 0 -#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF -#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1 -#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF -#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9 -#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */ -#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17 -#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F -#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18 -#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF -#define IGU_MAPPING_LINE_RESERVED_SHIFT 24 +/* Attention register result */ +struct dbg_attn_reg_result { + __le32 data; +#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF +#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0 +#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK 0xFF +#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24 + __le16 attn_idx_offset; + __le16 reserved; + __le32 sts_val; + __le32 mask_val; +}; + +/* Attention block result */ +struct dbg_attn_block_result { + u8 block_id; + u8 data; +#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3 +#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0 +#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F +#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2 + __le16 names_offset; + struct dbg_attn_reg_result reg_results[15]; +}; + +/* mode header */ +struct dbg_mode_hdr { + __le16 data; +#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1 +#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0 +#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF +#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1 +}; + +/* Attention register */ +struct dbg_attn_reg { + struct dbg_mode_hdr mode; + __le16 attn_idx_offset; + __le32 data; +#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF +#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0 +#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF +#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT 24 + __le32 sts_clr_address; + __le32 mask_address; +}; + +/* attention types */ +enum dbg_attn_type { + ATTN_TYPE_INTERRUPT, + ATTN_TYPE_PARITY, + MAX_DBG_ATTN_TYPE +}; + +/* Debug status codes */ +enum dbg_status { + DBG_STATUS_OK, + DBG_STATUS_APP_VERSION_NOT_SET, + DBG_STATUS_UNSUPPORTED_APP_VERSION, + DBG_STATUS_DBG_BLOCK_NOT_RESET, + DBG_STATUS_INVALID_ARGS, + DBG_STATUS_OUTPUT_ALREADY_SET, + DBG_STATUS_INVALID_PCI_BUF_SIZE, + DBG_STATUS_PCI_BUF_ALLOC_FAILED, + DBG_STATUS_PCI_BUF_NOT_ALLOCATED, + DBG_STATUS_TOO_MANY_INPUTS, + DBG_STATUS_INPUT_OVERLAP, + DBG_STATUS_HW_ONLY_RECORDING, + DBG_STATUS_STORM_ALREADY_ENABLED, + DBG_STATUS_STORM_NOT_ENABLED, + DBG_STATUS_BLOCK_ALREADY_ENABLED, + DBG_STATUS_BLOCK_NOT_ENABLED, + DBG_STATUS_NO_INPUT_ENABLED, + DBG_STATUS_NO_FILTER_TRIGGER_64B, + DBG_STATUS_FILTER_ALREADY_ENABLED, + DBG_STATUS_TRIGGER_ALREADY_ENABLED, + DBG_STATUS_TRIGGER_NOT_ENABLED, + DBG_STATUS_CANT_ADD_CONSTRAINT, + DBG_STATUS_TOO_MANY_TRIGGER_STATES, + DBG_STATUS_TOO_MANY_CONSTRAINTS, + DBG_STATUS_RECORDING_NOT_STARTED, + DBG_STATUS_DATA_DIDNT_TRIGGER, + 
DBG_STATUS_NO_DATA_RECORDED, + DBG_STATUS_DUMP_BUF_TOO_SMALL, + DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED, + DBG_STATUS_UNKNOWN_CHIP, + DBG_STATUS_VIRT_MEM_ALLOC_FAILED, + DBG_STATUS_BLOCK_IN_RESET, + DBG_STATUS_INVALID_TRACE_SIGNATURE, + DBG_STATUS_INVALID_NVRAM_BUNDLE, + DBG_STATUS_NVRAM_GET_IMAGE_FAILED, + DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE, + DBG_STATUS_NVRAM_READ_FAILED, + DBG_STATUS_IDLE_CHK_PARSE_FAILED, + DBG_STATUS_MCP_TRACE_BAD_DATA, + DBG_STATUS_MCP_TRACE_NO_META, + DBG_STATUS_MCP_COULD_NOT_HALT, + DBG_STATUS_MCP_COULD_NOT_RESUME, + DBG_STATUS_DMAE_FAILED, + DBG_STATUS_SEMI_FIFO_NOT_EMPTY, + DBG_STATUS_IGU_FIFO_BAD_DATA, + DBG_STATUS_MCP_COULD_NOT_MASK_PRTY, + DBG_STATUS_FW_ASSERTS_PARSE_FAILED, + DBG_STATUS_REG_FIFO_BAD_DATA, + DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA, + DBG_STATUS_DBG_ARRAY_NOT_SET, + MAX_DBG_STATUS }; -struct igu_msix_vector { - struct regpair address; - __le32 data; - __le32 msix_vector_fields; -#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1 -#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0 -#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF -#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1 -#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF -#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16 -#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF -#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24 +/********************************/ +/* HSI Init Functions constants */ +/********************************/ + +/* Number of VLAN priorities */ +#define NUM_OF_VLAN_PRIORITIES 8 + +/* QM per-port init parameters */ +struct init_qm_port_params { + u8 active; + u8 active_phys_tcs; + __le16 num_pbf_cmd_lines; + __le16 num_btb_blocks; + __le16 reserved; }; +/* QM per-PQ init parameters */ +struct init_qm_pq_params { + u8 vport_id; + u8 tc_id; + u8 wrr_group; + u8 rl_valid; +}; + +/* QM per-vport init parameters */ +struct init_qm_vport_params { + __le32 vport_rl; + __le16 vport_wfq; + __le16 first_tx_pq_id[NUM_OF_TCS]; +}; + +/**************************************/ +/* Init Tool HSI constants and macros */ +/**************************************/ + +/* Width of GRC address in bits (addresses are specified in dwords) */ +#define GRC_ADDR_BITS 23 +#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1) + +/* indicates an init that should be applied to any phase ID */ +#define ANY_PHASE_ID 0xffff + +/* Max size in dwords of a zipped array */ +#define MAX_ZIPPED_SIZE 8192 + enum init_modes { - MODE_BB_A0, + MODE_RESERVED, MODE_BB_B0, MODE_RESERVED2, MODE_ASIC, @@ -1083,7 +1643,8 @@ enum init_modes { MODE_PORTS_PER_ENG_2, MODE_PORTS_PER_ENG_4, MODE_100G, - MODE_EAGLE_ENG1_WORKAROUND, + MODE_40G, + MODE_RESERVED7, MAX_INIT_MODES }; @@ -1096,484 +1657,302 @@ enum init_phases { MAX_INIT_PHASES }; -/* per encapsulation type enabling flags */ -struct prs_reg_encapsulation_type_en { - u8 flags; -#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1 -#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0 -#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1 -#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1 -#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1 -#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2 -#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1 -#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3 -#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1 -#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4 -#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1 -#define 
PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5 -#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3 -#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6 -}; - -enum pxp_tph_st_hint { - TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */, - TPH_ST_HINT_REQUESTER /* Read/Write access by Device */, - TPH_ST_HINT_TARGET, - TPH_ST_HINT_TARGET_PRIO, - MAX_PXP_TPH_ST_HINT -}; - -/* QM hardware structure of enable bypass credit mask */ -struct qm_rf_bypass_mask { - u8 flags; -#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1 -#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0 -#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1 -#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1 -#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1 -#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2 -#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1 -#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3 -#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1 -#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4 -#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1 -#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5 -#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1 -#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6 -#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1 -#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7 -}; - -/* QM hardware structure of opportunistic credit mask */ -struct qm_rf_opportunistic_mask { - __le16 flags; -#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1 -#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0 -#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1 -#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1 -#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1 -#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2 -#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1 -#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3 -#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1 -#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4 -#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1 -#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5 -#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1 -#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6 -#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1 -#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7 -#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1 -#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8 -#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F -#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9 -}; - -/* QM hardware structure of QM map memory */ -struct qm_rf_pq_map { - u32 reg; -#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */ -#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0 -#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */ -#define QM_RF_PQ_MAP_RL_ID_SHIFT 1 -#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF -#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9 -#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */ -#define QM_RF_PQ_MAP_VOQ_SHIFT 18 -#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */ -#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23 -#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */ -#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25 -#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F -#define QM_RF_PQ_MAP_RESERVED_SHIFT 26 -}; - -/* Completion params for aggregated interrupt completion */ -struct sdm_agg_int_comp_params { - __le16 params; -#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F -#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0 -#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1 -#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6 -#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF -#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7 +enum init_split_types { + SPLIT_TYPE_NONE, + 
SPLIT_TYPE_PORT, + SPLIT_TYPE_PF, + SPLIT_TYPE_PORT_PF, + SPLIT_TYPE_VF, + MAX_INIT_SPLIT_TYPES }; -/* SDM operation gen command (generate aggregative interrupt) */ -struct sdm_op_gen { - __le32 command; -#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF /* completion parameters 0-15 */ -#define SDM_OP_GEN_COMP_PARAM_SHIFT 0 -#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */ -#define SDM_OP_GEN_COMP_TYPE_SHIFT 16 -#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */ -#define SDM_OP_GEN_RESERVED_SHIFT 20 -}; - -/*********************************** Init ************************************/ - -/* Width of GRC address in bits (addresses are specified in dwords) */ -#define GRC_ADDR_BITS 23 -#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1) - -/* indicates an init that should be applied to any phase ID */ -#define ANY_PHASE_ID 0xffff - -/* init pattern size in bytes */ -#define INIT_PATTERN_SIZE_BITS 4 -#define MAX_INIT_PATTERN_SIZE BIT(INIT_PATTERN_SIZE_BITS) - -/* Max size in dwords of a zipped array */ -#define MAX_ZIPPED_SIZE 8192 - -/* Global PXP window */ -#define NUM_OF_PXP_WIN 19 -#define PXP_WIN_DWORD_SIZE_BITS 10 -#define PXP_WIN_DWORD_SIZE BIT(PXP_WIN_DWORD_SIZE_BITS) -#define PXP_WIN_BYTE_SIZE_BITS (PXP_WIN_DWORD_SIZE_BITS + 2) -#define PXP_WIN_BYTE_SIZE (PXP_WIN_DWORD_SIZE * 4) - -/********************************* GRC Dump **********************************/ - -/* width of GRC dump register sequence length in bits */ -#define DUMP_SEQ_LEN_BITS 8 -#define DUMP_SEQ_LEN_MAX_VAL ((1 << DUMP_SEQ_LEN_BITS) - 1) - -/* width of GRC dump memory length in bits */ -#define DUMP_MEM_LEN_BITS 18 -#define DUMP_MEM_LEN_MAX_VAL ((1 << DUMP_MEM_LEN_BITS) - 1) - -/* width of register type ID in bits */ -#define REG_TYPE_ID_BITS 6 -#define REG_TYPE_ID_MAX_VAL ((1 << REG_TYPE_ID_BITS) - 1) - -/* width of block ID in bits */ -#define BLOCK_ID_BITS 8 -#define BLOCK_ID_MAX_VAL ((1 << BLOCK_ID_BITS) - 1) - -/******************************** Idle Check *********************************/ - -/* max number of idle check predicate immediates */ -#define MAX_IDLE_CHK_PRED_IMM 3 - -/* max number of idle check argument registers */ -#define MAX_IDLE_CHK_READ_REGS 3 - -/* max number of idle check loops */ -#define MAX_IDLE_CHK_LOOPS 0x10000 - -/* max idle check address increment */ -#define MAX_IDLE_CHK_INCREMENT 0x10000 - -/* inicates an undefined idle check line index */ -#define IDLE_CHK_UNDEFINED_LINE_IDX 0xffffff - -/* max number of register values following the idle check header */ -#define IDLE_CHK_MAX_DUMP_REGS 2 - -/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */ -#define IDLE_CHK_QM_RD_WR_PTR 0 -#define IDLE_CHK_QM_RD_WR_BANK 1 - -/**************************************/ -/* HSI Functions constants and macros */ -/**************************************/ - -/* Number of VLAN priorities */ -#define NUM_OF_VLAN_PRIORITIES 8 - -/* the MCP Trace meta data signautre is duplicated in the perl script that - * generats the NVRAM images. 
- */ -#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa - /* Binary buffer header */ struct bin_buffer_hdr { - u32 offset; - u32 length /* buffer length in bytes */; -}; - -/* binary buffer types */ -enum bin_buffer_type { - BIN_BUF_FW_VER_INFO /* fw_ver_info struct */, - BIN_BUF_INIT_CMD /* init commands */, - BIN_BUF_INIT_VAL /* init data */, - BIN_BUF_INIT_MODE_TREE /* init modes tree */, - BIN_BUF_IRO /* internal RAM offsets array */, - MAX_BIN_BUFFER_TYPE + __le32 offset; + __le32 length; }; -/* Chip IDs */ -enum chip_ids { - CHIP_BB_A0 /* BB A0 chip ID */, - CHIP_BB_B0 /* BB B0 chip ID */, - CHIP_K2 /* AH chip ID */, - MAX_CHIP_IDS +/* binary init buffer types */ +enum bin_init_buffer_type { + BIN_BUF_FW_VER_INFO, + BIN_BUF_INIT_CMD, + BIN_BUF_INIT_VAL, + BIN_BUF_INIT_MODE_TREE, + BIN_BUF_IRO, + MAX_BIN_INIT_BUFFER_TYPE }; +/* init array header: raw */ struct init_array_raw_hdr { __le32 data; -#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF -#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0 -#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF /* init array params */ -#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4 +#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF +#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4 }; +/* init array header: standard */ struct init_array_standard_hdr { __le32 data; -#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF -#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0 -#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF -#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4 +#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF +#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4 }; +/* init array header: zipped */ struct init_array_zipped_hdr { __le32 data; -#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF -#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0 -#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF -#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4 +#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF +#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4 }; +/* init array header: pattern */ struct init_array_pattern_hdr { __le32 data; -#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF -#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0 -#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF -#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4 -#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF -#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8 +#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF +#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4 +#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF +#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8 }; +/* init array header union */ union init_array_hdr { - struct init_array_raw_hdr raw /* raw init array header */; - struct init_array_standard_hdr standard; - struct init_array_zipped_hdr zipped /* zipped init array header */; - struct init_array_pattern_hdr pattern /* pattern init array header */; + struct init_array_raw_hdr raw; + struct init_array_standard_hdr standard; + struct init_array_zipped_hdr zipped; + struct init_array_pattern_hdr pattern; }; +/* init array types */ enum init_array_types { - INIT_ARR_STANDARD /* standard init array */, - INIT_ARR_ZIPPED /* zipped init array */, - INIT_ARR_PATTERN /* a 
repeated pattern */, + INIT_ARR_STANDARD, + INIT_ARR_ZIPPED, + INIT_ARR_PATTERN, MAX_INIT_ARRAY_TYPES }; /* init operation: callback */ struct init_callback_op { - __le32 op_data; -#define INIT_CALLBACK_OP_OP_MASK 0xF -#define INIT_CALLBACK_OP_OP_SHIFT 0 -#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF -#define INIT_CALLBACK_OP_RESERVED_SHIFT 4 - __le16 callback_id /* Callback ID */; - __le16 block_id /* Blocks ID */; + __le32 op_data; +#define INIT_CALLBACK_OP_OP_MASK 0xF +#define INIT_CALLBACK_OP_OP_SHIFT 0 +#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF +#define INIT_CALLBACK_OP_RESERVED_SHIFT 4 + __le16 callback_id; + __le16 block_id; }; /* init operation: delay */ struct init_delay_op { - __le32 op_data; -#define INIT_DELAY_OP_OP_MASK 0xF -#define INIT_DELAY_OP_OP_SHIFT 0 -#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF -#define INIT_DELAY_OP_RESERVED_SHIFT 4 - __le32 delay /* delay in us */; + __le32 op_data; +#define INIT_DELAY_OP_OP_MASK 0xF +#define INIT_DELAY_OP_OP_SHIFT 0 +#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF +#define INIT_DELAY_OP_RESERVED_SHIFT 4 + __le32 delay; }; /* init operation: if_mode */ struct init_if_mode_op { __le32 op_data; -#define INIT_IF_MODE_OP_OP_MASK 0xF -#define INIT_IF_MODE_OP_OP_SHIFT 0 -#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF -#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4 -#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF -#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16 - __le16 reserved2; - __le16 modes_buf_offset; +#define INIT_IF_MODE_OP_OP_MASK 0xF +#define INIT_IF_MODE_OP_OP_SHIFT 0 +#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF +#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4 +#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF +#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16 + __le16 reserved2; + __le16 modes_buf_offset; }; -/* init operation: if_phase */ +/* init operation: if_phase */ struct init_if_phase_op { __le32 op_data; -#define INIT_IF_PHASE_OP_OP_MASK 0xF -#define INIT_IF_PHASE_OP_OP_SHIFT 0 -#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1 -#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4 -#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF -#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5 -#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF -#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16 +#define INIT_IF_PHASE_OP_OP_MASK 0xF +#define INIT_IF_PHASE_OP_OP_SHIFT 0 +#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1 +#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4 +#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF +#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5 +#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF +#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16 __le32 phase_data; -#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */ -#define INIT_IF_PHASE_OP_PHASE_SHIFT 0 -#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF -#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8 -#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF /* Init phase ID */ -#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16 +#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF +#define INIT_IF_PHASE_OP_PHASE_SHIFT 0 +#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF +#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8 +#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF +#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16 }; /* init mode operators */ enum init_mode_ops { - INIT_MODE_OP_NOT /* init mode not operator */, - INIT_MODE_OP_OR /* init mode or operator */, - INIT_MODE_OP_AND /* init mode and operator */, + INIT_MODE_OP_NOT, + INIT_MODE_OP_OR, + INIT_MODE_OP_AND, MAX_INIT_MODE_OPS }; /* init operation: raw */ struct init_raw_op { - __le32 op_data; -#define INIT_RAW_OP_OP_MASK 0xF 
-#define INIT_RAW_OP_OP_SHIFT 0 -#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */ -#define INIT_RAW_OP_PARAM1_SHIFT 4 - __le32 param2 /* Init param 2 */; + __le32 op_data; +#define INIT_RAW_OP_OP_MASK 0xF +#define INIT_RAW_OP_OP_SHIFT 0 +#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF +#define INIT_RAW_OP_PARAM1_SHIFT 4 + __le32 param2; }; /* init array params */ struct init_op_array_params { - __le16 size /* array size in dwords */; - __le16 offset /* array start offset in dwords */; + __le16 size; + __le16 offset; }; /* Write init operation arguments */ union init_write_args { - __le32 inline_val; - __le32 zeros_count; - __le32 array_offset; - struct init_op_array_params runtime; + __le32 inline_val; + __le32 zeros_count; + __le32 array_offset; + struct init_op_array_params runtime; }; /* init operation: write */ struct init_write_op { __le32 data; -#define INIT_WRITE_OP_OP_MASK 0xF -#define INIT_WRITE_OP_OP_SHIFT 0 -#define INIT_WRITE_OP_SOURCE_MASK 0x7 -#define INIT_WRITE_OP_SOURCE_SHIFT 4 -#define INIT_WRITE_OP_RESERVED_MASK 0x1 -#define INIT_WRITE_OP_RESERVED_SHIFT 7 -#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1 -#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8 -#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF -#define INIT_WRITE_OP_ADDRESS_SHIFT 9 - union init_write_args args /* Write init operation arguments */; +#define INIT_WRITE_OP_OP_MASK 0xF +#define INIT_WRITE_OP_OP_SHIFT 0 +#define INIT_WRITE_OP_SOURCE_MASK 0x7 +#define INIT_WRITE_OP_SOURCE_SHIFT 4 +#define INIT_WRITE_OP_RESERVED_MASK 0x1 +#define INIT_WRITE_OP_RESERVED_SHIFT 7 +#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1 +#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8 +#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF +#define INIT_WRITE_OP_ADDRESS_SHIFT 9 + union init_write_args args; }; /* init operation: read */ struct init_read_op { __le32 op_data; -#define INIT_READ_OP_OP_MASK 0xF -#define INIT_READ_OP_OP_SHIFT 0 -#define INIT_READ_OP_POLL_TYPE_MASK 0xF -#define INIT_READ_OP_POLL_TYPE_SHIFT 4 -#define INIT_READ_OP_RESERVED_MASK 0x1 -#define INIT_READ_OP_RESERVED_SHIFT 8 -#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF -#define INIT_READ_OP_ADDRESS_SHIFT 9 +#define INIT_READ_OP_OP_MASK 0xF +#define INIT_READ_OP_OP_SHIFT 0 +#define INIT_READ_OP_POLL_TYPE_MASK 0xF +#define INIT_READ_OP_POLL_TYPE_SHIFT 4 +#define INIT_READ_OP_RESERVED_MASK 0x1 +#define INIT_READ_OP_RESERVED_SHIFT 8 +#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF +#define INIT_READ_OP_ADDRESS_SHIFT 9 __le32 expected_val; + }; /* Init operations union */ union init_op { - struct init_raw_op raw /* raw init operation */; - struct init_write_op write /* write init operation */; - struct init_read_op read /* read init operation */; - struct init_if_mode_op if_mode /* if_mode init operation */; - struct init_if_phase_op if_phase /* if_phase init operation */; - struct init_callback_op callback /* callback init operation */; - struct init_delay_op delay /* delay init operation */; + struct init_raw_op raw; + struct init_write_op write; + struct init_read_op read; + struct init_if_mode_op if_mode; + struct init_if_phase_op if_phase; + struct init_callback_op callback; + struct init_delay_op delay; }; /* Init command operation types */ enum init_op_types { - INIT_OP_READ /* GRC read init command */, - INIT_OP_WRITE /* GRC write init command */, + INIT_OP_READ, + INIT_OP_WRITE, INIT_OP_IF_MODE, INIT_OP_IF_PHASE, - INIT_OP_DELAY /* delay init command */, - INIT_OP_CALLBACK /* callback init command */, + INIT_OP_DELAY, + INIT_OP_CALLBACK, MAX_INIT_OP_TYPES }; +/* init polling types */ enum init_poll_types { 
- INIT_POLL_NONE /* No polling */,
- INIT_POLL_EQ /* init value is included in the init command */,
- INIT_POLL_OR /* init value is all zeros */,
- INIT_POLL_AND /* init value is an array of values */,
+ INIT_POLL_NONE,
+ INIT_POLL_EQ,
+ INIT_POLL_OR,
+ INIT_POLL_AND,
 MAX_INIT_POLL_TYPES
};

/* init source types */
enum init_source_types {
- INIT_SRC_INLINE /* init value is included in the init command */,
- INIT_SRC_ZEROS /* init value is all zeros */,
- INIT_SRC_ARRAY /* init value is an array of values */,
- INIT_SRC_RUNTIME /* init value is provided during runtime */,
+ INIT_SRC_INLINE,
+ INIT_SRC_ZEROS,
+ INIT_SRC_ARRAY,
+ INIT_SRC_RUNTIME,
 MAX_INIT_SOURCE_TYPES
};

/* Internal RAM Offsets macro data */
struct iro {
- u32 base /* RAM field offset */;
- u16 m1 /* multiplier 1 */;
- u16 m2 /* multiplier 2 */;
- u16 m3 /* multiplier 3 */;
- u16 size /* RAM field size */;
+ __le32 base;
+ __le16 m1;
+ __le16 m2;
+ __le16 m3;
+ __le16 size;
};

-/* QM per-port init parameters */
-struct init_qm_port_params {
- u8 active /* Indicates if this port is active */;
- u8 num_active_phys_tcs;
- u16 num_pbf_cmd_lines;
- u16 num_btb_blocks;
- __le16 reserved;
-};
-
-/* QM per-PQ init parameters */
-struct init_qm_pq_params {
- u8 vport_id /* VPORT ID */;
- u8 tc_id /* TC ID */;
- u8 wrr_group /* WRR group */;
- u8 reserved;
-};
+/**
+ * @brief qed_dbg_print_attn - Prints attention register values in the specified results struct.
+ *
+ * @param p_hwfn
+ * @param results - Pointer to the attention read results
+ *
+ * @return error if the version wasn't set; otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);

-/* QM per-vport init parameters */
-struct init_qm_vport_params {
- u32 vport_rl;
- u16 vport_wfq;
- u16 first_tx_pq_id[NUM_OF_TCS];
-};
+#define MAX_NAME_LEN 16

 /* Win 2 */
 #define GTT_BAR0_MAP_REG_IGU_CMD \
 0x00f000UL
+
 /* Win 3 */
 #define GTT_BAR0_MAP_REG_TSDM_RAM \
 0x010000UL
+
 /* Win 4 */
 #define GTT_BAR0_MAP_REG_MSDM_RAM \
 0x011000UL
+
 /* Win 5 */
 #define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
 0x012000UL
+
 /* Win 6 */
 #define GTT_BAR0_MAP_REG_USDM_RAM \
 0x013000UL
+
 /* Win 7 */
 #define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
 0x014000UL
+
 /* Win 8 */
 #define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
 0x015000UL
+
 /* Win 9 */
 #define GTT_BAR0_MAP_REG_XSDM_RAM \
 0x016000UL
+
 /* Win 10 */
 #define GTT_BAR0_MAP_REG_YSDM_RAM \
 0x017000UL
+
 /* Win 11 */
 #define GTT_BAR0_MAP_REG_PSDM_RAM \
 0x018000UL
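Virtually every structure in this header flattens several hardware fields into one little-endian word and describes them with *_MASK/*_SHIFT pairs. The standalone C sketch below shows the usual access pattern; GET_FIELD()/SET_FIELD() here are illustrative stand-ins for the driver's own helpers, the field names are copied from struct qm_rf_pq_map above, and the host-endian uint32_t stands in for the __le32 that the driver would wrap with le32_to_cpu()/cpu_to_le32().

#include <stdint.h>
#include <stdio.h>

/* Field definitions copied from struct qm_rf_pq_map above. */
#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1
#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
#define QM_RF_PQ_MAP_VOQ_MASK 0x1F
#define QM_RF_PQ_MAP_VOQ_SHIFT 18

/* Pack a value into the field named by a MASK/SHIFT pair. */
#define SET_FIELD(value, name, flag) \
	((value) = ((value) & ~((uint32_t)name##_MASK << name##_SHIFT)) | \
		   (((uint32_t)(flag) & name##_MASK) << name##_SHIFT))

/* Extract the field named by a MASK/SHIFT pair. */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

int main(void)
{
	uint32_t reg = 0;

	SET_FIELD(reg, QM_RF_PQ_MAP_PQ_VALID, 1);
	SET_FIELD(reg, QM_RF_PQ_MAP_VP_PQ_ID, 42);
	SET_FIELD(reg, QM_RF_PQ_MAP_VOQ, 3);
	printf("reg = 0x%08x, voq = %u\n", reg,
	       (unsigned int)GET_FIELD(reg, QM_RF_PQ_MAP_VOQ));
	return 0;
}

The same pattern applies, unchanged, to every *_MASK/*_SHIFT pair in this file.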
@@ -1584,785 +1963,718 @@ struct init_qm_vport_params {
  * Returns the required host memory size in 4KB units.
  * Must be called before all QM init HSI functions.
  *
- * @param pf_id - physical function ID
- * @param num_pf_cids - number of connections used by this PF
- * @param num_vf_cids - number of connections used by VFs of this PF
- * @param num_tids - number of tasks used by this PF
- * @param num_pf_pqs - number of PQs used by this PF
- * @param num_vf_pqs - number of PQs used by VFs of this PF
+ * @param pf_id - physical function ID
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param num_pf_pqs - number of PQs used by this PF
+ * @param num_vf_pqs - number of PQs used by VFs of this PF
 *
 * @return The required host memory size in 4KB units.
 */
-u32 qed_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
- u32 num_vf_cids,
- u32 num_tids,
- u16 num_pf_pqs,
- u16 num_vf_pqs);
+u32 qed_qm_pf_mem_size(u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);

 struct qed_qm_common_rt_init_params {
- u8 max_ports_per_engine;
- u8 max_phys_tcs_per_port;
- bool pf_rl_en;
- bool pf_wfq_en;
- bool vport_rl_en;
- bool vport_wfq_en;
- struct init_qm_port_params *port_params;
+ u8 max_ports_per_engine;
+ u8 max_phys_tcs_per_port;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ struct init_qm_port_params *port_params;
 };

+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params);
+
+struct qed_qm_pf_rt_init_params {
+ u8 port_id;
+ u8 pf_id;
+ u8 max_phys_tcs_per_port;
+ bool is_first_pf;
+ u32 num_pf_cids;
+ u32 num_vf_cids;
+ u32 num_tids;
+ u16 start_pq;
+ u16 num_pf_pqs;
+ u16 num_vf_pqs;
+ u8 start_vport;
+ u8 num_vports;
+ u8 pf_wfq;
+ u32 pf_rl;
+ struct init_qm_pq_params *pq_params;
+ struct init_qm_vport_params *vport_params;
+};
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params);
+
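These parameter blocks drive the two-stage QM bring-up: qed_qm_common_rt_init() is called once for the engine phase with the common block, then qed_qm_pf_rt_init() once per PF with the per-PF block above. Below is a hedged, standalone sketch of filling the per-PF block; the structs are mirrored with stdint types so the sketch compiles on its own, NUM_OF_TCS = 8 is assumed from the HSI constants, and every value is illustrative rather than taken from real hardware.

#include <stdint.h>
#include <stdio.h>

#define NUM_OF_TCS 8 /* assumption: NUM_OF_PHYS_TCS + 1 pure-LB TC */

/* stdint mirrors of the HSI structs above, for a self-contained sketch */
struct init_qm_pq_params {
	uint8_t vport_id;
	uint8_t tc_id;
	uint8_t wrr_group;
	uint8_t rl_valid;
};

struct init_qm_vport_params {
	uint32_t vport_rl;
	uint16_t vport_wfq;
	uint16_t first_tx_pq_id[NUM_OF_TCS];
};

struct qed_qm_pf_rt_init_params {
	uint8_t port_id;
	uint8_t pf_id;
	uint8_t max_phys_tcs_per_port;
	int is_first_pf;
	uint32_t num_pf_cids;
	uint32_t num_vf_cids;
	uint32_t num_tids;
	uint16_t start_pq;
	uint16_t num_pf_pqs;
	uint16_t num_vf_pqs;
	uint8_t start_vport;
	uint8_t num_vports;
	uint8_t pf_wfq;
	uint32_t pf_rl;
	struct init_qm_pq_params *pq_params;
	struct init_qm_vport_params *vport_params;
};

int main(void)
{
	/* one PQ per TC on VPORT 0, no per-PQ rate limiter */
	struct init_qm_pq_params pqs[2] = {
		{ .vport_id = 0, .tc_id = 0, .wrr_group = 1, .rl_valid = 0 },
		{ .vport_id = 0, .tc_id = 1, .wrr_group = 1, .rl_valid = 0 },
	};
	struct init_qm_vport_params vports[1] = { { .vport_wfq = 1 } };
	struct qed_qm_pf_rt_init_params p = {
		.port_id = 0,
		.pf_id = 0,
		.max_phys_tcs_per_port = 4,
		.is_first_pf = 1,
		.num_pf_cids = 64,
		.start_pq = 0,
		.num_pf_pqs = 2,
		.num_vports = 1,
		.pf_rl = 0, /* illustrative: no PF rate limit requested */
		.pq_params = pqs,
		.vport_params = vports,
	};

	printf("PF%u: %u PF PQs starting at PQ %u\n",
	       (unsigned int)p.pf_id, (unsigned int)p.num_pf_pqs,
	       (unsigned int)p.start_pq);
	return 0;
}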
 /**
- * @brief qed_qm_common_rt_init - Prepare QM runtime init values for the
- * engine phase.
+ * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF
 *
 * @param p_hwfn
- * @param max_ports_per_engine - max number of ports per engine in HW
- * @param max_phys_tcs_per_port - max number of physical TCs per port in HW
- * @param pf_rl_en - enable per-PF rate limiters
- * @param pf_wfq_en - enable per-PF WFQ
- * @param vport_rl_en - enable per-VPORT rate limiters
- * @param vport_wfq_en - enable per-VPORT WFQ
- * @param port_params - array of size MAX_NUM_PORTS with
- * arameters for each port
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_wfq - WFQ weight. Must be non-zero.
 *
 * @return 0 on success, -1 on error.
 */
-int qed_qm_common_rt_init(
- struct qed_hwfn *p_hwfn,
- struct qed_qm_common_rt_init_params *p_params);
-
-struct qed_qm_pf_rt_init_params {
- u8 port_id;
- u8 pf_id;
- u8 max_phys_tcs_per_port;
- bool is_first_pf;
- u32 num_pf_cids;
- u32 num_vf_cids;
- u32 num_tids;
- u16 start_pq;
- u16 num_pf_pqs;
- u16 num_vf_pqs;
- u8 start_vport;
- u8 num_vports;
- u8 pf_wfq;
- u32 pf_rl;
- struct init_qm_pq_params *pq_params;
- struct init_qm_vport_params *vport_params;
-};
-
-int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_qm_pf_rt_init_params *p_params);
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq);

 /**
- * @brief qed_init_pf_rl Initializes the rate limit of the specified PF
+ * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
 *
 * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param pf_id - PF ID
- * @param pf_rl - rate limit in Mb/sec units
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_rl - rate limit in Mb/sec units
 *
 * @return 0 on success, -1 on error.
 */
-int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 pf_id,
- u32 pf_rl);
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl);

 /**
- * @brief qed_init_vport_rl Initializes the rate limit of the specified VPORT
+ * @brief qed_init_vport_wfq - Initializes the WFQ weight of the specified VPORT
 *
 * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param vport_id - VPORT ID
- * @param vport_rl - rate limit in Mb/sec units
+ * @param p_ptt - ptt window used for writing the registers
+ * @param first_tx_pq_id - An array containing the first Tx PQ ID associated
+ * with the VPORT for each TC. This array is filled by
+ * qed_qm_pf_rt_init
+ * @param vport_wfq - WFQ weight. Must be non-zero.
 *
 * @return 0 on success, -1 on error.
 */
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);

-int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 vport_id,
- u32 vport_rl);
+/**
+ * @brief qed_init_vport_rl - Initializes the rate limit of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param vport_id - VPORT ID
+ * @param vport_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl);

 /**
 * @brief qed_send_qm_stop_cmd Sends a stop command to the QM
 *
 * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
+ * @param p_ptt
 * @param is_release_cmd - true for release, false for stop.
- * @param is_tx_pq - true for Tx PQs, false for Other PQs.
- * @param start_pq - first PQ ID to stop
- * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ * @param is_tx_pq - true for Tx PQs, false for Other PQs.
+ * @param start_pq - first PQ ID to stop
+ * @param num_pqs - Number of PQs to stop, starting from start_pq.
 *
- * @return bool, true if successful, false if timeout occurred while waiting
- * for QM command done.
+ * @return bool, true if successful, false if timeout occurred while waiting for QM command done.
 */
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq, u16 start_pq, u16 num_pqs);

-bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- bool is_release_cmd,
- bool is_tx_pq,
- u16 start_pq,
- u16 num_pqs);
-
+/**
+ * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - vxlan destination udp port.
+ */
 void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u16 dest_port);
+ struct qed_ptt *p_ptt, u16 dest_port);
+
+/**
+ * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param vxlan_enable - vxlan enable flag.
+ */
 void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
 struct qed_ptt *p_ptt, bool vxlan_enable);
+
+/**
+ * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_gre_enable - eth GRE enable flag.
+ * @param ip_gre_enable - IP GRE enable flag.
+ */
 void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, bool eth_gre_enable,
- bool ip_gre_enable);
+ struct qed_ptt *p_ptt,
+ bool eth_gre_enable, bool ip_gre_enable);
+
+/**
+ * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - geneve destination udp port.
+ */
 void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
 struct qed_ptt *p_ptt, u16 dest_port);
+
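The storm RAM offset macros a few lines below, old and new alike, all expand to the same computation: an IRO table entry's base plus the index times a per-entry multiplier. The standalone sketch that follows shows that arithmetic; struct iro is mirrored host-endian (the driver stores it as __le32/__le16), and the sample entry is iro_arr[18] (Mstorm queue statistics) as defined later in this file.

#include <stdint.h>
#include <stdio.h>

/* Host-endian mirror of struct iro above. */
struct iro {
	uint32_t base; /* RAM field offset */
	uint16_t m1;   /* multiplier 1 */
	uint16_t m2;   /* multiplier 2 */
	uint16_t m3;   /* multiplier 3 */
	uint16_t size; /* RAM field size */
};

/* iro_arr[18] from this file: Mstorm queue statistics. */
static const struct iro mstorm_queue_stat = { 0x4c18, 0x80, 0x0, 0x0, 0x40 };

/* Equivalent of MSTORM_QUEUE_STAT_OFFSET(stat_counter_id):
 * IRO[18].base + (stat_counter_id) * IRO[18].m1
 */
static uint32_t mstorm_queue_stat_offset(uint32_t stat_counter_id)
{
	return mstorm_queue_stat.base +
	       stat_counter_id * mstorm_queue_stat.m1;
}

int main(void)
{
	/* offset and size of the stats block for counter 3 */
	printf("offset = 0x%x, size = 0x%x\n",
	       mstorm_queue_stat_offset(3),
	       (unsigned int)mstorm_queue_stat.size);
	return 0;
}

Entries with non-zero m2/m3 take additional indices in the same way, as in the two-index SCSI BDQ producer macros in the removed block below.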
+/**
+ * @brief qed_set_geneve_enable - enable or disable GENEVE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_geneve_enable - eth GENEVE enable flag.
+ * @param ip_geneve_enable - IP GENEVE enable flag.
+ */
 void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, bool eth_geneve_enable,
- bool ip_geneve_enable);
-
-/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
-#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
-#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
-/* Tstorm port statistics */
-#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + ((port_id) * IRO[1].m1))
-#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
-/* Tstorm ll2 port statistics */
-#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
- (IRO[2].base + ((port_id) * IRO[2].m1))
-#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
-/* Ustorm VF-PF Channel ready flag */
-#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
- (IRO[3].base + ((vf_id) * IRO[3].m1))
-#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
-/* Ustorm Final flr cleanup ack */
-#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) (IRO[4].base + ((pf_id) * IRO[4].m1))
-#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
-/* Ustorm Event ring consumer */
-#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[5].base + ((pf_id) * IRO[5].m1))
-#define USTORM_EQE_CONS_SIZE (IRO[5].size)
-/* Ustorm Common Queue ring consumer */
-#define USTORM_COMMON_QUEUE_CONS_OFFSET(global_queue_id) \
- (IRO[6].base + ((global_queue_id) * IRO[6].m1))
-#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[6].size)
-/* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size)
-/* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
-/* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
-/* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
-/* Mstorm Integration Test Data */
-#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
-#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
-/* Ustorm Integration Test Data */
-#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
-#define USTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
-/* Tstorm producers */
-#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
- (IRO[13].base + ((core_rx_queue_id) * IRO[13].m1))
-#define TSTORM_LL2_RX_PRODS_SIZE (IRO[13].size)
-/* Tstorm LightL2 queue statistics */
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[14].size)
-/* Ustorm LiteL2 queue statistics */
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
-#define 
CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[15].size) -/* Pstorm LiteL2 queue statistics */ -#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \ - (IRO[16].base + ((core_tx_stats_id) * IRO[16].m1)) -#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[16].size) -/* Mstorm queue statistics */ -#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[17].base + ((stat_counter_id) * IRO[17].m1)) -#define MSTORM_QUEUE_STAT_SIZE (IRO[17].size) -/* Mstorm producers */ -#define MSTORM_PRODS_OFFSET(queue_id) (IRO[18].base + ((queue_id) * IRO[18].m1)) -#define MSTORM_PRODS_SIZE (IRO[18].size) -/* TPA agregation timeout in us resolution (on ASIC) */ -#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[19].base) -#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[19].size) -/* Ustorm queue statistics */ -#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[20].base + ((stat_counter_id) * IRO[20].m1)) -#define USTORM_QUEUE_STAT_SIZE (IRO[20].size) -/* Ustorm queue zone */ -#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ - (IRO[21].base + ((queue_id) * IRO[21].m1)) -#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[21].size) -/* Pstorm queue statistics */ -#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ - (IRO[22].base + ((stat_counter_id) * IRO[22].m1)) -#define PSTORM_QUEUE_STAT_SIZE (IRO[22].size) -/* Tstorm last parser message */ -#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[23].base) -#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[23].size) -/* Tstorm Eth limit Rx rate */ -#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[24].base + ((pf_id) * IRO[24].m1)) -#define ETH_RX_RATE_LIMIT_SIZE (IRO[24].size) -/* Ystorm queue zone */ -#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ - (IRO[25].base + ((queue_id) * IRO[25].m1)) -#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[25].size) -/* Ystorm cqe producer */ -#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[26].base + ((rss_id) * IRO[26].m1)) -#define YSTORM_TOE_CQ_PROD_SIZE (IRO[26].size) -/* Ustorm cqe producer */ -#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[27].base + ((rss_id) * IRO[27].m1)) -#define USTORM_TOE_CQ_PROD_SIZE (IRO[27].size) -/* Ustorm grq producer */ -#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ - (IRO[28].base + ((pf_id) * IRO[28].m1)) -#define USTORM_TOE_GRQ_PROD_SIZE (IRO[28].size) -/* Tstorm cmdq-cons of given command queue-id */ -#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ - (IRO[29].base + ((cmdq_queue_id) * IRO[29].m1)) -#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[29].size) -/* Mstorm rq-cons of given queue-id */ -#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) \ - (IRO[30].base + ((rq_queue_id) * IRO[30].m1)) -#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[30].size) -/* Mstorm bdq-external-producer of given BDQ function ID, BDqueue-id */ -#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[31].base + ((func_id) * IRO[31].m1) + ((bdq_id) * IRO[31].m2)) -#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[31].size) -/* Tstorm (reflects M-Storm) bdq-external-producer of given fn ID, BDqueue-id */ -#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[32].base + ((func_id) * IRO[32].m1) + ((bdq_id) * IRO[32].m2)) -#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[32].size) -/* Tstorm iSCSI RX stats */ -#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[33].base + ((pf_id) * IRO[33].m1)) -#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[33].size) -/* Mstorm iSCSI RX stats */ -#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[34].base + ((pf_id) * IRO[34].m1)) -#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[34].size) -/* Ustorm iSCSI RX stats */ -#define 
USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[35].base + ((pf_id) * IRO[35].m1)) -#define USTORM_ISCSI_RX_STATS_SIZE (IRO[35].size) -/* Xstorm iSCSI TX stats */ -#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[36].base + ((pf_id) * IRO[36].m1)) -#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[36].size) -/* Ystorm iSCSI TX stats */ -#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[37].base + ((pf_id) * IRO[37].m1)) -#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[37].size) -/* Pstorm iSCSI TX stats */ -#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[38].base + ((pf_id) * IRO[38].m1)) -#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[38].size) -/* Tstorm FCoE RX stats */ -#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ - (IRO[39].base + ((pf_id) * IRO[39].m1)) -#define TSTORM_FCOE_RX_STATS_SIZE (IRO[39].size) -/* Mstorm FCoE RX stats */ -#define MSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ - (IRO[40].base + ((pf_id) * IRO[40].m1)) -#define MSTORM_FCOE_RX_STATS_SIZE (IRO[40].size) -/* Pstorm FCoE TX stats */ -#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ - (IRO[41].base + ((pf_id) * IRO[41].m1)) -#define PSTORM_FCOE_TX_STATS_SIZE (IRO[41].size) -/* Pstorm RoCE statistics */ -#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) \ - (IRO[42].base + ((stat_counter_id) * IRO[42].m1)) -#define PSTORM_ROCE_STAT_SIZE (IRO[42].size) -/* Tstorm RoCE statistics */ -#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) \ - (IRO[43].base + ((stat_counter_id) * IRO[43].m1)) -#define TSTORM_ROCE_STAT_SIZE (IRO[43].size) - -static const struct iro iro_arr[44] = { - { 0x10, 0x0, 0x0, 0x0, 0x8 }, - { 0x47c8, 0x60, 0x0, 0x0, 0x60 }, - { 0x5e30, 0x20, 0x0, 0x0, 0x20 }, - { 0x510, 0x8, 0x0, 0x0, 0x4 }, - { 0x490, 0x8, 0x0, 0x0, 0x4 }, - { 0x10, 0x8, 0x0, 0x0, 0x2 }, - { 0x90, 0x8, 0x0, 0x0, 0x2 }, - { 0x4940, 0x0, 0x0, 0x0, 0x78 }, - { 0x3de0, 0x0, 0x0, 0x0, 0x78 }, - { 0x2998, 0x0, 0x0, 0x0, 0x78 }, - { 0x4750, 0x0, 0x0, 0x0, 0x78 }, - { 0x56d0, 0x0, 0x0, 0x0, 0x78 }, - { 0x7e50, 0x0, 0x0, 0x0, 0x78 }, - { 0x100, 0x8, 0x0, 0x0, 0x8 }, - { 0x5c10, 0x10, 0x0, 0x0, 0x10 }, - { 0xb508, 0x30, 0x0, 0x0, 0x30 }, - { 0x95c0, 0x30, 0x0, 0x0, 0x30 }, - { 0x58a0, 0x40, 0x0, 0x0, 0x40 }, - { 0x200, 0x10, 0x0, 0x0, 0x8 }, - { 0xa230, 0x0, 0x0, 0x0, 0x4 }, - { 0x8058, 0x40, 0x0, 0x0, 0x30 }, - { 0xd00, 0x8, 0x0, 0x0, 0x8 }, - { 0x2b30, 0x80, 0x0, 0x0, 0x38 }, - { 0xa808, 0x0, 0x0, 0x0, 0xf0 }, - { 0xa8f8, 0x8, 0x0, 0x0, 0x8 }, - { 0x80, 0x8, 0x0, 0x0, 0x8 }, - { 0xac0, 0x8, 0x0, 0x0, 0x8 }, - { 0x2580, 0x8, 0x0, 0x0, 0x8 }, - { 0x2500, 0x8, 0x0, 0x0, 0x8 }, - { 0x440, 0x8, 0x0, 0x0, 0x2 }, - { 0x1800, 0x8, 0x0, 0x0, 0x2 }, - { 0x1a00, 0x10, 0x8, 0x0, 0x2 }, - { 0x640, 0x10, 0x8, 0x0, 0x2 }, - { 0xd9b8, 0x38, 0x0, 0x0, 0x24 }, - { 0x11048, 0x10, 0x0, 0x0, 0x8 }, - { 0x11678, 0x38, 0x0, 0x0, 0x18 }, - { 0xaec0, 0x30, 0x0, 0x0, 0x10 }, - { 0x8700, 0x28, 0x0, 0x0, 0x18 }, - { 0xec00, 0x10, 0x0, 0x0, 0x10 }, - { 0xde38, 0x40, 0x0, 0x0, 0x30 }, - { 0x121a8, 0x38, 0x0, 0x0, 0x8 }, - { 0xf068, 0x20, 0x0, 0x0, 0x20 }, - { 0x2b68, 0x80, 0x0, 0x0, 0x10 }, - { 0x4ab8, 0x10, 0x0, 0x0, 0x10 }, + struct qed_ptt *p_ptt, + bool eth_geneve_enable, bool ip_geneve_enable); + +#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) +#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) +#define TSTORM_PORT_STAT_OFFSET(port_id) \ + (IRO[1].base + ((port_id) * IRO[1].m1)) +#define TSTORM_PORT_STAT_SIZE (IRO[1].size) +#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \ + (IRO[3].base + ((vf_id) * IRO[3].m1)) +#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size) +#define 
USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \ + (IRO[4].base + (pf_id) * IRO[4].m1) +#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size) +#define USTORM_EQE_CONS_OFFSET(pf_id) \ + (IRO[5].base + ((pf_id) * IRO[5].m1)) +#define USTORM_EQE_CONS_SIZE (IRO[5].size) +#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \ + (IRO[6].base + ((queue_zone_id) * IRO[6].m1)) +#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size) +#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \ + (IRO[7].base + ((queue_zone_id) * IRO[7].m1)) +#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size) +#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[18].base + ((stat_counter_id) * IRO[18].m1)) +#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size) +#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \ + (IRO[19].base + ((queue_id) * IRO[19].m1)) +#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size) +#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[20].base) +#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[20].size) +#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \ + (IRO[21].base + ((pf_id) * IRO[21].m1)) +#define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size) +#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[22].base + ((stat_counter_id) * IRO[22].m1)) +#define USTORM_QUEUE_STAT_SIZE (IRO[22].size) +#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \ + (IRO[23].base + ((pf_id) * IRO[23].m1)) +#define USTORM_ETH_PF_STAT_SIZE (IRO[23].size) +#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[24].base + ((stat_counter_id) * IRO[24].m1)) +#define PSTORM_QUEUE_STAT_SIZE (IRO[24].size) +#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \ + (IRO[25].base + ((pf_id) * IRO[25].m1)) +#define PSTORM_ETH_PF_STAT_SIZE (IRO[25].size) +#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \ + (IRO[26].base + ((ethtype) * IRO[26].m1)) +#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[26].size) +#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[27].base) +#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[27].size) +#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ + (IRO[28].base + ((pf_id) * IRO[28].m1)) +#define ETH_RX_RATE_LIMIT_SIZE (IRO[28].size) +#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ + (IRO[29].base + ((queue_id) * IRO[29].m1)) +#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[29].size) + +static const struct iro iro_arr[46] = { + {0x0, 0x0, 0x0, 0x0, 0x8}, + {0x4cb0, 0x78, 0x0, 0x0, 0x78}, + {0x6318, 0x20, 0x0, 0x0, 0x20}, + {0xb00, 0x8, 0x0, 0x0, 0x4}, + {0xa80, 0x8, 0x0, 0x0, 0x4}, + {0x0, 0x8, 0x0, 0x0, 0x2}, + {0x80, 0x8, 0x0, 0x0, 0x4}, + {0x84, 0x8, 0x0, 0x0, 0x2}, + {0x4bc0, 0x0, 0x0, 0x0, 0x78}, + {0x3df0, 0x0, 0x0, 0x0, 0x78}, + {0x29b0, 0x0, 0x0, 0x0, 0x78}, + {0x4c38, 0x0, 0x0, 0x0, 0x78}, + {0x4a48, 0x0, 0x0, 0x0, 0x78}, + {0x7e48, 0x0, 0x0, 0x0, 0x78}, + {0xa28, 0x8, 0x0, 0x0, 0x8}, + {0x60f8, 0x10, 0x0, 0x0, 0x10}, + {0xb820, 0x30, 0x0, 0x0, 0x30}, + {0x95b8, 0x30, 0x0, 0x0, 0x30}, + {0x4c18, 0x80, 0x0, 0x0, 0x40}, + {0x1f8, 0x4, 0x0, 0x0, 0x4}, + {0xc9a8, 0x0, 0x0, 0x0, 0x4}, + {0x4c58, 0x80, 0x0, 0x0, 0x20}, + {0x8050, 0x40, 0x0, 0x0, 0x30}, + {0xe770, 0x60, 0x0, 0x0, 0x60}, + {0x2b48, 0x80, 0x0, 0x0, 0x38}, + {0xdf88, 0x78, 0x0, 0x0, 0x78}, + {0x1f8, 0x4, 0x0, 0x0, 0x4}, + {0xacf0, 0x0, 0x0, 0x0, 0xf0}, + {0xade0, 0x8, 0x0, 0x0, 0x8}, + {0x1f8, 0x8, 0x0, 0x0, 0x8}, + {0xac0, 0x8, 0x0, 0x0, 0x8}, + {0x2578, 0x8, 0x0, 0x0, 0x8}, + {0x24f8, 0x8, 0x0, 0x0, 0x8}, + {0x0, 0x8, 0x0, 0x0, 0x8}, + {0x200, 0x10, 0x8, 0x0, 0x8}, + {0xb78, 0x10, 0x8, 0x0, 0x2}, + {0xd888, 0x38, 0x0, 0x0, 0x24}, + {0x12120, 0x10, 0x0, 0x0, 0x8}, + {0x11b20, 0x38, 0x0, 0x0, 0x18}, + {0xa8c0, 0x30, 0x0, 0x0, 0x10}, + {0x86f8, 0x28, 
0x0, 0x0, 0x18}, + {0xeff8, 0x10, 0x0, 0x0, 0x10}, + {0xdd08, 0x48, 0x0, 0x0, 0x38}, + {0xf460, 0x20, 0x0, 0x0, 0x20}, + {0x2b80, 0x80, 0x0, 0x0, 0x10}, + {0x5000, 0x10, 0x0, 0x0, 0x10}, }; /* Runtime array offsets */ -#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 -#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 -#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2 -#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 -#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 -#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 -#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 -#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 -#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 -#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 -#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 -#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 -#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 -#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 -#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 -#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 -#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 -#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17 -#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18 -#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19 -#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20 -#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21 -#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22 -#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23 -#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 -#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 -#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497 -#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 -#define CAU_REG_PI_MEMORY_RT_OFFSET 2233 -#define CAU_REG_PI_MEMORY_RT_SIZE 4416 -#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649 -#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650 -#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651 -#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652 -#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653 -#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654 -#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655 -#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656 -#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657 -#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658 -#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659 -#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660 -#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661 -#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662 -#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663 -#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664 -#define SRC_REG_FIRSTFREE_RT_OFFSET 6665 -#define SRC_REG_FIRSTFREE_RT_SIZE 2 -#define SRC_REG_LASTFREE_RT_OFFSET 6667 -#define SRC_REG_LASTFREE_RT_SIZE 2 -#define SRC_REG_COUNTFREE_RT_OFFSET 6669 -#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670 -#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671 -#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672 -#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673 -#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674 -#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675 -#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6676 -#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6677 -#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6678 -#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6679 -#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6680 -#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6681 -#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6682 -#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6683 -#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6684 -#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6685 -#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 
6686 -#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6687 -#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688 -#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689 -#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690 -#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6691 -#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6692 -#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6693 -#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6694 -#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6695 -#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6696 -#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6697 -#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6698 -#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6699 -#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6700 -#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6701 -#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6702 -#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6703 -#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 -#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28703 -#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28704 -#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28705 -#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28706 -#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28707 -#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28708 -#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28709 -#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28710 -#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28711 -#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28712 -#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28713 -#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 -#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29129 -#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512 -#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29641 -#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29642 -#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29643 -#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29644 -#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29645 -#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29646 -#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29647 -#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29648 -#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29649 -#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29650 -#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29651 -#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29652 -#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29653 -#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29654 -#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29655 -#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29656 -#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29657 -#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29658 -#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29659 -#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29660 -#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29661 -#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29662 -#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29663 -#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29664 -#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29665 -#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29666 -#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29667 -#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29668 -#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29669 -#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29670 -#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29671 -#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29672 -#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29673 -#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29674 -#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29675 -#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29676 -#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29677 -#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29678 -#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29679 -#define 
QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29680 -#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29681 -#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29682 -#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29683 -#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29684 -#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29685 -#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29686 -#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29687 -#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29688 -#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29689 -#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29690 -#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29691 -#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29692 -#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29693 -#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29694 -#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29695 -#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29696 -#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29697 -#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29698 -#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29699 -#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29700 -#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29701 -#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29702 -#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29703 -#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29704 -#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29705 -#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29706 -#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29707 -#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29708 -#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 -#define QM_REG_VOQCRDLINE_RT_OFFSET 29836 -#define QM_REG_VOQCRDLINE_RT_SIZE 20 -#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29856 -#define QM_REG_VOQINITCRDLINE_RT_SIZE 20 -#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29876 -#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29877 -#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29878 -#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29879 -#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29880 -#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29881 -#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29882 -#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29883 -#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29884 -#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29885 -#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29886 -#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29887 -#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29888 -#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29889 -#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29890 -#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29891 -#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29892 -#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29893 -#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29894 -#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29895 -#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29896 -#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29897 -#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29898 -#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29899 -#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29900 -#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29901 -#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29902 -#define QM_REG_PQTX2PF_0_RT_OFFSET 29903 -#define QM_REG_PQTX2PF_1_RT_OFFSET 29904 -#define QM_REG_PQTX2PF_2_RT_OFFSET 29905 -#define QM_REG_PQTX2PF_3_RT_OFFSET 29906 -#define QM_REG_PQTX2PF_4_RT_OFFSET 29907 -#define QM_REG_PQTX2PF_5_RT_OFFSET 29908 -#define QM_REG_PQTX2PF_6_RT_OFFSET 29909 -#define QM_REG_PQTX2PF_7_RT_OFFSET 29910 -#define QM_REG_PQTX2PF_8_RT_OFFSET 29911 -#define QM_REG_PQTX2PF_9_RT_OFFSET 29912 -#define QM_REG_PQTX2PF_10_RT_OFFSET 29913 -#define QM_REG_PQTX2PF_11_RT_OFFSET 29914 -#define QM_REG_PQTX2PF_12_RT_OFFSET 29915 -#define 
QM_REG_PQTX2PF_13_RT_OFFSET 29916 -#define QM_REG_PQTX2PF_14_RT_OFFSET 29917 -#define QM_REG_PQTX2PF_15_RT_OFFSET 29918 -#define QM_REG_PQTX2PF_16_RT_OFFSET 29919 -#define QM_REG_PQTX2PF_17_RT_OFFSET 29920 -#define QM_REG_PQTX2PF_18_RT_OFFSET 29921 -#define QM_REG_PQTX2PF_19_RT_OFFSET 29922 -#define QM_REG_PQTX2PF_20_RT_OFFSET 29923 -#define QM_REG_PQTX2PF_21_RT_OFFSET 29924 -#define QM_REG_PQTX2PF_22_RT_OFFSET 29925 -#define QM_REG_PQTX2PF_23_RT_OFFSET 29926 -#define QM_REG_PQTX2PF_24_RT_OFFSET 29927 -#define QM_REG_PQTX2PF_25_RT_OFFSET 29928 -#define QM_REG_PQTX2PF_26_RT_OFFSET 29929 -#define QM_REG_PQTX2PF_27_RT_OFFSET 29930 -#define QM_REG_PQTX2PF_28_RT_OFFSET 29931 -#define QM_REG_PQTX2PF_29_RT_OFFSET 29932 -#define QM_REG_PQTX2PF_30_RT_OFFSET 29933 -#define QM_REG_PQTX2PF_31_RT_OFFSET 29934 -#define QM_REG_PQTX2PF_32_RT_OFFSET 29935 -#define QM_REG_PQTX2PF_33_RT_OFFSET 29936 -#define QM_REG_PQTX2PF_34_RT_OFFSET 29937 -#define QM_REG_PQTX2PF_35_RT_OFFSET 29938 -#define QM_REG_PQTX2PF_36_RT_OFFSET 29939 -#define QM_REG_PQTX2PF_37_RT_OFFSET 29940 -#define QM_REG_PQTX2PF_38_RT_OFFSET 29941 -#define QM_REG_PQTX2PF_39_RT_OFFSET 29942 -#define QM_REG_PQTX2PF_40_RT_OFFSET 29943 -#define QM_REG_PQTX2PF_41_RT_OFFSET 29944 -#define QM_REG_PQTX2PF_42_RT_OFFSET 29945 -#define QM_REG_PQTX2PF_43_RT_OFFSET 29946 -#define QM_REG_PQTX2PF_44_RT_OFFSET 29947 -#define QM_REG_PQTX2PF_45_RT_OFFSET 29948 -#define QM_REG_PQTX2PF_46_RT_OFFSET 29949 -#define QM_REG_PQTX2PF_47_RT_OFFSET 29950 -#define QM_REG_PQTX2PF_48_RT_OFFSET 29951 -#define QM_REG_PQTX2PF_49_RT_OFFSET 29952 -#define QM_REG_PQTX2PF_50_RT_OFFSET 29953 -#define QM_REG_PQTX2PF_51_RT_OFFSET 29954 -#define QM_REG_PQTX2PF_52_RT_OFFSET 29955 -#define QM_REG_PQTX2PF_53_RT_OFFSET 29956 -#define QM_REG_PQTX2PF_54_RT_OFFSET 29957 -#define QM_REG_PQTX2PF_55_RT_OFFSET 29958 -#define QM_REG_PQTX2PF_56_RT_OFFSET 29959 -#define QM_REG_PQTX2PF_57_RT_OFFSET 29960 -#define QM_REG_PQTX2PF_58_RT_OFFSET 29961 -#define QM_REG_PQTX2PF_59_RT_OFFSET 29962 -#define QM_REG_PQTX2PF_60_RT_OFFSET 29963 -#define QM_REG_PQTX2PF_61_RT_OFFSET 29964 -#define QM_REG_PQTX2PF_62_RT_OFFSET 29965 -#define QM_REG_PQTX2PF_63_RT_OFFSET 29966 -#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29967 -#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29968 -#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29969 -#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29970 -#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29971 -#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29972 -#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29973 -#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29974 -#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29975 -#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29976 -#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29977 -#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29978 -#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29979 -#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29980 -#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29981 -#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29982 -#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29983 -#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29984 -#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29985 -#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29986 -#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29987 -#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29988 -#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29989 -#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29990 -#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29991 -#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29992 -#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29993 -#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29994 -#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29995 
-#define QM_REG_RLGLBLINCVAL_RT_SIZE 256 -#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30251 -#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 -#define QM_REG_RLGLBLCRD_RT_OFFSET 30507 -#define QM_REG_RLGLBLCRD_RT_SIZE 256 -#define QM_REG_RLGLBLENABLE_RT_OFFSET 30763 -#define QM_REG_RLPFPERIOD_RT_OFFSET 30764 -#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30765 -#define QM_REG_RLPFINCVAL_RT_OFFSET 30766 -#define QM_REG_RLPFINCVAL_RT_SIZE 16 -#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30782 -#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_RLPFCRD_RT_OFFSET 30798 -#define QM_REG_RLPFCRD_RT_SIZE 16 -#define QM_REG_RLPFENABLE_RT_OFFSET 30814 -#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30815 -#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30816 -#define QM_REG_WFQPFWEIGHT_RT_SIZE 16 -#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30832 -#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_WFQPFCRD_RT_OFFSET 30848 -#define QM_REG_WFQPFCRD_RT_SIZE 160 -#define QM_REG_WFQPFENABLE_RT_OFFSET 31008 -#define QM_REG_WFQVPENABLE_RT_OFFSET 31009 -#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31010 -#define QM_REG_BASEADDRTXPQ_RT_SIZE 512 -#define QM_REG_TXPQMAP_RT_OFFSET 31522 -#define QM_REG_TXPQMAP_RT_SIZE 512 -#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32034 -#define QM_REG_WFQVPWEIGHT_RT_SIZE 512 -#define QM_REG_WFQVPCRD_RT_OFFSET 32546 -#define QM_REG_WFQVPCRD_RT_SIZE 512 -#define QM_REG_WFQVPMAP_RT_OFFSET 33058 -#define QM_REG_WFQVPMAP_RT_SIZE 512 -#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33570 -#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 -#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33730 -#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33731 -#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33732 -#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33733 -#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33734 -#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33735 -#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33736 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33737 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33741 -#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33745 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33749 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33750 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33782 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33798 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33814 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33830 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 -#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33846 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33847 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33848 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33849 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33850 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33851 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33852 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33853 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33854 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33855 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33856 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33857 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33858 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33859 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 
33860 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33861 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33862 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33863 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33864 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33865 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33866 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33867 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33868 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33869 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33870 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33871 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33872 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33873 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33874 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33875 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33876 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33877 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33878 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33879 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33880 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33881 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33882 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33883 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33884 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33885 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33886 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33887 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33888 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33889 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33890 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33891 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33892 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33893 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33894 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33895 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33896 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33897 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33898 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33899 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33900 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33901 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33902 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33903 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33904 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33905 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33906 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33907 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33908 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33909 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33910 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33911 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33912 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33913 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33914 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33915 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33916 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33917 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33918 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33919 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33920 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33921 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33922 - -#define RUNTIME_ARRAY_SIZE 33923 
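/*
 * Editor's sketch, not part of the patch: the firmware-generated tables in
 * this hunk follow two addressing conventions. The *_OFFSET(idx) macros
 * above resolve a storm-RAM location as IRO[n].base + idx * IRO[n].m1,
 * with the per-region {base, stride, size} tuples supplied by iro_arr[];
 * the *_RT_OFFSET values below appear to be plain indices into a flat
 * runtime-init array of RUNTIME_ARRAY_SIZE entries. A minimal illustration
 * of the first convention follows; only IRO[].base/.m1/.size are taken
 * from this file, everything else here is an invented name.
 */
static inline u32 iro_elem_offset(const struct iro *iro, u32 idx)
{
	/* byte offset of element 'idx' within this IRO region */
	return iro->base + idx * iro->m1;
}

/*
 * Usage with the macros above, e.g. locating the EQE consumer of a given
 * physical function (storm_ram_rd() and ustorm_base are hypothetical):
 *
 *	u32 val = storm_ram_rd(ustorm_base + USTORM_EQE_CONS_OFFSET(pf_id));
 */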
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 +#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 +#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2 +#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 +#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 +#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 +#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 +#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 +#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 +#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 +#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 +#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 +#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 +#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 +#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 +#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 +#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 +#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17 +#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18 +#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19 +#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20 +#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21 +#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22 +#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23 +#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 +#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 +#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 +#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497 +#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 +#define CAU_REG_PI_MEMORY_RT_OFFSET 2233 +#define CAU_REG_PI_MEMORY_RT_SIZE 4416 +#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649 +#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650 +#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651 +#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652 +#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653 +#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654 +#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655 +#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656 +#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657 +#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658 +#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659 +#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660 +#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661 +#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662 +#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663 +#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664 +#define SRC_REG_FIRSTFREE_RT_OFFSET 6665 +#define SRC_REG_FIRSTFREE_RT_SIZE 2 +#define SRC_REG_LASTFREE_RT_OFFSET 6667 +#define SRC_REG_LASTFREE_RT_SIZE 2 +#define SRC_REG_COUNTFREE_RT_OFFSET 6669 +#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670 +#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671 +#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672 +#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673 +#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674 +#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675 +#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676 +#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677 +#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678 +#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679 +#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680 +#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681 +#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682 +#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683 +#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684 +#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685 +#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686 +#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687 +#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688 +#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689 +#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 
6690 +#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691 +#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692 +#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693 +#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694 +#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695 +#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696 +#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697 +#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698 +#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699 +#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700 +#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701 +#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702 +#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703 +#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704 +#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 +#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704 +#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705 +#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706 +#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707 +#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708 +#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709 +#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710 +#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711 +#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712 +#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713 +#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714 +#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 +#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130 +#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512 +#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642 +#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643 +#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644 +#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645 +#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646 +#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647 +#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648 +#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649 +#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650 +#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651 +#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652 +#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653 +#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654 +#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655 +#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656 +#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657 +#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658 +#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659 +#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660 +#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661 +#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662 +#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663 +#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664 +#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665 +#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666 +#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667 +#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668 +#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669 +#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670 +#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671 +#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672 +#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673 +#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674 +#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675 +#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676 +#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677 +#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678 +#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679 +#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680 +#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681 +#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682 +#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683 +#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684 
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685 +#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686 +#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687 +#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688 +#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689 +#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690 +#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691 +#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692 +#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693 +#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694 +#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695 +#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696 +#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697 +#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698 +#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699 +#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700 +#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701 +#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702 +#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703 +#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704 +#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705 +#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706 +#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707 +#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708 +#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709 +#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 +#define QM_REG_VOQCRDLINE_RT_OFFSET 29837 +#define QM_REG_VOQCRDLINE_RT_SIZE 20 +#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857 +#define QM_REG_VOQINITCRDLINE_RT_SIZE 20 +#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877 +#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878 +#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879 +#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880 +#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881 +#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882 +#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883 +#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884 +#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885 +#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886 +#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887 +#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888 +#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889 +#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890 +#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891 +#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892 +#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893 +#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894 +#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895 +#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896 +#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897 +#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898 +#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899 +#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900 +#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901 +#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902 +#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903 +#define QM_REG_PQTX2PF_0_RT_OFFSET 29904 +#define QM_REG_PQTX2PF_1_RT_OFFSET 29905 +#define QM_REG_PQTX2PF_2_RT_OFFSET 29906 +#define QM_REG_PQTX2PF_3_RT_OFFSET 29907 +#define QM_REG_PQTX2PF_4_RT_OFFSET 29908 +#define QM_REG_PQTX2PF_5_RT_OFFSET 29909 +#define QM_REG_PQTX2PF_6_RT_OFFSET 29910 +#define QM_REG_PQTX2PF_7_RT_OFFSET 29911 +#define QM_REG_PQTX2PF_8_RT_OFFSET 29912 +#define QM_REG_PQTX2PF_9_RT_OFFSET 29913 +#define QM_REG_PQTX2PF_10_RT_OFFSET 29914 +#define QM_REG_PQTX2PF_11_RT_OFFSET 29915 +#define QM_REG_PQTX2PF_12_RT_OFFSET 29916 +#define QM_REG_PQTX2PF_13_RT_OFFSET 29917 +#define QM_REG_PQTX2PF_14_RT_OFFSET 29918 +#define QM_REG_PQTX2PF_15_RT_OFFSET 29919 +#define QM_REG_PQTX2PF_16_RT_OFFSET 29920 +#define 
QM_REG_PQTX2PF_17_RT_OFFSET 29921 +#define QM_REG_PQTX2PF_18_RT_OFFSET 29922 +#define QM_REG_PQTX2PF_19_RT_OFFSET 29923 +#define QM_REG_PQTX2PF_20_RT_OFFSET 29924 +#define QM_REG_PQTX2PF_21_RT_OFFSET 29925 +#define QM_REG_PQTX2PF_22_RT_OFFSET 29926 +#define QM_REG_PQTX2PF_23_RT_OFFSET 29927 +#define QM_REG_PQTX2PF_24_RT_OFFSET 29928 +#define QM_REG_PQTX2PF_25_RT_OFFSET 29929 +#define QM_REG_PQTX2PF_26_RT_OFFSET 29930 +#define QM_REG_PQTX2PF_27_RT_OFFSET 29931 +#define QM_REG_PQTX2PF_28_RT_OFFSET 29932 +#define QM_REG_PQTX2PF_29_RT_OFFSET 29933 +#define QM_REG_PQTX2PF_30_RT_OFFSET 29934 +#define QM_REG_PQTX2PF_31_RT_OFFSET 29935 +#define QM_REG_PQTX2PF_32_RT_OFFSET 29936 +#define QM_REG_PQTX2PF_33_RT_OFFSET 29937 +#define QM_REG_PQTX2PF_34_RT_OFFSET 29938 +#define QM_REG_PQTX2PF_35_RT_OFFSET 29939 +#define QM_REG_PQTX2PF_36_RT_OFFSET 29940 +#define QM_REG_PQTX2PF_37_RT_OFFSET 29941 +#define QM_REG_PQTX2PF_38_RT_OFFSET 29942 +#define QM_REG_PQTX2PF_39_RT_OFFSET 29943 +#define QM_REG_PQTX2PF_40_RT_OFFSET 29944 +#define QM_REG_PQTX2PF_41_RT_OFFSET 29945 +#define QM_REG_PQTX2PF_42_RT_OFFSET 29946 +#define QM_REG_PQTX2PF_43_RT_OFFSET 29947 +#define QM_REG_PQTX2PF_44_RT_OFFSET 29948 +#define QM_REG_PQTX2PF_45_RT_OFFSET 29949 +#define QM_REG_PQTX2PF_46_RT_OFFSET 29950 +#define QM_REG_PQTX2PF_47_RT_OFFSET 29951 +#define QM_REG_PQTX2PF_48_RT_OFFSET 29952 +#define QM_REG_PQTX2PF_49_RT_OFFSET 29953 +#define QM_REG_PQTX2PF_50_RT_OFFSET 29954 +#define QM_REG_PQTX2PF_51_RT_OFFSET 29955 +#define QM_REG_PQTX2PF_52_RT_OFFSET 29956 +#define QM_REG_PQTX2PF_53_RT_OFFSET 29957 +#define QM_REG_PQTX2PF_54_RT_OFFSET 29958 +#define QM_REG_PQTX2PF_55_RT_OFFSET 29959 +#define QM_REG_PQTX2PF_56_RT_OFFSET 29960 +#define QM_REG_PQTX2PF_57_RT_OFFSET 29961 +#define QM_REG_PQTX2PF_58_RT_OFFSET 29962 +#define QM_REG_PQTX2PF_59_RT_OFFSET 29963 +#define QM_REG_PQTX2PF_60_RT_OFFSET 29964 +#define QM_REG_PQTX2PF_61_RT_OFFSET 29965 +#define QM_REG_PQTX2PF_62_RT_OFFSET 29966 +#define QM_REG_PQTX2PF_63_RT_OFFSET 29967 +#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968 +#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969 +#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970 +#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971 +#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972 +#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973 +#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974 +#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975 +#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976 +#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977 +#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978 +#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979 +#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980 +#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981 +#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982 +#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983 +#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984 +#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985 +#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986 +#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987 +#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988 +#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989 +#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990 +#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991 +#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992 +#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993 +#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994 +#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995 +#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996 +#define QM_REG_RLGLBLINCVAL_RT_SIZE 256 +#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252 +#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 +#define QM_REG_RLGLBLCRD_RT_OFFSET 
30508 +#define QM_REG_RLGLBLCRD_RT_SIZE 256 +#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764 +#define QM_REG_RLPFPERIOD_RT_OFFSET 30765 +#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766 +#define QM_REG_RLPFINCVAL_RT_OFFSET 30767 +#define QM_REG_RLPFINCVAL_RT_SIZE 16 +#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783 +#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_RLPFCRD_RT_OFFSET 30799 +#define QM_REG_RLPFCRD_RT_SIZE 16 +#define QM_REG_RLPFENABLE_RT_OFFSET 30815 +#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816 +#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817 +#define QM_REG_WFQPFWEIGHT_RT_SIZE 16 +#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833 +#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_WFQPFCRD_RT_OFFSET 30849 +#define QM_REG_WFQPFCRD_RT_SIZE 160 +#define QM_REG_WFQPFENABLE_RT_OFFSET 31009 +#define QM_REG_WFQVPENABLE_RT_OFFSET 31010 +#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011 +#define QM_REG_BASEADDRTXPQ_RT_SIZE 512 +#define QM_REG_TXPQMAP_RT_OFFSET 31523 +#define QM_REG_TXPQMAP_RT_SIZE 512 +#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035 +#define QM_REG_WFQVPWEIGHT_RT_SIZE 512 +#define QM_REG_WFQVPCRD_RT_OFFSET 32547 +#define QM_REG_WFQVPCRD_RT_SIZE 512 +#define QM_REG_WFQVPMAP_RT_OFFSET 33059 +#define QM_REG_WFQVPMAP_RT_SIZE 512 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571 +#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735 +#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742 +#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847 +#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33848 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33849 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33850 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33851 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33852 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33853 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33854 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33855 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33856 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33857 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33858 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33859 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33860 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33861 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33862 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33863 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33864 
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33865 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33866 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33867 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33868 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33869 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33870 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33871 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33872 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33873 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33874 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33875 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33876 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33877 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33878 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33879 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33880 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33881 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33882 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33883 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33884 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33885 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33886 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33887 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33888 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33889 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33890 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33891 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33892 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33893 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33894 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33895 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33896 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33897 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33898 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33899 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33900 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33901 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33902 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33903 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33904 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33905 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33906 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33907 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33908 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33909 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33910 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33911 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33912 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33913 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33914 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33915 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33916 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33917 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33918 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33919 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33920 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33921 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33922 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33923 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33924 + +#define RUNTIME_ARRAY_SIZE 33925 /* The eth storm context for the Tstorm */ struct tstorm_eth_conn_st_ctx { @@ -2380,266 +2692,266 @@ struct 
xstorm_eth_conn_st_ctx { }; struct xstorm_eth_conn_ag_ctx { - u8 reserved0 /* cdu_validation */; - u8 eth_state /* state */; - u8 flags0; -#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 /* bit4 */ -#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */ -#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */ -#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7 - u8 flags1; -#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */ -#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */ -#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */ -#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */ -#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */ -#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */ -#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */ -#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */ -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 + u8 reserved0; + u8 eth_state; + u8 flags0; +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define 
XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 u8 flags2; -#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6 u8 flags3; -#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ -#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6 - u8 flags4; -#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ -#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ -#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ -#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */ -#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6 u8 flags5; -#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */ -#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */ -#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */ -#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */ -#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6 u8 flags6; -#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */ -#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 -#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */ -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 
0x3 /* cf19 */ -#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 u8 flags7; -#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */ -#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */ -#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */ -#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7 u8 flags8; -#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ -#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ -#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ -#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7 u8 flags9; -#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ -#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */ -#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */ -#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */ -#define 
XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */ -#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */ -#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 /* cf16en */ -#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 -#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 u8 flags10; -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */ -#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */ -#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */ -#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */ -#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */ -#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 /* cf23en */ +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 #define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */ -#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */ -#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7 u8 flags11; -#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */ -#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */ -#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */ -#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define 
XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */ -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */ -#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7 u8 flags12; -#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */ -#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */ -#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */ -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */ -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */ -#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */ -#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */ -#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */ -#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7 u8 flags13; -#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */ -#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */ -#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */ -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */ -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define 
XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */ -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */ -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */ -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */ -#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 +#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 u8 flags14; -#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 /* bit16 */ -#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 -#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 /* bit17 */ -#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 -#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 /* bit18 */ -#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 -#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 /* bit19 */ -#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 -#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 /* bit20 */ -#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 -#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 /* bit21 */ -#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */ -#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 - u8 edpm_event_id /* byte2 */; - __le16 physical_q0 /* physical_q0 */; - __le16 word1 /* physical_q1 */; - __le16 edpm_num_bds /* physical_q2 */; - __le16 tx_bd_cons /* word3 */; - __le16 tx_bd_prod /* word4 */; - __le16 go_to_bd_cons /* word5 */; - __le16 conn_dpi /* conn_dpi */; - u8 byte3 /* byte3 */; - u8 byte4 /* byte4 */; - u8 byte5 /* byte5 */; - u8 byte6 /* byte6 */; - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le32 reg4 /* reg4 */; - __le32 reg5 /* cf_array0 */; - __le32 reg6 /* cf_array1 */; - __le16 word7 /* word7 */; - __le16 word8 /* word8 */; - __le16 word9 /* word9 */; - __le16 word10 /* word10 */; - __le32 reg7 /* reg7 */; - __le32 reg8 /* reg8 */; - __le32 reg9 /* reg9 */; - u8 byte7 /* byte7 */; - u8 byte8 /* byte8 */; - u8 byte9 /* byte9 */; - u8 byte10 /* byte10 */; - u8 byte11 /* byte11 */; - u8 byte12 /* byte12 */; - u8 byte13 /* byte13 */; - u8 byte14 /* byte14 */; - u8 byte15 /* byte15 */; - u8 byte16 /* byte16 */; - __le16 word11 /* word11 */; - __le32 reg10 /* reg10 */; - __le32 reg11 /* reg11 */; - __le32 reg12 /* reg12 */; - __le32 reg13 /* reg13 */; - __le32 reg14 /* reg14 */; - __le32 reg15 /* reg15 */; - __le32 reg16 /* reg16 */; - __le32 reg17 /* reg17 */; - __le32 reg18 /* reg18 */; - __le32 reg19 /* reg19 */; - 
__le16 word12 /* word12 */; - __le16 word13 /* word13 */; - __le16 word14 /* word14 */; - __le16 word15 /* word15 */; +#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 + u8 edpm_event_id; + __le16 physical_q0; + __le16 quota; + __le16 edpm_num_bds; + __le16 tx_bd_cons; + __le16 tx_bd_prod; + __le16 tx_class; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le16 word7; + __le16 word8; + __le16 word9; + __le16 word10; + __le32 reg7; + __le32 reg8; + __le32 reg9; + u8 byte7; + u8 byte8; + u8 byte9; + u8 byte10; + u8 byte11; + u8 byte12; + u8 byte13; + u8 byte14; + u8 byte15; + u8 byte16; + __le16 word11; + __le32 reg10; + __le32 reg11; + __le32 reg12; + __le32 reg13; + __le32 reg14; + __le32 reg15; + __le32 reg16; + __le32 reg17; + __le32 reg18; + __le32 reg19; + __le16 word12; + __le16 word13; + __le16 word14; + __le16 word15; }; /* The eth storm context for the Ystorm */ @@ -2648,220 +2960,220 @@ struct ystorm_eth_conn_st_ctx { }; struct ystorm_eth_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */ -#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 -#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */ -#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 -#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ -#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 byte0; + u8 state; + u8 flags0; +#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf0en */ -#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 -#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */ -#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 -#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define 
YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* word0 */; - __le32 terminate_spqe /* reg0 */; - __le32 reg1 /* reg1 */; - __le16 tx_bd_cons_upd /* word1 */; - __le16 word2 /* word2 */; - __le16 word3 /* word3 */; - __le16 word4 /* word4 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 tx_q0_int_coallecing_timeset; + u8 byte3; + __le16 word0; + __le32 terminate_spqe; + __le32 reg1; + __le16 tx_bd_cons_upd; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; }; struct tstorm_eth_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 + u8 byte0; + u8 byte1; + u8 flags0; +#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 u8 flags1; -#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* 
timer_stop_all */ -#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 u8 flags2; -#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ -#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ -#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 u8 flags3; -#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ -#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ -#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 +#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 u8 flags4; -#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ -#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ -#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ -#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ -#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 
0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags5; -#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */ -#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le32 reg4 /* reg4 */; - __le32 reg5 /* reg5 */; - __le32 reg6 /* reg6 */; - __le32 reg7 /* reg7 */; - __le32 reg8 /* reg8 */; - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 rx_bd_cons /* word0 */; - u8 byte4 /* byte4 */; - u8 byte5 /* byte5 */; - __le16 rx_bd_prod /* word1 */; - __le16 word2 /* conn_dpi */; - __le16 word3 /* word3 */; - __le32 reg9 /* reg9 */; - __le32 reg10 /* reg10 */; +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 byte2; + u8 byte3; + __le16 rx_bd_cons; + u8 byte4; + u8 byte5; + __le16 rx_bd_prod; + __le16 word2; + __le16 word3; + __le32 reg9; + __le32 reg10; }; struct ustorm_eth_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 /* timer0cf */ -#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 /* timer1cf */ -#define 
USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 byte0; + u8 byte1; + u8 flags0; +#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 u8 flags1; -#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ -#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */ -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */ -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */ -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 u8 flags2; -#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf0en */ -#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */ -#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 -#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */ -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */ -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */ -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 -#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 u8 flags3; -#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define 
USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* conn_dpi */; - __le16 tx_bd_cons /* word1 */; - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 tx_int_coallecing_timeset /* reg3 */; - __le16 tx_drv_bd_cons /* word2 */; - __le16 rx_drv_cqe_cons /* word3 */; +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 tx_bd_cons; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 tx_int_coallecing_timeset; + __le16 tx_drv_bd_cons; + __le16 rx_drv_cqe_cons; }; /* The eth storm context for the Ustorm */ @@ -2876,47 +3188,75 @@ struct mstorm_eth_conn_st_ctx { /* eth connection context */ struct eth_conn_context { - struct tstorm_eth_conn_st_ctx tstorm_st_context; - struct regpair tstorm_st_padding[2]; - struct pstorm_eth_conn_st_ctx pstorm_st_context; - struct xstorm_eth_conn_st_ctx xstorm_st_context; - struct xstorm_eth_conn_ag_ctx xstorm_ag_context; - struct ystorm_eth_conn_st_ctx ystorm_st_context; - struct ystorm_eth_conn_ag_ctx ystorm_ag_context; - struct tstorm_eth_conn_ag_ctx tstorm_ag_context; - struct ustorm_eth_conn_ag_ctx ustorm_ag_context; - struct ustorm_eth_conn_st_ctx ustorm_st_context; - struct mstorm_eth_conn_st_ctx mstorm_st_context; + struct tstorm_eth_conn_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; + struct pstorm_eth_conn_st_ctx pstorm_st_context; + struct xstorm_eth_conn_st_ctx xstorm_st_context; + struct xstorm_eth_conn_ag_ctx xstorm_ag_context; + struct ystorm_eth_conn_st_ctx ystorm_st_context; + struct ystorm_eth_conn_ag_ctx ystorm_ag_context; + struct tstorm_eth_conn_ag_ctx tstorm_ag_context; + struct ustorm_eth_conn_ag_ctx ustorm_ag_context; + struct ustorm_eth_conn_st_ctx ustorm_st_context; + struct mstorm_eth_conn_st_ctx mstorm_st_context; }; +/* opcodes for the event ring */ +enum eth_event_opcode { + ETH_EVENT_UNUSED, + ETH_EVENT_VPORT_START, + ETH_EVENT_VPORT_UPDATE, + ETH_EVENT_VPORT_STOP, + ETH_EVENT_TX_QUEUE_START, + 
ETH_EVENT_TX_QUEUE_STOP, + ETH_EVENT_RX_QUEUE_START, + ETH_EVENT_RX_QUEUE_UPDATE, + ETH_EVENT_RX_QUEUE_STOP, + ETH_EVENT_FILTERS_UPDATE, + ETH_EVENT_RESERVED, + ETH_EVENT_RESERVED2, + ETH_EVENT_RESERVED3, + ETH_EVENT_RX_ADD_UDP_FILTER, + ETH_EVENT_RX_DELETE_UDP_FILTER, + ETH_EVENT_RESERVED4, + ETH_EVENT_RESERVED5, + MAX_ETH_EVENT_OPCODE +}; + +/* Classify rule types in E2/E3 */ enum eth_filter_action { + ETH_FILTER_ACTION_UNUSED, ETH_FILTER_ACTION_REMOVE, ETH_FILTER_ACTION_ADD, ETH_FILTER_ACTION_REMOVE_ALL, MAX_ETH_FILTER_ACTION }; +/* Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$ */ struct eth_filter_cmd { - u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */; - u8 vport_id /* the vport id */; - u8 action /* filter command action: add/remove/replace */; - u8 reserved0; - __le32 vni; - __le16 mac_lsb; - __le16 mac_mid; - __le16 mac_msb; - __le16 vlan_id; + u8 type; + u8 vport_id; + u8 action; + u8 reserved0; + __le32 vni; + __le16 mac_lsb; + __le16 mac_mid; + __le16 mac_msb; + __le16 vlan_id; }; +/* $$KEEP_ENDIANNESS$$ */ struct eth_filter_cmd_header { - u8 rx; - u8 tx; - u8 cmd_cnt; - u8 assert_on_error; - u8 reserved1[4]; + u8 rx; + u8 tx; + u8 cmd_cnt; + u8 assert_on_error; + u8 reserved1[4]; }; +/* Ethernet filter types: mac/vlan/pair */ enum eth_filter_type { + ETH_FILTER_TYPE_UNUSED, ETH_FILTER_TYPE_MAC, ETH_FILTER_TYPE_VLAN, ETH_FILTER_TYPE_PAIR, @@ -2929,463 +3269,3515 @@ enum eth_filter_type { MAX_ETH_FILTER_TYPE }; +/* Ethernet Ramrod Command IDs */ enum eth_ramrod_cmd_id { ETH_RAMROD_UNUSED, - ETH_RAMROD_VPORT_START /* VPort Start Ramrod */, - ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */, - ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */, - ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */, - ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */, - ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */, - ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */, - ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */, - ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */, - ETH_RAMROD_RESERVED, - ETH_RAMROD_RESERVED2, - ETH_RAMROD_RESERVED3, - ETH_RAMROD_RESERVED4, - ETH_RAMROD_RESERVED5, - ETH_RAMROD_RESERVED6, - ETH_RAMROD_RESERVED7, - ETH_RAMROD_RESERVED8, + ETH_RAMROD_VPORT_START, + ETH_RAMROD_VPORT_UPDATE, + ETH_RAMROD_VPORT_STOP, + ETH_RAMROD_RX_QUEUE_START, + ETH_RAMROD_RX_QUEUE_STOP, + ETH_RAMROD_TX_QUEUE_START, + ETH_RAMROD_TX_QUEUE_STOP, + ETH_RAMROD_FILTERS_UPDATE, + ETH_RAMROD_RX_QUEUE_UPDATE, + ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION, + ETH_RAMROD_RX_ADD_OPENFLOW_FILTER, + ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER, + ETH_RAMROD_RX_ADD_UDP_FILTER, + ETH_RAMROD_RX_DELETE_UDP_FILTER, + ETH_RAMROD_RX_CREATE_GFT_ACTION, + ETH_RAMROD_GFT_UPDATE_FILTER, MAX_ETH_RAMROD_CMD_ID }; +/* return code from eth sp ramrods */ +struct eth_return_code { + u8 value; +#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F +#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0 +#define ETH_RETURN_CODE_RESERVED_MASK 0x3 +#define ETH_RETURN_CODE_RESERVED_SHIFT 5 +#define ETH_RETURN_CODE_RX_TX_MASK 0x1 +#define ETH_RETURN_CODE_RX_TX_SHIFT 7 +}; + +/* What to do in case an error occurs */ enum eth_tx_err { - ETH_TX_ERR_DROP /* Drop erronous packet. 
*/, + ETH_TX_ERR_DROP, ETH_TX_ERR_ASSERT_MALICIOUS, MAX_ETH_TX_ERR }; +/* Array of the different error type behaviors */ struct eth_tx_err_vals { __le16 values; -#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1 -#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0 -#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1 -#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1 -#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1 -#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2 -#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1 -#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3 -#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1 -#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4 -#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1 -#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5 -#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1 -#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6 -#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF -#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7 -}; - +#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0 +#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1 +#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1 +#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1 +#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2 +#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3 +#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1 +#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4 +#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1 +#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5 +#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6 +#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF +#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7 +}; + +/* vport rss configuration data */ struct eth_vport_rss_config { __le16 capabilities; -#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1 -#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0 -#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1 -#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1 -#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1 -#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2 -#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1 -#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3 -#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1 -#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4 -#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1 -#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5 -#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1 -#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6 -#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF -#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7 - u8 rss_id; - u8 rss_mode; - u8 update_rss_key; - u8 update_rss_ind_table; - u8 update_rss_capabilities; - u8 tbl_size; - __le32 reserved2[2]; - __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM]; - __le32 rss_key[ETH_RSS_KEY_SIZE_REGS]; - __le32 reserved3[2]; -}; - +#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0 +#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1 +#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2 +#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3 
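/*
 * Illustrative aside, not part of the original patch: the *_MASK/*_SHIFT
 * pairs throughout this file describe sub-fields packed into the member
 * they follow -- here the __le16 capabilities word of
 * eth_vport_rss_config. A minimal usage sketch, assuming helpers along
 * the lines of the driver's GET_FIELD()/SET_FIELD() macros from qed.h
 * (definitions paraphrased, "rss" pointer is hypothetical):
 *
 *	u16 caps = 0;
 *
 *	SET_FIELD(caps, ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY, 1);
 *	SET_FIELD(caps, ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY, 1);
 *	rss->capabilities = cpu_to_le16(caps);
 *
 * where GET_FIELD(value, name) evaluates to
 * (((value) >> name##_SHIFT) & name##_MASK) and SET_FIELD() clears the
 * field in "value" before or-ing the new flag in at name##_SHIFT. Fields
 * are assembled in CPU byte order and converted with
 * cpu_to_le16()/cpu_to_le32() before being handed to firmware.
 */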
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4 +#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5 +#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6 +#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF +#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7 + u8 rss_id; + u8 rss_mode; + u8 update_rss_key; + u8 update_rss_ind_table; + u8 update_rss_capabilities; + u8 tbl_size; + __le32 reserved2[2]; + __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM]; + + __le32 rss_key[ETH_RSS_KEY_SIZE_REGS]; + __le32 reserved3[2]; +}; + +/* eth vport RSS mode */ enum eth_vport_rss_mode { ETH_VPORT_RSS_MODE_DISABLED, ETH_VPORT_RSS_MODE_REGULAR, MAX_ETH_VPORT_RSS_MODE }; +/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */ struct eth_vport_rx_mode { __le16 state; -#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1 -#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0 -#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1 -#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1 -#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1 -#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2 -#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1 -#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3 -#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1 -#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4 -#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1 -#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5 -#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF -#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6 +#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2 +#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3 +#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4 +#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5 +#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF +#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6 __le16 reserved2[3]; }; +/* Command for setting tpa parameters */ struct eth_vport_tpa_param { - u8 tpa_ipv4_en_flg; - u8 tpa_ipv6_en_flg; - u8 tpa_ipv4_tunn_en_flg; - u8 tpa_ipv6_tunn_en_flg; - u8 tpa_pkt_split_flg; - u8 tpa_hdr_data_split_flg; - u8 tpa_gro_consistent_flg; - u8 tpa_max_aggs_num; - u16 tpa_max_size; - u16 tpa_min_size_to_start; - u16 tpa_min_size_to_cont; - u8 max_buff_num; - u8 reserved; + u8 tpa_ipv4_en_flg; + u8 tpa_ipv6_en_flg; + u8 tpa_ipv4_tunn_en_flg; + u8 tpa_ipv6_tunn_en_flg; + u8 tpa_pkt_split_flg; + u8 tpa_hdr_data_split_flg; + u8 tpa_gro_consistent_flg; + + u8 tpa_max_aggs_num; + + __le16 tpa_max_size; + __le16 tpa_min_size_to_start; + + __le16 tpa_min_size_to_cont; + u8 max_buff_num; + u8 reserved; }; +/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */ struct eth_vport_tx_mode { __le16 state; -#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1 -#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0 -#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1 -#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1 -#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 
0x1
-#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
-#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
-#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
-#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
-#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
__le16 reserved2[3];
};
+/* Ramrod data for rx queue start ramrod */
struct rx_queue_start_ramrod_data {
- __le16 rx_queue_id;
- __le16 num_of_pbl_pages;
- __le16 bd_max_bytes;
- __le16 sb_id;
- u8 sb_index;
- u8 vport_id;
- u8 default_rss_queue_flg;
- u8 complete_cqe_flg;
- u8 complete_event_flg;
- u8 stats_counter_id;
- u8 pin_context;
- u8 pxp_tph_valid_bd;
- u8 pxp_tph_valid_pkt;
- u8 pxp_st_hint;
- __le16 pxp_st_index;
- u8 pmd_mode;
- u8 notify_en;
- u8 toggle_val;
- u8 reserved[7];
- __le16 reserved1;
- struct regpair cqe_pbl_addr;
- struct regpair bd_base;
- struct regpair reserved2;
+ __le16 rx_queue_id;
+ __le16 num_of_pbl_pages;
+ __le16 bd_max_bytes;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 default_rss_queue_flg;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 stats_counter_id;
+ u8 pin_context;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ u8 pxp_st_hint;
+
+ __le16 pxp_st_index;
+ u8 pmd_mode;
+
+ u8 notify_en;
+ u8 toggle_val;
+
+ u8 vf_rx_prod_index;
+
+ u8 reserved[6];
+ __le16 reserved1;
+ struct regpair cqe_pbl_addr;
+ struct regpair bd_base;
+ struct regpair reserved2;
};
+/* Ramrod data for rx queue stop ramrod */
struct rx_queue_stop_ramrod_data {
- __le16 rx_queue_id;
- u8 complete_cqe_flg;
- u8 complete_event_flg;
- u8 vport_id;
- u8 reserved[3];
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 reserved[3];
};
+/* Ramrod data for rx queue update ramrod */
struct rx_queue_update_ramrod_data {
- __le16 rx_queue_id;
- u8 complete_cqe_flg;
- u8 complete_event_flg;
- u8 vport_id;
- u8 reserved[4];
- u8 reserved1;
- u8 reserved2;
- u8 reserved3;
- __le16 reserved4;
- __le16 reserved5;
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 reserved[4];
+ u8 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ __le16 reserved4;
+ __le16 reserved5;
struct regpair reserved6;
};
-struct tx_queue_start_ramrod_data {
- __le16 sb_id;
- u8 sb_index;
- u8 vport_id;
- u8 reserved0;
- u8 stats_counter_id;
- __le16 qm_pq_id;
- u8 flags;
-#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
-#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
-#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
-#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
- u8 pxp_st_hint;
- u8 pxp_tph_valid_bd;
- u8 pxp_tph_valid_pkt;
- __le16 pxp_st_index;
- __le16 comp_agg_size;
- __le16 queue_zone_id;
- __le16 test_dup_count;
- __le16 pbl_size;
- __le16 tx_queue_id;
- struct regpair pbl_base_addr;
- struct regpair bd_cons_address;
+/* Ramrod data for rx Add UDP Filter */
+struct rx_udp_filter_data {
+ __le16 action_icid;
+ __le16 vlan_id;
+ u8 ip_type;
+ u8 tenant_id_exists;
+ __le16 reserved1;
+ __le32 ip_dst_addr[4];
+ __le32 ip_src_addr[4];
+ __le16 udp_dst_port;
+ __le16 udp_src_port;
+ __le32 tenant_id;
};
+/* Ramrod data for tx queue start ramrod */
+struct tx_queue_start_ramrod_data {
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 reserved0;
+ u8 stats_counter_id;
+ __le16 qm_pq_id;
+ u8 flags;
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
+ u8 pxp_st_hint;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ __le16 pxp_st_index;
+ __le16 comp_agg_size;
+ __le16 queue_zone_id;
+ __le16 test_dup_count;
+ __le16 pbl_size;
+ __le16 tx_queue_id;
+
+ struct regpair pbl_base_addr;
+ struct regpair bd_cons_address;
+};
+
+/* Ramrod data for tx queue stop ramrod */
struct tx_queue_stop_ramrod_data {
__le16 reserved[4];
};
+/* Ramrod data for vport filter update ramrod */
struct vport_filter_update_ramrod_data {
- struct eth_filter_cmd_header filter_cmd_hdr;
- struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
+ struct eth_filter_cmd_header filter_cmd_hdr;
+ struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
};
+/* Ramrod data for vport start ramrod */
struct vport_start_ramrod_data {
- u8 vport_id;
- u8 sw_fid;
- __le16 mtu;
- u8 drop_ttl0_en;
- u8 inner_vlan_removal_en;
- struct eth_vport_rx_mode rx_mode;
- struct eth_vport_tx_mode tx_mode;
- struct eth_vport_tpa_param tpa_param;
- __le16 default_vlan;
- u8 tx_switching_en;
- u8 anti_spoofing_en;
- u8 default_vlan_en;
- u8 handle_ptp_pkts;
- u8 silent_vlan_removal_en;
- u8 untagged;
- struct eth_tx_err_vals tx_err_behav;
- u8 zero_placement_offset;
- u8 reserved[7];
-};
-
+ u8 vport_id;
+ u8 sw_fid;
+ __le16 mtu;
+ u8 drop_ttl0_en;
+ u8 inner_vlan_removal_en;
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ struct eth_vport_tpa_param tpa_param;
+ __le16 default_vlan;
+ u8 tx_switching_en;
+ u8 anti_spoofing_en;
+
+ u8 default_vlan_en;
+
+ u8 handle_ptp_pkts;
+ u8 silent_vlan_removal_en;
+ u8 untagged;
+ struct eth_tx_err_vals tx_err_behav;
+
+ u8 zero_placement_offset;
+ u8 
ctl_frame_mac_check_en; + u8 ctl_frame_ethtype_check_en; + u8 reserved[5]; +}; + +/* Ramrod data for vport stop ramrod */ struct vport_stop_ramrod_data { - u8 vport_id; - u8 reserved[7]; + u8 vport_id; + u8 reserved[7]; }; +/* Ramrod data for vport update ramrod */ struct vport_update_ramrod_data_cmn { - u8 vport_id; - u8 update_rx_active_flg; - u8 rx_active_flg; - u8 update_tx_active_flg; - u8 tx_active_flg; - u8 update_rx_mode_flg; - u8 update_tx_mode_flg; - u8 update_approx_mcast_flg; - u8 update_rss_flg; - u8 update_inner_vlan_removal_en_flg; - u8 inner_vlan_removal_en; - u8 update_tpa_param_flg; - u8 update_tpa_en_flg; - u8 update_tx_switching_en_flg; - u8 tx_switching_en; - u8 update_anti_spoofing_en_flg; - u8 anti_spoofing_en; - u8 update_handle_ptp_pkts; - u8 handle_ptp_pkts; - u8 update_default_vlan_en_flg; - u8 default_vlan_en; - u8 update_default_vlan_flg; - __le16 default_vlan; - u8 update_accept_any_vlan_flg; - u8 accept_any_vlan; - u8 silent_vlan_removal_en; - u8 update_mtu_flg; - __le16 mtu; - u8 reserved[2]; + u8 vport_id; + u8 update_rx_active_flg; + u8 rx_active_flg; + u8 update_tx_active_flg; + u8 tx_active_flg; + u8 update_rx_mode_flg; + u8 update_tx_mode_flg; + u8 update_approx_mcast_flg; + + u8 update_rss_flg; + u8 update_inner_vlan_removal_en_flg; + + u8 inner_vlan_removal_en; + u8 update_tpa_param_flg; + u8 update_tpa_en_flg; + u8 update_tx_switching_en_flg; + + u8 tx_switching_en; + u8 update_anti_spoofing_en_flg; + + u8 anti_spoofing_en; + u8 update_handle_ptp_pkts; + + u8 handle_ptp_pkts; + u8 update_default_vlan_en_flg; + + u8 default_vlan_en; + + u8 update_default_vlan_flg; + + __le16 default_vlan; + u8 update_accept_any_vlan_flg; + + u8 accept_any_vlan; + u8 silent_vlan_removal_en; + u8 update_mtu_flg; + + __le16 mtu; + u8 reserved[2]; }; struct vport_update_ramrod_mcast { __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; }; +/* Ramrod data for vport update ramrod */ struct vport_update_ramrod_data { - struct vport_update_ramrod_data_cmn common; - struct eth_vport_rx_mode rx_mode; - struct eth_vport_tx_mode tx_mode; - struct eth_vport_tpa_param tpa_param; - struct vport_update_ramrod_mcast approx_mcast; - struct eth_vport_rss_config rss_config; + struct vport_update_ramrod_data_cmn common; + + struct eth_vport_rx_mode rx_mode; + struct eth_vport_tx_mode tx_mode; + struct eth_vport_tpa_param tpa_param; + struct vport_update_ramrod_mcast approx_mcast; + struct eth_vport_rss_config rss_config; +}; + +struct mstorm_rdma_task_st_ctx { + struct regpair temp[4]; +}; + +struct rdma_close_func_ramrod_data { + u8 cnq_start_offset; + u8 num_cnqs; + u8 vf_id; + u8 vf_valid; + u8 reserved[4]; +}; + +struct rdma_cnq_params { + __le16 sb_num; + u8 sb_index; + u8 num_pbl_pages; + __le32 reserved; + struct regpair pbl_base_addr; + __le16 queue_zone_num; + u8 reserved1[6]; +}; + +struct rdma_create_cq_ramrod_data { + struct regpair cq_handle; + struct regpair pbl_addr; + __le32 max_cqes; + __le16 pbl_num_pages; + __le16 dpi; + u8 is_two_level_pbl; + u8 cnq_id; + u8 pbl_log_page_size; + u8 toggle_bit; + __le16 int_timeout; + __le16 reserved1; }; -#define VF_MAX_STATIC 192 /* In case of K2 */ +struct rdma_deregister_tid_ramrod_data { + __le32 itid; + __le32 reserved; +}; -#define MCP_GLOB_PATH_MAX 2 -#define MCP_PORT_MAX 2 /* Global */ -#define MCP_GLOB_PORT_MAX 4 /* Global */ -#define MCP_GLOB_FUNC_MAX 16 /* Global */ +struct rdma_destroy_cq_output_params { + __le16 cnq_num; + __le16 reserved0; + __le32 reserved1; +}; + +struct rdma_destroy_cq_ramrod_data { + struct regpair 
output_params_addr; +}; + +enum rdma_event_opcode { + RDMA_EVENT_UNUSED, + RDMA_EVENT_FUNC_INIT, + RDMA_EVENT_FUNC_CLOSE, + RDMA_EVENT_REGISTER_MR, + RDMA_EVENT_DEREGISTER_MR, + RDMA_EVENT_CREATE_CQ, + RDMA_EVENT_RESIZE_CQ, + RDMA_EVENT_DESTROY_CQ, + RDMA_EVENT_CREATE_SRQ, + RDMA_EVENT_MODIFY_SRQ, + RDMA_EVENT_DESTROY_SRQ, + MAX_RDMA_EVENT_OPCODE +}; + +enum rdma_fw_return_code { + RDMA_RETURN_OK = 0, + RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR, + RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR, + RDMA_RETURN_RESIZE_CQ_ERR, + RDMA_RETURN_NIG_DRAIN_REQ, + MAX_RDMA_FW_RETURN_CODE +}; + +struct rdma_init_func_hdr { + u8 cnq_start_offset; + u8 num_cnqs; + u8 cq_ring_mode; + u8 cnp_vlan_priority; + __le32 cnp_send_timeout; + u8 cnp_dscp; + u8 vf_id; + u8 vf_valid; + u8 reserved[5]; +}; + +struct rdma_init_func_ramrod_data { + struct rdma_init_func_hdr params_header; + struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES]; +}; + +enum rdma_ramrod_cmd_id { + RDMA_RAMROD_UNUSED, + RDMA_RAMROD_FUNC_INIT, + RDMA_RAMROD_FUNC_CLOSE, + RDMA_RAMROD_REGISTER_MR, + RDMA_RAMROD_DEREGISTER_MR, + RDMA_RAMROD_CREATE_CQ, + RDMA_RAMROD_RESIZE_CQ, + RDMA_RAMROD_DESTROY_CQ, + RDMA_RAMROD_CREATE_SRQ, + RDMA_RAMROD_MODIFY_SRQ, + RDMA_RAMROD_DESTROY_SRQ, + MAX_RDMA_RAMROD_CMD_ID +}; + +struct rdma_register_tid_ramrod_data { + __le32 flags; +#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_MASK 0x3FFFF +#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_SHIFT 0 +#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F +#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 18 +#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 23 +#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 24 +#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 25 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 26 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 27 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 28 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 29 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 30 +#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 31 + u8 flags1; +#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F +#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0 +#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7 +#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5 + u8 flags2; +#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0 +#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1 +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2 + u8 key; + u8 length_hi; + u8 vf_id; + u8 vf_valid; + __le16 pd; + __le32 length_lo; + __le32 itid; + __le32 reserved2; + struct regpair va; + struct regpair pbl_base; + struct regpair dif_error_addr; + struct regpair dif_runt_addr; + __le32 reserved3[2]; +}; + +struct 
rdma_resize_cq_output_params { + __le32 old_cq_cons; + __le32 old_cq_prod; +}; + +struct rdma_resize_cq_ramrod_data { + u8 flags; +#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1 +#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0 +#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1 +#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1 +#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F +#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2 + u8 pbl_log_page_size; + __le16 pbl_num_pages; + __le32 max_cqes; + struct regpair pbl_addr; + struct regpair output_params_addr; +}; + +struct rdma_srq_context { + struct regpair temp[8]; +}; + +struct rdma_srq_create_ramrod_data { + struct regpair pbl_base_addr; + __le16 pages_in_srq_pbl; + __le16 pd_id; + struct rdma_srq_id srq_id; + __le16 page_size; + __le16 reserved1; + __le32 reserved2; + struct regpair producers_addr; +}; + +struct rdma_srq_destroy_ramrod_data { + struct rdma_srq_id srq_id; + __le32 reserved; +}; + +struct rdma_srq_modify_ramrod_data { + struct rdma_srq_id srq_id; + __le32 wqe_limit; +}; + +struct ystorm_rdma_task_st_ctx { + struct regpair temp[4]; +}; + +struct ystorm_rdma_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 msem_ctx_upd_seq; + u8 flags0; +#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 +#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 key; + __le32 mw_cnt; + u8 ref_cnt_seq; + u8 ctx_upd_seq; + __le16 dif_flags; + __le16 tx_ref_count; + __le16 last_used_ltid; + __le16 parent_mr_lo; + __le16 parent_mr_hi; + __le32 fbo_lo; + __le32 fbo_hi; +}; + +struct mstorm_rdma_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 icid; + u8 flags0; +#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define 
MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 +#define MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 +#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 +#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 key; + __le32 mw_cnt; + u8 ref_cnt_seq; + u8 ctx_upd_seq; + __le16 dif_flags; + __le16 tx_ref_count; + __le16 last_used_ltid; + __le16 parent_mr_lo; + __le16 parent_mr_hi; + __le32 fbo_lo; + __le32 fbo_hi; +}; + +struct ustorm_rdma_task_st_ctx { + struct regpair temp[2]; +}; + +struct ustorm_rdma_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 icid; + u8 flags0; +#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6 + u8 flags1; +#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 flags2; +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5 +#define 
USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7 + u8 flags3; +#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + __le32 dif_err_intervals; + __le32 dif_error_1st_interval; + __le32 reg2; + __le32 dif_runt_value; + __le32 reg4; + __le32 reg5; +}; + +struct rdma_task_context { + struct ystorm_rdma_task_st_ctx ystorm_st_context; + struct ystorm_rdma_task_ag_ctx ystorm_ag_context; + struct tdif_task_context tdif_context; + struct mstorm_rdma_task_ag_ctx mstorm_ag_context; + struct mstorm_rdma_task_st_ctx mstorm_st_context; + struct rdif_task_context rdif_context; + struct ustorm_rdma_task_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; + struct ustorm_rdma_task_ag_ctx ustorm_ag_context; +}; + +enum rdma_tid_type { + RDMA_TID_REGISTERED_MR, + RDMA_TID_FMR, + RDMA_TID_MW_TYPE1, + RDMA_TID_MW_TYPE2A, + MAX_RDMA_TID_TYPE +}; + +struct mstorm_rdma_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +struct tstorm_rdma_conn_ag_ctx { + u8 reserved0; + u8 byte1; + u8 flags0; +#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 +#define 
TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 + u8 flags2; +#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 + u8 flags4; +#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 +#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 byte2; + u8 byte3; + __le16 word0; + u8 byte4; + u8 byte5; + __le16 word1; + __le16 word2; + __le16 word3; + __le32 reg9; + __le32 reg10; +}; + +struct tstorm_rdma_task_ag_ctx { + u8 byte0; + u8 byte1; + __le16 word0; + u8 flags0; +#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define 
TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1 +#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6 + u8 flags2; +#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6 + u8 flags3; +#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3 +#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7 + u8 flags4; +#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7 + u8 byte2; + __le16 word1; + __le32 reg0; + u8 byte3; + u8 byte4; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg1; + __le32 reg2; +}; + +struct ustorm_rdma_conn_ag_ctx { + u8 reserved; + u8 byte1; + u8 flags0; +#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0 
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 +#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 + u8 flags3; +#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 conn_dpi; + __le16 word1; + __le32 cq_cons; + __le32 cq_se_prod; + __le32 cq_prod; + __le32 reg3; + __le16 int_timeout; + __le16 word3; +}; + +struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7 + u8 flags1; +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 +#define 
XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7 + u8 flags2; +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6 + u8 flags3; +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6 + u8 flags4; +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6 + u8 flags5; +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6 + u8 flags6; +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6 + u8 flags7; +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2 +#define 
XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7 + u8 flags11; +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1 +#define 
XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4 +#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3 +#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le16 word5; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 snd_nxt_psn; + __le32 reg4; +}; + +struct xstorm_rdma_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2 +#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4 +#define XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5 +#define XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7 + u8 flags1; +#define 
XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_RDMA_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 + u8 flags2; +#define XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 + u8 flags4; +#define XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6 + u8 flags7; +#define XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0 +#define XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2 +#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1 +#define 
XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1 +#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2 +#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3 +#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7 + u8 flags11; +#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define 
XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0 +#define XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 +#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4 +#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le16 word5; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 snd_nxt_psn; + __le32 reg4; + __le32 reg5; + __le32 reg6; +}; + +struct ystorm_rdma_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +struct mstorm_roce_conn_st_ctx { + struct regpair temp[6]; +}; + +struct pstorm_roce_conn_st_ctx { + struct regpair 
temp[16]; +}; + +struct ystorm_roce_conn_st_ctx { + struct regpair temp[2]; +}; + +struct xstorm_roce_conn_st_ctx { + struct regpair temp[22]; +}; + +struct tstorm_roce_conn_st_ctx { + struct regpair temp[30]; +}; + +struct ustorm_roce_conn_st_ctx { + struct regpair temp[12]; +}; + +struct roce_conn_context { + struct ystorm_roce_conn_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2]; + struct pstorm_roce_conn_st_ctx pstorm_st_context; + struct xstorm_roce_conn_st_ctx xstorm_st_context; + struct regpair xstorm_st_padding[2]; + struct xstorm_rdma_conn_ag_ctx xstorm_ag_context; + struct tstorm_rdma_conn_ag_ctx tstorm_ag_context; + struct timers_context timer_context; + struct ustorm_rdma_conn_ag_ctx ustorm_ag_context; + struct tstorm_roce_conn_st_ctx tstorm_st_context; + struct mstorm_roce_conn_st_ctx mstorm_st_context; + struct ustorm_roce_conn_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; +}; + +struct roce_create_qp_req_ramrod_data { + __le16 flags; +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12 + u8 max_ord; + u8 traffic_class; + u8 hop_limit; + u8 orq_num_pages; + __le16 p_key; + __le32 flow_label; + __le32 dst_qp_id; + __le32 ack_timeout_val; + __le32 initial_psn; + __le16 mtu; + __le16 pd; + __le16 sq_num_pages; + __le16 reseved2; + struct regpair sq_pbl_addr; + struct regpair orq_pbl_addr; + __le16 local_mac_addr[3]; + __le16 remote_mac_addr[3]; + __le16 vlan_id; + __le16 udp_src_port; + __le32 src_gid[4]; + __le32 dst_gid[4]; + struct regpair qp_handle_for_cqe; + struct regpair qp_handle_for_async; + u8 stats_counter_id; + u8 reserved3[7]; + __le32 cq_cid; + __le16 physical_queue0; + __le16 dpi; +}; + +struct roce_create_qp_resp_ramrod_data { + __le16 flags; +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_SHIFT 7 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8 +#define 
ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11 + u8 max_ird; + u8 traffic_class; + u8 hop_limit; + u8 irq_num_pages; + __le16 p_key; + __le32 flow_label; + __le32 dst_qp_id; + u8 stats_counter_id; + u8 reserved1; + __le16 mtu; + __le32 initial_psn; + __le16 pd; + __le16 rq_num_pages; + struct rdma_srq_id srq_id; + struct regpair rq_pbl_addr; + struct regpair irq_pbl_addr; + __le16 local_mac_addr[3]; + __le16 remote_mac_addr[3]; + __le16 vlan_id; + __le16 udp_src_port; + __le32 src_gid[4]; + __le32 dst_gid[4]; + struct regpair qp_handle_for_cqe; + struct regpair qp_handle_for_async; + __le32 reserved2[2]; + __le32 cq_cid; + __le16 physical_queue0; + __le16 dpi; +}; + +struct roce_destroy_qp_req_output_params { + __le32 num_bound_mw; + __le32 reserved; +}; + +struct roce_destroy_qp_req_ramrod_data { + struct regpair output_params_addr; +}; + +struct roce_destroy_qp_resp_output_params { + __le32 num_invalidated_mw; + __le32 reserved; +}; + +struct roce_destroy_qp_resp_ramrod_data { + struct regpair output_params_addr; +}; + +enum roce_event_opcode { + ROCE_EVENT_CREATE_QP = 11, + ROCE_EVENT_MODIFY_QP, + ROCE_EVENT_QUERY_QP, + ROCE_EVENT_DESTROY_QP, + MAX_ROCE_EVENT_OPCODE +}; + +struct roce_modify_qp_req_ramrod_data { + __le16 flags; +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x7 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 13 + u8 fields; +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4 + u8 max_ord; + u8 traffic_class; + u8 hop_limit; + __le16 p_key; + __le32 flow_label; + __le32 ack_timeout_val; + __le16 mtu; + __le16 reserved2; + __le32 reserved3[3]; + __le32 src_gid[4]; + __le32 dst_gid[4]; +}; + +struct roce_modify_qp_resp_ramrod_data { + __le16 flags; +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x3F +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 10 + u8 fields; +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3 + u8 max_ird; + u8 traffic_class; + u8 hop_limit; + __le16 p_key; + __le32 flow_label; + __le16 mtu; + __le16 reserved2; + __le32 src_gid[4]; + __le32 dst_gid[4]; +}; + +struct roce_query_qp_req_output_params { + __le32 psn; + __le32 flags; +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2 +}; + +struct roce_query_qp_req_ramrod_data { + struct regpair output_params_addr; +}; + +struct roce_query_qp_resp_output_params { + __le32 psn; + __le32 err_flag; +#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1 +#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0 +#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF +#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1 +}; + +struct roce_query_qp_resp_ramrod_data { + struct regpair output_params_addr; +}; + +enum roce_ramrod_cmd_id { + ROCE_RAMROD_CREATE_QP = 11, + ROCE_RAMROD_MODIFY_QP, + ROCE_RAMROD_QUERY_QP, + ROCE_RAMROD_DESTROY_QP, + MAX_ROCE_RAMROD_CMD_ID +}; + +struct mstorm_roce_req_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define 
MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +struct mstorm_roce_resp_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +enum roce_flavor { + PLAIN_ROCE /* RoCE v1 */ , + RROCE_IPV4 /* RoCE v2 (Routable RoCE) over ipv4 */ , + RROCE_IPV6 /* RoCE v2 (Routable RoCE) over ipv6 */ , + MAX_ROCE_FLAVOR +}; + +struct tstorm_roce_req_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURED_SHIFT 1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURED_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6 + u8 flags1; +#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2 +#define 
TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 + u8 flags2; +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6 + u8 flags3; +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 + u8 flags4; +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 snd_nxt_psn; + __le32 snd_max_psn; + __le32 orq_prod; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + 
__le32 reg8; + u8 tx_cqe_error_type; + u8 orq_cache_idx; + __le16 snd_sq_cons_th; + u8 byte4; + u8 byte5; + __le16 snd_sq_cons; + __le16 word2; + __le16 word3; + __le32 reg9; + __le32 reg10; +}; + +struct tstorm_roce_resp_conn_ag_ctx { + u8 byte0; + u8 state; + u8 flags0; +#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 + u8 flags2; +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7 + u8 flags4; +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define 
TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 psn_and_rxmit_id_echo; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 tx_async_error_type; + u8 byte3; + __le16 rq_cons; + u8 byte4; + u8 byte5; + __le16 rq_prod; + __le16 conn_dpi; + __le16 irq_cons; + __le32 num_invlidated_mw; + __le32 reg10; +}; + +struct ustorm_roce_req_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define 
USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +struct ustorm_roce_resp_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +struct xstorm_roce_req_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 + u8 flags2; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 + u8 flags4; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3 +#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6 + u8 flags7; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7 + u8 flags11; +#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7 + u8 flags13; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 word1; + __le16 sq_cmp_cons; + __le16 sq_cons; + 
__le16 sq_prod; + __le16 word5; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 lsn; + __le32 ssn; + __le32 snd_una_psn; + __le32 snd_nxt_psn; + __le32 reg4; + __le32 orq_cons_th; + __le32 orq_cons; +}; + +struct xstorm_roce_resp_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 + u8 flags2; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 + u8 flags4; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0 
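Context members such as __le16 sq_prod and __le32 snd_una_psn in xstorm_roce_req_conn_ag_ctx above are declared with fixed little-endian types because the context image is consumed by the adapter, not the host CPU, so big-endian hosts must byte-swap on every access. A sketch of that discipline, with portable open-coded helpers standing in for the kernel's cpu_to_le32()/le32_to_cpu() (the PSN value is invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* Serialize through explicit byte positions so the stored form is
 * little-endian on any host, like cpu_to_le32() guarantees in-kernel. */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	uint8_t ctx[4];			/* one __le32 slot of a context image */
	uint32_t psn = 0x00abcdef;	/* hypothetical 24-bit PSN */

	put_le32(ctx, psn);	/* device sees ef cd ab 00 on any host */
	printf("wire bytes: %02x %02x %02x %02x, read back 0x%08x\n",
	       ctx[0], ctx[1], ctx[2], ctx[3], get_le32(ctx));
	return 0;
}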
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6 + u8 flags7; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3 +#define 
XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7 + u8 flags11; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2 +#define 
XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 word1; + __le16 irq_prod; + __le16 word3; + __le16 word4; + __le16 word5; + __le16 irq_cons; + u8 rxmit_opcode; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 rxmit_psn_and_id; + __le32 rxmit_bytes_length; + __le32 psn; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 msn_and_syndrome; +}; + +struct ystorm_roce_req_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +struct ystorm_roce_resp_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define 
YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +struct ystorm_iscsi_conn_st_ctx { + __le32 reserved[4]; +}; + +struct pstorm_iscsi_tcp_conn_st_ctx { + __le32 tcp[32]; + __le32 iscsi[4]; +}; + +struct xstorm_iscsi_tcp_conn_st_ctx { + __le32 reserved_iscsi[40]; + __le32 reserved_tcp[4]; +}; + +struct xstorm_iscsi_conn_ag_ctx { + u8 cdu_validation; + u8 state; + u8 flags0; +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7 + u8 flags1; +#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7 + u8 flags2; +#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 + u8 flags3; +#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3 +#define 
XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6 + u8 flags6; +#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 + u8 flags7; +#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define 
XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7 + u8 flags11; +#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4 +#define 
XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 physical_q1; + __le16 dummy_dorq_var; + __le16 sq_cons; + __le16 sq_prod; + __le16 word5; + __le16 slow_io_total_data_tx_update; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 more_to_send_seq; + __le32 reg4; + __le32 reg5; + __le32 hq_scan_next_relevant_ack; + __le16 r2tq_prod; + __le16 r2tq_cons; + __le16 hq_prod; + __le16 hq_cons; + __le32 remain_seq; + __le32 bytes_to_next_pdu; + __le32 hq_tcp_seq; + u8 byte7; + u8 byte8; + u8 byte9; + u8 byte10; + u8 byte11; + u8 byte12; + u8 byte13; + u8 byte14; + u8 byte15; + u8 byte16; + __le16 word11; + __le32 reg10; + __le32 reg11; + __le32 exp_stat_sn; + __le32 reg13; + __le32 reg14; + __le32 reg15; + __le32 reg16; + __le32 reg17; +}; + +struct tstorm_iscsi_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 + u8 flags4; +#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 +#define 
TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 byte2; + u8 byte3; + __le16 word0; +}; + +struct ustorm_iscsi_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define 
USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +struct tstorm_iscsi_conn_st_ctx { + __le32 reserved[40]; +}; + +struct mstorm_iscsi_conn_ag_ctx { + u8 reserved; + u8 state; + u8 flags0; +#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +struct mstorm_iscsi_tcp_conn_st_ctx { + __le32 reserved_tcp[20]; + __le32 reserved_iscsi[8]; +}; + +struct ustorm_iscsi_conn_st_ctx { + __le32 reserved[52]; +}; + +struct iscsi_conn_context { + struct ystorm_iscsi_conn_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2]; + struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct pb_context xpb2_context; + struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context; + struct regpair xstorm_st_padding[2]; + struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context; + struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context; + struct regpair tstorm_ag_padding[2]; + struct timers_context timer_context; + struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context; + struct pb_context upb_context; + struct tstorm_iscsi_conn_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; + struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context; + struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context; + struct ustorm_iscsi_conn_st_ctx ustorm_st_context; +}; + +struct iscsi_init_ramrod_params { + struct iscsi_spe_func_init iscsi_init_spe; + struct tcp_init_params tcp_init; +}; + +struct ystorm_iscsi_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 
0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+	u8 flags1;
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+	u8 byte2;
+	u8 byte3;
+	__le16 word0;
+	__le32 reg0;
+	__le32 reg1;
+	__le16 word1;
+	__le16 word2;
+	__le16 word3;
+	__le16 word4;
+	__le32 reg2;
+	__le32 reg3;
+};
+#define VF_MAX_STATIC 192
+
+#define MCP_GLOB_PATH_MAX 2
+#define MCP_PORT_MAX 2
+#define MCP_GLOB_PORT_MAX 4
+#define MCP_GLOB_FUNC_MAX 16
-typedef u32 offsize_t;	/* In DWORDS !!! */
 /* Offset from the beginning of the MCP scratchpad */
-#define OFFSIZE_OFFSET_SHIFT 0
-#define OFFSIZE_OFFSET_MASK 0x0000ffff
+#define OFFSIZE_OFFSET_SHIFT 0
+#define OFFSIZE_OFFSET_MASK 0x0000ffff
 /* Size of specific element (not the whole array if any) */
-#define OFFSIZE_SIZE_SHIFT 16
-#define OFFSIZE_SIZE_MASK 0xffff0000
+#define OFFSIZE_SIZE_SHIFT 16
+#define OFFSIZE_SIZE_MASK 0xffff0000

-/* SECTION_OFFSET is calculating the offset in bytes out of offsize */
-#define SECTION_OFFSET(_offsize) ((((_offsize & \
-				     OFFSIZE_OFFSET_MASK) >> \
-				     OFFSIZE_OFFSET_SHIFT) << 2))
+#define SECTION_OFFSET(_offsize) ((((_offsize & \
+				     OFFSIZE_OFFSET_MASK) >> \
+				     OFFSIZE_OFFSET_SHIFT) << 2))

-/* QED_SECTION_SIZE is calculating the size in bytes out of offsize */
-#define QED_SECTION_SIZE(_offsize) (((_offsize & \
-				      OFFSIZE_SIZE_MASK) >> \
-				      OFFSIZE_SIZE_SHIFT) << 2)
+#define QED_SECTION_SIZE(_offsize) (((_offsize & \
+				      OFFSIZE_SIZE_MASK) >> \
+				      OFFSIZE_SIZE_SHIFT) << 2)

-/* SECTION_ADDR returns the GRC addr of a section, given offsize and index
- * within section.
- */
-#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
-				     SECTION_OFFSET(_offsize) + \
-				     (QED_SECTION_SIZE(_offsize) * idx))
+#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
+				     SECTION_OFFSET(_offsize) + \
+				     (QED_SECTION_SIZE(_offsize) * idx))
+
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
+	(_pub_base + offsetof(struct mcp_public_data, sections[_section]))
-/* SECTION_OFFSIZE_ADDR returns the GRC addr to the offsize address.
- * Use offsetof, since the OFFSETUP collide with the firmware definition
- */
-#define SECTION_OFFSIZE_ADDR(_pub_base, _section) (_pub_base + \
-						    offsetof(struct \
-							     mcp_public_data, \
-							     sections[_section]))
 /* PHY configuration */
-struct pmm_phy_cfg {
-	u32 speed;
-#define PMM_SPEED_AUTONEG 0
-
-	u32 pause;	/* bitmask */
-#define PMM_PAUSE_NONE 0x0
-#define PMM_PAUSE_AUTONEG 0x1
-#define PMM_PAUSE_RX 0x2
-#define PMM_PAUSE_TX 0x4
-
-	u32 adv_speed;	/* Default should be the speed_cap_mask */
-	u32 loopback_mode;
-#define PMM_LOOPBACK_NONE 0
-#define PMM_LOOPBACK_INT_PHY 1
-#define PMM_LOOPBACK_EXT_PHY 2
-#define PMM_LOOPBACK_EXT 3
-#define PMM_LOOPBACK_MAC 4
-
-	/* features */
+struct eth_phy_cfg {
+	u32 speed;
+#define ETH_SPEED_AUTONEG 0
+#define ETH_SPEED_SMARTLINQ 0x8
+
+	u32 pause;
+#define ETH_PAUSE_NONE 0x0
+#define ETH_PAUSE_AUTONEG 0x1
+#define ETH_PAUSE_RX 0x2
+#define ETH_PAUSE_TX 0x4
+
+	u32 adv_speed;
+	u32 loopback_mode;
+#define ETH_LOOPBACK_NONE (0)
+#define ETH_LOOPBACK_INT_PHY (1)
+#define ETH_LOOPBACK_EXT_PHY (2)
+#define ETH_LOOPBACK_EXT (3)
+#define ETH_LOOPBACK_MAC (4)
+	u32 feature_config_flags;
+#define ETH_EEE_MODE_ADV_LPI (1 << 0)
 };

 struct port_mf_cfg {
-	u32 dynamic_cfg;	/* device control channel */
-#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
-#define PORT_MF_CFG_OV_TAG_SHIFT 0
-#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
-
-	u32 reserved[1];
-};
-
-/* DO NOT add new fields in the middle
- * MUST be synced with struct pmm_stats_map
- */
-struct pmm_stats {
-	u64 r64;	/* 0x00 (Offset 0x00 ) RX 64-byte frame counter*/
-	u64 r127;	/* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter*/
-	u64 r255;
-	u64 r511;
-	u64 r1023;
-	u64 r1518;
-	u64 r1522;
-	u64 r2047;
-	u64 r4095;
-	u64 r9216;
-	u64 r16383;
-	u64 rfcs;	/* 0x0F (Offset 0x58 ) RX FCS error frame counter*/
-	u64 rxcf;	/* 0x10 (Offset 0x60 ) RX control frame counter*/
-	u64 rxpf;	/* 0x11 (Offset 0x68 ) RX pause frame counter*/
-	u64 rxpp;	/* 0x12 (Offset 0x70 ) RX PFC frame counter*/
-	u64 raln;	/* 0x16 (Offset 0x78 ) RX alignment error counter*/
-	u64 rfcr;	/* 0x19 (Offset 0x80 ) RX false carrier counter */
-	u64 rovr;	/* 0x1A (Offset 0x88 ) RX oversized frame counter*/
-	u64 rjbr;	/* 0x1B (Offset 0x90 ) RX jabber frame counter */
-	u64 rund;	/* 0x34 (Offset 0x98 ) RX undersized frame counter */
-	u64 rfrg;	/* 0x35 (Offset 0xa0 ) RX fragment counter */
-	u64 t64;	/* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */
-	u64 t127;
-	u64 t255;
-	u64 t511;
-	u64 t1023;
-	u64 t1518;
-	u64 t2047;
-	u64 t4095;
-	u64 t9216;
-	u64 t16383;
-	u64 txpf;	/* 0x50 (Offset 0xf8 ) TX pause frame counter */
-	u64 txpp;	/* 0x51 (Offset 0x100) TX PFC frame counter */
-	u64 tlpiec;
-	u64 tncl;
-	u64 rbyte;	/* 0x3d (Offset 0x118) RX byte counter */
-	u64 rxuca;	/* 0x0c (Offset 0x120) RX UC frame counter */
-	u64 rxmca;	/* 0x0d (Offset 0x128) RX MC frame counter */
-	u64 rxbca;	/* 0x0e (Offset 0x130) RX BC frame counter */
-	u64 rxpok;
-	u64 tbyte;	/* 0x6f (Offset 0x140) TX byte counter */
-	u64 txuca;	/* 0x4d (Offset 0x148) TX UC frame counter */
-	u64 txmca;	/* 0x4e (Offset 0x150) TX MC frame counter */
-	u64 txbca;	/* 0x4f (Offset 0x158) TX BC frame counter */
-	u64 txcf;	/* 0x54 (Offset 0x160) TX control frame counter */
+	u32 dynamic_cfg;
+#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT 0
+#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
+
+	u32 reserved[1];
+};
+
+struct eth_stats {
+	u64 r64;
+	u64 r127;
+	u64 r255;
+	u64 r511;
+	u64 r1023;
+	u64 r1518;
+	u64 r1522;
+	u64 r2047;
+	u64 r4095;
+	u64 r9216;
+	u64 r16383;
+	u64 rfcs;
+	u64 rxcf;
+	u64 rxpf;
+	u64 rxpp;
+	u64 raln;
+	u64 rfcr;
+	u64 rovr;
+	u64 rjbr;
+	u64 rund;
+	u64 rfrg;
+	u64 t64;
+	u64 t127;
+	u64 t255;
+	u64 t511;
+	u64 t1023;
+	u64 t1518;
+	u64 t2047;
+	u64 t4095;
+	u64 t9216;
+	u64 t16383;
+	u64 txpf;
+	u64 txpp;
+	u64 tlpiec;
+	u64 tncl;
+	u64 rbyte;
+	u64 rxuca;
+	u64 rxmca;
+	u64 rxbca;
+	u64 rxpok;
+	u64 tbyte;
+	u64 txuca;
+	u64 txmca;
+	u64 txbca;
+	u64 txcf;
 };

 struct brb_stats {
-	u64 brb_truncate[8];
-	u64 brb_discard[8];
+	u64 brb_truncate[8];
+	u64 brb_discard[8];
 };

 struct port_stats {
-	struct brb_stats brb;
-	struct pmm_stats pmm;
+	struct brb_stats brb;
+	struct eth_stats eth;
 };

-#define CMT_TEAM0 0
-#define CMT_TEAM1 1
-#define CMT_TEAM_MAX 2
-
 struct couple_mode_teaming {
 	u8 port_cmt[MCP_GLOB_PORT_MAX];
-#define PORT_CMT_IN_TEAM BIT(0)
+#define PORT_CMT_IN_TEAM (1 << 0)

-#define PORT_CMT_PORT_ROLE BIT(1)
-#define PORT_CMT_PORT_INACTIVE (0 << 1)
-#define PORT_CMT_PORT_ACTIVE BIT(1)
+#define PORT_CMT_PORT_ROLE (1 << 1)
+#define PORT_CMT_PORT_INACTIVE (0 << 1)
+#define PORT_CMT_PORT_ACTIVE (1 << 1)

-#define PORT_CMT_TEAM_MASK BIT(2)
-#define PORT_CMT_TEAM0 (0 << 2)
-#define PORT_CMT_TEAM1 BIT(2)
+#define PORT_CMT_TEAM_MASK (1 << 2)
+#define PORT_CMT_TEAM0 (0 << 2)
+#define PORT_CMT_TEAM1 (1 << 2)
 };

-/**************************************
-* LLDP and DCBX HSI structures
-**************************************/
-#define LLDP_CHASSIS_ID_STAT_LEN 4
-#define LLDP_PORT_ID_STAT_LEN 4
-#define DCBX_MAX_APP_PROTOCOL 32
-#define MAX_SYSTEM_LLDP_TLV_DATA 32
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL 32
+#define MAX_SYSTEM_LLDP_TLV_DATA 32

-enum lldp_agent_e {
+enum _lldp_agent {
 	LLDP_NEAREST_BRIDGE = 0,
 	LLDP_NEAREST_NON_TPMR_BRIDGE,
 	LLDP_NEAREST_CUSTOMER_BRIDGE,
@@ -3394,690 +6786,525 @@ enum lldp_agent_e {
 struct lldp_config_params_s {
 	u32 config;
-#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
-#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
-#define LLDP_CONFIG_HOLD_MASK 0x00000f00
-#define LLDP_CONFIG_HOLD_SHIFT 8
-#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
-#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
-#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
-#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
-#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
-#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
-	u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
-	u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
+#define LLDP_CONFIG_HOLD_MASK 0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT 8
+#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
+#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
+#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
+	u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+	u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
 };

 struct lldp_status_params_s {
-	u32 prefix_seq_num;
-	u32 status;	/* TBD */
-
-	/* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
-	u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
-
-	/* Holds remote Port ID TLV header, subtype and 9B of payload. */
-	u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
-	u32 suffix_seq_num;
+	u32 prefix_seq_num;
+	u32 status;
+	u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+	u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+	u32 suffix_seq_num;
 };

 struct dcbx_ets_feature {
 	u32 flags;
-#define DCBX_ETS_ENABLED_MASK 0x00000001
-#define DCBX_ETS_ENABLED_SHIFT 0
-#define DCBX_ETS_WILLING_MASK 0x00000002
-#define DCBX_ETS_WILLING_SHIFT 1
-#define DCBX_ETS_ERROR_MASK 0x00000004
-#define DCBX_ETS_ERROR_SHIFT 2
-#define DCBX_ETS_CBS_MASK 0x00000008
-#define DCBX_ETS_CBS_SHIFT 3
-#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
-#define DCBX_ETS_MAX_TCS_SHIFT 4
-	u32 pri_tc_tbl[1];
-#define DCBX_ISCSI_OOO_TC 4
-#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
-	u32 tc_bw_tbl[2];
-	u32 tc_tsa_tbl[2];
-#define DCBX_ETS_TSA_STRICT 0
-#define DCBX_ETS_TSA_CBS 1
-#define DCBX_ETS_TSA_ETS 2
+#define DCBX_ETS_ENABLED_MASK 0x00000001
+#define DCBX_ETS_ENABLED_SHIFT 0
+#define DCBX_ETS_WILLING_MASK 0x00000002
+#define DCBX_ETS_WILLING_SHIFT 1
+#define DCBX_ETS_ERROR_MASK 0x00000004
+#define DCBX_ETS_ERROR_SHIFT 2
+#define DCBX_ETS_CBS_MASK 0x00000008
+#define DCBX_ETS_CBS_SHIFT 3
+#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT 4
+#define DCBX_ISCSI_OOO_TC_MASK 0x00000f00
+#define DCBX_ISCSI_OOO_TC_SHIFT 8
+	u32 pri_tc_tbl[1];
+#define DCBX_ISCSI_OOO_TC (4)
+
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
+#define DCBX_CEE_STRICT_PRIORITY 0xf
+	u32 tc_bw_tbl[2];
+	u32 tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT 0
+#define DCBX_ETS_TSA_CBS 1
+#define DCBX_ETS_TSA_ETS 2
 };

 struct dcbx_app_priority_entry {
 	u32 entry;
-#define DCBX_APP_PRI_MAP_MASK 0x000000ff
-#define DCBX_APP_PRI_MAP_SHIFT 0
-#define DCBX_APP_PRI_0 0x01
-#define DCBX_APP_PRI_1 0x02
-#define DCBX_APP_PRI_2 0x04
-#define DCBX_APP_PRI_3 0x08
-#define DCBX_APP_PRI_4 0x10
-#define DCBX_APP_PRI_5 0x20
-#define DCBX_APP_PRI_6 0x40
-#define DCBX_APP_PRI_7 0x80
-#define DCBX_APP_SF_MASK 0x00000300
-#define DCBX_APP_SF_SHIFT 8
-#define DCBX_APP_SF_ETHTYPE 0
-#define DCBX_APP_SF_PORT 1
-#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
-#define DCBX_APP_PROTOCOL_ID_SHIFT 16
-};
-
-/* FW structure in BE */
+#define DCBX_APP_PRI_MAP_MASK 0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT 0
+#define DCBX_APP_PRI_0 0x01
+#define DCBX_APP_PRI_1 0x02
+#define DCBX_APP_PRI_2 0x04
+#define DCBX_APP_PRI_3 0x08
+#define DCBX_APP_PRI_4 0x10
+#define DCBX_APP_PRI_5 0x20
+#define DCBX_APP_PRI_6 0x40
+#define DCBX_APP_PRI_7 0x80
+#define DCBX_APP_SF_MASK 0x00000300
+#define DCBX_APP_SF_SHIFT 8
+#define DCBX_APP_SF_ETHTYPE 0
+#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_SF_IEEE_MASK 0x0000f000
+#define DCBX_APP_SF_IEEE_SHIFT 12
+#define DCBX_APP_SF_IEEE_RESERVED 0
+#define DCBX_APP_SF_IEEE_ETHTYPE 1
+#define DCBX_APP_SF_IEEE_TCP_PORT 2
+#define DCBX_APP_SF_IEEE_UDP_PORT 3
+#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
+
+#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT 16
+};
+
 struct dcbx_app_priority_feature {
 	u32 flags;
-#define DCBX_APP_ENABLED_MASK 0x00000001
-#define DCBX_APP_ENABLED_SHIFT 0
-#define DCBX_APP_WILLING_MASK 0x00000002
-#define DCBX_APP_WILLING_SHIFT 1
-#define DCBX_APP_ERROR_MASK 0x00000004
-#define DCBX_APP_ERROR_SHIFT 2
-/* Not in use
- * #define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00
- * #define DCBX_APP_DEFAULT_PRI_SHIFT 8
- */
-#define DCBX_APP_MAX_TCS_MASK 0x0000f000
-#define DCBX_APP_MAX_TCS_SHIFT 12
-#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
-#define DCBX_APP_NUM_ENTRIES_SHIFT 16
+#define DCBX_APP_ENABLED_MASK 0x00000001
+#define DCBX_APP_ENABLED_SHIFT 0
+#define DCBX_APP_WILLING_MASK 0x00000002
+#define DCBX_APP_WILLING_SHIFT 1
+#define DCBX_APP_ERROR_MASK 0x00000004
+#define DCBX_APP_ERROR_SHIFT 2
+#define DCBX_APP_MAX_TCS_MASK 0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT 12
+#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT 16
 	struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
 };

-/* FW structure in BE */
 struct dcbx_features {
-	/* PG feature */
 	struct dcbx_ets_feature ets;
+	u32 pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
+
+#define DCBX_PFC_FLAGS_MASK 0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT 8
+#define DCBX_PFC_CAPS_MASK 0x00000f00
+#define DCBX_PFC_CAPS_SHIFT 8
+#define DCBX_PFC_MBC_MASK 0x00004000
+#define DCBX_PFC_MBC_SHIFT 14
+#define DCBX_PFC_WILLING_MASK 0x00008000
+#define DCBX_PFC_WILLING_SHIFT 15
+#define DCBX_PFC_ENABLED_MASK 0x00010000
+#define DCBX_PFC_ENABLED_SHIFT 16
+#define DCBX_PFC_ERROR_MASK 0x00020000
+#define DCBX_PFC_ERROR_SHIFT 17

-	/* PFC feature */
-	u32 pfc;
-#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
-#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
-
-#define DCBX_PFC_FLAGS_MASK 0x0000ff00
-#define DCBX_PFC_FLAGS_SHIFT 8
-#define DCBX_PFC_CAPS_MASK 0x00000f00
-#define DCBX_PFC_CAPS_SHIFT 8
-#define DCBX_PFC_MBC_MASK 0x00004000
-#define DCBX_PFC_MBC_SHIFT 14
-#define DCBX_PFC_WILLING_MASK 0x00008000
-#define DCBX_PFC_WILLING_SHIFT 15
-#define DCBX_PFC_ENABLED_MASK 0x00010000
-#define DCBX_PFC_ENABLED_SHIFT 16
-#define DCBX_PFC_ERROR_MASK 0x00020000
-#define DCBX_PFC_ERROR_SHIFT 17
-
-	/* APP feature */
 	struct dcbx_app_priority_feature app;
 };

 struct dcbx_local_params {
 	u32 config;
-#define DCBX_CONFIG_VERSION_MASK 0x00000003
-#define DCBX_CONFIG_VERSION_SHIFT 0
-#define DCBX_CONFIG_VERSION_DISABLED 0
-#define DCBX_CONFIG_VERSION_IEEE 1
-#define DCBX_CONFIG_VERSION_CEE 2
+#define DCBX_CONFIG_VERSION_MASK 0x00000007
+#define DCBX_CONFIG_VERSION_SHIFT 0
+#define DCBX_CONFIG_VERSION_DISABLED 0
+#define DCBX_CONFIG_VERSION_IEEE 1
+#define DCBX_CONFIG_VERSION_CEE 2
+#define DCBX_CONFIG_VERSION_STATIC 4

-	u32 flags;
-	struct dcbx_features features;
+	u32 flags;
+	struct dcbx_features features;
 };

 struct dcbx_mib {
-	u32 prefix_seq_num;
-	u32 flags;
-	struct dcbx_features features;
-	u32 suffix_seq_num;
+	u32 prefix_seq_num;
+	u32 flags;
+	struct dcbx_features features;
+	u32 suffix_seq_num;
 };

 struct lldp_system_tlvs_buffer_s {
-	u16 valid;
-	u16 length;
-	u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
+	u16 valid;
+	u16 length;
+	u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
 };

-/**************************************/
-/*                                    */
-/*     P U B L I C      G L O B A L   */
-/*                                    */
-/**************************************/
-struct public_global {
-	u32 max_path;
-#define MAX_PATH_BIG_BEAR 2
-#define MAX_PATH_K2 1
-	u32 max_ports;
-#define MODE_1P 1
-#define MODE_2P 2
-#define MODE_3P 3
-#define MODE_4P 4
-	u32 debug_mb_offset;
-	u32 phymod_dbg_mb_offset;
-	struct couple_mode_teaming cmt;
-	s32 internal_temperature;
-	u32 mfw_ver;
-	u32 running_bundle_id;
+struct dcb_dscp_map {
+	u32 flags;
+#define DCB_DSCP_ENABLE_MASK 0x1
+#define DCB_DSCP_ENABLE_SHIFT 0
+#define DCB_DSCP_ENABLE 1
+	u32 dscp_pri_map[8];
 };

-/**************************************/
-/*                                    */
-/*     P U B L I C      P A T H       */
-/*                                    */
-/**************************************/
+struct public_global {
+	u32 max_path;
+	u32 max_ports;
+	u32 debug_mb_offset;
+	u32 phymod_dbg_mb_offset;
+	struct couple_mode_teaming cmt;
+	s32 internal_temperature;
+	u32 mfw_ver;
+	u32 running_bundle_id;
+	s32 external_temperature;
+	u32 mdump_reason;
+};

-/****************************************************************************
-* Shared Memory 2 Region *
-****************************************************************************/
-/* The fw_flr_ack is actually built in the following way: */
-/* 8 bit: PF ack */
-/* 128 bit: VF ack */
-/* 8 bit: ios_dis_ack */
-/* In order to maintain endianity in the mailbox hsi, we want to keep using */
-/* u32. The fw must have the VF right after the PF since this is how it */
-/* access arrays(it expects always the VF to reside after the PF, and that */
-/* makes the calculation much easier for it. ) */
-/* In order to answer both limitations, and keep the struct small, the code */
-/* will abuse the structure defined here to achieve the actual partition */
-/* above */
-/****************************************************************************/
 struct fw_flr_mb {
-	u32 aggint;
-	u32 opgen_addr;
-	u32 accum_ack;	/* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */
-#define ACCUM_ACK_PF_BASE 0
-#define ACCUM_ACK_PF_SHIFT 0
-
-#define ACCUM_ACK_VF_BASE 8
-#define ACCUM_ACK_VF_SHIFT 3
-
-#define ACCUM_ACK_IOV_DIS_BASE 256
-#define ACCUM_ACK_IOV_DIS_SHIFT 8
+	u32 aggint;
+	u32 opgen_addr;
+	u32 accum_ack;
 };

 struct public_path {
-	struct fw_flr_mb flr_mb;
-	u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
-
-	u32 process_kill;
-#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
-#define PROCESS_KILL_COUNTER_SHIFT 0
-#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
-#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
+	struct fw_flr_mb flr_mb;
+	u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
+
+	u32 process_kill;
+#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT 0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
 #define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
 };

-/**************************************/
-/*                                    */
-/*     P U B L I C      P O R T       */
-/*                                    */
-/**************************************/
-
-/****************************************************************************
-* Driver <-> FW Mailbox *
-****************************************************************************/
-
 struct public_port {
-	u32 validity_map;	/* 0x0 (4*2 = 0x8) */
-
-	/* validity bits */
-#define MCP_VALIDITY_PCI_CFG 0x00100000
-#define MCP_VALIDITY_MB 0x00200000
-#define MCP_VALIDITY_DEV_INFO 0x00400000
-#define MCP_VALIDITY_RESERVED 0x00000007
-
-	/* One licensing bit should be set */
-#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
-#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
-#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
-#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
-
-	/* Active MFW */
-#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
-#define
MCP_VALIDITY_ACTIVE_MFW_MASK 0x000001c0 -#define MCP_VALIDITY_ACTIVE_MFW_NCSI 0x00000040 -#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0 + u32 validity_map; u32 link_status; -#define LINK_STATUS_LINK_UP \ - 0x00000001 -#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e -#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1) -#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) -#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) - -#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 - -#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 -#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 - -#define LINK_STATUS_PFC_ENABLED \ - 0x00000100 -#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 -#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 -#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800 -#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000 -#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000 -#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000 -#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000 -#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000 - -#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 -#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18) -#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18) -#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18) -#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18) - -#define LINK_STATUS_SFP_TX_FAULT \ - 0x00100000 -#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000 -#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000 - - u32 link_status1; - u32 ext_phy_fw_version; - u32 drv_phy_cfg_addr; - - u32 port_stx; - - u32 stat_nig_timer; - - struct port_mf_cfg port_mf_config; - struct port_stats stats; - - u32 media_type; -#define MEDIA_UNSPECIFIED 0x0 -#define MEDIA_SFPP_10G_FIBER 0x1 -#define MEDIA_XFP_FIBER 0x2 -#define MEDIA_DA_TWINAX 0x3 -#define MEDIA_BASE_T 0x4 -#define MEDIA_SFP_1G_FIBER 0x5 -#define MEDIA_MODULE_FIBER 0x6 -#define MEDIA_KR 0xf0 -#define MEDIA_NOT_PRESENT 0xff +#define LINK_STATUS_LINK_UP 0x00000001 +#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e +#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) + +#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 + +#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 +#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 + +#define LINK_STATUS_PFC_ENABLED 0x00000100 +#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 +#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 +#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800 +#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000 +#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000 +#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000 +#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000 +#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000 + +#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 
0x000C0000 +#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18) +#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18) +#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18) +#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18) + +#define LINK_STATUS_SFP_TX_FAULT 0x00100000 +#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000 +#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000 +#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000 +#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000 +#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000 +#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000 + + u32 link_status1; + u32 ext_phy_fw_version; + u32 drv_phy_cfg_addr; + + u32 port_stx; + + u32 stat_nig_timer; + + struct port_mf_cfg port_mf_config; + struct port_stats stats; + + u32 media_type; +#define MEDIA_UNSPECIFIED 0x0 +#define MEDIA_SFPP_10G_FIBER 0x1 +#define MEDIA_XFP_FIBER 0x2 +#define MEDIA_DA_TWINAX 0x3 +#define MEDIA_BASE_T 0x4 +#define MEDIA_SFP_1G_FIBER 0x5 +#define MEDIA_MODULE_FIBER 0x6 +#define MEDIA_KR 0xf0 +#define MEDIA_NOT_PRESENT 0xff u32 lfa_status; -#define LFA_LINK_FLAP_REASON_OFFSET 0 -#define LFA_LINK_FLAP_REASON_MASK 0x000000ff -#define LFA_NO_REASON (0 << 0) -#define LFA_LINK_DOWN BIT(0) -#define LFA_FORCE_INIT BIT(1) -#define LFA_LOOPBACK_MISMATCH BIT(2) -#define LFA_SPEED_MISMATCH BIT(3) -#define LFA_FLOW_CTRL_MISMATCH BIT(4) -#define LFA_ADV_SPEED_MISMATCH BIT(5) -#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8 -#define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00 -#define LINK_FLAP_COUNT_OFFSET 16 -#define LINK_FLAP_COUNT_MASK 0x00ff0000 - - u32 link_change_count; - - /* LLDP params */ - struct lldp_config_params_s lldp_config_params[ - LLDP_MAX_LLDP_AGENTS]; - struct lldp_status_params_s lldp_status_params[ - LLDP_MAX_LLDP_AGENTS]; - struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf; + u32 link_change_count; + + struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS]; + struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS]; + struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf; /* DCBX related MIB */ - struct dcbx_local_params local_admin_dcbx_mib; - struct dcbx_mib remote_dcbx_mib; - struct dcbx_mib operational_dcbx_mib; + struct dcbx_local_params local_admin_dcbx_mib; + struct dcbx_mib remote_dcbx_mib; + struct dcbx_mib operational_dcbx_mib; - u32 fc_npiv_nvram_tbl_addr; - u32 fc_npiv_nvram_tbl_size; - u32 transceiver_data; -#define PMM_TRANSCEIVER_STATE_MASK 0x000000FF -#define PMM_TRANSCEIVER_STATE_SHIFT 0x00000000 -#define PMM_TRANSCEIVER_STATE_PRESENT 0x00000001 -}; + u32 reserved[2]; + u32 transceiver_data; +#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF +#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000 +#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000 +#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001 +#define ETH_TRANSCEIVER_STATE_VALID 0x00000003 +#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008 -/**************************************/ -/* */ -/* P U B L I C F U N C */ -/* */ -/**************************************/ + u32 wol_info; + u32 wol_pkt_len; + u32 wol_pkt_details; + struct dcb_dscp_map dcb_dscp_map; +}; struct public_func { - u32 iscsi_boot_signature; - u32 iscsi_boot_block_offset; - - u32 mtu_size; - u32 c2s_pcp_map_lower; - u32 c2s_pcp_map_upper; - u32 c2s_pcp_map_default; - u32 reserved[4]; - - u32 config; - - /* E/R/I/D */ - /* function 0 of each port cannot be hidden */ -#define FUNC_MF_CFG_FUNC_HIDE 0x00000001 -#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002 -#define 
FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001 - -#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0 -#define FUNC_MF_CFG_PROTOCOL_SHIFT 4 -#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000 + u32 reserved0[2]; + + u32 mtu_size; + + u32 reserved[7]; + + u32 config; +#define FUNC_MF_CFG_FUNC_HIDE 0x00000001 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001 + +#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0 +#define FUNC_MF_CFG_PROTOCOL_SHIFT 4 +#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000 #define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010 -#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020 #define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030 -#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030 +#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030 - /* MINBW, MAXBW */ - /* value range - 0..100, increments in 1 % */ -#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00 -#define FUNC_MF_CFG_MIN_BW_SHIFT 8 -#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 -#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000 -#define FUNC_MF_CFG_MAX_BW_SHIFT 16 -#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000 +#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00 +#define FUNC_MF_CFG_MIN_BW_SHIFT 8 +#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 +#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000 +#define FUNC_MF_CFG_MAX_BW_SHIFT 16 +#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000 - u32 status; -#define FUNC_STATUS_VLINK_DOWN 0x00000001 + u32 status; +#define FUNC_STATUS_VLINK_DOWN 0x00000001 - u32 mac_upper; /* MAC */ -#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff -#define FUNC_MF_CFG_UPPERMAC_SHIFT 0 -#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK - u32 mac_lower; -#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff + u32 mac_upper; +#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff +#define FUNC_MF_CFG_UPPERMAC_SHIFT 0 +#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK + u32 mac_lower; +#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff - u32 fcoe_wwn_port_name_upper; - u32 fcoe_wwn_port_name_lower; + u32 fcoe_wwn_port_name_upper; + u32 fcoe_wwn_port_name_lower; - u32 fcoe_wwn_node_name_upper; - u32 fcoe_wwn_node_name_lower; + u32 fcoe_wwn_node_name_upper; + u32 fcoe_wwn_node_name_lower; - u32 ovlan_stag; /* tags */ -#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff -#define FUNC_MF_CFG_OV_STAG_SHIFT 0 -#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK + u32 ovlan_stag; +#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff +#define FUNC_MF_CFG_OV_STAG_SHIFT 0 +#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK - u32 pf_allocation; /* vf per pf */ + u32 pf_allocation; - u32 preserve_data; /* Will be used bt CCM */ + u32 preserve_data; - u32 driver_last_activity_ts; + u32 driver_last_activity_ts; - u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */ + u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; - u32 drv_id; -#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff -#define DRV_ID_PDA_COMP_VER_SHIFT 0 + u32 drv_id; +#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff +#define DRV_ID_PDA_COMP_VER_SHIFT 0 -#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000 -#define DRV_ID_MCP_HSI_VER_SHIFT 16 -#define DRV_ID_MCP_HSI_VER_CURRENT BIT(DRV_ID_MCP_HSI_VER_SHIFT) +#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000 +#define DRV_ID_MCP_HSI_VER_SHIFT 16 +#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT) -#define DRV_ID_DRV_TYPE_MASK 0x7f000000 -#define DRV_ID_DRV_TYPE_SHIFT 24 -#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_WINDOWS (2 << 
DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_MASK 0x7f000000 +#define DRV_ID_DRV_TYPE_SHIFT 24 +#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_INIT_HW_MASK 0x80000000 -#define DRV_ID_DRV_INIT_HW_SHIFT 31 -#define DRV_ID_DRV_INIT_HW_FLAG BIT(DRV_ID_DRV_INIT_HW_SHIFT) +#define DRV_ID_DRV_INIT_HW_MASK 0x80000000 +#define DRV_ID_DRV_INIT_HW_SHIFT 31 +#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT) }; -/**************************************/ -/* */ -/* P U B L I C M B */ -/* */ -/**************************************/ -/* This is the only section that the driver can write to, and each */ -/* Basically each driver request to set feature parameters, - * will be done using a different command, which will be linked - * to a specific data structure from the union below. - * For huge strucuture, the common blank structure should be used. - */ - struct mcp_mac { - u32 mac_upper; /* Upper 16 bits are always zeroes */ - u32 mac_lower; + u32 mac_upper; + u32 mac_lower; }; struct mcp_val64 { - u32 lo; - u32 hi; + u32 lo; + u32 hi; }; struct mcp_file_att { - u32 nvm_start_addr; - u32 len; + u32 nvm_start_addr; + u32 len; +}; + +struct bist_nvm_image_att { + u32 return_code; + u32 image_type; + u32 nvm_start_addr; + u32 len; }; #define MCP_DRV_VER_STR_SIZE 16 #define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32)) #define MCP_DRV_NVM_BUF_LEN 32 struct drv_version_stc { - u32 version; - u8 name[MCP_DRV_VER_STR_SIZE - 4]; + u32 version; + u8 name[MCP_DRV_VER_STR_SIZE - 4]; +}; + +struct lan_stats_stc { + u64 ucast_rx_pkts; + u64 ucast_tx_pkts; + u32 fcs_err; + u32 rserved; +}; + +struct ocbb_data_stc { + u32 ocbb_host_addr; + u32 ocsd_host_addr; + u32 ocsd_req_update_interval; +}; + +#define MAX_NUM_OF_SENSORS 7 +struct temperature_status_stc { + u32 num_of_sensors; + u32 sensor[MAX_NUM_OF_SENSORS]; +}; + +/* crash dump configuration header */ +struct mdump_config_stc { + u32 version; + u32 config; + u32 epoc; + u32 num_of_logs; + u32 valid_logs; }; union drv_union_data { - u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; - struct mcp_mac wol_mac; + u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; + struct mcp_mac wol_mac; + + struct eth_phy_cfg drv_phy_cfg; - struct pmm_phy_cfg drv_phy_cfg; + struct mcp_val64 val64; - struct mcp_val64 val64; /* For PHY / AVS commands */ + u8 raw_data[MCP_DRV_NVM_BUF_LEN]; - u8 raw_data[MCP_DRV_NVM_BUF_LEN]; + struct mcp_file_att file_att; - struct mcp_file_att file_att; + u32 ack_vf_disabled[VF_MAX_STATIC / 32]; - u32 ack_vf_disabled[VF_MAX_STATIC / 32]; + struct drv_version_stc drv_version; - struct drv_version_stc drv_version; + struct lan_stats_stc lan_stats; + u64 reserved_stats[11]; + struct ocbb_data_stc ocbb_info; + struct temperature_status_stc temp_info; + struct bist_nvm_image_att nvm_image_att; + struct mdump_config_stc mdump_config; }; struct public_drv_mb { u32 drv_mb_header; -#define DRV_MSG_CODE_MASK 0xffff0000 -#define DRV_MSG_CODE_LOAD_REQ 0x10000000 -#define DRV_MSG_CODE_LOAD_DONE 0x11000000 -#define DRV_MSG_CODE_INIT_HW 0x12000000 -#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000 -#define 
DRV_MSG_CODE_UNLOAD_DONE 0x21000000 -#define DRV_MSG_CODE_INIT_PHY 0x22000000 - /* Params - FORCE - Reinitialize the link regardless of LFA */ - /* - DONT_CARE - Don't flap the link if up */ -#define DRV_MSG_CODE_LINK_RESET 0x23000000 - -#define DRV_MSG_CODE_SET_LLDP 0x24000000 -#define DRV_MSG_CODE_SET_DCBX 0x25000000 +#define DRV_MSG_CODE_MASK 0xffff0000 +#define DRV_MSG_CODE_LOAD_REQ 0x10000000 +#define DRV_MSG_CODE_LOAD_DONE 0x11000000 +#define DRV_MSG_CODE_INIT_HW 0x12000000 +#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000 +#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 +#define DRV_MSG_CODE_INIT_PHY 0x22000000 +#define DRV_MSG_CODE_LINK_RESET 0x23000000 +#define DRV_MSG_CODE_SET_DCBX 0x25000000 + #define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 -#define DRV_MSG_CODE_NIG_DRAIN 0x30000000 - -#define DRV_MSG_CODE_INITIATE_FLR 0x02000000 -#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 -#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 -#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000 -#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000 -#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000 -#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000 -#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000 -#define DRV_MSG_CODE_NVM_DEL_FILE 0x00080000 -#define DRV_MSG_CODE_MCP_RESET 0x00090000 -#define DRV_MSG_CODE_SET_SECURE_MODE 0x000a0000 -#define DRV_MSG_CODE_PHY_RAW_READ 0x000b0000 -#define DRV_MSG_CODE_PHY_RAW_WRITE 0x000c0000 -#define DRV_MSG_CODE_PHY_CORE_READ 0x000d0000 -#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000 -#define DRV_MSG_CODE_SET_VERSION 0x000f0000 - -#define DRV_MSG_CODE_BIST_TEST 0x001e0000 -#define DRV_MSG_CODE_SET_LED_MODE 0x00200000 - -#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff +#define DRV_MSG_CODE_NIG_DRAIN 0x30000000 +#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 +#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 +#define DRV_MSG_CODE_MCP_RESET 0x00090000 +#define DRV_MSG_CODE_SET_VERSION 0x000f0000 + +#define DRV_MSG_CODE_BIST_TEST 0x001e0000 +#define DRV_MSG_CODE_SET_LED_MODE 0x00200000 + +#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff u32 drv_mb_param; +#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 +#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF +#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8 +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00 +#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001 +#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0 + + +#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 +#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 +#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 + +#define DRV_MB_PARAM_BIST_REGISTER_TEST 1 +#define DRV_MB_PARAM_BIST_CLOCK_TEST 2 + +#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 +#define DRV_MB_PARAM_BIST_RC_PASSED 1 +#define DRV_MB_PARAM_BIST_RC_FAILED 2 +#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3 - /* UNLOAD_REQ params */ -#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000 -#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 -#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002 -#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003 - - /* UNLOAD_DONE_params */ -#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001 - - /* INIT_PHY params */ -#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001 -#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002 - - /* LLDP / DCBX params*/ -#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001 -#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0 -#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006 -#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1 -#define 
DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008 -#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 - -#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF -#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0 - -#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1 -#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2 - -#define DRV_MB_PARAM_NVM_OFFSET_SHIFT 0 -#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF -#define DRV_MB_PARAM_NVM_LEN_SHIFT 24 -#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000 - -#define DRV_MB_PARAM_PHY_ADDR_SHIFT 0 -#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF -#define DRV_MB_PARAM_PHY_LANE_SHIFT 16 -#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000 -#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT 29 -#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000 -#define DRV_MB_PARAM_PHY_PORT_SHIFT 30 -#define DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000 - -/* configure vf MSIX params*/ -#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 -#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF -#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8 -#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00 - -#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 -#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 -#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 - -#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0 -#define DRV_MB_PARAM_BIST_REGISTER_TEST 1 -#define DRV_MB_PARAM_BIST_CLOCK_TEST 2 - -#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 -#define DRV_MB_PARAM_BIST_RC_PASSED 1 -#define DRV_MB_PARAM_BIST_RC_FAILED 2 -#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3 - -#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0 -#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF +#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0 +#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF u32 fw_mb_header; -#define FW_MSG_CODE_MASK 0xffff0000 -#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000 -#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 -#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000 -#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000 -#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 -#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000 -#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000 -#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000 -#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 -#define FW_MSG_CODE_INIT_PHY_DONE 0x21200000 -#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS 0x21300000 -#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000 -#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000 -#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000 -#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000 -#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000 -#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 -#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000 -#define FW_MSG_CODE_FLR_ACK 0x02000000 -#define FW_MSG_CODE_FLR_NACK 0x02100000 - -#define FW_MSG_CODE_NVM_OK 0x00010000 -#define FW_MSG_CODE_NVM_INVALID_MODE 0x00020000 -#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED 0x00030000 -#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE 0x00040000 -#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND 0x00050000 -#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND 0x00060000 -#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000 -#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000 -#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC 0x00090000 -#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR 0x000a0000 -#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE 0x000b0000 -#define FW_MSG_CODE_NVM_FILE_NOT_FOUND 0x000c0000 -#define FW_MSG_CODE_NVM_OPERATION_FAILED 
0x000d0000 -#define FW_MSG_CODE_NVM_FAILED_UNALIGNED 0x000e0000 -#define FW_MSG_CODE_NVM_BAD_OFFSET 0x000f0000 -#define FW_MSG_CODE_NVM_BAD_SIGNATURE 0x00100000 -#define FW_MSG_CODE_NVM_FILE_READ_ONLY 0x00200000 -#define FW_MSG_CODE_NVM_UNKNOWN_FILE 0x00300000 -#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000 -#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000 -#define FW_MSG_CODE_PHY_OK 0x00110000 -#define FW_MSG_CODE_PHY_ERROR 0x00120000 -#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000 -#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000 -#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000 -#define FW_MSG_CODE_OK 0x00160000 - -#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff - - u32 fw_mb_param; - - u32 drv_pulse_mb; -#define DRV_PULSE_SEQ_MASK 0x00007fff -#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 -#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 +#define FW_MSG_CODE_MASK 0xffff0000 +#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000 +#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 +#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000 +#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 +#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000 +#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000 +#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000 +#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 +#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000 +#define FW_MSG_CODE_OK 0x00160000 + +#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff + + u32 fw_mb_param; + + u32 drv_pulse_mb; +#define DRV_PULSE_SEQ_MASK 0x00007fff +#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 +#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 + u32 mcp_pulse_mb; -#define MCP_PULSE_SEQ_MASK 0x00007fff -#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 -#define MCP_EVENT_MASK 0xffff0000 -#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 +#define MCP_PULSE_SEQ_MASK 0x00007fff +#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 +#define MCP_EVENT_MASK 0xffff0000 +#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 union drv_union_data union_data; }; -/* MFW - DRV MB */ -/********************************************************************** -* Description -* Incremental Aggregative -* 8-bit MFW counter per message -* 8-bit ack-counter per message -* Capabilities -* Provides up to 256 aggregative message per type -* Provides 4 message types in dword -* Message type pointers to byte offset -* Backward Compatibility by using sizeof for the counters. -* No lock requires for 32bit messages -* Limitations: -* In case of messages greater than 32bit, a dedicated mechanism(e.g lock) -* is required to prevent data corruption. 
-**********************************************************************/ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_LINK_CHANGE, MFW_DRV_MSG_FLR_FW_ACK_FAILED, @@ -4085,37 +7312,33 @@ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_LLDP_DATA_UPDATED, MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED, MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, - MFW_DRV_MSG_ERROR_RECOVERY, + MFW_DRV_MSG_RESERVED4, MFW_DRV_MSG_BW_UPDATE, - MFW_DRV_MSG_S_TAG_UPDATE, - MFW_DRV_MSG_GET_LAN_STATS, - MFW_DRV_MSG_GET_FCOE_STATS, - MFW_DRV_MSG_GET_ISCSI_STATS, - MFW_DRV_MSG_GET_RDMA_STATS, - MFW_DRV_MSG_FAILURE_DETECTED, + MFW_DRV_MSG_BW_UPDATE5, + MFW_DRV_MSG_BW_UPDATE6, + MFW_DRV_MSG_BW_UPDATE7, + MFW_DRV_MSG_BW_UPDATE8, + MFW_DRV_MSG_BW_UPDATE9, + MFW_DRV_MSG_BW_UPDATE10, MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, + MFW_DRV_MSG_BW_UPDATE11, MFW_DRV_MSG_MAX }; -#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1) -#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2) -#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3) -#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id)) +#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1) +#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2) +#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3) +#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id)) struct public_mfw_mb { - u32 sup_msgs; - u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; - u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; + u32 sup_msgs; + u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; + u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; }; -/**************************************/ -/* */ -/* P U B L I C D A T A */ -/* */ -/**************************************/ enum public_sections { - PUBLIC_DRV_MB, /* Points to the first drv_mb of path0 */ - PUBLIC_MFW_MB, /* Points to the first mfw_mb of path0 */ + PUBLIC_DRV_MB, + PUBLIC_MFW_MB, PUBLIC_GLOBAL, PUBLIC_PATH, PUBLIC_PORT, @@ -4123,1080 +7346,179 @@ enum public_sections { PUBLIC_MAX_SECTIONS }; -struct drv_ver_info_stc { - u32 ver; - u8 name[32]; -}; - struct mcp_public_data { - /* The sections fields is an array */ - u32 num_sections; - offsize_t sections[PUBLIC_MAX_SECTIONS]; - struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX]; - struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX]; - struct public_global global; - struct public_path path[MCP_GLOB_PATH_MAX]; - struct public_port port[MCP_GLOB_PORT_MAX]; - struct public_func func[MCP_GLOB_FUNC_MAX]; - struct drv_ver_info_stc drv_info; + u32 num_sections; + u32 sections[PUBLIC_MAX_SECTIONS]; + struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX]; + struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX]; + struct public_global global; + struct public_path path[MCP_GLOB_PATH_MAX]; + struct public_port port[MCP_GLOB_PORT_MAX]; + struct public_func func[MCP_GLOB_FUNC_MAX]; }; struct nvm_cfg_mac_address { - u32 mac_addr_hi; -#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF -#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0 - - u32 mac_addr_lo; + u32 mac_addr_hi; +#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF +#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0 + u32 mac_addr_lo; }; -/****************************************** -* nvm_cfg1 structs -******************************************/ - struct nvm_cfg1_glob { - u32 generic_cont0; /* 0x0 */ -#define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F -#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0 -#define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0 -#define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1 -#define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2 -#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3 -#define NVM_CFG1_GLOB_MF_MODE_MASK 
0x00000FF0 -#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 -#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 -#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 -#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 -#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 -#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 -#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5 -#define NVM_CFG1_GLOB_MF_MODE_BD 0x6 -#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7 -#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000 -#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12 -#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0 -#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1 -#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000 -#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13 -#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000 -#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21 -#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000 -#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29 -#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0 -#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1 -#define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000 -#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30 -#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0 -#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1 -#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000 -#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31 -#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0 -#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1 - - u32 engineering_change[3]; /* 0x4 */ - - u32 manufacturing_id; /* 0x10 */ - - u32 serial_number[4]; /* 0x14 */ - - u32 pcie_cfg; /* 0x24 */ -#define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003 -#define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0 -#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0 -#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1 -#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2 -#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004 -#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2 -#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0 -#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1 -#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018 -#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3 -#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0 -#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_DISABLED 0x1 -#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2 -#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_DISABLED 0x3 -#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_MASK 0x00000020 -#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_OFFSET 5 -#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_DISABLED 0x0 -#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_ENABLED 0x1 -#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0 -#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6 -#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00 -#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10 -#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0 -#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1 -#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2 -#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3 -#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000 -#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13 -#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000 -#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21 -#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000 -#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29 - - u32 mgmt_traffic; /* 0x28 */ -#define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001 -#define NVM_CFG1_GLOB_RESERVED60_OFFSET 0 -#define NVM_CFG1_GLOB_RESERVED60_100KHZ 0x0 -#define NVM_CFG1_GLOB_RESERVED60_400KHZ 0x1 -#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE 
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1 -#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00 -#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9 -#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000 -#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17 -#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000 -#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25 -#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0 -#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1 -#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2 - - u32 core_cfg; /* 0x2C */ -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G 0x0 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G 0x1 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G 0x2 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F 0x3 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E 0x4 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G 0x5 -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G 0xB -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G 0xC -#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G 0xD -#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK 0x00000100 -#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET 8 -#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED 0x0 -#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED 0x1 -#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK 0x00000200 -#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET 9 -#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED 0x0 -#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED 0x1 -#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK 0x0003FC00 -#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET 10 -#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK 0x03FC0000 -#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET 18 -#define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000 -#define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26 -#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0 -#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP 0x1 -#define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3 -#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000 -#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29 -#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0 -#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1 - - u32 e_lane_cfg1; /* 0x30 */ -#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F -#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0 -#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0 -#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4 -#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00 -#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8 -#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000 -#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12 -#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000 -#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16 -#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000 -#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20 -#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000 -#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24 -#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000 -#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28 - - u32 e_lane_cfg2; /* 0x34 */ -#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001 -#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0 -#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002 -#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1 -#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004 -#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2 -#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008 -#define 
NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3 -#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010 -#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4 -#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020 -#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5 -#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040 -#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6 -#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080 -#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7 -#define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00 -#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8 -#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0 -#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1 -#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2 -#define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000 -#define NVM_CFG1_GLOB_NCSI_OFFSET 12 -#define NVM_CFG1_GLOB_NCSI_DISABLED 0x0 -#define NVM_CFG1_GLOB_NCSI_ENABLED 0x1 - - u32 f_lane_cfg1; /* 0x38 */ -#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F -#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0 -#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0 -#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4 -#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00 -#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8 -#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000 -#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12 -#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000 -#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16 -#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000 -#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20 -#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000 -#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24 -#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000 -#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28 - - u32 f_lane_cfg2; /* 0x3C */ -#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001 -#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0 -#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002 -#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1 -#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004 -#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2 -#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008 -#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3 -#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010 -#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4 -#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020 -#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5 -#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040 -#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6 -#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080 -#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7 - - u32 eagle_preemphasis; /* 0x40 */ -#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF -#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0 -#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8 -#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16 -#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000 -#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24 - - u32 eagle_driver_current; /* 0x44 */ -#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF -#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0 -#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8 -#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16 -#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000 -#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24 - - u32 falcon_preemphasis; /* 0x48 */ -#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF -#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0 
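/* Editor's note: each of the *_preemphasis and *_driver_current words in
 * this block packs one 8-bit value per SerDes lane, lane 0 in the low byte.
 * A minimal hedged sketch of extracting a single lane, assuming kernel
 * types from <linux/types.h>; qed_nvm_lane_field() is an illustrative
 * name, not a helper this patch defines:
 */
static inline u8 qed_nvm_lane_field(u32 reg, unsigned int lane)
{
	/* lane 0 -> bits 7:0, lane 1 -> bits 15:8, and so on */
	return (reg >> (8 * lane)) & 0xff;
}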
-#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8 -#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16 -#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000 -#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24 - - u32 falcon_driver_current; /* 0x4C */ -#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF -#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0 -#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8 -#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16 -#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000 -#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24 - - u32 pci_id; /* 0x50 */ -#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF -#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0 - - u32 pci_subsys_id; /* 0x54 */ -#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF -#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0 -#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000 -#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16 - - u32 bar; /* 0x58 */ -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9 -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE -#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9 -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE -#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF -#define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00 -#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8 -#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0 -#define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1 -#define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2 -#define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3 -#define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4 -#define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5 -#define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6 -#define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7 -#define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8 -#define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9 -#define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA -#define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB -#define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC -#define NVM_CFG1_GLOB_BAR2_SIZE_256M 
0xD -#define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE -#define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF - - u32 eagle_txfir_main; /* 0x5C */ -#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF -#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0 -#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8 -#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16 -#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000 -#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24 - - u32 eagle_txfir_post; /* 0x60 */ -#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF -#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0 -#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8 -#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16 -#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000 -#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24 - - u32 falcon_txfir_main; /* 0x64 */ -#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF -#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0 -#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8 -#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16 -#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000 -#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24 - - u32 falcon_txfir_post; /* 0x68 */ -#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF -#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0 -#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8 -#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16 -#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000 -#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24 - - u32 manufacture_ver; /* 0x6C */ -#define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F -#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0 -#define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0 -#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6 -#define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000 -#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12 -#define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000 -#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18 -#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000 -#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24 - - u32 manufacture_time; /* 0x70 */ -#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F -#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0 -#define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0 -#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6 -#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000 -#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12 - - u32 led_global_settings; /* 0x74 */ -#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F -#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0 -#define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0 -#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4 -#define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00 -#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8 -#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000 -#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12 - - u32 generic_cont1; /* 0x78 */ -#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF -#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0 - - u32 mbi_version; /* 0x7C */ -#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF -#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0 -#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8 -#define 
NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000 -#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16 - - u32 mbi_date; /* 0x80 */ - - u32 misc_sig; /* 0x84 */ - - /* Define the GPIO mapping to switch i2c mux */ -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19 -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F -#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20 - u32 device_capabilities; /* 0x88 */ -#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 - u32 power_dissipated; /* 0x8C */ - u32 power_consumed; /* 0x90 */ - u32 efi_version; /* 0x94 */ - u32 reserved[42]; /* 0x98 */ + u32 generic_cont0; +#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0 +#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 +#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 +#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 +#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 +#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 +#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 +#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5 +#define NVM_CFG1_GLOB_MF_MODE_BD 0x6 +#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7 + u32 engineering_change[3]; + u32 manufacturing_id; + u32 serial_number[4]; + u32 pcie_cfg; + u32 mgmt_traffic; + u32 core_cfg; +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xB +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD +#define 
NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE + u32 e_lane_cfg1; + u32 e_lane_cfg2; + u32 f_lane_cfg1; + u32 f_lane_cfg2; + u32 mps10_preemphasis; + u32 mps10_driver_current; + u32 mps25_preemphasis; + u32 mps25_driver_current; + u32 pci_id; + u32 pci_subsys_id; + u32 bar; + u32 mps10_txfir_main; + u32 mps10_txfir_post; + u32 mps25_txfir_main; + u32 mps25_txfir_post; + u32 manufacture_ver; + u32 manufacture_time; + u32 led_global_settings; + u32 generic_cont1; + u32 mbi_version; + u32 mbi_date; + u32 misc_sig; + u32 device_capabilities; +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8 + u32 power_dissipated; + u32 power_consumed; + u32 efi_version; + u32 multi_network_modes_capability; + u32 reserved[41]; }; struct nvm_cfg1_path { - u32 reserved[30]; /* 0x0 */ + u32 reserved[30]; }; struct nvm_cfg1_port { - u32 reserved__m_relocated_to_option_123; /* 0x0 */ - u32 reserved__m_relocated_to_option_124; /* 0x4 */ - u32 generic_cont0; /* 0x8 */ -#define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF -#define NVM_CFG1_PORT_LED_MODE_OFFSET 0 -#define NVM_CFG1_PORT_LED_MODE_MAC1 0x0 -#define NVM_CFG1_PORT_LED_MODE_PHY1 0x1 -#define NVM_CFG1_PORT_LED_MODE_PHY2 0x2 -#define NVM_CFG1_PORT_LED_MODE_PHY3 0x3 -#define NVM_CFG1_PORT_LED_MODE_MAC2 0x4 -#define NVM_CFG1_PORT_LED_MODE_PHY4 0x5 -#define NVM_CFG1_PORT_LED_MODE_PHY5 0x6 -#define NVM_CFG1_PORT_LED_MODE_PHY6 0x7 -#define NVM_CFG1_PORT_LED_MODE_MAC3 0x8 -#define NVM_CFG1_PORT_LED_MODE_PHY7 0x9 -#define NVM_CFG1_PORT_LED_MODE_PHY8 0xA -#define NVM_CFG1_PORT_LED_MODE_PHY9 0xB -#define NVM_CFG1_PORT_LED_MODE_MAC4 0xC -#define NVM_CFG1_PORT_LED_MODE_PHY10 0xD -#define NVM_CFG1_PORT_LED_MODE_PHY11 0xE -#define NVM_CFG1_PORT_LED_MODE_PHY12 0xF -#define NVM_CFG1_PORT_ROCE_PRIORITY_MASK 0x0000FF00 -#define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET 8 -#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000 -#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16 -#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0 -#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 -#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 -#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 -#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 - u32 pcie_cfg; /* 0xC */ -#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007 -#define NVM_CFG1_PORT_RESERVED15_OFFSET 0 - - u32 features; /* 0x10 */ -#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001 -#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0 -#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0 -#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1 -#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002 -#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1 -#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0 -#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1 - - u32 speed_cap_mask; /* 0x14 */ -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 -#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G 0x40 -#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000 -#define 
NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16 -#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1 -#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2 -#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8 -#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10 -#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20 -#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G 0x40 - - u32 link_settings; /* 0x18 */ -#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F -#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6 -#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G 0x7 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 -#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6 -#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G 0x7 -#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800 -#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11 -#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1 -#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2 -#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4 -#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK 0x00004000 -#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14 -#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED 0x0 -#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED 0x1 - - u32 phy_cfg; /* 0x1C */ -#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF -#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0 -#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1 -#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2 -#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4 -#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8 -#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22 -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31 -#define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000 -#define NVM_CFG1_PORT_AN_MODE_OFFSET 24 -#define NVM_CFG1_PORT_AN_MODE_NONE 0x0 -#define NVM_CFG1_PORT_AN_MODE_CL73 0x1 -#define NVM_CFG1_PORT_AN_MODE_CL37 0x2 -#define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3 
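
Every nvm_cfg word in this header is accessed through paired _MASK/_OFFSET defines like the speed-capability, link-settings and AN-mode fields above: a field is isolated by ANDing the 32-bit word with its mask and shifting right by its offset, and written by the inverse operation. The qed sources do this through their SET_FIELD()/GET_FIELD() macros (visible in the qed_init_fw_funcs.c and qed_mcp.c hunks further down); the stand-alone helpers below are an illustrative sketch of the same idiom, not driver code:

    #include <stdint.h>
    #include <stdio.h>

    /* Copies of two mask/offset pairs from the link_settings word above. */
    #define DRV_LINK_SPEED_MASK      0x0000000F
    #define DRV_LINK_SPEED_OFFSET    0
    #define DRV_FLOW_CONTROL_MASK    0x00000070
    #define DRV_FLOW_CONTROL_OFFSET  4

    /* Isolate a field: AND with the mask, shift right by the offset. */
    static uint32_t nvm_get_field(uint32_t word, uint32_t mask, unsigned int ofs)
    {
            return (word & mask) >> ofs;
    }

    /* Replace a field: clear the masked bits, then OR in the shifted value. */
    static uint32_t nvm_set_field(uint32_t word, uint32_t mask, unsigned int ofs,
                                  uint32_t val)
    {
            return (word & ~mask) | ((val << ofs) & mask);
    }

    int main(void)
    {
            uint32_t link_settings = 0;

            /* 0x4 = 25G link speed, 0x2 = RX flow control */
            link_settings = nvm_set_field(link_settings, DRV_LINK_SPEED_MASK,
                                          DRV_LINK_SPEED_OFFSET, 0x4);
            link_settings = nvm_set_field(link_settings, DRV_FLOW_CONTROL_MASK,
                                          DRV_FLOW_CONTROL_OFFSET, 0x2);
            printf("link_settings = 0x%08x, speed field = 0x%x\n",
                   (unsigned int)link_settings,
                   (unsigned int)nvm_get_field(link_settings,
                                               DRV_LINK_SPEED_MASK,
                                               DRV_LINK_SPEED_OFFSET));
            /* prints: link_settings = 0x00000024, speed field = 0x4 */
            return 0;
    }
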
-#define NVM_CFG1_PORT_AN_MODE_CL37_BAM 0x4 -#define NVM_CFG1_PORT_AN_MODE_HPAM 0x5 -#define NVM_CFG1_PORT_AN_MODE_SGMII 0x6 - - u32 mgmt_traffic; /* 0x20 */ -#define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F -#define NVM_CFG1_PORT_RESERVED61_OFFSET 0 - - u32 ext_phy; /* 0x24 */ -#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF -#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0 -#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0 -#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1 -#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00 -#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8 - - u32 mba_cfg1; /* 0x28 */ -#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001 -#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0 -#define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0 -#define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1 -#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006 -#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1 -#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078 -#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3 -#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080 -#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7 -#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0 -#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1 -#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100 -#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8 -#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0 -#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1 -#define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00 -#define NVM_CFG1_PORT_RESERVED5_OFFSET 9 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_100G 0x7 -#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8 -#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000 -#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21 - - u32 mba_cfg2; /* 0x2C */ -#define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF -#define NVM_CFG1_PORT_RESERVED65_OFFSET 0 -#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000 -#define NVM_CFG1_PORT_RESERVED66_OFFSET 16 - - u32 vf_cfg; /* 0x30 */ -#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF -#define NVM_CFG1_PORT_RESERVED8_OFFSET 0 -#define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000 -#define NVM_CFG1_PORT_RESERVED6_OFFSET 16 - - struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */ - - u32 led_port_settings; /* 0x3C */ -#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF -#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0 -#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00 -#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8 -#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000 -#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16 -#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1 -#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2 -#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8 -#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10 -#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20 -#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G 0x40 - - u32 transceiver_00; /* 0x40 */ - - /* Define for mapping of transceiver signal module absent */ -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF 
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19 -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F -#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20 - /* Define the GPIO mux settings to switch i2c mux to this port */ -#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00 -#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8 -#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000 -#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12 - - u32 reserved[133]; /* 0x44 */ + u32 reserved__m_relocated_to_option_123; + u32 reserved__m_relocated_to_option_124; + u32 generic_cont0; +#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000 +#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16 +#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0 +#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 +#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 +#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4 + u32 pcie_cfg; + u32 features; + u32 speed_cap_mask; +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40 + u32 link_settings; +#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F +#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2 +#define 
NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 + u32 phy_cfg; + u32 mgmt_traffic; + u32 ext_phy; + u32 mba_cfg1; + u32 mba_cfg2; + u32 vf_cfg; + struct nvm_cfg_mac_address lldp_mac_address; + u32 led_port_settings; + u32 transceiver_00; + u32 device_ids; + u32 board_cfg; + u32 mnm_10g_cap; + u32 mnm_10g_ctrl; + u32 mnm_10g_misc; + u32 mnm_25g_cap; + u32 mnm_25g_ctrl; + u32 mnm_25g_misc; + u32 mnm_40g_cap; + u32 mnm_40g_ctrl; + u32 mnm_40g_misc; + u32 mnm_50g_cap; + u32 mnm_50g_ctrl; + u32 mnm_50g_misc; + u32 mnm_100g_cap; + u32 mnm_100g_ctrl; + u32 mnm_100g_misc; + u32 reserved[116]; }; struct nvm_cfg1_func { - struct nvm_cfg_mac_address mac_address; /* 0x0 */ - - u32 rsrv1; /* 0x8 */ -#define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF -#define NVM_CFG1_FUNC_RESERVED1_OFFSET 0 -#define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000 -#define NVM_CFG1_FUNC_RESERVED2_OFFSET 16 - - u32 rsrv2; /* 0xC */ -#define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF -#define NVM_CFG1_FUNC_RESERVED3_OFFSET 0 -#define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000 -#define NVM_CFG1_FUNC_RESERVED4_OFFSET 16 - - u32 device_id; /* 0x10 */ -#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF -#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0 -#define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000 -#define NVM_CFG1_FUNC_RESERVED77_OFFSET 16 - - u32 cmn_cfg; /* 0x14 */ -#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007 -#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0 -#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0 -#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_ISCSI_BOOT 0x3 -#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_FCOE_BOOT 0x4 -#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7 -#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8 -#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3 -#define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000 -#define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19 -#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0 -#define NVM_CFG1_FUNC_PERSONALITY_ISCSI 0x1 -#define NVM_CFG1_FUNC_PERSONALITY_FCOE 0x2 -#define NVM_CFG1_FUNC_PERSONALITY_ROCE 0x3 -#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000 -#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23 -#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000 -#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31 -#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0 -#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1 - - u32 pci_cfg; /* 0x18 */ -#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F -#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0 -#define NVM_CFG1_FUNC_RESERVESD12_MASK 0x00003F80 -#define NVM_CFG1_FUNC_RESERVESD12_OFFSET 7 -#define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000 -#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14 -#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0 -#define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1 -#define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2 -#define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3 -#define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4 -#define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5 -#define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6 -#define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7 -#define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8 -#define 
NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9 -#define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA -#define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB -#define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC -#define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD -#define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE -#define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF -#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000 -#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18 - - struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */ - - struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */ - u32 preboot_generic_cfg; /* 0x2C */ - u32 reserved[8]; /* 0x30 */ + struct nvm_cfg_mac_address mac_address; + u32 rsrv1; + u32 rsrv2; + u32 device_id; + u32 cmn_cfg; + u32 pci_cfg; + struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; + struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; + u32 preboot_generic_cfg; + u32 reserved[8]; }; struct nvm_cfg1 { - struct nvm_cfg1_glob glob; /* 0x0 */ - - struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */ - - struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */ - - struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */ -}; - -/****************************************** -* nvm_cfg structs -******************************************/ - -enum nvm_cfg_sections { - NVM_CFG_SECTION_NVM_CFG1, - NVM_CFG_SECTION_MAX -}; - -struct nvm_cfg { - u32 num_sections; - u32 sections_offset[NVM_CFG_SECTION_MAX]; - struct nvm_cfg1 cfg1; -}; - -#define PORT_0 0 -#define PORT_1 1 -#define PORT_2 2 -#define PORT_3 3 - -extern struct spad_layout g_spad; - -#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */ - -#define SPAD_OFFSET(addr) (((u32)addr - (u32)CPU_SPAD_BASE)) - -#define TO_OFFSIZE(_offset, _size) \ - (u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_SHIFT) | \ - (((u32)(_size) >> 2) << OFFSIZE_SIZE_SHIFT)) - -enum spad_sections { - SPAD_SECTION_TRACE, - SPAD_SECTION_NVM_CFG, - SPAD_SECTION_PUBLIC, - SPAD_SECTION_PRIVATE, - SPAD_SECTION_MAX -}; - -struct spad_layout { - struct nvm_cfg nvm_cfg; - struct mcp_public_data public_data; + struct nvm_cfg1_glob glob; + struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; + struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; + struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; }; - -#define CRC_MAGIC_VALUE 0xDEBB20E3 -#define CRC32_POLYNOMIAL 0xEDB88320 -#define NVM_CRC_SIZE (sizeof(u32)) - -enum nvm_sw_arbitrator { - NVM_SW_ARB_HOST, - NVM_SW_ARB_MCP, - NVM_SW_ARB_UART, - NVM_SW_ARB_RESERVED -}; - -/**************************************************************************** -* Boot Strap Region * -****************************************************************************/ -struct legacy_bootstrap_region { - u32 magic_value; -#define NVM_MAGIC_VALUE 0x669955aa - u32 sram_start_addr; - u32 code_len; /* boot code length (in dwords) */ - u32 code_start_addr; - u32 crc; /* 32-bit CRC */ -}; - -/**************************************************************************** -* Directories Region * -****************************************************************************/ -struct nvm_code_entry { - u32 image_type; /* Image type */ - u32 nvm_start_addr; /* NVM address of the image */ - u32 len; /* Include CRC */ - u32 sram_start_addr; - u32 sram_run_addr; /* Relevant in case of MIM only */ -}; - -enum nvm_image_type { - NVM_TYPE_TIM1 = 0x01, - NVM_TYPE_TIM2 = 0x02, - NVM_TYPE_MIM1 = 0x03, - NVM_TYPE_MIM2 = 0x04, - NVM_TYPE_MBA = 0x05, - NVM_TYPE_MODULES_PN = 0x06, - NVM_TYPE_VPD = 0x07, - NVM_TYPE_MFW_TRACE1 = 0x08, - NVM_TYPE_MFW_TRACE2 = 0x09, - NVM_TYPE_NVM_CFG1 = 0x0a, - NVM_TYPE_L2B = 0x0b, - NVM_TYPE_DIR1 = 0x0c, - 
NVM_TYPE_EAGLE_FW1 = 0x0d, - NVM_TYPE_FALCON_FW1 = 0x0e, - NVM_TYPE_PCIE_FW1 = 0x0f, - NVM_TYPE_HW_SET = 0x10, - NVM_TYPE_LIM = 0x11, - NVM_TYPE_AVS_FW1 = 0x12, - NVM_TYPE_DIR2 = 0x13, - NVM_TYPE_CCM = 0x14, - NVM_TYPE_EAGLE_FW2 = 0x15, - NVM_TYPE_FALCON_FW2 = 0x16, - NVM_TYPE_PCIE_FW2 = 0x17, - NVM_TYPE_AVS_FW2 = 0x18, - - NVM_TYPE_MAX, -}; - -#define MAX_NVM_DIR_ENTRIES 200 - -struct nvm_dir { - s32 seq; -#define NVM_DIR_NEXT_MFW_MASK 0x00000001 -#define NVM_DIR_SEQ_MASK 0xfffffffe -#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK) - -#define IS_DIR_SEQ_VALID(seq) ((seq & NVM_DIR_SEQ_MASK) != NVM_DIR_SEQ_MASK) - - u32 num_images; - u32 rsrv; - struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */ -}; - -#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \ - (_num_images - \ - 1) * sizeof(struct nvm_code_entry) + \ - NVM_CRC_SIZE) - -struct nvm_vpd_image { - u32 format_revision; -#define VPD_IMAGE_VERSION 1 - - /* This array length depends on the number of VPD fields */ - u8 vpd_data[1]; -}; - -/**************************************************************************** -* NVRAM FULL MAP * -****************************************************************************/ -#define DIR_ID_1 (0) -#define DIR_ID_2 (1) -#define MAX_DIR_IDS (2) - -#define MFW_BUNDLE_1 (0) -#define MFW_BUNDLE_2 (1) -#define MAX_MFW_BUNDLES (2) - -#define FLASH_PAGE_SIZE 0x1000 -#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE) /* 4Kb */ -#define ASIC_MIM_MAX_SIZE (300 * FLASH_PAGE_SIZE) /* 1.2Mb */ -#define FPGA_MIM_MAX_SIZE (25 * FLASH_PAGE_SIZE) /* 60Kb */ - -#define LIM_MAX_SIZE ((2 * \ - FLASH_PAGE_SIZE) - \ - sizeof(struct legacy_bootstrap_region) - \ - NVM_RSV_SIZE) -#define LIM_OFFSET (NVM_OFFSET(lim_image)) -#define NVM_RSV_SIZE (44) -#define MIM_MAX_SIZE(is_asic) ((is_asic) ? ASIC_MIM_MAX_SIZE : \ - FPGA_MIM_MAX_SIZE) -#define MIM_OFFSET(idx, is_asic) (NVM_OFFSET(dir[MAX_MFW_BUNDLES]) + \ - ((idx == \ - NVM_TYPE_MIM2) ? MIM_MAX_SIZE(is_asic) : 0)) -#define NVM_FIXED_AREA_SIZE(is_asic) (sizeof(struct nvm_image) + \ - MIM_MAX_SIZE(is_asic) * 2) - -union nvm_dir_union { - struct nvm_dir dir; - u8 page[FLASH_PAGE_SIZE]; -}; - -/* Address - * +-------------------+ 0x000000 - * | Bootstrap: | - * | magic_number | - * | sram_start_addr | - * | code_len | - * | code_start_addr | - * | crc | - * +-------------------+ 0x000014 - * | rsrv | - * +-------------------+ 0x000040 - * | LIM | - * +-------------------+ 0x002000 - * | Dir1 | - * +-------------------+ 0x003000 - * | Dir2 | - * +-------------------+ 0x004000 - * | MIM1 | - * +-------------------+ 0x130000 - * | MIM2 | - * +-------------------+ 0x25C000 - * | Rest Images: | - * | TIM1/2 | - * | MFW_TRACE1/2 | - * | Eagle/Falcon FW | - * | PCIE/AVS FW | - * | MBA/CCM/L2B | - * | VPD | - * | optic_modules | - * | ... | - * +-------------------+ 0x400000 - */ -struct nvm_image { -/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/ - /* NVM Offset (size) */ - struct legacy_bootstrap_region bootstrap; - u8 rsrv[NVM_RSV_SIZE]; - u8 lim_image[LIM_MAX_SIZE]; - union nvm_dir_union dir[MAX_MFW_BUNDLES]; - - /* MIM1_IMAGE 0x004000 (0x12c000) */ - /* MIM2_IMAGE 0x130000 (0x12c000) */ -/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! 
**********************/ -}; /* 0x134 */ - -#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->f)))) - -struct hw_set_info { - u32 reg_type; -#define GRC_REG_TYPE 1 -#define PHY_REG_TYPE 2 -#define PCI_REG_TYPE 4 - - u32 bank_num; - u32 pf_num; - u32 operation; -#define READ_OP 1 -#define WRITE_OP 2 -#define RMW_SET_OP 3 -#define RMW_CLR_OP 4 - - u32 reg_addr; - u32 reg_data; - - u32 reset_type; -#define POR_RESET_TYPE BIT(0) -#define HARD_RESET_TYPE BIT(1) -#define CORE_RESET_TYPE BIT(2) -#define MCP_RESET_TYPE BIT(3) -#define PERSET_ASSERT BIT(4) -#define PERSET_DEASSERT BIT(5) -}; - -struct hw_set_image { - u32 format_version; -#define HW_SET_IMAGE_VERSION 1 - u32 no_hw_sets; - - /* This array length depends on the no_hw_sets */ - struct hw_set_info hw_sets[1]; -}; - -int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u8 pf_id, u16 pf_wfq); -int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 0ada7fdb9..e17885321 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -446,7 +446,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn, idx_cmd, le32_to_cpu(command->opcode), le16_to_cpu(command->opcode_b), - le16_to_cpu(command->length), + le16_to_cpu(command->length_dw), le32_to_cpu(command->src_addr_hi), le32_to_cpu(command->src_addr_lo), le32_to_cpu(command->dst_addr_hi), @@ -461,7 +461,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn, idx_cmd, le32_to_cpu(command->opcode), le16_to_cpu(command->opcode_b), - le16_to_cpu(command->length), + le16_to_cpu(command->length_dw), le32_to_cpu(command->src_addr_hi), le32_to_cpu(command->src_addr_lo), le32_to_cpu(command->dst_addr_hi), @@ -645,7 +645,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn, return -EINVAL; } - cmd->length = cpu_to_le16((u16)length); + cmd->length_dw = cpu_to_le16((u16)length); qed_dmae_post_command(p_hwfn, p_ptt); @@ -769,6 +769,29 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, } int +qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr, + dma_addr_t dest_addr, u32 size_in_dwords, u32 flags) +{ + u32 grc_addr_in_dw = grc_addr / sizeof(u32); + struct qed_dmae_params params; + int rc; + + memset(¶ms, 0, sizeof(struct qed_dmae_params)); + params.flags = flags; + + mutex_lock(&p_hwfn->dmae_info.mutex); + + rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw, + dest_addr, QED_DMAE_ADDRESS_GRC, + QED_DMAE_ADDRESS_HOST_VIRT, + size_in_dwords, ¶ms); + + mutex_unlock(&p_hwfn->dmae_info.mutex); + + return rc; +} + +int qed_dmae_host2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, dma_addr_t source_addr, @@ -791,16 +814,16 @@ qed_dmae_host2host(struct qed_hwfn *p_hwfn, } u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, - enum protocol_type proto, - union qed_qm_pq_params *p_params) + enum protocol_type proto, union qed_qm_pq_params *p_params) { u16 pq_id = 0; - if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) && - !p_params) { + if ((proto == PROTOCOLID_CORE || + proto == PROTOCOLID_ETH || + proto == PROTOCOLID_ISCSI || + proto == PROTOCOLID_ROCE) && !p_params) { DP_NOTICE(p_hwfn, - "Protocol %d received NULL PQ params\n", - proto); + "Protocol %d received NULL PQ params\n", proto); return 0; } @@ -808,6 +831,8 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, case PROTOCOLID_CORE: if (p_params->core.tc == LB_TC) 
pq_id = p_hwfn->qm_info.pure_lb_pq; + else if (p_params->core.tc == OOO_LB_TC) + pq_id = p_hwfn->qm_info.ooo_pq; else pq_id = p_hwfn->qm_info.offload_pq; break; @@ -817,6 +842,18 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, pq_id += p_hwfn->qm_info.vf_queues_offset + p_params->eth.vf_id; break; + case PROTOCOLID_ISCSI: + if (p_params->iscsi.q_idx == 1) + pq_id = p_hwfn->qm_info.pure_ack_pq; + break; + case PROTOCOLID_ROCE: + if (p_params->roce.dcqcn) + pq_id = p_params->roce.qpid; + else + pq_id = p_hwfn->qm_info.offload_pq; + if (pq_id > p_hwfn->qm_info.num_pf_rls) + pq_id = p_hwfn->qm_info.offload_pq; + break; default: pq_id = 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index 4367363ad..d01557092 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -254,6 +254,10 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn); union qed_qm_pq_params { struct { + u8 q_idx; + } iscsi; + + struct { u8 tc; } core; @@ -262,11 +266,15 @@ union qed_qm_pq_params { u8 vf_id; u8 tc; } eth; + + struct { + u8 dcqcn; + u8 qpid; /* roce relative */ + } roce; }; u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, - enum protocol_type proto, - union qed_qm_pq_params *params); + enum protocol_type proto, union qed_qm_pq_params *params); int qed_init_fw_data(struct qed_dev *cdev, const u8 *fw_data); diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index e8a3b9da5..23e455f22 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -31,7 +31,6 @@ enum cminterface { }; /* general constants */ -#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */ #define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \ QM_PQ_ELEMENT_SIZE, \ 0x1000) : 0) @@ -44,28 +43,28 @@ enum cminterface { /* other PQ constants */ #define QM_OTHER_PQS_PER_PF 4 /* WFQ constants */ -#define QM_WFQ_UPPER_BOUND 6250000 +#define QM_WFQ_UPPER_BOUND 62500000 #define QM_WFQ_VP_PQ_VOQ_SHIFT 0 #define QM_WFQ_VP_PQ_PF_SHIFT 5 #define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000) -#define QM_WFQ_MAX_INC_VAL 4375000 -#define QM_WFQ_INIT_CRD(inc_val) (2 * (inc_val)) +#define QM_WFQ_MAX_INC_VAL 43750000 + /* RL constants */ -#define QM_RL_UPPER_BOUND 6250000 +#define QM_RL_UPPER_BOUND 62500000 #define QM_RL_PERIOD 5 /* in us */ #define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) +#define QM_RL_MAX_INC_VAL 43750000 #define QM_RL_INC_VAL(rate) max_t(u32, \ - (((rate ? rate : 1000000) \ - * QM_RL_PERIOD) / 8), 1) -#define QM_RL_MAX_INC_VAL 4375000 + (u32)(((rate ? 
rate : \ + 1000000) * \ + QM_RL_PERIOD * \ + 101) / (8 * 100)), 1) /* AFullOprtnstcCrdMask constants */ #define QM_OPPOR_LINE_VOQ_DEF 1 #define QM_OPPOR_FW_STOP_DEF 0 #define QM_OPPOR_PQ_EMPTY_DEF 1 -#define EAGLE_WORKAROUND_TC 7 /* Command Queue constants */ #define PBF_CMDQ_PURE_LB_LINES 150 -#define PBF_CMDQ_EAGLE_WORKAROUND_LINES 8 #define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \ (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \ @@ -80,7 +79,6 @@ enum cminterface { /* BTB: blocks constants (block size = 256B) */ #define BTB_JUMBO_PKT_BLOCKS 38 #define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS -#define BTB_EAGLE_WORKAROUND_BLOCKS 4 #define BTB_PURE_LB_FACTOR 10 #define BTB_PURE_LB_RATIO 7 /* QM stop command constants */ @@ -107,9 +105,9 @@ enum cminterface { cmd ## _ ## field, \ value) /* QM: VOQ macros */ -#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port) ((port) * \ - (max_phy_tcs_pr_port) \ - + (tc)) +#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) * \ + (max_phys_tcs_per_port) + \ + (tc)) #define LB_VOQ(port) ( \ MAX_PHYS_VOQS + (port)) #define VOQ(port, tc, max_phy_tcs_pr_port) \ @@ -120,8 +118,7 @@ enum cminterface { : LB_VOQ(port)) /******************** INTERNAL IMPLEMENTATION *********************/ /* Prepare PF RL enable/disable runtime init values */ -static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, - bool pf_rl_en) +static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en) { STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0); if (pf_rl_en) { @@ -130,8 +127,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, (1 << MAX_NUM_VOQS) - 1); /* write RL period */ STORE_RT_REG(p_hwfn, - QM_REG_RLPFPERIOD_RT_OFFSET, - QM_RL_PERIOD_CLK_25M); + QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M); STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET, QM_RL_PERIOD_CLK_25M); @@ -144,8 +140,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, } /* Prepare PF WFQ enable/disable runtime init values */ -static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, - bool pf_wfq_en) +static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en) { STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0); /* set credit threshold for QM bypass flow */ @@ -156,8 +151,7 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, } /* Prepare VPORT RL enable/disable runtime init values */ -static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, - bool vport_rl_en) +static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en) { STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, vport_rl_en ? 1 : 0); @@ -178,8 +172,7 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, } /* Prepare VPORT WFQ enable/disable runtime init values */ -static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, - bool vport_wfq_en) +static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en) { STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, vport_wfq_en ? 
1 : 0); @@ -194,8 +187,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, * the specified VOQ */ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn, - u8 voq, - u16 cmdq_lines) + u8 voq, u16 cmdq_lines) { u32 qm_line_crd; @@ -221,7 +213,7 @@ static void qed_cmdq_lines_rt_init( u8 max_phys_tcs_per_port, struct init_qm_port_params port_params[MAX_NUM_PORTS]) { - u8 tc, voq, port_id; + u8 tc, voq, port_id, num_tcs_in_port; /* clear PBF lines for all VOQs */ for (voq = 0; voq < MAX_NUM_VOQS; voq++) @@ -229,22 +221,31 @@ static void qed_cmdq_lines_rt_init( for (port_id = 0; port_id < max_ports_per_engine; port_id++) { if (port_params[port_id].active) { u16 phys_lines, phys_lines_per_tc; - u8 phys_tcs = port_params[port_id].num_active_phys_tcs; - /* find #lines to divide between the active - * physical TCs. - */ + /* find #lines to divide between active phys TCs */ phys_lines = port_params[port_id].num_pbf_cmd_lines - PBF_CMDQ_PURE_LB_LINES; /* find #lines per active physical TC */ - phys_lines_per_tc = phys_lines / phys_tcs; + num_tcs_in_port = 0; + for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { + if (((port_params[port_id].active_phys_tcs >> + tc) & 0x1) == 1) + num_tcs_in_port++; + } + + phys_lines_per_tc = phys_lines / num_tcs_in_port; /* init registers per active TC */ - for (tc = 0; tc < phys_tcs; tc++) { + for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { + if (((port_params[port_id].active_phys_tcs >> + tc) & 0x1) != 1) + continue; + voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port); qed_cmdq_lines_voq_rt_init(p_hwfn, voq, phys_lines_per_tc); } + /* init registers for pure LB TC */ qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id), PBF_CMDQ_PURE_LB_LINES); @@ -259,34 +260,42 @@ static void qed_btb_blocks_rt_init( struct init_qm_port_params port_params[MAX_NUM_PORTS]) { u32 usable_blocks, pure_lb_blocks, phys_blocks; - u8 tc, voq, port_id; + u8 tc, voq, port_id, num_tcs_in_port; for (port_id = 0; port_id < max_ports_per_engine; port_id++) { u32 temp; - u8 phys_tcs; if (!port_params[port_id].active) continue; - phys_tcs = port_params[port_id].num_active_phys_tcs; - /* subtract headroom blocks */ usable_blocks = port_params[port_id].num_btb_blocks - BTB_HEADROOM_BLOCKS; - /* find blocks per physical TC. use factor to avoid - * floating arithmethic. - */ + /* find blocks per physical TC */ + num_tcs_in_port = 0; + for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { + if (((port_params[port_id].active_phys_tcs >> + tc) & 0x1) == 1) + num_tcs_in_port++; + } + pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) / - (phys_tcs * BTB_PURE_LB_FACTOR + + (num_tcs_in_port * BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO); pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS, pure_lb_blocks / BTB_PURE_LB_FACTOR); - phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs; + phys_blocks = (usable_blocks - pure_lb_blocks) / + num_tcs_in_port; /* init physical TCs */ - for (tc = 0; tc < phys_tcs; tc++) { - voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port); + for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { + if (((port_params[port_id].active_phys_tcs >> + tc) & 0x1) != 1) + continue; + + voq = PHYS_VOQ(port_id, tc, + max_phys_tcs_per_port); STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq), phys_blocks); } @@ -360,10 +369,11 @@ static void qed_tx_pq_map_rt_init( memset(&tx_pq_map, 0, sizeof(tx_pq_map)); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID, - is_vf_pq ? 1 : 0); + p_params->pq_params[i].rl_valid ? 
1 : 0); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID, - is_vf_pq ? p_params->pq_params[i].vport_id : 0); + p_params->pq_params[i].rl_valid ? + p_params->pq_params[i].vport_id : 0); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq); SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, p_params->pq_params[i].wrr_group); @@ -390,25 +400,11 @@ static void qed_tx_pq_map_rt_init( /* store Tx PQ VF mask to size select register */ for (i = 0; i < num_tx_pq_vf_masks; i++) { if (tx_pq_vf_mask[i]) { - if (is_bb_a0) { - u32 curr_mask = 0, addr; - - addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4); - if (!p_params->is_first_pf) - curr_mask = qed_rd(p_hwfn, p_ptt, - addr); - - addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i; - - STORE_RT_REG(p_hwfn, addr, - curr_mask | tx_pq_vf_mask[i]); - } else { - u32 addr; + u32 addr; - addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i; - STORE_RT_REG(p_hwfn, addr, - tx_pq_vf_mask[i]); - } + addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i; + STORE_RT_REG(p_hwfn, addr, + tx_pq_vf_mask[i]); } } } @@ -418,8 +414,7 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn, u8 port_id, u8 pf_id, u32 num_pf_cids, - u32 num_tids, - u32 base_mem_addr_4kb) + u32 num_tids, u32 base_mem_addr_4kb) { u16 i, pq_id; @@ -465,15 +460,10 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, (p_params->pf_id % MAX_NUM_PFS_BB); inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq); - if (inc_val > QM_WFQ_MAX_INC_VAL) { + if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration"); return -1; } - STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id, - inc_val); - STORE_RT_REG(p_hwfn, - QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id, - QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT); for (i = 0; i < num_tx_pqs; i++) { u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id, @@ -481,19 +471,21 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB, - QM_WFQ_INIT_CRD(inc_val) | QM_WFQ_CRD_REG_SIGN_BIT); } + STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id, + inc_val); + STORE_RT_REG(p_hwfn, + QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id, + QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT); return 0; } /* Prepare PF RL runtime init values for the specified PF. * Return -1 on error. 
*/ -static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, - u8 pf_id, - u32 pf_rl) +static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl) { u32 inc_val = QM_RL_INC_VAL(pf_rl); @@ -607,9 +599,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn, static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 cmd_addr, - u32 cmd_data_lsb, - u32 cmd_data_msb) + u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb) { if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt)) return false; @@ -627,9 +617,7 @@ static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn, u32 qed_qm_pf_mem_size(u8 pf_id, u32 num_pf_cids, u32 num_vf_cids, - u32 num_tids, - u16 num_pf_pqs, - u16 num_vf_pqs) + u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs) { return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs + QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs + @@ -713,8 +701,7 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, } int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u8 pf_id, u16 pf_wfq) + struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq) { u32 inc_val = QM_WFQ_INC_VAL(pf_wfq); @@ -728,9 +715,7 @@ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, } int qed_init_pf_rl(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u8 pf_id, - u32 pf_rl) + struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl) { u32 inc_val = QM_RL_INC_VAL(pf_rl); @@ -749,8 +734,7 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn, int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 first_tx_pq_id[NUM_OF_TCS], - u16 vport_wfq) + u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq) { u32 inc_val = QM_WFQ_INC_VAL(vport_wfq); u8 tc; @@ -773,9 +757,7 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, } int qed_init_vport_rl(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u8 vport_id, - u32 vport_rl) + struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl) { u32 inc_val = QM_RL_INC_VAL(vport_rl); @@ -795,9 +777,7 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn, bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool is_release_cmd, - bool is_tx_pq, - u16 start_pq, - u16 num_pqs) + bool is_tx_pq, u16 start_pq, u16 num_pqs) { u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 }; u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id; @@ -841,17 +821,15 @@ qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable) #define PRS_ETH_TUNN_FIC_FORMAT -188897008 void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 dest_port) + struct qed_ptt *p_ptt, u16 dest_port) { qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port); - qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port); + qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port); qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port); } void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - bool vxlan_enable) + struct qed_ptt *p_ptt, bool vxlan_enable) { unsigned long reg_val = 0; u8 shift; @@ -908,8 +886,7 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, } void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 dest_port) + struct qed_ptt *p_ptt, u16 dest_port) { qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port); qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port); @@ -918,8 +895,7 @@ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - bool eth_geneve_enable, - bool ip_geneve_enable) + bool eth_geneve_enable, bool ip_geneve_enable) { unsigned long 
reg_val = 0; u8 shift; diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index d358c3bb1..9866a20d2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -543,8 +543,7 @@ void qed_gtt_init(struct qed_hwfn *p_hwfn) pxp_global_win[i]); } -int qed_init_fw_data(struct qed_dev *cdev, - const u8 *data) +int qed_init_fw_data(struct qed_dev *cdev, const u8 *data) { struct qed_fw_data *fw = cdev->fw_data; struct bin_buffer_hdr *buf_hdr; @@ -555,7 +554,11 @@ int qed_init_fw_data(struct qed_dev *cdev, return -EINVAL; } - buf_hdr = (struct bin_buffer_hdr *)data; + /* First Dword contains metadata and should be skipped */ + buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32)); + + offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset; + fw->fw_ver_info = (struct fw_ver_info *)(data + offset); offset = buf_hdr[BIN_BUF_INIT_CMD].offset; fw->init_ops = (union init_op *)(data + offset); diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 09a6ad3d2..8fa50fa23 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -2418,6 +2418,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, { struct qed_dev *cdev = p_hwfn->cdev; u32 cau_state; + u8 timer_res; memset(p_sb_entry, 0, sizeof(*p_sb_entry)); @@ -2443,6 +2444,23 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS; } + /* Coalesce = (timeset << timer-res), timeset is 7bit wide */ + if (cdev->rx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (cdev->rx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res); + + if (cdev->tx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (cdev->tx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res); + SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); } @@ -2484,17 +2502,28 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, /* Configure pi coalescing if set */ if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { - u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >> - (QED_CAU_DEF_RX_TIMER_RES + 1); + u8 timeset, timer_res; u8 num_tc = 1, i; + /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ + if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res); qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, QED_COAL_RX_STATE_MACHINE, timeset); - timeset = p_hwfn->cdev->tx_coalesce_usecs >> - (QED_CAU_DEF_TX_TIMER_RES + 1); - + if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res); for (i = 0; i < num_tc; i++) { qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, TX_PI(i), @@ -3199,3 +3228,39 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev) for_each_hwfn(cdev, i) cdev->hwfns[i].b_int_requested = false; } + +int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u8 timer_res, u16 sb_id, bool tx) +{ + struct cau_sb_entry sb_entry; + int rc; + + if 
(!p_hwfn->hw_init_done) { + DP_ERR(p_hwfn, "hardware not initialized yet\n"); + return -EINVAL; + } + + rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + + sb_id * sizeof(u64), + (u64)(uintptr_t)&sb_entry, 2, 0); + if (rc) { + DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); + return rc; + } + + if (tx) + SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res); + else + SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res); + + rc = qed_dmae_host2grc(p_hwfn, p_ptt, + (u64)(uintptr_t)&sb_entry, + CAU_REG_SB_VAR_MEMORY + + sb_id * sizeof(u64), 2, 0); + if (rc) { + DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); + return rc; + } + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 20b468637..0948be64d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -389,6 +389,9 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, u16 vf_number, u8 vf_valid); +int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u8 timer_res, u16 sb_id, bool tx); + #define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev)) #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index aada4c7e0..401e73854 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -572,9 +572,12 @@ int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); - rc = qed_spq_post(p_hwfn, p_ent, NULL); + p_ramrod->vf_rx_prod_index = params->vf_qid; + if (params->vf_qid) + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Queue is meant for VF rxq[%04x]\n", params->vf_qid); - return rc; + return qed_spq_post(p_hwfn, p_ent, NULL); } static int @@ -587,7 +590,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, u16 cqe_pbl_size, void __iomem **pp_prod) { struct qed_hw_cid_data *p_rx_cid; - u64 init_prod_val = 0; + u32 init_prod_val = 0; u16 abs_l2_queue = 0; u8 abs_stats_id = 0; int rc; @@ -612,10 +615,10 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, *pp_prod = (u8 __iomem *)p_hwfn->regview + GTT_BAR0_MAP_REG_MSDM_RAM + - MSTORM_PRODS_OFFSET(abs_l2_queue); + MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue); /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ - __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64), + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), (u32 *)(&init_prod_val)); /* Allocate a CID for the queue */ @@ -756,9 +759,9 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; struct qed_hw_cid_data *p_tx_cid; - u8 abs_vport_id; + u16 pq_id, abs_tx_q_id = 0; int rc = -EINVAL; - u16 pq_id; + u8 abs_vport_id; /* Store information for the stop */ p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id]; @@ -769,6 +772,10 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, if (rc) return rc; + rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id); + if (rc) + return rc; + /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = cid; @@ -788,6 +795,7 @@ int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod->sb_index = p_params->sb_idx; p_ramrod->stats_counter_id = stats_id; + p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id); p_ramrod->pbl_size = cpu_to_le16(pbl_size); DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); @@ -1482,51 +1490,51 @@ static void 
__qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, offsetof(struct public_port, stats), sizeof(port_stats)); - p_stats->rx_64_byte_packets += port_stats.pmm.r64; - p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127; - p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255; - p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511; - p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023; - p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518; - p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522; - p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047; - p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095; - p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216; - p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383; - p_stats->rx_crc_errors += port_stats.pmm.rfcs; - p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf; - p_stats->rx_pause_frames += port_stats.pmm.rxpf; - p_stats->rx_pfc_frames += port_stats.pmm.rxpp; - p_stats->rx_align_errors += port_stats.pmm.raln; - p_stats->rx_carrier_errors += port_stats.pmm.rfcr; - p_stats->rx_oversize_packets += port_stats.pmm.rovr; - p_stats->rx_jabbers += port_stats.pmm.rjbr; - p_stats->rx_undersize_packets += port_stats.pmm.rund; - p_stats->rx_fragments += port_stats.pmm.rfrg; - p_stats->tx_64_byte_packets += port_stats.pmm.t64; - p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127; - p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255; - p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511; - p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023; - p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518; - p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047; - p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095; - p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216; - p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383; - p_stats->tx_pause_frames += port_stats.pmm.txpf; - p_stats->tx_pfc_frames += port_stats.pmm.txpp; - p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec; - p_stats->tx_total_collisions += port_stats.pmm.tncl; - p_stats->rx_mac_bytes += port_stats.pmm.rbyte; - p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca; - p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca; - p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca; - p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok; - p_stats->tx_mac_bytes += port_stats.pmm.tbyte; - p_stats->tx_mac_uc_packets += port_stats.pmm.txuca; - p_stats->tx_mac_mc_packets += port_stats.pmm.txmca; - p_stats->tx_mac_bc_packets += port_stats.pmm.txbca; - p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf; + p_stats->rx_64_byte_packets += port_stats.eth.r64; + p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127; + p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255; + p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511; + p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023; + p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518; + p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522; + p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047; + p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095; + p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216; + p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383; + p_stats->rx_crc_errors += port_stats.eth.rfcs; + p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf; + p_stats->rx_pause_frames += port_stats.eth.rxpf; + 
p_stats->rx_pfc_frames += port_stats.eth.rxpp; + p_stats->rx_align_errors += port_stats.eth.raln; + p_stats->rx_carrier_errors += port_stats.eth.rfcr; + p_stats->rx_oversize_packets += port_stats.eth.rovr; + p_stats->rx_jabbers += port_stats.eth.rjbr; + p_stats->rx_undersize_packets += port_stats.eth.rund; + p_stats->rx_fragments += port_stats.eth.rfrg; + p_stats->tx_64_byte_packets += port_stats.eth.t64; + p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127; + p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255; + p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511; + p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023; + p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518; + p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047; + p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095; + p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216; + p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383; + p_stats->tx_pause_frames += port_stats.eth.txpf; + p_stats->tx_pfc_frames += port_stats.eth.txpp; + p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec; + p_stats->tx_total_collisions += port_stats.eth.tncl; + p_stats->rx_mac_bytes += port_stats.eth.rbyte; + p_stats->rx_mac_uc_packets += port_stats.eth.rxuca; + p_stats->rx_mac_mc_packets += port_stats.eth.rxmca; + p_stats->rx_mac_bc_packets += port_stats.eth.rxbca; + p_stats->rx_mac_frames_ok += port_stats.eth.rxpok; + p_stats->tx_mac_bytes += port_stats.eth.tbyte; + p_stats->tx_mac_uc_packets += port_stats.eth.txuca; + p_stats->tx_mac_mc_packets += port_stats.eth.txmca; + p_stats->tx_mac_bc_packets += port_stats.eth.txbca; + p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf; for (j = 0; j < 8; j++) { p_stats->brb_truncates += port_stats.brb.brb_truncate[j]; p_stats->brb_discards += port_stats.brb.brb_discard[j]; @@ -1656,6 +1664,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, info->num_tc = 1; if (IS_PF(cdev)) { + int max_vf_vlan_filters = 0; + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { for_each_hwfn(cdev, i) info->num_queues += @@ -1668,7 +1678,12 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, info->num_queues = cdev->num_hwfns; } - info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN); + if (IS_QED_SRIOV(cdev)) + max_vf_vlan_filters = cdev->p_iov_info->total_vfs * + QED_ETH_VF_NUM_VLAN_FILTERS; + info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN) - + max_vf_vlan_filters; + ether_addr_copy(info->port_mac, cdev->hwfns[0].hw_info.hw_mac_addr); } else { @@ -2156,11 +2171,18 @@ static int qed_fp_cqe_completion(struct qed_dev *dev, extern const struct qed_iov_hv_ops qed_iov_ops_pass; #endif +#ifdef CONFIG_DCB +extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass; +#endif + static const struct qed_eth_ops qed_eth_ops_pass = { .common = &qed_common_ops_pass, #ifdef CONFIG_QED_SRIOV .iov = &qed_iov_ops_pass, #endif +#ifdef CONFIG_DCB + .dcb = &qed_dcbnl_ops_pass, +#endif .fill_dev_info = &qed_fill_eth_dev_info, .register_ops = &qed_register_eth_ops, .check_mac = &qed_check_mac, diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index a41c6b559..d5a32a1e6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -207,6 +207,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->pci_mem_start = cdev->pci_params.mem_start; dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_irq = cdev->pci_params.irq; + 
dev_info->rdma_supported = + (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE); dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); @@ -657,8 +659,13 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, struct qed_sb_cnt_info sb_cnt_info; int rc; int i; - memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); + if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { + DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); + return -EINVAL; + } + + memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); cdev->int_params.in.int_mode = int_mode; for_each_hwfn(cdev, i) { memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); @@ -832,7 +839,8 @@ static int qed_slowpath_start(struct qed_dev *cdev, goto err2; } - data = cdev->firmware->data; + /* First Dword used to differentiate between various sources */ + data = cdev->firmware->data + sizeof(u32); } memset(&tunn_info, 0, sizeof(tunn_info)); @@ -900,7 +908,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev) if (IS_PF(cdev)) { qed_free_stream_mem(cdev); - qed_sriov_disable(cdev, true); + if (IS_QED_ETH_IF(cdev)) + qed_sriov_disable(cdev, true); qed_nic_stop(cdev); qed_slowpath_irq_free(cdev); @@ -991,8 +1000,7 @@ static bool qed_can_link_change(struct qed_dev *cdev) return true; } -static int qed_set_link(struct qed_dev *cdev, - struct qed_link_params *params) +static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) { struct qed_hwfn *hwfn; struct qed_mcp_link_params *link_params; @@ -1032,7 +1040,7 @@ static int qed_set_link(struct qed_dev *cdev, NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; if (params->adv_speeds & 0) link_params->speed.advertised_speeds |= - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G; + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; } if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) link_params->speed.forced_speed = params->forced_speed; @@ -1053,19 +1061,19 @@ static int qed_set_link(struct qed_dev *cdev, if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { switch (params->loopback_mode) { case QED_LINK_LOOPBACK_INT_PHY: - link_params->loopback_mode = PMM_LOOPBACK_INT_PHY; + link_params->loopback_mode = ETH_LOOPBACK_INT_PHY; break; case QED_LINK_LOOPBACK_EXT_PHY: - link_params->loopback_mode = PMM_LOOPBACK_EXT_PHY; + link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY; break; case QED_LINK_LOOPBACK_EXT: - link_params->loopback_mode = PMM_LOOPBACK_EXT; + link_params->loopback_mode = ETH_LOOPBACK_EXT; break; case QED_LINK_LOOPBACK_MAC: - link_params->loopback_mode = PMM_LOOPBACK_MAC; + link_params->loopback_mode = ETH_LOOPBACK_MAC; break; default: - link_params->loopback_mode = PMM_LOOPBACK_NONE; + link_params->loopback_mode = ETH_LOOPBACK_NONE; break; } } @@ -1185,7 +1193,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn, NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) if_link->advertised_caps |= 0; if (params.speed.advertised_speeds & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G) + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) if_link->advertised_caps |= 0; if (link_caps.speed_capabilities & @@ -1202,7 +1210,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn, NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) if_link->supported_caps |= 0; if (link_caps.speed_capabilities & - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G) + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) if_link->supported_caps |= 0; if (link.link_up) @@ -1301,6 +1309,38 @@ static int 
qed_drain(struct qed_dev *cdev) return 0; } +static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal) +{ + *rx_coal = cdev->rx_coalesce_usecs; + *tx_coal = cdev->tx_coalesce_usecs; +} + +static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, + u8 qid, u16 sb_id) +{ + struct qed_hwfn *hwfn; + struct qed_ptt *ptt; + int hwfn_index; + int status = 0; + + hwfn_index = qid % cdev->num_hwfns; + hwfn = &cdev->hwfns[hwfn_index]; + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal, + qid / cdev->num_hwfns, sb_id); + if (status) + goto out; + status = qed_set_txq_coalesce(hwfn, ptt, tx_coal, + qid / cdev->num_hwfns, sb_id); +out: + qed_ptt_release(hwfn, ptt); + + return status; +} + static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) { struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); @@ -1347,5 +1387,7 @@ const struct qed_common_ops qed_common_ops_pass = { .update_msglvl = &qed_init_dp, .chain_alloc = &qed_chain_alloc, .chain_free = &qed_chain_free, + .get_coalesce = &qed_get_coalesce, + .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 118236179..f776a7779 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -531,9 +531,9 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, transceiver_data))); transceiver_state = GET_FIELD(transceiver_state, - PMM_TRANSCEIVER_STATE); + ETH_TRANSCEIVER_STATE); - if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT) + if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) DP_NOTICE(p_hwfn, "Transceiver is present.\n"); else DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n"); @@ -668,14 +668,12 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, qed_link_update(p_hwfn); } -int qed_mcp_set_link(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - bool b_up) +int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) { struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input; struct qed_mcp_mb_params mb_params; union drv_union_data union_data; - struct pmm_phy_cfg *phy_cfg; + struct eth_phy_cfg *phy_cfg; int rc = 0; u32 cmd; @@ -685,9 +683,9 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET; if (!params->speed.autoneg) phy_cfg->speed = params->speed.forced_speed; - phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0; - phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0; - phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0; + phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0; + phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0; + phy_cfg->pause |= (params->pause.forced_tx) ? 
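qed_set_coalesce() above shows how a global queue ID is mapped onto a two-engine (CMT) device: queues interleave across hw-functions, so the owning engine is qid % num_hwfns and the queue index inside that engine is qid / num_hwfns. A standalone sketch of that mapping, with illustrative names:

	struct qid_map {
		int hwfn_index;	/* which engine owns the queue */
		int rel_qid;	/* queue index within that engine */
	};

	static struct qid_map map_global_qid(int qid, int num_hwfns)
	{
		struct qid_map m = {
			.hwfn_index = qid % num_hwfns,
			.rel_qid = qid / num_hwfns,
		};

		return m;
	}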
ETH_PAUSE_TX : 0; phy_cfg->adv_speed = params->speed.advertised_speeds; phy_cfg->loopback_mode = params->loopback_mode; @@ -773,6 +771,34 @@ static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, return size; } +int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 *p_pf) +{ + struct public_func shmem_info; + int i; + + /* Find first Ethernet interface in port */ + for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev); + i += p_hwfn->cdev->num_ports_in_engines) { + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, + MCP_PF_ID_BY_REL(p_hwfn, i)); + + if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE) + continue; + + if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) == + FUNC_MF_CFG_PROTOCOL_ETHERNET) { + *p_pf = (u8)i; + return 0; + } + } + + DP_NOTICE(p_hwfn, + "Failed to find on port an ethernet interface in MF_SI mode\n"); + + return -EINVAL; +} + static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -951,7 +977,18 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { case FUNC_MF_CFG_PROTOCOL_ETHERNET: - *p_proto = QED_PCI_ETH; + if (test_bit(QED_DEV_CAP_ROCE, + &p_hwfn->hw_info.device_capabilities)) + *p_proto = QED_PCI_ETH_ROCE; + else + *p_proto = QED_PCI_ETH; + break; + case FUNC_MF_CFG_PROTOCOL_ISCSI: + *p_proto = QED_PCI_ISCSI; + break; + case FUNC_MF_CFG_PROTOCOL_ROCE: + DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n"); + rc = -EINVAL; break; default: rc = -EINVAL; @@ -1116,8 +1153,8 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, p_drv_version = &union_data.drv_version; p_drv_version->version = p_ver->version; - for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) { - val = cpu_to_be32(p_ver->name[i]); + for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) { + val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)])); *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 6dd59eb7f..7f319aa1b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -457,4 +457,7 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_link_state *p_link, u8 min_bw); + +int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 *p_pf); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 3a6c506f0..f6b86ca1f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -27,6 +27,35 @@ #define CDU_REG_CID_ADDR_PARAMS_NCIB ( \ 0xff << 24) +#define CDU_REG_SEGMENT0_PARAMS \ + 0x580904UL +#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK \ + (0xfff << 0) +#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT \ + 0 +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE \ + (0xff << 16) +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT \ + 16 +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE \ + (0xff << 24) +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT \ + 24 +#define CDU_REG_SEGMENT1_PARAMS \ + 0x580908UL +#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK \ + (0xfff << 0) +#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT \ + 0 +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE \ + (0xff << 16) +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT \ + 16 +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE \ + (0xff 
<< 24) +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT \ + 24 + #define XSDM_REG_OPERATION_GEN \ 0xf80408UL #define NIG_REG_RX_BRB_OUT_EN \ @@ -51,6 +80,8 @@ 0x1f00000UL #define BAR0_MAP_REG_TSDM_RAM \ 0x1c80000UL +#define BAR0_MAP_REG_XSDM_RAM \ + 0x1e00000UL #define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \ 0x5011f4UL #define PRS_REG_SEARCH_TCP \ @@ -167,6 +198,10 @@ 0x1800004UL #define NIG_REG_CM_HDR \ 0x500840UL +#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR \ + 0x50196cUL +#define NIG_REG_LLH_CLS_TYPE_DUALMODE \ + 0x501964UL #define NCSI_REG_CONFIG \ 0x040200UL #define PBF_REG_INIT \ @@ -219,6 +254,10 @@ 0x230000UL #define PRS_REG_SOFT_RST \ 0x1f0000UL +#define PRS_REG_MSG_INFO \ + 0x1f0a1cUL +#define PRS_REG_ROCE_DEST_QP_MAX_PF \ + 0x1f0430UL #define PSDM_REG_ENABLE_IN1 \ 0xfa0004UL #define PSEM_REG_ENABLE_IN \ @@ -227,6 +266,8 @@ 0x280020UL #define PSWRQ2_REG_CDUT_P_SIZE \ 0x24000cUL +#define PSWRQ2_REG_ILT_MEMORY \ + 0x260000UL #define PSWHST_REG_DISCARD_INTERNAL_WRITES \ 0x2a0040UL #define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \ @@ -460,7 +501,7 @@ #define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE (0x1 << 2) #define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2 -#define NIG_REG_VXLAN_PORT 0x50105cUL +#define NIG_REG_VXLAN_CTRL 0x50105cUL #define PBF_REG_VXLAN_PORT 0xd80518UL #define PBF_REG_NGE_PORT 0xd8051cUL #define PRS_REG_NGE_PORT 0x1f086cUL diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index ea4e9ce53..a548504c3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -63,6 +63,32 @@ union ramrod_data { struct vport_update_ramrod_data vport_update; struct vport_filter_update_ramrod_data vport_filter_update; + struct rdma_init_func_ramrod_data rdma_init_func; + struct rdma_close_func_ramrod_data rdma_close_func; + struct rdma_register_tid_ramrod_data rdma_register_tid; + struct rdma_deregister_tid_ramrod_data rdma_deregister_tid; + struct roce_create_qp_resp_ramrod_data roce_create_qp_resp; + struct roce_create_qp_req_ramrod_data roce_create_qp_req; + struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp; + struct roce_modify_qp_req_ramrod_data roce_modify_qp_req; + struct roce_query_qp_resp_ramrod_data roce_query_qp_resp; + struct roce_query_qp_req_ramrod_data roce_query_qp_req; + struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp; + struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req; + struct rdma_create_cq_ramrod_data rdma_create_cq; + struct rdma_resize_cq_ramrod_data rdma_resize_cq; + struct rdma_destroy_cq_ramrod_data rdma_destroy_cq; + struct rdma_srq_create_ramrod_data rdma_create_srq; + struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; + struct rdma_srq_modify_ramrod_data rdma_modify_srq; + + struct iscsi_slow_path_hdr iscsi_empty; + struct iscsi_init_ramrod_params iscsi_init; + struct iscsi_spe_func_dstry iscsi_destroy; + struct iscsi_spe_conn_offload iscsi_conn_offload; + struct iscsi_conn_update_ramrod_params iscsi_conn_update; + struct iscsi_spe_conn_termination iscsi_conn_terminate; + struct vf_start_ramrod_data vf_start; struct vf_stop_ramrod_data vf_stop; }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 67f6ce3c8..a52f3fc05 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -308,6 +308,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int 
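The CDU_REG_SEGMENT*_PARAMS definitions above follow the usual mask/shift pairing for packed register fields. A sketch of how such pairs are typically consumed, assuming a generic helper macro that is not part of the driver:

	#define SET_REG_FIELD(reg, mask, shift, val) \
		(((reg) & ~(mask)) | (((val) << (shift)) & (mask)))

	/* Pack the three T0 fields into a segment-0 params register value. */
	static u32 cdu_seg0_params(u32 reg, u32 tids_in_block, u32 waste, u32 tid_size)
	{
		reg = SET_REG_FIELD(reg, CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK,
				    CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT,
				    tids_in_block);
		reg = SET_REG_FIELD(reg, CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE,
				    CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT,
				    waste);
		reg = SET_REG_FIELD(reg, CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE,
				    CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT,
				    tid_size);
		return reg;
	}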
rc = -EINVAL; + u8 page_cnt; /* update initial eq producer */ qed_eq_prod_update(p_hwfn, @@ -332,7 +333,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->path_id = QED_PATH_ID(p_hwfn); p_ramrod->dont_log_ramrods = 0; p_ramrod->log_type_mask = cpu_to_le16(0xf); - p_ramrod->mf_mode = mode; + switch (mode) { case QED_MF_DEFAULT: case QED_MF_NPAR: @@ -350,24 +351,41 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, /* Place EQ address in RAMROD */ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, p_hwfn->p_eq->chain.pbl.p_phys_table); - p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt; - + page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain); + p_ramrod->event_ring_num_pages = page_cnt; DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, p_hwfn->p_consq->chain.pbl.p_phys_table); qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); - p_hwfn->hw_info.personality = PERSONALITY_ETH; if (IS_MF_SI(p_hwfn)) p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch; + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ETH: + p_ramrod->personality = PERSONALITY_ETH; + break; + case QED_PCI_ISCSI: + p_ramrod->personality = PERSONALITY_ISCSI; + break; + case QED_PCI_ETH_ROCE: + p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; + break; + default: + DP_NOTICE(p_hwfn, "Unkown personality %d\n", + p_hwfn->hw_info.personality); + p_ramrod->personality = PERSONALITY_ETH; + } + if (p_hwfn->cdev->p_iov_info) { struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf; p_ramrod->num_vfs = (u8) p_iov->total_vfs; } + p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; + p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR; DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n", diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 03601dfc0..d73456eab 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -339,6 +339,7 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, if (qed_chain_alloc(p_hwfn->cdev, QED_CHAIN_USE_TO_PRODUCE, QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, num_elem, sizeof(union event_ring_element), &p_eq->chain)) { @@ -412,10 +413,10 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, ***************************************************************************/ void qed_spq_setup(struct qed_hwfn *p_hwfn) { - struct qed_spq *p_spq = p_hwfn->p_spq; - struct qed_spq_entry *p_virt = NULL; - dma_addr_t p_phys = 0; - unsigned int i = 0; + struct qed_spq *p_spq = p_hwfn->p_spq; + struct qed_spq_entry *p_virt = NULL; + dma_addr_t p_phys = 0; + u32 i, capacity; INIT_LIST_HEAD(&p_spq->pending); INIT_LIST_HEAD(&p_spq->completion_pending); @@ -427,7 +428,8 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod); p_virt = p_spq->p_virt; - for (i = 0; i < p_spq->chain.capacity; i++) { + capacity = qed_chain_get_capacity(&p_spq->chain); + for (i = 0; i < capacity; i++) { DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys); list_add_tail(&p_virt->list, &p_spq->free_pool); @@ -455,9 +457,10 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) int qed_spq_alloc(struct qed_hwfn *p_hwfn) { - struct qed_spq *p_spq = NULL; - dma_addr_t p_phys = 0; - struct qed_spq_entry *p_virt = NULL; + struct qed_spq_entry *p_virt = NULL; + struct qed_spq *p_spq = NULL; + dma_addr_t p_phys = 0; + u32 capacity; /* SPQ 
struct */ p_spq = @@ -471,6 +474,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn) if (qed_chain_alloc(p_hwfn->cdev, QED_CHAIN_USE_TO_PRODUCE, QED_CHAIN_MODE_SINGLE, + QED_CHAIN_CNT_TYPE_U16, 0, /* N/A when the mode is SINGLE */ sizeof(struct slow_path_element), &p_spq->chain)) { @@ -479,11 +483,11 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn) } /* allocate and fill the SPQ elements (incl. ramrod data list) */ + capacity = qed_chain_get_capacity(&p_spq->chain); p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, - p_spq->chain.capacity * + capacity * sizeof(struct qed_spq_entry), - &p_phys, - GFP_KERNEL); + &p_phys, GFP_KERNEL); if (!p_virt) goto spq_allocate_fail; @@ -503,16 +507,18 @@ spq_allocate_fail: void qed_spq_free(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; + u32 capacity; if (!p_spq) return; - if (p_spq->p_virt) + if (p_spq->p_virt) { + capacity = qed_chain_get_capacity(&p_spq->chain); dma_free_coherent(&p_hwfn->cdev->pdev->dev, - p_spq->chain.capacity * + capacity * sizeof(struct qed_spq_entry), - p_spq->p_virt, - p_spq->p_phys); + p_spq->p_virt, p_spq->p_phys); + } qed_chain_free(p_hwfn->cdev, &p_spq->chain); ; @@ -881,9 +887,9 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn) if (qed_chain_alloc(p_hwfn->cdev, QED_CHAIN_USE_TO_PRODUCE, QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, QED_CHAIN_PAGE_SIZE / 0x80, - 0x80, - &p_consq->chain)) { + 0x80, &p_consq->chain)) { DP_NOTICE(p_hwfn, "Failed to allocate consq chain"); goto consq_allocate_fail; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index c325ee857..15399da26 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -21,18 +21,18 @@ #include "qed_vf.h" /* IOV ramrods */ -static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, - u32 concrete_vfid, u16 opaque_vfid) +static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) { struct vf_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; + u8 fp_minor; /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); - init_data.opaque_fid = opaque_vfid; + init_data.opaque_fid = p_vf->opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, @@ -43,10 +43,39 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, p_ramrod = &p_ent->ramrod.vf_start; - p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); - p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid); + p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID); + p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid); - p_ramrod->personality = PERSONALITY_ETH; + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ETH: + p_ramrod->personality = PERSONALITY_ETH; + break; + case QED_PCI_ETH_ROCE: + p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; + break; + default: + DP_NOTICE(p_hwfn, "Unknown VF personality %d\n", + p_hwfn->hw_info.personality); + return -EINVAL; + } + + fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor; + if (fp_minor > ETH_HSI_VER_MINOR) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n", + p_vf->abs_vf_id, + ETH_HSI_VER_MAJOR, + fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); + fp_minor = ETH_HSI_VER_MINOR; + } + + p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = 
ETH_HSI_VER_MAJOR; + p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] - Starting using HSI %02x.%02x\n", + p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor); return qed_spq_post(p_hwfn, p_ent, NULL); } @@ -117,6 +146,45 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, return vf; } +static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, u16 rx_qid) +{ + if (rx_qid >= p_vf->num_rxqs) + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n", + p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs); + return rx_qid < p_vf->num_rxqs; +} + +static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, u16 tx_qid) +{ + if (tx_qid >= p_vf->num_txqs) + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n", + p_vf->abs_vf_id, tx_qid, p_vf->num_txqs); + return tx_qid < p_vf->num_txqs; +} + +static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, u16 sb_idx) +{ + int i; + + for (i = 0; i < p_vf->num_sbs; i++) + if (p_vf->igu_sbs[i] == sb_idx) + return true; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n", + p_vf->abs_vf_id, sb_idx, p_vf->num_sbs); + + return false; +} + int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, int vfid, struct qed_ptt *p_ptt) { @@ -293,6 +361,9 @@ static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | (vf->abs_vf_id << 8); vf->vport_id = idx + 1; + + vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS; + vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; } } @@ -598,17 +669,6 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, /* unpretend */ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); - if (vf->state != VF_STOPPED) { - DP_NOTICE(p_hwfn, "VF[%02x] is already started\n", - vf->abs_vf_id); - return -EINVAL; - } - - /* Start VF */ - rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid); - if (rc) - DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id); - vf->state = VF_FREE; return rc; @@ -852,7 +912,6 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params params; struct qed_mcp_link_state link; struct qed_vf_info *vf = NULL; - int rc = 0; vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); if (!vf) { @@ -874,18 +933,8 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps); - if (vf->state != VF_STOPPED) { - /* Stopping the VF */ - rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid); - - if (rc != 0) { - DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n", - rc); - return rc; - } - - vf->state = VF_STOPPED; - } + /* Forget the VF's acquisition message */ + memset(&vf->acquire, 0, sizeof(vf->acquire)); /* disabling interrupts and resetting permission table was done during * vf-close, however, we could get here without going through vf_close @@ -1116,8 +1165,6 @@ static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, p_vf->vf_bulletin = 0; p_vf->vport_instance = 0; - p_vf->num_mac_filters = 0; - p_vf->num_vlan_filters = 0; p_vf->configured_features = 0; /* If VF previously requested less resources, go back to default */ @@ -1130,9 +1177,95 @@ static void
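The validation helpers added above gate every VF-supplied index before it touches PF state; a status block in particular is accepted only if it appears in the VF's own IGU SB list. A minimal standalone sketch of that membership check, with illustrative names:

	static bool vf_sb_is_valid(const u16 *igu_sbs, int num_sbs, u16 sb_idx)
	{
		int i;

		/* A status block is valid only if it was explicitly
		 * assigned to this VF.
		 */
		for (i = 0; i < num_sbs; i++)
			if (igu_sbs[i] == sb_idx)
				return true;

		return false;
	}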
qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, p_vf->vf_queues[i].rxq_active = 0; memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); + memset(&p_vf->acquire, 0, sizeof(p_vf->acquire)); qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); } +static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf, + struct vf_pf_resc_request *p_req, + struct pf_vf_resc *p_resp) +{ + int i; + + /* Queue related information */ + p_resp->num_rxqs = p_vf->num_rxqs; + p_resp->num_txqs = p_vf->num_txqs; + p_resp->num_sbs = p_vf->num_sbs; + + for (i = 0; i < p_resp->num_sbs; i++) { + p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i]; + p_resp->hw_sbs[i].sb_qid = 0; + } + + /* These fields are filled for backward compatibility. + * Unused by modern vfs. + */ + for (i = 0; i < p_resp->num_rxqs; i++) { + qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid, + (u16 *)&p_resp->hw_qid[i]); + p_resp->cid[i] = p_vf->vf_queues[i].fw_cid; + } + + /* Filter related information */ + p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters, + p_req->num_mac_filters); + p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters, + p_req->num_vlan_filters); + + /* This isn't really needed/enforced, but some legacy VFs might depend + * on the correct filling of this field. + */ + p_resp->num_mc_filters = QED_MAX_MC_ADDRS; + + /* Validate sufficient resources for VF */ + if (p_resp->num_rxqs < p_req->num_rxqs || + p_resp->num_txqs < p_req->num_txqs || + p_resp->num_sbs < p_req->num_sbs || + p_resp->num_mac_filters < p_req->num_mac_filters || + p_resp->num_vlan_filters < p_req->num_vlan_filters || + p_resp->num_mc_filters < p_req->num_mc_filters) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n", + p_vf->abs_vf_id, + p_req->num_rxqs, + p_resp->num_rxqs, + p_req->num_rxqs, + p_resp->num_txqs, + p_req->num_sbs, + p_resp->num_sbs, + p_req->num_mac_filters, + p_resp->num_mac_filters, + p_req->num_vlan_filters, + p_resp->num_vlan_filters, + p_req->num_mc_filters, p_resp->num_mc_filters); + return PFVF_STATUS_NO_RESOURCE; + } + + return PFVF_STATUS_SUCCESS; +} + +static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn, + struct pfvf_stats_info *p_stats) +{ + p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B + + offsetof(struct mstorm_vf_zone, + non_trigger.eth_queue_stat); + p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat); + p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B + + offsetof(struct ustorm_vf_zone, + non_trigger.eth_queue_stat); + p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat); + p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B + + offsetof(struct pstorm_vf_zone, + non_trigger.eth_queue_stat); + p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat); + p_stats->tstats.address = 0; + p_stats->tstats.len = 0; +} + static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) @@ -1141,25 +1274,27 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; - u8 i, vfpf_status = PFVF_STATUS_SUCCESS; + u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED; struct pf_vf_resc *resc = &resp->resc; + int rc; + + memset(resp, 0, sizeof(*resp)); /* Validate FW compatibility */ - 
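qed_iov_vf_mbx_acquire_resc() above encodes the PF side of the negotiation: each granted amount is clamped to the VF's static quota with min_t(), and any shortfall turns the whole acquire into PFVF_STATUS_NO_RESOURCE so the VF can retry with smaller numbers. A reduced sketch of that rule for a single resource type, with hypothetical names:

	#include <linux/kernel.h>	/* min_t() */

	/* Returns false when the grant falls short of the request,
	 * i.e. the PF would answer PFVF_STATUS_NO_RESOURCE.
	 */
	static bool grant_resource(u8 quota, u8 requested, u8 *granted)
	{
		*granted = min_t(u8, quota, requested);	/* never exceed the quota */

		return *granted >= requested;
	}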
if (req->vfdev_info.fw_major != FW_MAJOR_VERSION || - req->vfdev_info.fw_minor != FW_MINOR_VERSION || - req->vfdev_info.fw_revision != FW_REVISION_VERSION || - req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) { + if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) { DP_INFO(p_hwfn, - "VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n", + "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n", vf->abs_vf_id, - req->vfdev_info.fw_major, - req->vfdev_info.fw_minor, - req->vfdev_info.fw_revision, - req->vfdev_info.fw_engineering, - FW_MAJOR_VERSION, - FW_MINOR_VERSION, - FW_REVISION_VERSION, FW_ENGINEERING_VERSION); - vfpf_status = PFVF_STATUS_NOT_SUPPORTED; + req->vfdev_info.eth_fp_hsi_major, + req->vfdev_info.eth_fp_hsi_minor, + ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); + + /* Write the PF version so that VF would know which version + * is supported. + */ + pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; + pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR; + goto out; } @@ -1169,16 +1304,13 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, DP_INFO(p_hwfn, "VF[%d] is running an old driver that doesn't support 100g\n", vf->abs_vf_id); - vfpf_status = PFVF_STATUS_NOT_SUPPORTED; goto out; } - memset(resp, 0, sizeof(*resp)); + /* Store the acquire message */ + memcpy(&vf->acquire, req, sizeof(vf->acquire)); - /* Fill in vf info stuff */ vf->opaque_fid = req->vfdev_info.opaque_fid; - vf->num_mac_filters = 1; - vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; vf->vf_bulletin = req->bulletin_addr; vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? @@ -1194,26 +1326,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, if (p_hwfn->cdev->num_hwfns > 1) pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; - pfdev_info->stats_info.mstats.address = - PXP_VF_BAR0_START_MSDM_ZONE_B + - offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat); - pfdev_info->stats_info.mstats.len = - sizeof(struct eth_mstorm_per_queue_stat); - - pfdev_info->stats_info.ustats.address = - PXP_VF_BAR0_START_USDM_ZONE_B + - offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat); - pfdev_info->stats_info.ustats.len = - sizeof(struct eth_ustorm_per_queue_stat); - - pfdev_info->stats_info.pstats.address = - PXP_VF_BAR0_START_PSDM_ZONE_B + - offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat); - pfdev_info->stats_info.pstats.len = - sizeof(struct eth_pstorm_per_queue_stat); - - pfdev_info->stats_info.tstats.address = 0; - pfdev_info->stats_info.tstats.len = 0; + qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info); memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); @@ -1221,36 +1334,31 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, pfdev_info->fw_minor = FW_MINOR_VERSION; pfdev_info->fw_rev = FW_REVISION_VERSION; pfdev_info->fw_eng = FW_ENGINEERING_VERSION; + pfdev_info->minor_fp_hsi = min_t(u8, + ETH_HSI_VER_MINOR, + req->vfdev_info.eth_fp_hsi_minor); pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX; qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL); pfdev_info->dev_type = p_hwfn->cdev->type; pfdev_info->chip_rev = p_hwfn->cdev->chip_rev; - resc->num_rxqs = vf->num_rxqs; - resc->num_txqs = vf->num_txqs; - resc->num_sbs = vf->num_sbs; - for (i = 0; i < resc->num_sbs; i++) { - resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i]; - resc->hw_sbs[i].sb_qid = 0; - } + /* Fill resources available to VF; Make sure there are
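The rewritten compatibility check above replaces exact firmware-version matching with a fastpath HSI rule: the major version must match exactly, the effective minor is the lower of the two sides, and on mismatch the PF echoes its own version back so the VF can report it. A minimal sketch of that rule, with illustrative types:

	#include <linux/kernel.h>	/* min() */

	struct hsi_ver {
		u8 major;
		u8 minor;
	};

	static bool hsi_compatible(struct hsi_ver pf, struct hsi_ver vf,
				   u8 *effective_minor)
	{
		if (vf.major != pf.major)
			return false;	/* acquire fails with NOT_SUPPORTED */

		*effective_minor = min(pf.minor, vf.minor);
		return true;
	}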
enough to + * satisfy the VF's request. + */ + vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf, + &req->resc_request, resc); + if (vfpf_status != PFVF_STATUS_SUCCESS) + goto out; - for (i = 0; i < resc->num_rxqs; i++) { - qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid, - (u16 *)&resc->hw_qid[i]); - resc->cid[i] = vf->vf_queues[i].fw_cid; + /* Start the VF in FW */ + rc = qed_sp_vf_start(p_hwfn, vf); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id); + vfpf_status = PFVF_STATUS_FAILURE; + goto out; } - resc->num_mac_filters = min_t(u8, vf->num_mac_filters, - req->resc_request.num_mac_filters); - resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters, - req->resc_request.num_vlan_filters); - - /* This isn't really required as VF isn't limited, but some VFs might - * actually test this value, so need to provide it. - */ - resc->num_mc_filters = req->resc_request.num_mc_filters; - /* Fill agreed size of bulletin board in response */ resp->bulletin_size = vf->bulletin.size; qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt); @@ -1296,7 +1404,7 @@ static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, params.anti_spoofing_en = val; rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL); - if (rc) { + if (!rc) { p_vf->spoof_chk = val; p_vf->req_spoofchk_val = p_vf->spoof_chk; DP_VERBOSE(p_hwfn, QED_MSG_IOV, @@ -1585,10 +1693,6 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, sizeof(struct pfvf_def_resp_tlv), status); } -#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A -#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ - (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) - static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf, u8 status) @@ -1606,16 +1710,11 @@ static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, /* Update the TLV with the response */ if (status == PFVF_STATUS_SUCCESS) { - u16 hw_qid = 0; - req = &mbx->req_virt->start_rxq; - qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid, - &hw_qid); - - p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) + - hw_qid * MSTORM_QZONE_SIZE + - offsetof(struct mstorm_eth_queue_zone, - rx_producers); + p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B + + offsetof(struct mstorm_vf_zone, + non_trigger.eth_rx_queue_producers) + + sizeof(struct eth_rx_prod_data) * req->rx_qid; } qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status); @@ -1627,13 +1726,19 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, { struct qed_queue_start_common_params params; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; - u8 status = PFVF_STATUS_SUCCESS; + u8 status = PFVF_STATUS_NO_RESOURCE; struct vfpf_start_rxq_tlv *req; int rc; memset(&params, 0, sizeof(params)); req = &mbx->req_virt->start_rxq; + + if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) || + !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) + goto out; + params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid; + params.vf_qid = req->rx_qid; params.vport_id = vf->vport_id; params.sb = req->hw_sb; params.sb_idx = req->sb_index; @@ -1649,22 +1754,48 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, if (rc) { status = PFVF_STATUS_FAILURE; } else { + status = PFVF_STATUS_SUCCESS; vf->vf_queues[req->rx_qid].rxq_active = true; vf->num_active_rxqs++; } +out: qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status); } +static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info
*p_vf, u8 status) +{ + struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct pfvf_start_queue_resp_tlv *p_tlv; + + mbx->offset = (u8 *)mbx->reply_virt; + + p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ, + sizeof(*p_tlv)); + qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* Update the TLV with the response */ + if (status == PFVF_STATUS_SUCCESS) { + u16 qid = mbx->req_virt->start_txq.tx_qid; + + p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid, + DQ_DEMS_LEGACY); + } + + qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_tlv), status); +} + static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) { - u16 length = sizeof(struct pfvf_def_resp_tlv); struct qed_queue_start_common_params params; struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + u8 status = PFVF_STATUS_NO_RESOURCE; union qed_qm_pq_params pq_params; - u8 status = PFVF_STATUS_SUCCESS; struct vfpf_start_txq_tlv *req; int rc; @@ -1675,6 +1806,11 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, memset(&params, 0, sizeof(params)); req = &mbx->req_virt->start_txq; + + if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) || + !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) + goto out; + params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid; params.vport_id = vf->vport_id; params.sb = req->hw_sb; @@ -1688,13 +1824,15 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, req->pbl_addr, req->pbl_size, &pq_params); - if (rc) + if (rc) { status = PFVF_STATUS_FAILURE; - else + } else { + status = PFVF_STATUS_SUCCESS; vf->vf_queues[req->tx_qid].txq_active = true; + } - qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ, - length, status); +out: + qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status); } static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, @@ -2119,6 +2257,16 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, u16 length; int rc; + /* Validate PF can send such a request */ + if (!vf->vport_instance) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "No VPORT instance available for VF[%d], failing vport update\n", + vf->abs_vf_id); + status = PFVF_STATUS_FAILURE; + goto out; + } + memset(&params, 0, sizeof(params)); params.opaque_fid = vf->opaque_fid; params.vport_id = vf->vport_id; @@ -2156,15 +2309,12 @@ out: qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); } -static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn, - struct qed_vf_info *p_vf, - struct qed_filter_ucast *p_params) +static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, + struct qed_filter_ucast *p_params) { int i; - if (p_params->type == QED_FILTER_MAC) - return 0; - /* First remove entries and then add new ones */ if (p_params->opcode == QED_FILTER_REMOVE) { for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) @@ -2222,6 +2367,80 @@ static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn, return 0; } +static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, + struct qed_filter_ucast *p_params) +{ + int i; + + /* If we're in forced-mode, we don't allow any change */ + if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) + return 0; + + /* First remove entries and then add new ones */ + if (p_params->opcode == QED_FILTER_REMOVE) { + for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { + if (ether_addr_equal(p_vf->shadow_config.macs[i], + p_params->mac)) { + 
memset(p_vf->shadow_config.macs[i], 0, + ETH_ALEN); + break; + } + } + + if (i == QED_ETH_VF_NUM_MAC_FILTERS) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "MAC isn't configured\n"); + return -EINVAL; + } + } else if (p_params->opcode == QED_FILTER_REPLACE || + p_params->opcode == QED_FILTER_FLUSH) { + for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) + memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN); + } + + /* List the new MAC address */ + if (p_params->opcode != QED_FILTER_ADD && + p_params->opcode != QED_FILTER_REPLACE) + return 0; + + for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { + if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) { + ether_addr_copy(p_vf->shadow_config.macs[i], + p_params->mac); + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Added MAC at %d entry in shadow\n", i); + break; + } + } + + if (i == QED_ETH_VF_NUM_MAC_FILTERS) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n"); + return -EINVAL; + } + + return 0; +} + +static int +qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, + struct qed_filter_ucast *p_params) +{ + int rc = 0; + + if (p_params->type == QED_FILTER_MAC) { + rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params); + if (rc) + return rc; + } + + if (p_params->type == QED_FILTER_VLAN) + rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params); + + return rc; +} + int qed_iov_chk_ucast(struct qed_hwfn *hwfn, int vfid, struct qed_filter_ucast *params) { @@ -2366,11 +2585,27 @@ static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) { u16 length = sizeof(struct pfvf_def_resp_tlv); + u8 status = PFVF_STATUS_SUCCESS; + int rc = 0; qed_iov_vf_cleanup(p_hwfn, p_vf); + if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) { + /* Stopping the VF */ + rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid, + p_vf->opaque_fid); + + if (rc) { + DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n", + rc); + status = PFVF_STATUS_FAILURE; + } + + p_vf->state = VF_STOPPED; + } + qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, - length, PFVF_STATUS_SUCCESS); + length, status); } static int @@ -2622,7 +2857,6 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, { struct qed_iov_vf_mbx *mbx; struct qed_vf_info *p_vf; - int i; p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); if (!p_vf) @@ -2631,9 +2865,8 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, mbx = &p_vf->vf_mbx; /* qed_iov_process_mbx_request */ - DP_VERBOSE(p_hwfn, - QED_MSG_IOV, - "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id); + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id); mbx->first_tlv = mbx->req_virt->first_tlv; @@ -2687,15 +2920,28 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, * support them. Or this may be because someone wrote a crappy * VF driver and is sending garbage over the channel. */ - DP_ERR(p_hwfn, - "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n", - mbx->first_tlv.tl.type, mbx->first_tlv.tl.length); - - for (i = 0; i < 20; i++) { + DP_NOTICE(p_hwfn, + "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n", + p_vf->abs_vf_id, + mbx->first_tlv.tl.type, + mbx->first_tlv.tl.length, + mbx->first_tlv.padding, mbx->first_tlv.reply_address); + + /* Try replying in case reply address matches the acquisition's + * posted address. 
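qed_iov_vf_update_mac_shadow() above treats an all-zero entry as a free slot: removal zeroes the matching entry, and addition claims the first zero one, failing with -EINVAL when the table is full. A standalone sketch of the add path, assuming the same zero-means-free convention:

	#include <linux/etherdevice.h>

	static int shadow_mac_add(u8 macs[][ETH_ALEN], int n, const u8 *mac)
	{
		int i;

		for (i = 0; i < n; i++) {
			if (is_zero_ether_addr(macs[i])) {
				ether_addr_copy(macs[i], mac);
				return 0;
			}
		}

		return -EINVAL;	/* no free entry left */
	}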
+ */ + if (p_vf->acquire.first_tlv.reply_address && + (mbx->first_tlv.reply_address == + p_vf->acquire.first_tlv.reply_address)) { + qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, + mbx->first_tlv.tl.type, + sizeof(struct pfvf_def_resp_tlv), + PFVF_STATUS_NOT_SUPPORTED); + } else { DP_VERBOSE(p_hwfn, QED_MSG_IOV, - "%x ", - mbx->req_virt->tlv_buf_size.tlv_buffer[i]); + "VF[%02x]: Can't respond to TLV - no valid reply address\n", + p_vf->abs_vf_id); } } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index c90b2b6ad..0dd23e409 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -10,6 +10,9 @@ #define _QED_SRIOV_H #include <linux/types.h> #include "qed_vf.h" + +#define QED_ETH_VF_NUM_MAC_FILTERS 1 +#define QED_ETH_VF_NUM_VLAN_FILTERS 2 #define QED_VF_ARRAY_LENGTH (3) #ifdef CONFIG_QED_SRIOV @@ -24,7 +27,6 @@ #define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) #define QED_MAX_VF_CHAINS_PER_PF 16 -#define QED_ETH_VF_NUM_VLAN_FILTERS 2 #define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \ (MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS) @@ -120,6 +122,8 @@ struct qed_vf_shadow_config { /* Shadow copy of all guest vlans */ struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1]; + /* Shadow copy of all configured MACs; Empty if forcing MACs */ + u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN]; u8 inner_vlan_removal; }; @@ -133,6 +137,9 @@ struct qed_vf_info { struct qed_bulletin bulletin; dma_addr_t vf_bulletin; + /* PF saves a copy of the last VF acquire message */ + struct vfpf_acquire_tlv acquire; + u32 concrete_fid; u16 opaque_fid; u16 mtu; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 72e69c0ec..9b780b31b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -117,36 +117,64 @@ exit: } #define VF_ACQUIRE_THRESH 3 -#define VF_ACQUIRE_MAC_FILTERS 1 +static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn, + struct vf_pf_resc_request *p_req, + struct pf_vf_resc *p_resp) +{ + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. 
Try PF recommended amount\n", + p_req->num_rxqs, + p_resp->num_rxqs, + p_req->num_rxqs, + p_resp->num_txqs, + p_req->num_sbs, + p_resp->num_sbs, + p_req->num_mac_filters, + p_resp->num_mac_filters, + p_req->num_vlan_filters, + p_resp->num_vlan_filters, + p_req->num_mc_filters, p_resp->num_mc_filters); + + /* humble our request */ + p_req->num_txqs = p_resp->num_txqs; + p_req->num_rxqs = p_resp->num_rxqs; + p_req->num_sbs = p_resp->num_sbs; + p_req->num_mac_filters = p_resp->num_mac_filters; + p_req->num_vlan_filters = p_resp->num_vlan_filters; + p_req->num_mc_filters = p_resp->num_mc_filters; +} static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; - u8 rx_count = 1, tx_count = 1, num_sbs = 1; - u8 num_mac = VF_ACQUIRE_MAC_FILTERS; + struct vf_pf_resc_request *p_resc; bool resources_acquired = false; struct vfpf_acquire_tlv *req; int rc = 0, attempts = 0; /* clear mailbox and prep first tlv */ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req)); + p_resc = &req->resc_request; /* starting filling the request */ req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid; - req->resc_request.num_rxqs = rx_count; - req->resc_request.num_txqs = tx_count; - req->resc_request.num_sbs = num_sbs; - req->resc_request.num_mac_filters = num_mac; - req->resc_request.num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; + p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF; + p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF; + p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF; + p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS; + p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX; req->vfdev_info.fw_major = FW_MAJOR_VERSION; req->vfdev_info.fw_minor = FW_MINOR_VERSION; req->vfdev_info.fw_revision = FW_REVISION_VERSION; req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION; + req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR; + req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR; /* Fill capability field with any non-deprecated config we support */ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G; @@ -185,21 +213,21 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) resources_acquired = true; } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE && attempts < VF_ACQUIRE_THRESH) { - DP_VERBOSE(p_hwfn, - QED_MSG_IOV, - "PF unwilling to fullfill resource request. Try PF recommended amount\n"); - - /* humble our request */ - req->resc_request.num_txqs = resp->resc.num_txqs; - req->resc_request.num_rxqs = resp->resc.num_rxqs; - req->resc_request.num_sbs = resp->resc.num_sbs; - req->resc_request.num_mac_filters = - resp->resc.num_mac_filters; - req->resc_request.num_vlan_filters = - resp->resc.num_vlan_filters; + qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc, + &resp->resc); /* Clear response buffer */ memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); + } else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) && + pfdev_info->major_fp_hsi && + (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) { + DP_NOTICE(p_hwfn, + "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. 
Please change to a VF driver using %02x.xx.\n", + pfdev_info->major_fp_hsi, + pfdev_info->minor_fp_hsi, + ETH_HSI_VER_MAJOR, + ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi); + return -EINVAL; } else { DP_ERR(p_hwfn, "PF returned error %d to VF acquisition request\n", @@ -225,6 +253,13 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) } } + if (ETH_HSI_VER_MINOR && + (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) { + DP_INFO(p_hwfn, + "PF is using older fastpath HSI; %02x.%02x is configured\n", + ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi); + } + return 0; } @@ -353,7 +388,7 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, /* Learn the address of the producer from the response */ if (pp_prod) { - u64 init_prod_val = 0; + u32 init_prod_val = 0; *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset; DP_VERBOSE(p_hwfn, QED_MSG_IOV, @@ -361,7 +396,7 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, rx_qid, *pp_prod, resp->offset); /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ - __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64), + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), (u32 *)&init_prod_val); } @@ -405,8 +440,8 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, u16 pbl_size, void __iomem **pp_doorbell) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_start_queue_resp_tlv *resp; struct vfpf_start_txq_tlv *req; - struct pfvf_def_resp_tlv *resp; int rc; /* clear mailbox and prep first tlv */ @@ -424,20 +459,24 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); - resp = &p_iov->pf2vf_reply->default_resp; + resp = &p_iov->pf2vf_reply->queue_start; rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); if (rc) - return rc; + goto exit; - if (resp->hdr.status != PFVF_STATUS_SUCCESS) - return -EINVAL; + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } if (pp_doorbell) { - u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id]; + *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset; - *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + - qed_db_addr(cid, DQ_DEMS_LEGACY); + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n", + tx_queue_id, *pp_doorbell, resp->offset); } +exit: return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index b82fda964..b23ce58e9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -96,7 +96,9 @@ struct vfpf_acquire_tlv { u32 driver_version; u16 opaque_fid; /* ME register value */ u8 os_type; /* VFPF_ACQUIRE_OS_* value */ - u8 padding[5]; + u8 eth_fp_hsi_major; + u8 eth_fp_hsi_minor; + u8 padding[3]; } vfdev_info; struct vf_pf_resc_request resc_request; @@ -171,7 +173,14 @@ struct pfvf_acquire_resp_tlv { struct pfvf_stats_info stats_info; u8 port_mac[ETH_ALEN]; - u8 padding2[2]; + + /* It's possible PF had to configure an older fastpath HSI + * [in case VF is newer than PF]. This is communicated back + * to the VF. It can also be used in case of error due to + * non-matching versions to shed light in VF about failure. 
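The acquire flow above amounts to a bounded retry loop on the VF side: on PFVF_STATUS_NO_RESOURCE the request is humbled to the PF-recommended amounts and resent (up to VF_ACQUIRE_THRESH attempts), while an HSI-major mismatch aborts immediately instead of retrying. A schematic sketch of that policy, with hypothetical callbacks standing in for the real mailbox exchange:

	static int acquire_with_retries(int (*try_acquire)(void *req, void *resp),
					void (*adopt_recommended)(void *req,
								  const void *resp),
					void *req, void *resp, int max_attempts)
	{
		int attempts, rc;

		for (attempts = 0; attempts < max_attempts; attempts++) {
			rc = try_acquire(req, resp);
			if (rc != -EAGAIN)	/* success, or a hard failure */
				return rc;
			adopt_recommended(req, resp);	/* humble the request */
		}

		return -EBUSY;
	}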
+ */ + u8 major_fp_hsi; + u8 minor_fp_hsi; } pfdev_info; struct pf_vf_resc { diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile index 06ff90d87..74a49850d 100644 --- a/drivers/net/ethernet/qlogic/qede/Makefile +++ b/drivers/net/ethernet/qlogic/qede/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_QEDE) := qede.o qede-y := qede_main.o qede_ethtool.o +qede-$(CONFIG_DCB) += qede_dcbnl.o diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 47d6b2225..02b06d4e4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -24,7 +24,7 @@ #include <linux/qed/qed_eth_if.h> #define QEDE_MAJOR_VERSION 8 -#define QEDE_MINOR_VERSION 7 +#define QEDE_MINOR_VERSION 10 #define QEDE_REVISION_VERSION 1 #define QEDE_ENGINEERING_VERSION 20 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ @@ -143,6 +143,8 @@ struct qede_dev { struct mutex qede_lock; u32 state; /* Protected by qede_lock */ u16 rx_buf_size; + u32 rx_copybreak; + /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */ #define ETH_OVERHEAD (ETH_HLEN + 8 + 8) /* Max supported alignment is 256 (8 shift) @@ -235,6 +237,7 @@ struct qede_rx_queue { u64 rx_hw_errors; u64 rx_alloc_errors; + u64 rx_ip_frags; }; union db_prod { @@ -304,6 +307,9 @@ union qede_reload_args { u16 mtu; }; +#ifdef CONFIG_DCB +void qede_set_dcbnl_ops(struct net_device *ndev); +#endif void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level); void qede_set_ethtool_ops(struct net_device *netdev); void qede_reload(struct qede_dev *edev, @@ -329,6 +335,7 @@ void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev, #define NUM_TX_BDS_MIN 128 #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX +#define QEDE_MIN_PKT_LEN 64 #define QEDE_RX_HDR_SIZE 256 #define for_each_rss(i) for (i = 0; i < edev->num_rss; i++) diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c new file mode 100644 index 000000000..03e8c0212 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c @@ -0,0 +1,348 @@ +/* QLogic qede NIC Driver +* Copyright (c) 2015 QLogic Corporation +* +* This software is available under the terms of the GNU General Public License +* (GPL) Version 2, available from the file COPYING in the main directory of +* this source tree. 
+*/ + +#include <linux/types.h> +#include <linux/netdevice.h> +#include <linux/rtnetlink.h> +#include <net/dcbnl.h> +#include "qede.h" + +static u8 qede_dcbnl_getstate(struct net_device *netdev) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->getstate(edev->cdev); +} + +static u8 qede_dcbnl_setstate(struct net_device *netdev, u8 state) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setstate(edev->cdev, state); +} + +static void qede_dcbnl_getpermhwaddr(struct net_device *netdev, + u8 *perm_addr) +{ + memcpy(perm_addr, netdev->dev_addr, netdev->addr_len); +} + +static void qede_dcbnl_getpgtccfgtx(struct net_device *netdev, int prio, + u8 *prio_type, u8 *pgid, u8 *bw_pct, + u8 *up_map) +{ + struct qede_dev *edev = netdev_priv(netdev); + + edev->ops->dcb->getpgtccfgtx(edev->cdev, prio, prio_type, + pgid, bw_pct, up_map); +} + +static void qede_dcbnl_getpgbwgcfgtx(struct net_device *netdev, + int pgid, u8 *bw_pct) +{ + struct qede_dev *edev = netdev_priv(netdev); + + edev->ops->dcb->getpgbwgcfgtx(edev->cdev, pgid, bw_pct); +} + +static void qede_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio, + u8 *prio_type, u8 *pgid, u8 *bw_pct, + u8 *up_map) +{ + struct qede_dev *edev = netdev_priv(netdev); + + edev->ops->dcb->getpgtccfgrx(edev->cdev, prio, prio_type, pgid, bw_pct, + up_map); +} + +static void qede_dcbnl_getpgbwgcfgrx(struct net_device *netdev, + int pgid, u8 *bw_pct) +{ + struct qede_dev *edev = netdev_priv(netdev); + + edev->ops->dcb->getpgbwgcfgrx(edev->cdev, pgid, bw_pct); +} + +static void qede_dcbnl_getpfccfg(struct net_device *netdev, int prio, + u8 *setting) +{ + struct qede_dev *edev = netdev_priv(netdev); + + edev->ops->dcb->getpfccfg(edev->cdev, prio, setting); +} + +static void qede_dcbnl_setpfccfg(struct net_device *netdev, int prio, + u8 setting) +{ + struct qede_dev *edev = netdev_priv(netdev); + + edev->ops->dcb->setpfccfg(edev->cdev, prio, setting); +} + +static u8 qede_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->getcap(edev->cdev, capid, cap); +} + +static int qede_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->getnumtcs(edev->cdev, tcid, num); +} + +static u8 qede_dcbnl_getpfcstate(struct net_device *netdev) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->getpfcstate(edev->cdev); +} + +static int qede_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->getapp(edev->cdev, idtype, id); +} + +static u8 qede_dcbnl_getdcbx(struct net_device *netdev) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->getdcbx(edev->cdev); +} + +static void qede_dcbnl_setpgtccfgtx(struct net_device *netdev, int prio, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setpgtccfgtx(edev->cdev, prio, pri_type, pgid, + bw_pct, up_map); +} + +static void qede_dcbnl_setpgtccfgrx(struct net_device *netdev, int prio, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setpgtccfgrx(edev->cdev, prio, pri_type, pgid, + bw_pct, up_map); +} + +static void qede_dcbnl_setpgbwgcfgtx(struct net_device *netdev, int pgid, + u8 bw_pct) +{ + struct qede_dev *edev = netdev_priv(netdev); + + 
return edev->ops->dcb->setpgbwgcfgtx(edev->cdev, pgid, bw_pct); +} + +static void qede_dcbnl_setpgbwgcfgrx(struct net_device *netdev, int pgid, + u8 bw_pct) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setpgbwgcfgrx(edev->cdev, pgid, bw_pct); +} + +static u8 qede_dcbnl_setall(struct net_device *netdev) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setall(edev->cdev); +} + +static int qede_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setnumtcs(edev->cdev, tcid, num); +} + +static void qede_dcbnl_setpfcstate(struct net_device *netdev, u8 state) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setpfcstate(edev->cdev, state); +} + +static int qede_dcbnl_setapp(struct net_device *netdev, u8 idtype, u16 idval, + u8 up) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setapp(edev->cdev, idtype, idval, up); +} + +static u8 qede_dcbnl_setdcbx(struct net_device *netdev, u8 state) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setdcbx(edev->cdev, state); +} + +static u8 qede_dcbnl_getfeatcfg(struct net_device *netdev, int featid, + u8 *flags) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->getfeatcfg(edev->cdev, featid, flags); +} + +static u8 qede_dcbnl_setfeatcfg(struct net_device *netdev, int featid, u8 flags) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->setfeatcfg(edev->cdev, featid, flags); +} + +static int qede_dcbnl_peer_getappinfo(struct net_device *netdev, + struct dcb_peer_app_info *info, + u16 *count) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->peer_getappinfo(edev->cdev, info, count); +} + +static int qede_dcbnl_peer_getapptable(struct net_device *netdev, + struct dcb_app *app) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->peer_getapptable(edev->cdev, app); +} + +static int qede_dcbnl_cee_peer_getpfc(struct net_device *netdev, + struct cee_pfc *pfc) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->cee_peer_getpfc(edev->cdev, pfc); +} + +static int qede_dcbnl_cee_peer_getpg(struct net_device *netdev, + struct cee_pg *pg) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->cee_peer_getpg(edev->cdev, pg); +} + +static int qede_dcbnl_ieee_getpfc(struct net_device *netdev, + struct ieee_pfc *pfc) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->ieee_getpfc(edev->cdev, pfc); +} + +static int qede_dcbnl_ieee_setpfc(struct net_device *netdev, + struct ieee_pfc *pfc) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->ieee_setpfc(edev->cdev, pfc); +} + +static int qede_dcbnl_ieee_getets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->ieee_getets(edev->cdev, ets); +} + +static int qede_dcbnl_ieee_setets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->ieee_setets(edev->cdev, ets); +} + +static int qede_dcbnl_ieee_getapp(struct net_device *netdev, + struct dcb_app *app) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->ieee_getapp(edev->cdev, app); +} + +static int qede_dcbnl_ieee_setapp(struct net_device *netdev, + struct dcb_app *app) +{ + struct 
qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->ieee_setapp(edev->cdev, app); +} + +static int qede_dcbnl_ieee_peer_getpfc(struct net_device *netdev, + struct ieee_pfc *pfc) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->ieee_peer_getpfc(edev->cdev, pfc); +} + +static int qede_dcbnl_ieee_peer_getets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct qede_dev *edev = netdev_priv(netdev); + + return edev->ops->dcb->ieee_peer_getets(edev->cdev, ets); +} + +static const struct dcbnl_rtnl_ops qede_dcbnl_ops = { + .ieee_getpfc = qede_dcbnl_ieee_getpfc, + .ieee_setpfc = qede_dcbnl_ieee_setpfc, + .ieee_getets = qede_dcbnl_ieee_getets, + .ieee_setets = qede_dcbnl_ieee_setets, + .ieee_getapp = qede_dcbnl_ieee_getapp, + .ieee_setapp = qede_dcbnl_ieee_setapp, + .getdcbx = qede_dcbnl_getdcbx, + .ieee_peer_getpfc = qede_dcbnl_ieee_peer_getpfc, + .ieee_peer_getets = qede_dcbnl_ieee_peer_getets, + .getstate = qede_dcbnl_getstate, + .setstate = qede_dcbnl_setstate, + .getpermhwaddr = qede_dcbnl_getpermhwaddr, + .getpgtccfgtx = qede_dcbnl_getpgtccfgtx, + .getpgbwgcfgtx = qede_dcbnl_getpgbwgcfgtx, + .getpgtccfgrx = qede_dcbnl_getpgtccfgrx, + .getpgbwgcfgrx = qede_dcbnl_getpgbwgcfgrx, + .getpfccfg = qede_dcbnl_getpfccfg, + .setpfccfg = qede_dcbnl_setpfccfg, + .getcap = qede_dcbnl_getcap, + .getnumtcs = qede_dcbnl_getnumtcs, + .getpfcstate = qede_dcbnl_getpfcstate, + .getapp = qede_dcbnl_getapp, + .getdcbx = qede_dcbnl_getdcbx, + .setpgtccfgtx = qede_dcbnl_setpgtccfgtx, + .setpgtccfgrx = qede_dcbnl_setpgtccfgrx, + .setpgbwgcfgtx = qede_dcbnl_setpgbwgcfgtx, + .setpgbwgcfgrx = qede_dcbnl_setpgbwgcfgrx, + .setall = qede_dcbnl_setall, + .setnumtcs = qede_dcbnl_setnumtcs, + .setpfcstate = qede_dcbnl_setpfcstate, + .setapp = qede_dcbnl_setapp, + .setdcbx = qede_dcbnl_setdcbx, + .setfeatcfg = qede_dcbnl_setfeatcfg, + .getfeatcfg = qede_dcbnl_getfeatcfg, + .peer_getappinfo = qede_dcbnl_peer_getappinfo, + .peer_getapptable = qede_dcbnl_peer_getapptable, + .cee_peer_getpfc = qede_dcbnl_cee_peer_getpfc, + .cee_peer_getpg = qede_dcbnl_cee_peer_getpg, +}; + +void qede_set_dcbnl_ops(struct net_device *dev) +{ + dev->dcbnl_ops = &qede_dcbnl_ops; +} diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index ad3cae3b7..f8492cac9 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -37,6 +37,7 @@ static const struct { } qede_rqstats_arr[] = { QEDE_RQSTAT(rx_hw_errors), QEDE_RQSTAT(rx_alloc_errors), + QEDE_RQSTAT(rx_ip_frags), }; #define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr) @@ -426,6 +427,59 @@ static u32 qede_get_link(struct net_device *dev) return current_link.link_up; } +static int qede_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct qede_dev *edev = netdev_priv(dev); + u16 rxc, txc; + + memset(coal, 0, sizeof(struct ethtool_coalesce)); + edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc); + + coal->rx_coalesce_usecs = rxc; + coal->tx_coalesce_usecs = txc; + + return 0; +} + +static int qede_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal) +{ + struct qede_dev *edev = netdev_priv(dev); + int i, rc = 0; + u16 rxc, txc; + u8 sb_id; + + if (!netif_running(dev)) { + DP_INFO(edev, "Interface is down\n"); + return -EINVAL; + } + + if (coal->rx_coalesce_usecs > QED_COALESCE_MAX || + coal->tx_coalesce_usecs > QED_COALESCE_MAX) { + DP_INFO(edev, + "Can't support requested %s coalesce 
value [max supported value %d]\n", + coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx" + : "tx", + QED_COALESCE_MAX); + return -EINVAL; + } + + rxc = (u16)coal->rx_coalesce_usecs; + txc = (u16)coal->tx_coalesce_usecs; + for_each_rss(i) { + sb_id = edev->fp_array[i].sb_info->igu_sb_id; + rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc, + (u8)i, sb_id); + if (rc) { + DP_INFO(edev, "Set coalesce error, rc = %d\n", rc); + return rc; + } + } + + return rc; +} + static void qede_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) { @@ -910,6 +964,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev, memset(first_bd, 0, sizeof(*first_bd)); val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; first_bd->data.bd_flags.bitfields = val; + val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK; + first_bd->data.bitfields |= (val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT); /* Map skb linear data for DMA and set in the first BD */ mapping = dma_map_single(&edev->pdev->dev, skb->data, @@ -1129,6 +1185,48 @@ static void qede_self_test(struct net_device *dev, } } +static int qede_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct qede_dev *edev = netdev_priv(dev); + u32 val; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + val = *(u32 *)data; + if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) { + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Invalid rx copy break value, range is [%u, %u]", + QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE); + return -EINVAL; + } + + edev->rx_copybreak = *(u32 *)data; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int qede_get_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, void *data) +{ + struct qede_dev *edev = netdev_priv(dev); + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = edev->rx_copybreak; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + static const struct ethtool_ops qede_ethtool_ops = { .get_settings = qede_get_settings, .set_settings = qede_set_settings, @@ -1137,6 +1235,8 @@ static const struct ethtool_ops qede_ethtool_ops = { .set_msglevel = qede_set_msglevel, .nway_reset = qede_nway_reset, .get_link = qede_get_link, + .get_coalesce = qede_get_coalesce, + .set_coalesce = qede_set_coalesce, .get_ringparam = qede_get_ringparam, .set_ringparam = qede_set_ringparam, .get_pauseparam = qede_get_pauseparam, @@ -1155,6 +1255,8 @@ static const struct ethtool_ops qede_ethtool_ops = { .get_channels = qede_get_channels, .set_channels = qede_set_channels, .self_test = qede_self_test, + .get_tunable = qede_get_tunable, + .set_tunable = qede_set_tunable, }; static const struct ethtool_ops qede_vf_ethtool_ops = { @@ -1177,6 +1279,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = { .set_rxfh = qede_set_rxfh, .get_channels = qede_get_channels, .set_channels = qede_set_channels, + .get_tunable = qede_get_tunable, + .set_tunable = qede_set_tunable, }; void qede_set_ethtool_ops(struct net_device *dev) diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index f8e11f953..9544e4c41 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -24,12 +24,7 @@ #include <linux/netdev_features.h> #include <linux/udp.h> #include <linux/tcp.h> -#ifdef CONFIG_QEDE_VXLAN -#include <net/vxlan.h> -#endif -#ifdef CONFIG_QEDE_GENEVE -#include <net/geneve.h> -#endif +#include <net/udp_tunnel.h> #include <linux/ip.h> #include 
<net/ipv6.h> #include <net/tcp.h> @@ -490,6 +485,24 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb, } #endif +static inline void qede_update_tx_producer(struct qede_tx_queue *txq) +{ + /* wmb makes sure that the BDs data is updated before updating the + * producer, otherwise FW may read old data from the BDs. + */ + wmb(); + barrier(); + writel(txq->tx_db.raw, txq->doorbell_addr); + + /* mmiowb is needed to synchronize doorbell writes from more than one + * processor. It guarantees that the write arrives to the device before + * the queue lock is released and another start_xmit is called (possibly + * on another CPU). Without this barrier, the next doorbell can bypass + * this doorbell. This is applicable to IA64/Altix systems. + */ + mmiowb(); +} + /* Main transmit function */ static netdev_tx_t qede_start_xmit(struct sk_buff *skb, @@ -548,6 +561,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { DP_NOTICE(edev, "SKB mapping failed\n"); qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false); + qede_update_tx_producer(txq); return NETDEV_TX_OK; } nbd++; @@ -579,8 +593,6 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, /* Fill the parsing flags & params according to the requested offload */ if (xmit_type & XMIT_L4_CSUM) { - u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT; - /* We don't re-calculate IP checksum as it is already done by * the upper stack */ @@ -590,14 +602,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, if (xmit_type & XMIT_ENC) { first_bd->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; - } else { - /* In cases when OS doesn't indicate for inner offloads - * when packet is tunnelled, we need to override the HW - * tunnel configuration so that packets are treated as - * regular non tunnelled packets and no inner offloads - * are done by the hardware. - */ - first_bd->data.bitfields |= cpu_to_le16(temp); + first_bd->data.bitfields |= + 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT; } /* If the packet is IPv6 with extension header, indicate that @@ -655,6 +661,10 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, tx_data_bd = (struct eth_tx_bd *)third_bd; data_split = true; } + } else { + first_bd->data.bitfields |= + (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << + ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; } /* Handle fragmented skb */ @@ -666,6 +676,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, if (rc) { qede_free_failed_tx_pkt(edev, txq, first_bd, nbd, data_split); + qede_update_tx_producer(txq); return NETDEV_TX_OK; } @@ -690,6 +701,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, if (rc) { qede_free_failed_tx_pkt(edev, txq, first_bd, nbd, data_split); + qede_update_tx_producer(txq); return NETDEV_TX_OK; } } @@ -710,23 +722,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, txq->tx_db.data.bd_prod = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); - /* wmb makes sure that the BDs data is updated before updating the - * producer, otherwise FW may read old data from the BDs. - */ - wmb(); - barrier(); - writel(txq->tx_db.raw, txq->doorbell_addr); - - /* mmiowb is needed to synchronize doorbell writes from more than one - * processor. It guarantees that the write arrives to the device before - * the queue lock is released and another start_xmit is called (possibly - * on another CPU). Without this barrier, the next doorbell can bypass - * this doorbell. This is applicable to IA64/Altix systems. 
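
The helper factored out above lets every early-exit error path in qede_start_xmit ring the doorbell for descriptors that were already posted before the failure. A minimal sketch of the same barrier-then-doorbell idiom, with hypothetical demo_* names standing in for the qede structures:

#include <linux/io.h>
#include <linux/types.h>

struct demo_txq {
	u16 prod;		/* software producer index */
	void __iomem *doorbell;	/* device doorbell register */
};

/* Sketch only: publish all posted descriptors to the device. */
static void demo_update_tx_producer(struct demo_txq *txq)
{
	/* Order descriptor writes before the doorbell so the device
	 * never fetches stale BD contents.
	 */
	wmb();
	writel(txq->prod, txq->doorbell);

	/* Serialize doorbells issued from different CPUs once the
	 * queue lock is dropped (the IA64/Altix case cited above).
	 */
	mmiowb();
}

Calling one helper from every return path keeps the producer index the device sees consistent with what was actually posted.
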
- */ - mmiowb(); + if (!skb->xmit_more || netif_xmit_stopped(netdev_txq)) + qede_update_tx_producer(txq); if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1))) { + if (skb->xmit_more) + qede_update_tx_producer(txq); + netif_tx_stop_queue(netdev_txq); DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED, "Stop queue was called\n"); @@ -1357,6 +1360,20 @@ static u8 qede_check_csum(u16 flag) return qede_check_tunn_csum(flag); } +static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe, + u16 flag) +{ + u8 tun_pars_flg = cqe->tunnel_pars_flags.flags; + + if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK << + ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) || + (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK << + PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT))) + return true; + + return false; +} + static int qede_rx_int(struct qede_fastpath *fp, int budget) { struct qede_dev *edev = fp->edev; @@ -1435,6 +1452,12 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) csum_flag = qede_check_csum(parse_flag); if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { + if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular, + parse_flag)) { + rxq->rx_ip_frags++; + goto alloc_skb; + } + DP_NOTICE(edev, "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n", sw_comp_cons, parse_flag); @@ -1443,6 +1466,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) goto next_cqe; } +alloc_skb: skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); if (unlikely(!skb)) { DP_NOTICE(edev, @@ -1453,7 +1477,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) } /* Copy data into SKB */ - if (len + pad <= QEDE_RX_HDR_SIZE) { + if (len + pad <= edev->rx_copybreak) { memcpy(skb_put(skb, len), page_address(data) + pad + sw_rx_data->page_offset, len); @@ -1585,56 +1609,49 @@ next_cqe: /* don't consume bd rx buffer */ static int qede_poll(struct napi_struct *napi, int budget) { - int work_done = 0; struct qede_fastpath *fp = container_of(napi, struct qede_fastpath, - napi); + napi); struct qede_dev *edev = fp->edev; + int rx_work_done = 0; + u8 tc; - while (1) { - u8 tc; - - for (tc = 0; tc < edev->num_tc; tc++) - if (qede_txq_has_work(&fp->txqs[tc])) - qede_tx_int(edev, &fp->txqs[tc]); - - if (qede_has_rx_work(fp->rxq)) { - work_done += qede_rx_int(fp, budget - work_done); - - /* must not complete if we consumed full budget */ - if (work_done >= budget) - break; - } + for (tc = 0; tc < edev->num_tc; tc++) + if (qede_txq_has_work(&fp->txqs[tc])) + qede_tx_int(edev, &fp->txqs[tc]); + + rx_work_done = qede_has_rx_work(fp->rxq) ? + qede_rx_int(fp, budget) : 0; + if (rx_work_done < budget) { + qed_sb_update_sb_idx(fp->sb_info); + /* *_has_*_work() reads the status block, + * thus we need to ensure that status block indices + * have been actually read (qed_sb_update_sb_idx) + * prior to this check (*_has_*_work) so that + * we won't write the "newer" value of the status block + * to HW (if there was a DMA right after + * qede_has_rx_work and if there is no rmb, the memory + * reading (qed_sb_update_sb_idx) may be postponed + * to right before *_ack_sb). In this case there + * will never be another interrupt until there is + * another update of the status block, while there + * is still unhandled work. 
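
The rewritten qede_poll() above collapses the open-coded loop into a single pass; the ordering described in the surrounding comment boils down to the pattern below, a condensed sketch in which the demo_* types and helpers are hypothetical stand-ins for the qede status-block machinery:

#include <linux/netdevice.h>

struct demo_fp { struct napi_struct napi; };

static int demo_rx(struct demo_fp *fp, int budget);
static bool demo_has_work(struct demo_fp *fp);
static void demo_update_sb_idx(struct demo_fp *fp);
static void demo_ack_sb(struct demo_fp *fp);

static int demo_poll(struct napi_struct *napi, int budget)
{
	struct demo_fp *fp = container_of(napi, struct demo_fp, napi);
	int work_done = demo_rx(fp, budget);

	if (work_done < budget) {
		demo_update_sb_idx(fp);	/* snapshot status block index */

		/* Order the snapshot before the re-check so a completion
		 * landing in between keeps us polling instead of acking
		 * a stale index and losing the interrupt.
		 */
		rmb();

		if (!demo_has_work(fp)) {
			napi_complete(napi);
			demo_ack_sb(fp);	/* update + re-enable IRQ */
		} else {
			work_done = budget;	/* stay on the poll list */
		}
	}

	return work_done;
}
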
+ */ + rmb(); /* Fall out from the NAPI loop if needed */ - if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) { - qed_sb_update_sb_idx(fp->sb_info); - /* *_has_*_work() reads the status block, - * thus we need to ensure that status block indices - * have been actually read (qed_sb_update_sb_idx) - * prior to this check (*_has_*_work) so that - * we won't write the "newer" value of the status block - * to HW (if there was a DMA right after - * qede_has_rx_work and if there is no rmb, the memory - * reading (qed_sb_update_sb_idx) may be postponed - * to right before *_ack_sb). In this case there - * will never be another interrupt until there is - * another update of the status block, while there - * is still unhandled work. - */ - rmb(); - - if (!(qede_has_rx_work(fp->rxq) || - qede_has_tx_work(fp))) { - napi_complete(napi); - /* Update and reenable interrupts */ - qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, - 1 /*update*/); - break; - } + if (!(qede_has_rx_work(fp->rxq) || + qede_has_tx_work(fp))) { + napi_complete(napi); + + /* Update and reenable interrupts */ + qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, + 1 /*update*/); + } else { + rx_work_done = budget; } } - return work_done; + return rx_work_done; } static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie) @@ -2050,10 +2067,13 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) } /* Remove vlan */ - rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid); - if (rc) { - DP_ERR(edev, "Failed to remove VLAN %d\n", vid); - return -EINVAL; + if (vlan->configured) { + rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, + vid); + if (rc) { + DP_ERR(edev, "Failed to remove VLAN %d\n", vid); + return -EINVAL; + } } qede_del_vlan_from_list(edev, vlan); @@ -2116,75 +2136,75 @@ int qede_set_features(struct net_device *dev, netdev_features_t features) return 0; } -#ifdef CONFIG_QEDE_VXLAN -static void qede_add_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) +static void qede_udp_tunnel_add(struct net_device *dev, + struct udp_tunnel_info *ti) { struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(port); + u16 t_port = ntohs(ti->port); - if (edev->vxlan_dst_port) - return; + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (edev->vxlan_dst_port) + return; - edev->vxlan_dst_port = t_port; + edev->vxlan_dst_port = t_port; - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port); + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", + t_port); - set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); - schedule_delayed_work(&edev->sp_task, 0); -} + set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (edev->geneve_dst_port) + return; -static void qede_del_vxlan_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) -{ - struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(port); + edev->geneve_dst_port = t_port; - if (t_port != edev->vxlan_dst_port) + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", + t_port); + set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); + break; + default: return; + } - edev->vxlan_dst_port = 0; - - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port); - - set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, 0); } -#endif -#ifdef CONFIG_QEDE_GENEVE -static void qede_add_geneve_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) +static void qede_udp_tunnel_del(struct net_device 
*dev, + struct udp_tunnel_info *ti) { struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(port); + u16 t_port = ntohs(ti->port); - if (edev->geneve_dst_port) - return; + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + if (t_port != edev->vxlan_dst_port) + return; - edev->geneve_dst_port = t_port; + edev->vxlan_dst_port = 0; - DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port); - set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); - schedule_delayed_work(&edev->sp_task, 0); -} + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", + t_port); -static void qede_del_geneve_port(struct net_device *dev, - sa_family_t sa_family, __be16 port) -{ - struct qede_dev *edev = netdev_priv(dev); - u16 t_port = ntohs(port); + set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); + break; + case UDP_TUNNEL_TYPE_GENEVE: + if (t_port != edev->geneve_dst_port) + return; - if (t_port != edev->geneve_dst_port) - return; + edev->geneve_dst_port = 0; - edev->geneve_dst_port = 0; + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", + t_port); + set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); + break; + default: + return; + } - DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port); - set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, 0); } -#endif static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, @@ -2208,14 +2228,8 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_get_vf_config = qede_get_vf_config, .ndo_set_vf_rate = qede_set_vf_rate, #endif -#ifdef CONFIG_QEDE_VXLAN - .ndo_add_vxlan_port = qede_add_vxlan_port, - .ndo_del_vxlan_port = qede_del_vxlan_port, -#endif -#ifdef CONFIG_QEDE_GENEVE - .ndo_add_geneve_port = qede_add_geneve_port, - .ndo_del_geneve_port = qede_del_geneve_port, -#endif + .ndo_udp_tunnel_add = qede_udp_tunnel_add, + .ndo_udp_tunnel_del = qede_udp_tunnel_del, }; /* ------------------------------------------------------------------------- @@ -2505,8 +2519,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, edev->ops->register_ops(cdev, &qede_ll_ops, edev); +#ifdef CONFIG_DCB + if (!IS_VF(edev)) + qede_set_dcbnl_ops(edev->ndev); +#endif + INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task); mutex_init(&edev->qede_lock); + edev->rx_copybreak = QEDE_RX_HDR_SIZE; DP_INFO(edev, "Ending successfully qede probe\n"); @@ -2823,6 +2843,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, rc = edev->ops->common->chain_alloc(edev->cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_NEXT_PTR, + QED_CHAIN_CNT_TYPE_U16, RX_RING_SIZE, sizeof(struct eth_rx_bd), &rxq->rx_bd_ring); @@ -2834,6 +2855,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, rc = edev->ops->common->chain_alloc(edev->cdev, QED_CHAIN_USE_TO_CONSUME, QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, RX_RING_SIZE, sizeof(union eth_rx_cqe), &rxq->rx_comp_ring); @@ -2885,9 +2907,9 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, rc = edev->ops->common->chain_alloc(edev->cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_PBL, + QED_CHAIN_CNT_TYPE_U16, NUM_TX_BDS_MAX, - sizeof(*p_virt), - &txq->tx_pbl); + sizeof(*p_virt), &txq->tx_pbl); if (rc) goto err; @@ -3253,6 +3275,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats) start.vport_id = 0; start.drop_ttl0 = true; start.remove_inner_vlan = vlan_removal_en; + start.clear_stats = clear_stats; rc = edev->ops->vport_start(cdev, &start); @@ -3578,12 +3601,8 @@ static int qede_open(struct 
net_device *ndev) if (rc) return rc; -#ifdef CONFIG_QEDE_VXLAN - vxlan_get_rx_port(ndev); -#endif -#ifdef CONFIG_QEDE_GENEVE - geneve_get_rx_port(ndev); -#endif + udp_tunnel_get_rx_info(ndev); + return 0; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 94128abf2..18ebcc710 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -37,8 +37,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 3 -#define _QLCNIC_LINUX_SUBVERSION 64 -#define QLCNIC_LINUX_VERSIONID "5.3.64" +#define _QLCNIC_LINUX_SUBVERSION 65 +#define QLCNIC_LINUX_VERSIONID "5.3.65" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) @@ -1026,10 +1026,8 @@ struct qlcnic_ipaddr { #define QLCNIC_HAS_PHYS_PORT_ID 0x40000 #define QLCNIC_TSS_RSS 0x80000 -#ifdef CONFIG_QLCNIC_VXLAN #define QLCNIC_ADD_VXLAN_PORT 0x100000 #define QLCNIC_DEL_VXLAN_PORT 0x200000 -#endif #define QLCNIC_VLAN_FILTERING 0x800000 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index f9640d5ce..bdbcd2b08 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -2159,7 +2159,6 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac, struct qlcnic_cmd_args cmd; u32 mac_low, mac_high; - function = 0; err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS); if (err) return err; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 13dc6646a..74105c6d0 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -1020,7 +1020,6 @@ static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter, return 0; } -#ifdef CONFIG_QLCNIC_VXLAN #define QLC_83XX_ENCAP_TYPE_VXLAN BIT_1 #define QLC_83XX_MATCH_ENCAP_ID BIT_2 #define QLC_83XX_SET_VXLAN_UDP_DPORT BIT_3 @@ -1089,14 +1088,12 @@ static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter, return ret; } -#endif static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter) { if (adapter->fhash.fnum) qlcnic_prune_lb_filters(adapter); -#ifdef CONFIG_QLCNIC_VXLAN if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) { if (qlcnic_set_vxlan_port(adapter)) return; @@ -1112,7 +1109,6 @@ static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter) adapter->ahw->vxlan_port = 0; adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT; } -#endif } /** diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h index 9777e5713..f4aa6331b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h @@ -45,7 +45,6 @@ struct qlcnic_dcb { static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb) { kfree(dcb); - dcb = NULL; } static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 87c642d3b..fedd73667 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -102,7 +102,6 @@ #define QLCNIC_RESPONSE_DESC 0x05 #define QLCNIC_LRO_DESC 0x12 -#define QLCNIC_TX_POLL_BUDGET 128 #define QLCNIC_TCP_HDR_SIZE 
20 #define QLCNIC_TCP_TS_OPTION_SIZE 12 #define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63) @@ -2008,7 +2007,6 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget) struct qlcnic_host_tx_ring *tx_ring; struct qlcnic_adapter *adapter; - budget = QLCNIC_TX_POLL_BUDGET; tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi); adapter = tx_ring->adapter; work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 9fedc8f62..6d03721a6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -16,9 +16,7 @@ #include <linux/aer.h> #include <linux/log2.h> #include <linux/pci.h> -#ifdef CONFIG_QLCNIC_VXLAN #include <net/vxlan.h> -#endif #include "qlcnic.h" #include "qlcnic_sriov.h" @@ -474,13 +472,15 @@ static int qlcnic_get_phys_port_id(struct net_device *netdev, return 0; } -#ifdef CONFIG_QLCNIC_VXLAN static void qlcnic_add_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_hardware_context *ahw = adapter->ahw; + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + /* Adapter supports only one VXLAN port. Use very first port * for enabling offload */ @@ -488,23 +488,26 @@ static void qlcnic_add_vxlan_port(struct net_device *netdev, return; if (!ahw->vxlan_port_count) { ahw->vxlan_port_count = 1; - ahw->vxlan_port = ntohs(port); + ahw->vxlan_port = ntohs(ti->port); adapter->flags |= QLCNIC_ADD_VXLAN_PORT; return; } - if (ahw->vxlan_port == ntohs(port)) + if (ahw->vxlan_port == ntohs(ti->port)) ahw->vxlan_port_count++; } static void qlcnic_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) + struct udp_tunnel_info *ti) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_hardware_context *ahw = adapter->ahw; + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count || - (ahw->vxlan_port != ntohs(port))) + (ahw->vxlan_port != ntohs(ti->port))) return; ahw->vxlan_port_count--; @@ -519,7 +522,6 @@ static netdev_features_t qlcnic_features_check(struct sk_buff *skb, features = vlan_features_check(skb, features); return vxlan_features_check(skb, features); } -#endif static const struct net_device_ops qlcnic_netdev_ops = { .ndo_open = qlcnic_open, @@ -539,11 +541,9 @@ static const struct net_device_ops qlcnic_netdev_ops = { .ndo_fdb_del = qlcnic_fdb_del, .ndo_fdb_dump = qlcnic_fdb_dump, .ndo_get_phys_port_id = qlcnic_get_phys_port_id, -#ifdef CONFIG_QLCNIC_VXLAN - .ndo_add_vxlan_port = qlcnic_add_vxlan_port, - .ndo_del_vxlan_port = qlcnic_del_vxlan_port, + .ndo_udp_tunnel_add = qlcnic_add_vxlan_port, + .ndo_udp_tunnel_del = qlcnic_del_vxlan_port, .ndo_features_check = qlcnic_features_check, -#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = qlcnic_poll_controller, #endif @@ -2015,10 +2015,8 @@ qlcnic_attach(struct qlcnic_adapter *adapter) qlcnic_create_sysfs_entries(adapter); -#ifdef CONFIG_QLCNIC_VXLAN if (qlcnic_encap_rx_offload(adapter)) - vxlan_get_rx_port(netdev); -#endif + udp_tunnel_get_rx_info(netdev); adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC; return 0; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h index 017d8c2c8..24061b9b9 100644 --- 
a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h @@ -156,10 +156,8 @@ struct qlcnic_vf_info { spinlock_t vlan_list_lock; /* Lock for VLAN list */ }; -struct qlcnic_async_work_list { +struct qlcnic_async_cmd { struct list_head list; - struct work_struct work; - void *ptr; struct qlcnic_cmd_args *cmd; }; @@ -168,7 +166,10 @@ struct qlcnic_back_channel { struct workqueue_struct *bc_trans_wq; struct workqueue_struct *bc_async_wq; struct workqueue_struct *bc_flr_wq; - struct list_head async_list; + struct qlcnic_adapter *adapter; + struct list_head async_cmd_list; + struct work_struct vf_async_work; + spinlock_t queue_lock; /* async_cmd_list queue lock */ }; struct qlcnic_sriov { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 7327b729b..d7107055e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -29,6 +29,7 @@ #define QLC_83XX_VF_RESET_FAIL_THRESH 8 #define QLC_BC_CMD_MAX_RETRY_CNT 5 +static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work); static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *); static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32); static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *); @@ -177,7 +178,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) } bc->bc_async_wq = wq; - INIT_LIST_HEAD(&bc->async_list); + INIT_LIST_HEAD(&bc->async_cmd_list); + INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd); + spin_lock_init(&bc->queue_lock); + bc->adapter = adapter; for (i = 0; i < num_vfs; i++) { vf = &sriov->vf_info[i]; @@ -1517,17 +1521,21 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac, void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc) { - struct list_head *head = &bc->async_list; - struct qlcnic_async_work_list *entry; + struct list_head *head = &bc->async_cmd_list; + struct qlcnic_async_cmd *entry; flush_workqueue(bc->bc_async_wq); + cancel_work_sync(&bc->vf_async_work); + + spin_lock(&bc->queue_lock); while (!list_empty(head)) { - entry = list_entry(head->next, struct qlcnic_async_work_list, + entry = list_entry(head->next, struct qlcnic_async_cmd, list); - cancel_work_sync(&entry->work); list_del(&entry->list); + kfree(entry->cmd); kfree(entry); } + spin_unlock(&bc->queue_lock); } void qlcnic_sriov_vf_set_multi(struct net_device *netdev) @@ -1587,57 +1595,64 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev) static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work) { - struct qlcnic_async_work_list *entry; - struct qlcnic_adapter *adapter; + struct qlcnic_async_cmd *entry, *tmp; + struct qlcnic_back_channel *bc; struct qlcnic_cmd_args *cmd; + struct list_head *head; + LIST_HEAD(del_list); + + bc = container_of(work, struct qlcnic_back_channel, vf_async_work); + head = &bc->async_cmd_list; + + spin_lock(&bc->queue_lock); + list_splice_init(head, &del_list); + spin_unlock(&bc->queue_lock); + + list_for_each_entry_safe(entry, tmp, &del_list, list) { + list_del(&entry->list); + cmd = entry->cmd; + __qlcnic_sriov_issue_cmd(bc->adapter, cmd); + kfree(entry); + } + + if (!list_empty(head)) + queue_work(bc->bc_async_wq, &bc->vf_async_work); - entry = container_of(work, struct qlcnic_async_work_list, work); - adapter = entry->ptr; - cmd = entry->cmd; - __qlcnic_sriov_issue_cmd(adapter, 
cmd); return; } -static struct qlcnic_async_work_list * -qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc) +static struct qlcnic_async_cmd * +qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc, + struct qlcnic_cmd_args *cmd) { - struct list_head *node; - struct qlcnic_async_work_list *entry = NULL; - u8 empty = 0; + struct qlcnic_async_cmd *entry = NULL; - list_for_each(node, &bc->async_list) { - entry = list_entry(node, struct qlcnic_async_work_list, list); - if (!work_pending(&entry->work)) { - empty = 1; - break; - } - } + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + return NULL; - if (!empty) { - entry = kzalloc(sizeof(struct qlcnic_async_work_list), - GFP_ATOMIC); - if (entry == NULL) - return NULL; - list_add_tail(&entry->list, &bc->async_list); - } + entry->cmd = cmd; + + spin_lock(&bc->queue_lock); + list_add_tail(&entry->list, &bc->async_cmd_list); + spin_unlock(&bc->queue_lock); return entry; } static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc, - work_func_t func, void *data, struct qlcnic_cmd_args *cmd) { - struct qlcnic_async_work_list *entry = NULL; + struct qlcnic_async_cmd *entry = NULL; - entry = qlcnic_sriov_get_free_node_async_work(bc); - if (!entry) + entry = qlcnic_sriov_alloc_async_cmd(bc, cmd); + if (!entry) { + qlcnic_free_mbx_args(cmd); + kfree(cmd); return; + } - entry->ptr = data; - entry->cmd = cmd; - INIT_WORK(&entry->work, func); - queue_work(bc->bc_async_wq, &entry->work); + queue_work(bc->bc_async_wq, &bc->vf_async_work); } static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, @@ -1649,8 +1664,8 @@ static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter, if (adapter->need_fw_reset) return -EIO; - qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd, - adapter, cmd); + qlcnic_sriov_schedule_async_cmd(bc, cmd); + return 0; } diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index fd5d1c93b..fd4a8e473 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1892,7 +1892,6 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, skb->len += length; skb->data_len += length; skb->truesize += length; - length -= length; ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va, &hlen); diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 6b541e57c..cb29ee24c 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -4,7 +4,7 @@ * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw> * Copyright (C) 2007 * Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us> - * Copyright (C) 2007-2012 Florian Fainelli <florian@openwrt.org> + * Copyright (C) 2007-2012 Florian Fainelli <f.fainelli@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -48,8 +48,8 @@ #include <asm/processor.h> #define DRV_NAME "r6040" -#define DRV_VERSION "0.28" -#define DRV_RELDATE "07Oct2011" +#define DRV_VERSION "0.29" +#define DRV_RELDATE "04Jul2016" /* Time in jiffies before concluding the transmitter is hung. 
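
The qlcnic refactor above replaces one work item per command with a single worker draining a list. A minimal, self-contained sketch of that producer/consumer shape (all demo_* names are hypothetical): producers append under a spinlock and kick the one work item; the worker splices the pending list out and drains it without holding the lock.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_cmd {
	struct list_head list;
	int opcode;
};

static LIST_HEAD(demo_cmd_list);
static DEFINE_SPINLOCK(demo_queue_lock);
static struct work_struct demo_async_work;

static void demo_async_worker(struct work_struct *work)
{
	struct demo_cmd *cmd, *tmp;
	LIST_HEAD(del_list);

	/* Take the whole backlog in one short critical section. */
	spin_lock(&demo_queue_lock);
	list_splice_init(&demo_cmd_list, &del_list);
	spin_unlock(&demo_queue_lock);

	list_for_each_entry_safe(cmd, tmp, &del_list, list) {
		list_del(&cmd->list);
		/* issue cmd->opcode to the device here */
		kfree(cmd);
	}
}

static void demo_async_init(void)
{
	INIT_WORK(&demo_async_work, demo_async_worker);
}

static int demo_queue_cmd(int opcode)
{
	struct demo_cmd *cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);

	if (!cmd)
		return -ENOMEM;
	cmd->opcode = opcode;

	spin_lock(&demo_queue_lock);
	list_add_tail(&cmd->list, &demo_cmd_list);
	spin_unlock(&demo_queue_lock);

	schedule_work(&demo_async_work);
	return 0;
}

Compared with per-command work items, the single worker needs no search for a free entry and cleanup reduces to one cancel_work_sync() plus freeing whatever is left on the list.
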
*/ #define TX_TIMEOUT (6000 * HZ / 1000) @@ -162,7 +162,7 @@ MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>," "Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>," - "Florian Fainelli <florian@openwrt.org>"); + "Florian Fainelli <f.fainelli@gmail.com>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver"); MODULE_VERSION(DRV_VERSION " " DRV_RELDATE); @@ -200,7 +200,6 @@ struct r6040_private { struct mii_bus *mii_bus; struct napi_struct napi; void __iomem *base; - struct phy_device *phydev; int old_link; int old_duplex; }; @@ -474,7 +473,7 @@ static void r6040_down(struct net_device *dev) iowrite16(adrp[1], ioaddr + MID_0M); iowrite16(adrp[2], ioaddr + MID_0H); - phy_stop(lp->phydev); + phy_stop(dev->phydev); } static int r6040_close(struct net_device *dev) @@ -515,12 +514,10 @@ static int r6040_close(struct net_device *dev) static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct r6040_private *lp = netdev_priv(dev); - - if (!lp->phydev) + if (!dev->phydev) return -EINVAL; - return phy_mii_ioctl(lp->phydev, rq, cmd); + return phy_mii_ioctl(dev->phydev, rq, cmd); } static int r6040_rx(struct net_device *dev, int limit) @@ -617,10 +614,15 @@ static void r6040_tx(struct net_device *dev) if (descptr->status & DSC_OWNER_MAC) break; /* Not complete */ skb_ptr = descptr->skb_ptr; + + /* Statistic Counter */ + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb_ptr->len; + pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf), skb_ptr->len, PCI_DMA_TODEVICE); /* Free buffer */ - dev_kfree_skb_irq(skb_ptr); + dev_kfree_skb(skb_ptr); descptr->skb_ptr = NULL; /* To next descriptor */ descptr = descptr->vndescp; @@ -641,12 +643,15 @@ static int r6040_poll(struct napi_struct *napi, int budget) void __iomem *ioaddr = priv->base; int work_done; + r6040_tx(dev); + work_done = r6040_rx(dev, budget); if (work_done < budget) { - napi_complete(napi); - /* Enable RX interrupt */ - iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER); + napi_complete_done(napi, work_done); + /* Enable RX/TX interrupt */ + iowrite16(ioread16(ioaddr + MIER) | RX_INTS | TX_INTS, + ioaddr + MIER); } return work_done; } @@ -673,7 +678,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id) } /* RX interrupt request */ - if (status & RX_INTS) { + if (status & (RX_INTS | TX_INTS)) { if (status & RX_NO_DESC) { /* RX descriptor unavailable */ dev->stats.rx_dropped++; @@ -684,15 +689,11 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id) if (likely(napi_schedule_prep(&lp->napi))) { /* Mask off RX interrupt */ - misr &= ~RX_INTS; - __napi_schedule(&lp->napi); + misr &= ~(RX_INTS | TX_INTS); + __napi_schedule_irqoff(&lp->napi); } } - /* TX interrupt request */ - if (status & TX_INTS) - r6040_tx(dev); - /* Restore RDC MAC interrupt */ iowrite16(misr, ioaddr + MIER); @@ -732,7 +733,7 @@ static int r6040_up(struct net_device *dev) /* Initialize all MAC registers */ r6040_init_mac_regs(dev); - phy_start(lp->phydev); + phy_start(dev->phydev); return 0; } @@ -813,6 +814,9 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb, void __iomem *ioaddr = lp->base; unsigned long flags; + if (skb_put_padto(skb, ETH_ZLEN) < 0) + return NETDEV_TX_OK; + /* Critical Section */ spin_lock_irqsave(&lp->lock, flags); @@ -824,17 +828,10 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb, return NETDEV_TX_BUSY; } - /* Statistic Counter */ - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; /* Set TX descriptor & Transmit it */ lp->tx_free_desc--; 
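
Above, r6040 stops inflating descptr->len past the real buffer and instead pads runt frames in software before mapping. The pattern, sketched with a hypothetical demo_map_and_post() in place of the driver's descriptor setup:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t demo_map_and_post(struct sk_buff *skb,
				     struct net_device *dev);

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Pad with zeroes up to ETH_ZLEN (60 bytes); on failure the
	 * skb has already been freed, so just report it handled.
	 */
	if (skb_put_padto(skb, ETH_ZLEN) < 0)
		return NETDEV_TX_OK;

	/* skb->len is now the on-wire length, so the DMA mapping and
	 * the descriptor length field can both use it directly.
	 */
	return demo_map_and_post(skb, dev);
}
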
descptr = lp->tx_insert_ptr; - if (skb->len < ETH_ZLEN) - descptr->len = ETH_ZLEN; - else - descptr->len = skb->len; - + descptr->len = skb->len; descptr->skb_ptr = skb; descptr->buf = cpu_to_le32(pci_map_single(lp->pdev, skb->data, skb->len, PCI_DMA_TODEVICE)); @@ -843,7 +840,8 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb, skb_tx_timestamp(skb); /* Trigger the MAC to check the TX descriptor */ - iowrite16(TM2TX, ioaddr + MTPR); + if (!skb->xmit_more || netif_queue_stopped(dev)) + iowrite16(TM2TX, ioaddr + MTPR); lp->tx_insert_ptr = descptr->vndescp; /* If no tx resource, stop */ @@ -957,26 +955,12 @@ static void netdev_get_drvinfo(struct net_device *dev, strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info)); } -static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct r6040_private *rp = netdev_priv(dev); - - return phy_ethtool_gset(rp->phydev, cmd); -} - -static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct r6040_private *rp = netdev_priv(dev); - - return phy_ethtool_sset(rp->phydev, cmd); -} - static const struct ethtool_ops netdev_ethtool_ops = { .get_drvinfo = netdev_get_drvinfo, - .get_settings = netdev_get_settings, - .set_settings = netdev_set_settings, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct net_device_ops r6040_netdev_ops = { @@ -998,7 +982,7 @@ static const struct net_device_ops r6040_netdev_ops = { static void r6040_adjust_link(struct net_device *dev) { struct r6040_private *lp = netdev_priv(dev); - struct phy_device *phydev = lp->phydev; + struct phy_device *phydev = dev->phydev; int status_changed = 0; void __iomem *ioaddr = lp->base; @@ -1018,14 +1002,8 @@ static void r6040_adjust_link(struct net_device *dev) lp->old_duplex = phydev->duplex; } - if (status_changed) { - pr_info("%s: link %s", dev->name, phydev->link ? - "UP" : "DOWN"); - if (phydev->link) - pr_cont(" - %d/%s", phydev->speed, - DUPLEX_FULL == phydev->duplex ? 
"full" : "half"); - pr_cont("\n"); - } + if (status_changed) + phy_print_status(phydev); } static int r6040_mii_probe(struct net_device *dev) @@ -1057,7 +1035,6 @@ static int r6040_mii_probe(struct net_device *dev) | SUPPORTED_TP); phydev->advertising = phydev->supported; - lp->phydev = phydev; lp->old_link = 0; lp->old_duplex = -1; diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index deae10d74..5297bf772 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -467,8 +467,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget) unsigned int rx_tail = cp->rx_tail; int rx; -rx_status_loop: rx = 0; +rx_status_loop: cpw16(IntrStatus, cp_rx_intr_mask); while (rx < budget) { diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index ef668d300..da4c2d8a4 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -1667,6 +1667,10 @@ static void rtl8139_tx_timeout_task (struct work_struct *work) int i; u8 tmp8; + napi_disable(&tp->napi); + netif_stop_queue(dev); + synchronize_sched(); + netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n", RTL_R8(ChipCmd), RTL_R16(IntrStatus), RTL_R16(IntrMask), RTL_R8(MediaStatus)); @@ -1696,10 +1700,10 @@ static void rtl8139_tx_timeout_task (struct work_struct *work) spin_unlock_irq(&tp->lock); /* ...and finally, reset everything */ - if (netif_running(dev)) { - rtl8139_hw_start (dev); - netif_wake_queue (dev); - } + napi_enable(&tp->napi); + rtl8139_hw_start(dev); + netif_wake_queue(dev); + spin_unlock_bh(&tp->rx_lock); } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index e4030c3d7..c071624e7 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1731,13 +1731,21 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp) static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = &tp->pci_dev->dev; + + pm_runtime_get_noresume(d); rtl_lock_work(tp); wol->supported = WAKE_ANY; - wol->wolopts = __rtl8169_get_wol(tp); + if (pm_runtime_active(d)) + wol->wolopts = __rtl8169_get_wol(tp); + else + wol->wolopts = tp->saved_wolopts; rtl_unlock_work(tp); + + pm_runtime_put_noidle(d); } static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) @@ -1827,6 +1835,9 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = &tp->pci_dev->dev; + + pm_runtime_get_noresume(d); rtl_lock_work(tp); @@ -1834,12 +1845,17 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) tp->features |= RTL_FEATURE_WOL; else tp->features &= ~RTL_FEATURE_WOL; - __rtl8169_set_wol(tp, wol->wolopts); + if (pm_runtime_active(d)) + __rtl8169_set_wol(tp, wol->wolopts); + else + tp->saved_wolopts = wol->wolopts; rtl_unlock_work(tp); device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); + pm_runtime_put_noidle(d); + return 0; } @@ -2274,11 +2290,17 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = &tp->pci_dev->dev; struct rtl8169_counters *counters = tp->counters; ASSERT_RTNL(); - rtl8169_update_counters(dev); + 
pm_runtime_get_noresume(d); + + if (pm_runtime_active(d)) + rtl8169_update_counters(dev); + + pm_runtime_put_noidle(d); data[0] = le64_to_cpu(counters->tx_packets); data[1] = le64_to_cpu(counters->rx_packets); @@ -4440,6 +4462,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) static int rtl_set_mac_address(struct net_device *dev, void *p) { struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = &tp->pci_dev->dev; struct sockaddr *addr = p; if (!is_valid_ether_addr(addr->sa_data)) @@ -4447,7 +4470,12 @@ static int rtl_set_mac_address(struct net_device *dev, void *p) memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - rtl_rar_set(tp, dev->dev_addr); + pm_runtime_get_noresume(d); + + if (pm_runtime_active(d)) + rtl_rar_set(tp, dev->dev_addr); + + pm_runtime_put_noidle(d); return 0; } @@ -7850,6 +7878,7 @@ static int rtl8169_runtime_resume(struct device *device) struct pci_dev *pdev = to_pci_dev(device); struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); + rtl_rar_set(tp, dev->dev_addr); if (!tp->TxDescArray) return 0; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 867caf6e7..1e1cc0fad 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -362,8 +362,6 @@ static void ravb_emac_init(struct net_device *ndev) ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); - ravb_write(ndev, 1, MPR); - /* E-MAC status register clear */ ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR); @@ -402,7 +400,8 @@ static int ravb_dmac_init(struct net_device *ndev) #endif /* Set AVB RX */ - ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR); + ravb_write(ndev, + RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR); /* Set FIFO size */ ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC); @@ -1006,6 +1005,7 @@ static int ravb_phy_init(struct net_device *ndev) } phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, priv->phy_interface); + of_node_put(pn); if (!phydev) { netdev_err(ndev, "failed to connect PHY\n"); return -ENOENT; @@ -1909,7 +1909,6 @@ static int ravb_probe(struct platform_device *pdev) /* The Ether-specific entries in the device structure. 
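
The r8169 hunks above all follow one rule: take a runtime-PM reference without resuming, touch registers only if the device is already awake, and otherwise serve the request from a software-cached value. A sketch of that guard, where the demo_* names and the saved_wolopts cache are assumptions rather than the r8169 code:

#include <linux/pm_runtime.h>
#include <linux/types.h>

struct demo_priv {
	u32 saved_wolopts;	/* cached while the device sleeps */
};

static u32 demo_read_wol_registers(struct demo_priv *priv);

static u32 demo_get_wol(struct device *d, struct demo_priv *priv)
{
	u32 wolopts;

	pm_runtime_get_noresume(d);	/* pin state, don't wake the NIC */

	if (pm_runtime_active(d))
		wolopts = demo_read_wol_registers(priv);
	else
		wolopts = priv->saved_wolopts;

	pm_runtime_put_noidle(d);	/* drop the ref, no idle callback */
	return wolopts;
}
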
*/ ndev->base_addr = res->start; - ndev->dma = -1; chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev); @@ -2111,8 +2110,7 @@ static int ravb_runtime_nop(struct device *dev) } static const struct dev_pm_ops ravb_dev_pm_ops = { - .runtime_suspend = ravb_runtime_nop, - .runtime_resume = ravb_runtime_nop, + SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL) }; #define RAVB_PM_OPS (&ravb_dev_pm_ops) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 04cd39f66..054e795df 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -201,9 +201,14 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = { [ARSTR] = 0x0000, [TSU_CTRST] = 0x0004, + [TSU_FWSLC] = 0x0038, [TSU_VTAG0] = 0x0058, [TSU_ADSBSY] = 0x0060, [TSU_TEN] = 0x0064, + [TSU_POST1] = 0x0070, + [TSU_POST2] = 0x0074, + [TSU_POST3] = 0x0078, + [TSU_POST4] = 0x007c, [TSU_ADRH0] = 0x0100, [TXNLCR0] = 0x0080, @@ -1780,6 +1785,7 @@ static int sh_eth_phy_init(struct net_device *ndev) sh_eth_adjust_link, 0, mdp->phy_interface); + of_node_put(pn); if (!phydev) phydev = ERR_PTR(-ENOENT); } else { @@ -2785,6 +2791,8 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp) { if (sh_eth_is_rz_fast_ether(mdp)) { sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ + sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, + TSU_FWSLC); /* Enable POST registers */ return; } @@ -2996,7 +3004,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) if (devno < 0) devno = 0; - ndev->dma = -1; ret = platform_get_irq(pdev, 0); if (ret < 0) goto out_release; diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 28b775e5a..f0b09b05e 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1996,7 +1996,8 @@ static int rocker_port_change_proto_down(struct net_device *dev, return 0; } -static void rocker_port_neigh_destroy(struct neighbour *n) +static void rocker_port_neigh_destroy(struct net_device *dev, + struct neighbour *n) { struct rocker_port *rocker_port = netdev_priv(n->dev); int err; diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h index 45019649b..5cb51b609 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h @@ -475,7 +475,6 @@ struct sxgbe_priv_data { int rxcsum_insertion; spinlock_t stats_lock; /* lock for tx/rx statatics */ - struct phy_device *phydev; int oldlink; int speed; int oldduplex; diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c index c0981ae45..542b67d43 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c @@ -147,7 +147,7 @@ static int sxgbe_get_eee(struct net_device *dev, edata->eee_active = priv->eee_active; edata->tx_lpi_timer = priv->tx_lpi_timer; - return phy_ethtool_get_eee(priv->phydev, edata); + return phy_ethtool_get_eee(dev->phydev, edata); } static int sxgbe_set_eee(struct net_device *dev, @@ -172,7 +172,7 @@ static int sxgbe_set_eee(struct net_device *dev, priv->tx_lpi_timer = edata->tx_lpi_timer; } - return phy_ethtool_set_eee(priv->phydev, edata); + return phy_ethtool_set_eee(dev->phydev, edata); } static void sxgbe_getdrvinfo(struct net_device *dev, @@ -182,27 +182,6 @@ static void sxgbe_getdrvinfo(struct 
net_device *dev, strlcpy(info->version, DRV_VERSION, sizeof(info->version)); } -static int sxgbe_getsettings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct sxgbe_priv_data *priv = netdev_priv(dev); - - if (priv->phydev) - return phy_ethtool_gset(priv->phydev, cmd); - - return -EOPNOTSUPP; -} - -static int sxgbe_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct sxgbe_priv_data *priv = netdev_priv(dev); - - if (priv->phydev) - return phy_ethtool_sset(priv->phydev, cmd); - - return -EOPNOTSUPP; -} - static u32 sxgbe_getmsglevel(struct net_device *dev) { struct sxgbe_priv_data *priv = netdev_priv(dev); @@ -255,7 +234,7 @@ static void sxgbe_get_ethtool_stats(struct net_device *dev, char *p; if (priv->eee_enabled) { - int val = phy_get_eee_err(priv->phydev); + int val = phy_get_eee_err(dev->phydev); if (val) priv->xstats.eee_wakeup_error_n = val; @@ -499,8 +478,6 @@ static int sxgbe_get_regs_len(struct net_device *dev) static const struct ethtool_ops sxgbe_ethtool_ops = { .get_drvinfo = sxgbe_getdrvinfo, - .get_settings = sxgbe_getsettings, - .set_settings = sxgbe_setsettings, .get_msglevel = sxgbe_getmsglevel, .set_msglevel = sxgbe_setmsglevel, .get_link = ethtool_op_get_link, @@ -516,6 +493,8 @@ static const struct ethtool_ops sxgbe_ethtool_ops = { .get_regs_len = sxgbe_get_regs_len, .get_eee = sxgbe_get_eee, .set_eee = sxgbe_set_eee, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; void sxgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 413ea14ab..ea44a2456 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -124,12 +124,13 @@ static void sxgbe_eee_ctrl_timer(unsigned long arg) */ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv) { + struct net_device *ndev = priv->dev; bool ret = false; /* MAC core supports the EEE feature. */ if (priv->hw_cap.eee) { /* Check if the PHY supports EEE */ - if (phy_init_eee(priv->phydev, 1)) + if (phy_init_eee(ndev->phydev, 1)) return false; priv->eee_active = 1; @@ -152,12 +153,14 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv) static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv) { + struct net_device *ndev = priv->dev; + /* When the EEE has been already initialised we have to * modify the PLS bit in the LPI ctrl & status reg according * to the PHY link status. For this reason. 
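
The sxgbe and r6040 conversions above drop the driver-private phydev pointer in favour of the one phylib already stores in net_device, which lets the old get_settings/set_settings pairs collapse into the generic link_ksettings helpers. In sketch form, assuming a driver that attaches its PHY with phy_connect():

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static const struct ethtool_ops demo_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

static int demo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* phy_connect()/of_phy_connect() set dev->phydev for us */
	if (!dev->phydev)
		return -EINVAL;

	return phy_mii_ioctl(dev->phydev, rq, cmd);
}
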
*/ if (priv->eee_enabled) - priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link); + priv->hw->mac->set_eee_pls(priv->ioaddr, ndev->phydev->link); } /** @@ -203,7 +206,7 @@ static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize) static void sxgbe_adjust_link(struct net_device *dev) { struct sxgbe_priv_data *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; u8 new_state = 0; u8 speed = 0xff; @@ -306,9 +309,6 @@ static int sxgbe_init_phy(struct net_device *ndev) netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n", __func__, phydev->phy_id, phydev->link); - /* save phy device in private structure */ - priv->phydev = phydev; - return 0; } @@ -1173,8 +1173,8 @@ static int sxgbe_open(struct net_device *dev) priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES); priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES); - if (priv->phydev) - phy_start(priv->phydev); + if (dev->phydev) + phy_start(dev->phydev); /* initialise TX coalesce parameters */ sxgbe_tx_init_coalesce(priv); @@ -1194,8 +1194,8 @@ static int sxgbe_open(struct net_device *dev) init_error: free_dma_desc_resources(priv); - if (priv->phydev) - phy_disconnect(priv->phydev); + if (dev->phydev) + phy_disconnect(dev->phydev); phy_error: clk_disable_unprepare(priv->sxgbe_clk); @@ -1216,10 +1216,9 @@ static int sxgbe_release(struct net_device *dev) del_timer_sync(&priv->eee_ctrl_timer); /* Stop and disconnect the PHY */ - if (priv->phydev) { - phy_stop(priv->phydev); - phy_disconnect(priv->phydev); - priv->phydev = NULL; + if (dev->phydev) { + phy_stop(dev->phydev); + phy_disconnect(dev->phydev); } netif_tx_stop_all_queues(dev); @@ -1969,7 +1968,6 @@ static void sxgbe_poll_controller(struct net_device *dev) */ static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct sxgbe_priv_data *priv = netdev_priv(dev); int ret = -EOPNOTSUPP; if (!netif_running(dev)) @@ -1979,9 +1977,9 @@ static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: - if (!priv->phydev) + if (!dev->phydev) return -EINVAL; - ret = phy_mii_ioctl(priv->phydev, rq, cmd); + ret = phy_mii_ioctl(dev->phydev, rq, cmd); break; default: break; diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 1f3091274..e00a669e9 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -50,14 +50,34 @@ enum { #define HUNT_FILTER_TBL_ROWS 8192 #define EFX_EF10_FILTER_ID_INVALID 0xffff + +#define EFX_EF10_FILTER_DEV_UC_MAX 32 +#define EFX_EF10_FILTER_DEV_MC_MAX 256 + +/* VLAN list entry */ +struct efx_ef10_vlan { + struct list_head list; + u16 vid; +}; + +/* Per-VLAN filters information */ +struct efx_ef10_filter_vlan { + struct list_head list; + u16 vid; + u16 uc[EFX_EF10_FILTER_DEV_UC_MAX]; + u16 mc[EFX_EF10_FILTER_DEV_MC_MAX]; + u16 ucdef; + u16 bcast; + u16 mcdef; +}; + struct efx_ef10_dev_addr { u8 addr[ETH_ALEN]; - u16 id; }; struct efx_ef10_filter_table { -/* The RX match field masks supported by this fw & hw, in order of priority */ - enum efx_filter_match_flags rx_match_flags[ +/* The MCDI match masks supported by this fw & hw, in order of priority */ + u32 rx_match_mcdi_flags[ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM]; unsigned int rx_match_count; @@ -73,16 +93,16 @@ struct efx_ef10_filter_table { } *entry; wait_queue_head_t waitq; /* Shadow of net_device address lists, guarded by mac_lock */ -#define 
EFX_EF10_FILTER_DEV_UC_MAX 32 -#define EFX_EF10_FILTER_DEV_MC_MAX 256 struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX]; struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX]; int dev_uc_count; int dev_mc_count; -/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */ - u16 ucdef_id; - u16 bcast_id; - u16 mcdef_id; + bool uc_promisc; + bool mc_promisc; +/* Whether in multicast promiscuous mode when last changed */ + bool mc_promisc_last; + bool vlan_filter; + struct list_head vlan_list; }; /* An arbitrary search limit for the software hash table */ @@ -90,6 +110,10 @@ struct efx_ef10_filter_table { static void efx_ef10_rx_free_indir_table(struct efx_nic *efx); static void efx_ef10_filter_table_remove(struct efx_nic *efx); +static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid); +static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx, + struct efx_ef10_filter_vlan *vlan); +static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid); static int efx_ef10_get_warm_boot_count(struct efx_nic *efx) { @@ -275,6 +299,131 @@ static ssize_t efx_ef10_show_primary_flag(struct device *dev, ? 1 : 0); } +static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_ef10_vlan *vlan; + + WARN_ON(!mutex_is_locked(&nic_data->vlan_lock)); + + list_for_each_entry(vlan, &nic_data->vlan_list, list) { + if (vlan->vid == vid) + return vlan; + } + + return NULL; +} + +static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_ef10_vlan *vlan; + int rc; + + mutex_lock(&nic_data->vlan_lock); + + vlan = efx_ef10_find_vlan(efx, vid); + if (vlan) { + /* We add VID 0 on init. 8021q adds it on module init + * for all interfaces with VLAN filtering feature. + */ + if (vid == 0) + goto done_unlock; + netif_warn(efx, drv, efx->net_dev, + "VLAN %u already added\n", vid); + rc = -EALREADY; + goto fail_exist; + } + + rc = -ENOMEM; + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) + goto fail_alloc; + + vlan->vid = vid; + + list_add_tail(&vlan->list, &nic_data->vlan_list); + + if (efx->filter_state) { + mutex_lock(&efx->mac_lock); + down_write(&efx->filter_sem); + rc = efx_ef10_filter_add_vlan(efx, vlan->vid); + up_write(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); + if (rc) + goto fail_filter_add_vlan; + } + +done_unlock: + mutex_unlock(&nic_data->vlan_lock); + return 0; + +fail_filter_add_vlan: + list_del(&vlan->list); + kfree(vlan); +fail_alloc: +fail_exist: + mutex_unlock(&nic_data->vlan_lock); + return rc; +} + +static void efx_ef10_del_vlan_internal(struct efx_nic *efx, + struct efx_ef10_vlan *vlan) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + + WARN_ON(!mutex_is_locked(&nic_data->vlan_lock)); + + if (efx->filter_state) { + down_write(&efx->filter_sem); + efx_ef10_filter_del_vlan(efx, vlan->vid); + up_write(&efx->filter_sem); + } + + list_del(&vlan->list); + kfree(vlan); +} + +static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_ef10_vlan *vlan; + int rc = 0; + + /* 8021q removes VID 0 on module unload for all interfaces + * with VLAN filtering feature. We need to keep it to receive + * untagged traffic. 
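
Helpers like efx_ef10_add_vlan()/efx_ef10_del_vlan() above are normally reached from the 8021q entry points. That wiring is not part of this hunk, so the sketch below is an assumption about the usual shape, not the sfc code; the demo_* names are hypothetical.

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

struct demo_nic;	/* driver-private state, hypothetical */

static int demo_add_vlan(struct demo_nic *nic, u16 vid);
static int demo_del_vlan(struct demo_nic *nic, u16 vid);

static int demo_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct demo_nic *nic = netdev_priv(dev);

	if (proto != htons(ETH_P_8021Q))
		return -EINVAL;

	return demo_add_vlan(nic, vid);		/* cf. efx_ef10_add_vlan() */
}

static int demo_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct demo_nic *nic = netdev_priv(dev);

	if (proto != htons(ETH_P_8021Q))
		return -EINVAL;

	return demo_del_vlan(nic, vid);		/* cf. efx_ef10_del_vlan() */
}

/* hooked up via .ndo_vlan_rx_add_vid / .ndo_vlan_rx_kill_vid */
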
+ */ + if (vid == 0) + return 0; + + mutex_lock(&nic_data->vlan_lock); + + vlan = efx_ef10_find_vlan(efx, vid); + if (!vlan) { + netif_err(efx, drv, efx->net_dev, + "VLAN %u to be deleted not found\n", vid); + rc = -ENOENT; + } else { + efx_ef10_del_vlan_internal(efx, vlan); + } + + mutex_unlock(&nic_data->vlan_lock); + + return rc; +} + +static void efx_ef10_cleanup_vlans(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct efx_ef10_vlan *vlan, *next_vlan; + + mutex_lock(&nic_data->vlan_lock); + list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list) + efx_ef10_del_vlan_internal(efx, vlan); + mutex_unlock(&nic_data->vlan_lock); +} + static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag, NULL); static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL); @@ -421,8 +570,30 @@ static int efx_ef10_probe(struct efx_nic *efx) #endif ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr); + INIT_LIST_HEAD(&nic_data->vlan_list); + mutex_init(&nic_data->vlan_lock); + + /* Add unspecified VID to support VLAN filtering being disabled */ + rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC); + if (rc) + goto fail_add_vid_unspec; + + /* If VLAN filtering is enabled, we need VID 0 to get untagged + * traffic. It is added automatically if 8021q module is loaded, + * but we can't rely on it since module may be not loaded. + */ + rc = efx_ef10_add_vlan(efx, 0); + if (rc) + goto fail_add_vid_0; + return 0; +fail_add_vid_0: + efx_ef10_cleanup_vlans(efx); +fail_add_vid_unspec: + mutex_destroy(&nic_data->vlan_lock); + efx_ptp_remove(efx); + efx_mcdi_mon_remove(efx); fail5: device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag); fail4: @@ -676,6 +847,9 @@ static void efx_ef10_remove(struct efx_nic *efx) } #endif + efx_ef10_cleanup_vlans(efx); + mutex_destroy(&nic_data->vlan_lock); + efx_ptp_remove(efx); efx_mcdi_mon_remove(efx); @@ -704,6 +878,45 @@ static int efx_ef10_probe_pf(struct efx_nic *efx) return efx_ef10_probe(efx); } +int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id, + u32 *port_flags, u32 *vadaptor_flags, + unsigned int *vlan_tags) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN); + MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN); + size_t outlen; + int rc; + + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) { + MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID, + port_id); + + rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), &outlen); + if (rc) + return rc; + + if (outlen < sizeof(outbuf)) { + rc = -EIO; + return rc; + } + } + + if (port_flags) + *port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS); + if (vadaptor_flags) + *vadaptor_flags = + MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS); + if (vlan_tags) + *vlan_tags = + MCDI_DWORD(outbuf, + VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS); + + return 0; +} + int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id) { MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN); @@ -1304,13 +1517,14 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask) } #if BITS_PER_LONG == 64 + BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2); mask[0] = raw_mask[0]; mask[1] = raw_mask[1]; #else + BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3); mask[0] = raw_mask[0] & 0xffffffff; mask[1] = raw_mask[0] >> 32; mask[2] = raw_mask[1] & 
0xffffffff; - mask[3] = raw_mask[1] >> 32; #endif } @@ -3040,15 +3254,55 @@ static int efx_ef10_filter_push(struct efx_nic *efx, return rc; } -static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table, - enum efx_filter_match_flags match_flags) +static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec) { + unsigned int match_flags = spec->match_flags; + u32 mcdi_flags = 0; + + if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) { + match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG; + mcdi_flags |= + is_multicast_ether_addr(spec->loc_mac) ? + (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) : + (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN); + } + +#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field) { \ + unsigned int old_match_flags = match_flags; \ + match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \ + if (match_flags != old_match_flags) \ + mcdi_flags |= \ + (1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \ + mcdi_field ## _LBN); \ + } + MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP); + MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP); + MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC); + MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT); + MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC); + MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT); + MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE); + MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN); + MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN); + MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO); +#undef MAP_FILTER_TO_MCDI_FLAG + + /* Did we map them all? */ + WARN_ON_ONCE(match_flags); + + return mcdi_flags; +} + +static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table, + const struct efx_filter_spec *spec) +{ + u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec); unsigned int match_pri; for (match_pri = 0; match_pri < table->rx_match_count; match_pri++) - if (table->rx_match_flags[match_pri] == match_flags) + if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags) return match_pri; return -EPROTONOSUPPORT; @@ -3074,7 +3328,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx, EFX_FILTER_FLAG_RX) return -EINVAL; - rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags); + rc = efx_ef10_filter_pri(table, spec); if (rc < 0) return rc; match_pri = rc; @@ -3313,7 +3567,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, spec = efx_ef10_filter_entry_spec(table, filter_idx); if (!spec || (!by_index && - efx_ef10_filter_rx_match_pri(table, spec->match_flags) != + efx_ef10_filter_pri(table, spec) != filter_id / HUNT_FILTER_TBL_ROWS)) { rc = -ENOENT; goto out_unlock; @@ -3394,12 +3648,13 @@ static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id) return filter_id % HUNT_FILTER_TBL_ROWS; } -static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx, - enum efx_filter_priority priority, - u32 filter_id) +static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx, + enum efx_filter_priority priority, + u32 filter_id) { - return efx_ef10_filter_remove_internal(efx, 1U << priority, - filter_id, true); + if (filter_id == EFX_EF10_FILTER_ID_INVALID) + return; + efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, true); } static int efx_ef10_filter_get_safe(struct efx_nic *efx, @@ -3414,7 +3669,7 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx, spin_lock_bh(&efx->filter_lock); saved_spec = efx_ef10_filter_entry_spec(table, filter_idx); if (saved_spec && saved_spec->priority == priority && - efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) == + 
efx_ef10_filter_pri(table, saved_spec) == filter_id / HUNT_FILTER_TBL_ROWS) { *spec = *saved_spec; rc = 0; @@ -3487,8 +3742,7 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, count = -EMSGSIZE; break; } - buf[count++] = (efx_ef10_filter_rx_match_pri( - table, spec->match_flags) * + buf[count++] = (efx_ef10_filter_pri(table, spec) * HUNT_FILTER_TBL_ROWS + filter_idx); } @@ -3724,15 +3978,58 @@ static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags) return match_flags; } +static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_filter_vlan *vlan, *next_vlan; + + /* See comment in efx_ef10_filter_table_remove() */ + if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) + return; + + if (!table) + return; + + list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list) + efx_ef10_filter_del_vlan_internal(efx, vlan); +} + +static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table, + enum efx_filter_match_flags match_flags) +{ + unsigned int match_pri; + int mf; + + for (match_pri = 0; + match_pri < table->rx_match_count; + match_pri++) { + mf = efx_ef10_filter_match_flags_from_mcdi( + table->rx_match_mcdi_flags[match_pri]); + if (mf == match_flags) + return true; + } + + return false; +} + static int efx_ef10_filter_table_probe(struct efx_nic *efx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + struct net_device *net_dev = efx->net_dev; unsigned int pd_match_pri, pd_match_count; struct efx_ef10_filter_table *table; + struct efx_ef10_vlan *vlan; size_t outlen; int rc; + if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) + return -EINVAL; + + if (efx->filter_state) /* already probed */ + return 0; + table = kzalloc(sizeof(*table), GFP_KERNEL); if (!table) return -ENOMEM; @@ -3765,24 +4062,48 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx) "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n", __func__, mcdi_flags, pd_match_pri, rc, table->rx_match_count); - table->rx_match_flags[table->rx_match_count++] = rc; + table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags; + table->rx_match_count++; } } + if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) && + !(efx_ef10_filter_match_supported(table, + (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) && + efx_ef10_filter_match_supported(table, + (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) { + netif_info(efx, probe, net_dev, + "VLAN filters are not supported in this firmware variant\n"); + net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + } + table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry)); if (!table->entry) { rc = -ENOMEM; goto fail; } - table->ucdef_id = EFX_EF10_FILTER_ID_INVALID; - table->bcast_id = EFX_EF10_FILTER_ID_INVALID; - table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; + table->mc_promisc_last = false; + table->vlan_filter = + !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER); + INIT_LIST_HEAD(&table->vlan_list); efx->filter_state = table; init_waitqueue_head(&table->waitq); + + list_for_each_entry(vlan, &nic_data->vlan_list, list) { + rc = efx_ef10_filter_add_vlan(efx, vlan->vid); + if (rc) + goto fail_add_vlan; + } + return 0; +fail_add_vlan: + 
efx_ef10_filter_cleanup_vlans(efx); + efx->filter_state = NULL; fail: kfree(table); return rc; @@ -3843,7 +4164,6 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx) nic_data->must_restore_filters = false; } -/* Caller must hold efx->filter_sem for write */ static void efx_ef10_filter_table_remove(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; @@ -3852,7 +4172,17 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx) unsigned int filter_idx; int rc; + efx_ef10_filter_cleanup_vlans(efx); efx->filter_state = NULL; + /* If we were called without locking, then it's not safe to free + * the table as others might be using it. So we just WARN, leak + * the memory, and potentially get an inconsistent filter table + * state. + * This should never actually happen. + */ + if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) + return; + if (!table) return; @@ -3880,37 +4210,54 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx) kfree(table); } -#define EFX_EF10_FILTER_DO_MARK_OLD(id) \ - if (id != EFX_EF10_FILTER_ID_INVALID) { \ - filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \ - if (!table->entry[filter_idx].spec) \ - netif_dbg(efx, drv, efx->net_dev, \ - "%s: marked null spec old %04x:%04x\n", \ - __func__, id, filter_idx); \ - table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;\ - } -static void efx_ef10_filter_mark_old(struct efx_nic *efx) +static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id) { struct efx_ef10_filter_table *table = efx->filter_state; - unsigned int filter_idx, i; + unsigned int filter_idx; - if (!table) - return; + if (*id != EFX_EF10_FILTER_ID_INVALID) { + filter_idx = efx_ef10_filter_get_unsafe_id(efx, *id); + if (!table->entry[filter_idx].spec) + netif_dbg(efx, drv, efx->net_dev, + "marked null spec old %04x:%04x\n", *id, + filter_idx); + table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; + *id = EFX_EF10_FILTER_ID_INVALID; + } +} + +/* Mark old per-VLAN filters that may need to be removed */ +static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx, + struct efx_ef10_filter_vlan *vlan) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + unsigned int i; - /* Mark old filters that may need to be removed */ - spin_lock_bh(&efx->filter_lock); for (i = 0; i < table->dev_uc_count; i++) - EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id); + efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]); for (i = 0; i < table->dev_mc_count; i++) - EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id); - EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id); - EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id); - EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id); + efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]); + efx_ef10_filter_mark_one_old(efx, &vlan->ucdef); + efx_ef10_filter_mark_one_old(efx, &vlan->bcast); + efx_ef10_filter_mark_one_old(efx, &vlan->mcdef); +} + +/* Mark old filters that may need to be removed. 
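This is a mark-and-sweep scheme: every AUTO filter is first marked old, re-inserting a still-wanted filter clears the mark, and a final pass removes whatever stayed marked. A condensed user-space sketch (hypothetical names and flag value):

#include <stdbool.h>
#include <stdio.h>

#define N_FILTERS	8
#define FLAG_AUTO_OLD	0x1u

struct filter {
	bool in_use;
	unsigned int flags;
};

static struct filter table[N_FILTERS];

static void mark_old(void)
{
	for (int i = 0; i < N_FILTERS; i++)
		if (table[i].in_use)
			table[i].flags |= FLAG_AUTO_OLD;
}

/* re-insert: keep the filter and clear its mark */
static void renew(int i)
{
	table[i].in_use = true;
	table[i].flags &= ~FLAG_AUTO_OLD;
}

static void remove_old(void)
{
	for (int i = 0; i < N_FILTERS; i++)
		if (table[i].in_use && (table[i].flags & FLAG_AUTO_OLD))
			table[i].in_use = false; /* swept */
}

int main(void)
{
	renew(2);
	renew(5);
	mark_old();
	renew(2);      /* still wanted after the address list was rebuilt */
	remove_old();  /* drops filter 5, keeps filter 2 */
	printf("2:%d 5:%d\n", table[2].in_use, table[5].in_use);
	return 0;
}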
+ * Caller must hold efx->filter_sem for read if race against + * efx_ef10_filter_table_remove() is possible + */ +static void efx_ef10_filter_mark_old(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_filter_vlan *vlan; + + spin_lock_bh(&efx->filter_lock); + list_for_each_entry(vlan, &table->vlan_list, list) + _efx_ef10_filter_vlan_mark_old(efx, vlan); spin_unlock_bh(&efx->filter_lock); } -#undef EFX_EF10_FILTER_DO_MARK_OLD -static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc) +static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; struct net_device *net_dev = efx->net_dev; @@ -3918,45 +4265,38 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc) int addr_count; unsigned int i; - table->ucdef_id = EFX_EF10_FILTER_ID_INVALID; addr_count = netdev_uc_count(net_dev); - if (net_dev->flags & IFF_PROMISC) - *promisc = true; + table->uc_promisc = !!(net_dev->flags & IFF_PROMISC); table->dev_uc_count = 1 + addr_count; ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); i = 1; netdev_for_each_uc_addr(uc, net_dev) { if (i >= EFX_EF10_FILTER_DEV_UC_MAX) { - *promisc = true; + table->uc_promisc = true; break; } ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); - table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID; i++; } } -static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc) +static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx) { struct efx_ef10_filter_table *table = efx->filter_state; struct net_device *net_dev = efx->net_dev; struct netdev_hw_addr *mc; unsigned int i, addr_count; - table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; - table->bcast_id = EFX_EF10_FILTER_ID_INVALID; - if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) - *promisc = true; + table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)); addr_count = netdev_mc_count(net_dev); i = 0; netdev_for_each_mc_addr(mc, net_dev) { if (i >= EFX_EF10_FILTER_DEV_MC_MAX) { - *promisc = true; + table->mc_promisc = true; break; } ether_addr_copy(table->dev_mc_list[i].addr, mc->addr); - table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID; i++; } @@ -3964,7 +4304,8 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc) } static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, - bool multicast, bool rollback) + struct efx_ef10_filter_vlan *vlan, + bool multicast, bool rollback) { struct efx_ef10_filter_table *table = efx->filter_state; struct efx_ef10_dev_addr *addr_list; @@ -3973,14 +4314,17 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, u8 baddr[ETH_ALEN]; unsigned int i, j; int addr_count; + u16 *ids; int rc; if (multicast) { addr_list = table->dev_mc_list; addr_count = table->dev_mc_count; + ids = vlan->mc; } else { addr_list = table->dev_uc_list; addr_count = table->dev_uc_count; + ids = vlan->uc; } filter_flags = efx_rss_enabled(efx) ? 
EFX_FILTER_FLAG_RX_RSS : 0; @@ -3988,8 +4332,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, /* Insert/renew filters */ for (i = 0; i < addr_count; i++) { efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); - efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, - addr_list[i].addr); + efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { if (rollback) { @@ -3998,12 +4341,10 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, rc); /* Fall back to promiscuous */ for (j = 0; j < i; j++) { - if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID) - continue; efx_ef10_filter_remove_unsafe( efx, EFX_FILTER_PRI_AUTO, - addr_list[j].id); - addr_list[j].id = EFX_EF10_FILTER_ID_INVALID; + ids[j]); + ids[j] = EFX_EF10_FILTER_ID_INVALID; } return rc; } else { @@ -4011,40 +4352,40 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, rc = EFX_EF10_FILTER_ID_INVALID; } } - addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc); + ids[i] = efx_ef10_filter_get_unsafe_id(efx, rc); } if (multicast && rollback) { /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); eth_broadcast_addr(baddr); - efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); + efx_filter_set_eth_local(&spec, vlan->vid, baddr); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { netif_warn(efx, drv, efx->net_dev, "Broadcast filter insert failed rc=%d\n", rc); /* Fall back to promiscuous */ for (j = 0; j < i; j++) { - if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID) - continue; efx_ef10_filter_remove_unsafe( efx, EFX_FILTER_PRI_AUTO, - addr_list[j].id); - addr_list[j].id = EFX_EF10_FILTER_ID_INVALID; + ids[j]); + ids[j] = EFX_EF10_FILTER_ID_INVALID; } return rc; } else { - table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc); + EFX_WARN_ON_PARANOID(vlan->bcast != + EFX_EF10_FILTER_ID_INVALID); + vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc); } } return 0; } -static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, - bool rollback) +static int efx_ef10_filter_insert_def(struct efx_nic *efx, + struct efx_ef10_filter_vlan *vlan, + bool multicast, bool rollback) { - struct efx_ef10_filter_table *table = efx->filter_state; struct efx_ef10_nic_data *nic_data = efx->nic_data; enum efx_filter_flags filter_flags; struct efx_filter_spec spec; @@ -4060,6 +4401,9 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, else efx_filter_set_uc_def(&spec); + if (vlan->vid != EFX_FILTER_VID_UNSPEC) + efx_filter_set_eth_local(&spec, vlan->vid, NULL); + rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING, @@ -4067,14 +4411,14 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, "%scast mismatch filter insert failed rc=%d\n", multicast ? 
"Multi" : "Uni", rc); } else if (multicast) { - table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc); + EFX_WARN_ON_PARANOID(vlan->mcdef != EFX_EF10_FILTER_ID_INVALID); + vlan->mcdef = efx_ef10_filter_get_unsafe_id(efx, rc); if (!nic_data->workaround_26807) { /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); eth_broadcast_addr(baddr); - efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, - baddr); + efx_filter_set_eth_local(&spec, vlan->vid, baddr); rc = efx_ef10_filter_insert(efx, &spec, true); if (rc < 0) { netif_warn(efx, drv, efx->net_dev, @@ -4084,17 +4428,20 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, /* Roll back the mc_def filter */ efx_ef10_filter_remove_unsafe( efx, EFX_FILTER_PRI_AUTO, - table->mcdef_id); - table->mcdef_id = EFX_EF10_FILTER_ID_INVALID; + vlan->mcdef); + vlan->mcdef = EFX_EF10_FILTER_ID_INVALID; return rc; } } else { - table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc); + EFX_WARN_ON_PARANOID(vlan->bcast != + EFX_EF10_FILTER_ID_INVALID); + vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc); } } rc = 0; } else { - table->ucdef_id = rc; + EFX_WARN_ON_PARANOID(vlan->ucdef != EFX_EF10_FILTER_ID_INVALID); + vlan->ucdef = rc; rc = 0; } return rc; @@ -4203,64 +4550,55 @@ reset_nic: /* Caller must hold efx->filter_sem for read if race against * efx_ef10_filter_table_remove() is possible */ -static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) +static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx, + struct efx_ef10_filter_vlan *vlan) { struct efx_ef10_filter_table *table = efx->filter_state; struct efx_ef10_nic_data *nic_data = efx->nic_data; - struct net_device *net_dev = efx->net_dev; - bool uc_promisc = false, mc_promisc = false; - if (!efx_dev_registered(efx)) - return; - - if (!table) - return; - - efx_ef10_filter_mark_old(efx); - - /* Copy/convert the address lists; add the primary station - * address and broadcast address + /* Do not install unspecified VID if VLAN filtering is enabled. + * Do not install all specified VIDs if VLAN filtering is disabled. */ - netif_addr_lock_bh(net_dev); - efx_ef10_filter_uc_addr_list(efx, &uc_promisc); - efx_ef10_filter_mc_addr_list(efx, &mc_promisc); - netif_addr_unlock_bh(net_dev); + if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter) + return; /* Insert/renew unicast filters */ - if (uc_promisc) { - efx_ef10_filter_insert_def(efx, false, false); - efx_ef10_filter_insert_addr_list(efx, false, false); + if (table->uc_promisc) { + efx_ef10_filter_insert_def(efx, vlan, false, false); + efx_ef10_filter_insert_addr_list(efx, vlan, false, false); } else { /* If any of the filters failed to insert, fall back to * promiscuous mode - add in the uc_def filter. But keep * our individual unicast filters. 
*/ - if (efx_ef10_filter_insert_addr_list(efx, false, false)) - efx_ef10_filter_insert_def(efx, false, false); + if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false)) + efx_ef10_filter_insert_def(efx, vlan, false, false); } /* Insert/renew multicast filters */ /* If changing promiscuous state with cascaded multicast filters, remove * old filters first, so that packets are dropped rather than duplicated */ - if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc) + if (nic_data->workaround_26807 && + table->mc_promisc_last != table->mc_promisc) efx_ef10_filter_remove_old(efx); - if (mc_promisc) { + if (table->mc_promisc) { if (nic_data->workaround_26807) { /* If we failed to insert promiscuous filters, rollback * and fall back to individual multicast filters */ - if (efx_ef10_filter_insert_def(efx, true, true)) { + if (efx_ef10_filter_insert_def(efx, vlan, true, true)) { /* Changing promisc state, so remove old filters */ efx_ef10_filter_remove_old(efx); - efx_ef10_filter_insert_addr_list(efx, true, false); + efx_ef10_filter_insert_addr_list(efx, vlan, + true, false); } } else { /* If we failed to insert promiscuous filters, don't * rollback. Regardless, also insert the mc_list */ - efx_ef10_filter_insert_def(efx, true, false); - efx_ef10_filter_insert_addr_list(efx, true, false); + efx_ef10_filter_insert_def(efx, vlan, true, false); + efx_ef10_filter_insert_addr_list(efx, vlan, true, false); } } else { /* If any filters failed to insert, rollback and fall back to @@ -4268,17 +4606,153 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) * that fails, roll back again and insert as many of our * individual multicast filters as we can. */ - if (efx_ef10_filter_insert_addr_list(efx, true, true)) { + if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) { /* Changing promisc state, so remove old filters */ if (nic_data->workaround_26807) efx_ef10_filter_remove_old(efx); - if (efx_ef10_filter_insert_def(efx, true, true)) - efx_ef10_filter_insert_addr_list(efx, true, false); + if (efx_ef10_filter_insert_def(efx, vlan, true, true)) + efx_ef10_filter_insert_addr_list(efx, vlan, + true, false); } } +} + +/* Caller must hold efx->filter_sem for read if race against + * efx_ef10_filter_table_remove() is possible + */ +static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct net_device *net_dev = efx->net_dev; + struct efx_ef10_filter_vlan *vlan; + bool vlan_filter; + + if (!efx_dev_registered(efx)) + return; + + if (!table) + return; + + efx_ef10_filter_mark_old(efx); + + /* Copy/convert the address lists; add the primary station + * address and broadcast address + */ + netif_addr_lock_bh(net_dev); + efx_ef10_filter_uc_addr_list(efx); + efx_ef10_filter_mc_addr_list(efx); + netif_addr_unlock_bh(net_dev); + + /* If VLAN filtering changes, all old filters are finally removed. + * Do it in advance to avoid conflicts for unicast untagged and + * VLAN 0 tagged filters. 
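The per-VLAN sync above only programs an entry when it matches the current filtering mode: the catch-all entry when VLAN filtering is off, the real VIDs (including 0) when it is on. A short sketch of that selection rule, with VID_UNSPEC standing in for EFX_FILTER_VID_UNSPEC (its real value is not shown in this hunk):

#include <assert.h>
#include <stdbool.h>

#define VID_UNSPEC 0xffffu /* assumed stand-in value */

static bool should_program(unsigned int vid, bool vlan_filter)
{
	/* mirrors: if ((vid == VID_UNSPEC) == vlan_filter) return; */
	return (vid == VID_UNSPEC) != vlan_filter;
}

int main(void)
{
	assert(should_program(VID_UNSPEC, false)); /* filtering off: catch-all only */
	assert(!should_program(5, false));
	assert(should_program(0, true));           /* filtering on: per-VID, incl. 0 */
	assert(!should_program(VID_UNSPEC, true));
	return 0;
}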
+ */ + vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER); + if (table->vlan_filter != vlan_filter) { + table->vlan_filter = vlan_filter; + efx_ef10_filter_remove_old(efx); + } + + list_for_each_entry(vlan, &table->vlan_list, list) + efx_ef10_filter_vlan_sync_rx_mode(efx, vlan); efx_ef10_filter_remove_old(efx); - efx->mc_promisc = mc_promisc; + table->mc_promisc_last = table->mc_promisc; +} + +static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_filter_vlan *vlan; + + WARN_ON(!rwsem_is_locked(&efx->filter_sem)); + + list_for_each_entry(vlan, &table->vlan_list, list) { + if (vlan->vid == vid) + return vlan; + } + + return NULL; +} + +static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid) +{ + struct efx_ef10_filter_table *table = efx->filter_state; + struct efx_ef10_filter_vlan *vlan; + unsigned int i; + + if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) + return -EINVAL; + + vlan = efx_ef10_filter_find_vlan(efx, vid); + if (WARN_ON(vlan)) { + netif_err(efx, drv, efx->net_dev, + "VLAN %u already added\n", vid); + return -EALREADY; + } + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) + return -ENOMEM; + + vlan->vid = vid; + + for (i = 0; i < ARRAY_SIZE(vlan->uc); i++) + vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID; + for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) + vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID; + vlan->ucdef = EFX_EF10_FILTER_ID_INVALID; + vlan->bcast = EFX_EF10_FILTER_ID_INVALID; + vlan->mcdef = EFX_EF10_FILTER_ID_INVALID; + + list_add_tail(&vlan->list, &table->vlan_list); + + if (efx_dev_registered(efx)) + efx_ef10_filter_vlan_sync_rx_mode(efx, vlan); + + return 0; +} + +static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx, + struct efx_ef10_filter_vlan *vlan) +{ + unsigned int i; + + /* See comment in efx_ef10_filter_table_remove() */ + if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) + return; + + list_del(&vlan->list); + + for (i = 0; i < ARRAY_SIZE(vlan->uc); i++) + efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, + vlan->uc[i]); + for (i = 0; i < ARRAY_SIZE(vlan->mc); i++) + efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, + vlan->mc[i]); + efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->ucdef); + efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->bcast); + efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mcdef); + + kfree(vlan); +} + +static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid) +{ + struct efx_ef10_filter_vlan *vlan; + + /* See comment in efx_ef10_filter_table_remove() */ + if (!efx_rwsem_assert_write_locked(&efx->filter_sem)) + return; + + vlan = efx_ef10_filter_find_vlan(efx, vid); + if (!vlan) { + netif_err(efx, drv, efx->net_dev, + "VLAN %u not found in filter state\n", vid); + return; + } + + efx_ef10_filter_del_vlan_internal(efx, vlan); } static int efx_ef10_set_mac_address(struct efx_nic *efx) @@ -4290,6 +4764,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) efx_device_detach_sync(efx); efx_net_stop(efx->net_dev); + + mutex_lock(&efx->mac_lock); down_write(&efx->filter_sem); efx_ef10_filter_table_remove(efx); @@ -4302,6 +4778,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) efx_ef10_filter_table_probe(efx); up_write(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); + if (was_enabled) efx_net_open(efx->net_dev); netif_device_attach(efx->net_dev); @@ -4703,6 +5181,29 @@ static int 
efx_ef10_ptp_set_ts_config(struct efx_nic *efx, } } +static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid) +{ + if (proto != htons(ETH_P_8021Q)) + return -EINVAL; + + return efx_ef10_add_vlan(efx, vid); +} + +static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid) +{ + if (proto != htons(ETH_P_8021Q)) + return -EINVAL; + + return efx_ef10_del_vlan(efx, vid); +} + +#define EF10_OFFLOAD_FEATURES \ + (NETIF_F_IP_CSUM | \ + NETIF_F_HW_VLAN_CTAG_FILTER | \ + NETIF_F_IPV6_CSUM | \ + NETIF_F_RXHASH | \ + NETIF_F_NTUPLE) + const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .is_vf = true, .mem_bar = EFX_MEM_VF_BAR, @@ -4780,6 +5281,8 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { #endif .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf, .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf, + .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, + .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, #ifdef CONFIG_SFC_SRIOV .vswitching_probe = efx_ef10_vswitching_probe_vf, .vswitching_restore = efx_ef10_vswitching_restore_vf, @@ -4798,8 +5301,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = { .always_rx_scatter = true, .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, - .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_RXHASH | NETIF_F_NTUPLE), + .offload_features = EF10_OFFLOAD_FEATURES, .mcdi_max_ver = 2, .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | @@ -4891,6 +5393,8 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .ptp_write_host_time = efx_ef10_ptp_write_host_time, .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events, .ptp_set_ts_config = efx_ef10_ptp_set_ts_config, + .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid, + .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid, #ifdef CONFIG_SFC_SRIOV .sriov_configure = efx_ef10_sriov_configure, .sriov_init = efx_ef10_sriov_init, @@ -4919,8 +5423,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = { .always_rx_scatter = true, .max_interrupt_mode = EFX_INT_MODE_MSIX, .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH, - .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_RXHASH | NETIF_F_NTUPLE), + .offload_features = EF10_OFFLOAD_FEATURES, .mcdi_max_ver = 2, .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS, .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE | diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 3c17f274e..a949b9d27 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -232,6 +232,35 @@ fail: return rc; } +static int efx_ef10_vadaptor_alloc_set_features(struct efx_nic *efx) +{ + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u32 port_flags; + int rc; + + rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); + if (rc) + goto fail_vadaptor_alloc; + + rc = efx_ef10_vadaptor_query(efx, nic_data->vport_id, + &port_flags, NULL, NULL); + if (rc) + goto fail_vadaptor_query; + + if (port_flags & + (1 << MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN)) + efx->fixed_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + else + efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + + return 0; + +fail_vadaptor_query: + efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED); +fail_vadaptor_alloc: + return rc; +} + /* On top of the default firmware vswitch setup, create a VEB vswitch and * expansion vport for use by this function. 
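efx_ef10_vadaptor_alloc_set_features() above turns a vport property into a fixed netdev feature: if the vport is VLAN-restricted, VLAN filtering must stay enabled for tagged traffic to be received at all. A sketch of that derivation (bit position and feature value illustrative, not the real NETIF_F_* encoding):

#include <stdint.h>
#include <stdio.h>

#define VLAN_RESTRICT_LBN	1u        /* assumed bit position */
#define F_HW_VLAN_CTAG_FILTER	(1u << 9) /* stand-in feature bit */

static uint32_t derive_fixed_features(uint32_t port_flags, uint32_t fixed)
{
	if (port_flags & (1u << VLAN_RESTRICT_LBN))
		fixed |= F_HW_VLAN_CTAG_FILTER;  /* forced on */
	else
		fixed &= ~F_HW_VLAN_CTAG_FILTER; /* user-togglable again */
	return fixed;
}

int main(void)
{
	printf("restricted: fixed=%#x\n",
	       derive_fixed_features(1u << VLAN_RESTRICT_LBN, 0));
	printf("unrestricted: fixed=%#x\n", derive_fixed_features(0, 0));
	return 0;
}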
*/ @@ -243,7 +272,7 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx) if (pci_sriov_get_totalvfs(efx->pci_dev) <= 0) { /* vswitch not needed as we have no VFs */ - efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); + efx_ef10_vadaptor_alloc_set_features(efx); return 0; } @@ -263,7 +292,7 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx) goto fail3; ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr); - rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); + rc = efx_ef10_vadaptor_alloc_set_features(efx); if (rc) goto fail4; @@ -282,9 +311,7 @@ fail1: int efx_ef10_vswitching_probe_vf(struct efx_nic *efx) { - struct efx_ef10_nic_data *nic_data = efx->nic_data; - - return efx_ef10_vadaptor_alloc(efx, nic_data->vport_id); + return efx_ef10_vadaptor_alloc_set_features(efx); } int efx_ef10_vswitching_restore_pf(struct efx_nic *efx) @@ -554,6 +581,7 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan, efx_device_detach_sync(vf->efx); efx_net_stop(vf->efx->net_dev); + mutex_lock(&vf->efx->mac_lock); down_write(&vf->efx->filter_sem); vf->efx->type->filter_table_remove(vf->efx); @@ -630,6 +658,7 @@ restore_filters: goto reset_nic_up_write; up_write(&vf->efx->filter_sem); + mutex_unlock(&vf->efx->mac_lock); up_write(&vf->efx->filter_sem); @@ -642,9 +671,10 @@ restore_filters: return rc; reset_nic_up_write: - if (vf->efx) + if (vf->efx) { up_write(&vf->efx->filter_sem); - + mutex_unlock(&vf->efx->mac_lock); + } reset_nic: if (vf->efx) { netif_err(efx, drv, efx->net_dev, diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h index 6d25b92cb..9ceb7ef0a 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.h +++ b/drivers/net/ethernet/sfc/ef10_sriov.h @@ -70,6 +70,9 @@ int efx_ef10_vport_add_mac(struct efx_nic *efx, int efx_ef10_vport_del_mac(struct efx_nic *efx, unsigned int port_id, u8 *mac); int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id); +int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id, + u32 *port_flags, u32 *vadaptor_flags, + unsigned int *vlan_tags); int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id); #endif /* EF10_SRIOV_H */ diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 097f363f1..14b821b1c 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -600,6 +600,7 @@ fail: */ static void efx_start_datapath(struct efx_nic *efx) { + netdev_features_t old_features = efx->net_dev->features; bool old_rx_scatter = efx->rx_scatter; struct efx_tx_queue *tx_queue; struct efx_rx_queue *rx_queue; @@ -644,6 +645,15 @@ static void efx_start_datapath(struct efx_nic *efx) efx->rx_dma_len, efx->rx_page_buf_step, efx->rx_bufs_per_page, efx->rx_pages_per_batch); + /* Restore previously fixed features in hw_features and remove + * features which are fixed now + */ + efx->net_dev->hw_features |= efx->net_dev->features; + efx->net_dev->hw_features &= ~efx->fixed_features; + efx->net_dev->features |= efx->fixed_features; + if (efx->net_dev->features != old_features) + netdev_features_change(efx->net_dev); + /* RX filters may also have scatter-enabled flags */ if (efx->rx_scatter != old_rx_scatter) efx->type->filter_update_rx_scatter(efx); @@ -1719,6 +1729,7 @@ static int efx_probe_filters(struct efx_nic *efx) spin_lock_init(&efx->filter_lock); init_rwsem(&efx->filter_sem); + mutex_lock(&efx->mac_lock); down_write(&efx->filter_sem); rc = efx->type->filter_table_probe(efx); if (rc) @@ -1757,6 +1768,7 @@ static int 
efx_probe_filters(struct efx_nic *efx) #endif out_unlock: up_write(&efx->filter_sem); + mutex_unlock(&efx->mac_lock); return rc; } @@ -2312,14 +2324,46 @@ static void efx_set_rx_mode(struct net_device *net_dev) static int efx_set_features(struct net_device *net_dev, netdev_features_t data) { struct efx_nic *efx = netdev_priv(net_dev); + int rc; /* If disabling RX n-tuple filtering, clear existing filters */ - if (net_dev->features & ~data & NETIF_F_NTUPLE) - return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); + if (net_dev->features & ~data & NETIF_F_NTUPLE) { + rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); + if (rc) + return rc; + } + + /* If Rx VLAN filter is changed, update filters via mac_reconfigure */ + if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) { + /* efx_set_rx_mode() will schedule MAC work to update filters + * when the new features are finally set in net_dev. + */ + efx_set_rx_mode(net_dev); + } return 0; } +static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->vlan_rx_add_vid) + return efx->type->vlan_rx_add_vid(efx, proto, vid); + else + return -EOPNOTSUPP; +} + +static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid) +{ + struct efx_nic *efx = netdev_priv(net_dev); + + if (efx->type->vlan_rx_kill_vid) + return efx->type->vlan_rx_kill_vid(efx, proto, vid); + else + return -EOPNOTSUPP; +} + static const struct net_device_ops efx_netdev_ops = { .ndo_open = efx_net_open, .ndo_stop = efx_net_stop, @@ -2332,6 +2376,8 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_set_mac_address = efx_set_mac_address, .ndo_set_rx_mode = efx_set_rx_mode, .ndo_set_features = efx_set_features, + .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid, #ifdef CONFIG_SFC_SRIOV .ndo_set_vf_mac = efx_sriov_set_vf_mac, .ndo_set_vf_vlan = efx_sriov_set_vf_vlan, @@ -3147,17 +3193,25 @@ static int efx_pci_probe(struct pci_dev *pci_dev, return -ENOMEM; efx = netdev_priv(net_dev); efx->type = (const struct efx_nic_type *) entry->driver_data; + efx->fixed_features |= NETIF_F_HIGHDMA; net_dev->features |= (efx->type->offload_features | NETIF_F_SG | - NETIF_F_HIGHDMA | NETIF_F_TSO | - NETIF_F_RXCSUM); + NETIF_F_TSO | NETIF_F_RXCSUM); if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) net_dev->features |= NETIF_F_TSO6; /* Mask for features that also apply to VLAN devices */ net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | NETIF_F_RXCSUM); - /* All offloads can be toggled */ - net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; + + net_dev->hw_features = net_dev->features & ~efx->fixed_features; + + /* Disable VLAN filtering by default. It may be enforced if + * the feature is fixed (i.e. VLAN filters are required to + * receive VLAN tagged packets due to vPort restrictions). 
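The fixed-feature bookkeeping shared by efx_start_datapath() and efx_pci_probe() reduces to three mask operations: fixed features are always active and never user-togglable, so they are forced into features and masked out of hw_features. A sketch with netdev_features_t modelled as a plain 64-bit word and illustrative flag values:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t features_t;

struct toy_netdev {
	features_t features;    /* currently active */
	features_t hw_features; /* user-togglable via ethtool */
};

static void apply_fixed(struct toy_netdev *d, features_t fixed)
{
	d->hw_features |= d->features; /* restore previously fixed bits */
	d->hw_features &= ~fixed;      /* fixed bits cannot be toggled... */
	d->features |= fixed;          /* ...but are always enabled */
}

int main(void)
{
	struct toy_netdev d = { .features = 0x7, .hw_features = 0x7 };

	apply_fixed(&d, 0x4);
	printf("features=%#llx hw_features=%#llx\n",
	       (unsigned long long)d.features,
	       (unsigned long long)d.hw_features);
	return 0;
}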
+ */ + net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + net_dev->features |= efx->fixed_features; + pci_set_drvdata(pci_dev, efx); SET_NETDEV_DEV(net_dev, &pci_dev->dev); rc = efx_init_struct(efx, pci_dev, net_dev); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 5e3f93f04..c3ae739e9 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -274,4 +274,13 @@ static inline void efx_device_detach_sync(struct efx_nic *efx) netif_tx_unlock_bh(dev); } +static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem) +{ + if (WARN_ON(down_read_trylock(sem))) { + up_read(sem); + return false; + } + return true; +} + #endif /* EFX_EFX_H */ diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 4cc772164..c9a5b003c 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -273,6 +273,9 @@ * have already installed filters. See the comment at * MC_CMD_WORKAROUND_BUG26807. */ #define MC_CMD_ERR_FILTERS_PRESENT 0x1014 +/* The clock whose frequency you've attempted to set + * doesn't exist on this NIC */ +#define MC_CMD_ERR_NO_CLOCK 0x1015 #define MC_CMD_ERR_CODE_OFST 0 @@ -292,9 +295,11 @@ /* Point to the copycode entry point. */ #define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4) #define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4) +#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4) /* Points to the recovery mode entry point. */ #define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4) #define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4) +#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4) /* The command set exported by the boot ROM (MCDI v0) */ #define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \ @@ -686,6 +691,12 @@ #define FCDI_EVENT_CODE_PTP_STATUS 0x9 /* enum: Port id config to map MC-FC port idx */ #define FCDI_EVENT_CODE_PORT_CONFIG 0xa +/* enum: Boot result or error code */ +#define FCDI_EVENT_CODE_BOOT_RESULT 0xb +#define FCDI_EVENT_REBOOT_SRC_LBN 36 +#define FCDI_EVENT_REBOOT_SRC_WIDTH 8 +#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */ +#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */ #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0 #define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32 @@ -717,6 +728,11 @@ #define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0 #define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0 #define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32 +#define FCDI_EVENT_BOOT_RESULT_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */ +#define FCDI_EVENT_BOOT_RESULT_LBN 0 +#define FCDI_EVENT_BOOT_RESULT_WIDTH 32 /* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events * to the MC. 
Note that this structure is overlaid over a normal FCDI event @@ -1649,15 +1665,30 @@ /* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16 -/* Uncorrected error on transmit timestamps in NIC clock format */ +/* Uncorrected error on PTP transmit timestamps in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0 -/* Uncorrected error on receive timestamps in NIC clock format */ +/* Uncorrected error on PTP receive timestamps in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4 /* Uncorrected error on PPS output in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8 /* Uncorrected error on PPS input in NIC clock format */ #define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12 +/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24 +/* Uncorrected error on PTP transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0 +/* Uncorrected error on PTP receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4 +/* Uncorrected error on PPS output in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8 +/* Uncorrected error on PPS input in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12 +/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16 +/* Uncorrected error on non-PTP receive timestamps in NIC clock format */ +#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20 + /* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */ #define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4 /* Results of testing */ @@ -2158,8 +2189,12 @@ /* MC_CMD_DRV_ATTACH_IN msgrequest */ #define MC_CMD_DRV_ATTACH_IN_LEN 12 -/* new state (0=detached, 1=attached) to set if UPDATE=1 */ +/* new state to set if UPDATE=1 */ #define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0 +#define MC_CMD_DRV_ATTACH_LBN 0 +#define MC_CMD_DRV_ATTACH_WIDTH 1 +#define MC_CMD_DRV_PREBOOT_LBN 1 +#define MC_CMD_DRV_PREBOOT_WIDTH 1 /* 1 to set new state, or 0 to just report the existing state */ #define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4 /* preferred datapath firmware (for Huntington; ignored for Siena) */ @@ -2181,12 +2216,12 @@ /* MC_CMD_DRV_ATTACH_OUT msgresponse */ #define MC_CMD_DRV_ATTACH_OUT_LEN 4 -/* previous or existing state (0=detached, 1=attached) */ +/* previous or existing state, see the bitmask at NEW_STATE */ #define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0 /* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */ #define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8 -/* previous or existing state (0=detached, 1=attached) */ +/* previous or existing state, see the bitmask at NEW_STATE */ #define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0 /* Flags associated with this function */ #define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4 @@ -2198,6 +2233,10 @@ #define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1 /* enum: The function can perform privileged operations */ #define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2 +/* enum: The function does not have an active port associated with it. The port + * refers to the Sorrento external FPGA port. 
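With the LBN/WIDTH pairs added above, DRV_ATTACH's NEW_STATE is now a bitmask rather than a plain 0/1. A sketch of composing it (the two LBN constants are copied from the header; the wrapper is hypothetical):

#include <stdint.h>
#include <stdio.h>

#define DRV_ATTACH_LBN	0 /* MC_CMD_DRV_ATTACH_LBN */
#define DRV_PREBOOT_LBN	1 /* MC_CMD_DRV_PREBOOT_LBN */

static uint32_t drv_attach_state(int attach, int preboot)
{
	return ((uint32_t)!!attach << DRV_ATTACH_LBN) |
	       ((uint32_t)!!preboot << DRV_PREBOOT_LBN);
}

int main(void)
{
	printf("attached, no preboot: %#x\n", drv_attach_state(1, 0));
	printf("preboot only: %#x\n", drv_attach_state(0, 1));
	return 0;
}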
+ */ +#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3 /***********************************/ @@ -2892,7 +2931,7 @@ */ #define MC_CMD_SET_MAC 0x2c -#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK +#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL /* MC_CMD_SET_MAC_IN msgrequest */ #define MC_CMD_SET_MAC_IN_LEN 28 @@ -2927,9 +2966,66 @@ #define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0 #define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1 +/* MC_CMD_SET_MAC_EXT_IN msgrequest */ +#define MC_CMD_SET_MAC_EXT_IN_LEN 32 +/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of + * EtherII, VLAN, bug16011 padding). + */ +#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0 +#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8 +#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1 +#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20 +/* enum: Flow control is off. */ +/* MC_CMD_FCNTL_OFF 0x0 */ +/* enum: Respond to flow control. */ +/* MC_CMD_FCNTL_RESPOND 0x1 */ +/* enum: Respond to and Issue flow control. */ +/* MC_CMD_FCNTL_BIDIR 0x2 */ +/* enum: Auto neg flow control. */ +/* MC_CMD_FCNTL_AUTO 0x3 */ +/* enum: Priority flow control (eftest builds only). */ +/* MC_CMD_FCNTL_QBB 0x4 */ +/* enum: Issue flow control. */ +/* MC_CMD_FCNTL_GENERATE 0x5 */ +#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24 +#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1 +/* Select which parameters to configure. A parameter will only be modified if + * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in + * capabilities then this field is ignored (and all flags are assumed to be + * set). + */ +#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28 +#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0 +#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2 +#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4 +#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1 + /* MC_CMD_SET_MAC_OUT msgresponse */ #define MC_CMD_SET_MAC_OUT_LEN 0 +/* MC_CMD_SET_MAC_V2_OUT msgresponse */ +#define MC_CMD_SET_MAC_V2_OUT_LEN 4 +/* MTU as configured after processing the request. See comment at + * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL + * to 0. 
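The CONTROL word makes MC_CMD_SET_MAC_EXT selective: a parameter is applied only if its CFG bit is set, and CONTROL == 0 degenerates into a pure MTU query, per the MC_CMD_SET_MAC_V2_OUT comment above. A sketch of building the word (only the LBN constants are taken from the header):

#include <stdint.h>
#include <stdio.h>

#define CFG_MTU_LBN	0 /* MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN */
#define CFG_FCNTL_LBN	3 /* MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN */

int main(void)
{
	uint32_t control;

	control = 1u << CFG_MTU_LBN; /* modify the MTU, leave the rest alone */
	printf("set-MTU control word: %#x\n", control);

	control = (1u << CFG_MTU_LBN) | (1u << CFG_FCNTL_LBN);
	printf("MTU+flow-control word: %#x\n", control);

	control = 0; /* touch nothing: the V2 response still reports the MTU */
	printf("query-only control word: %#x\n", control);
	return 0;
}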
+ */ +#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0 + /***********************************/ /* MC_CMD_PHY_STATS @@ -3521,6 +3617,26 @@ #define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16 #define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20 +/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */ +#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28 +#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4 +#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8 +#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12 +#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0 +#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7 +#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16 +#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20 +/* Writes must be multiples of this size. Added to support the MUM on Sorrento. + */ +#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24 + /***********************************/ /* MC_CMD_NVRAM_UPDATE_START @@ -3561,6 +3677,37 @@ /* amount to read in bytes */ #define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8 +/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */ +#define MC_CMD_NVRAM_READ_IN_V2_LEN 16 +#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0 +/* Enum values, see field(s): */ +/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */ +#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4 +/* amount to read in bytes */ +#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8 +/* Optional control info. If a partition is stored with an A/B versioning + * scheme (i.e. in more than one physical partition in NVRAM) the host can set + * this to control which underlying physical partition is used to read data + * from. This allows it to perform a read-modify-write-verify with the write + * lock continuously held by calling NVRAM_UPDATE_START, reading the old + * contents using MODE=TARGET_CURRENT, overwriting the old partition and then + * verifying by reading with MODE=TARGET_BACKUP. + */ +#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12 +/* enum: Same as omitting MODE: caller sees data in current partition unless it + * holds the write lock in which case it sees data in the partition it is + * updating. + */ +#define MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0 +/* enum: Read from the current partition of an A/B pair, even if holding the + * write lock. + */ +#define MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1 +/* enum: Read from the non-current (i.e. 
to be updated) partition of an A/B + * pair + */ +#define MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2 + /* MC_CMD_NVRAM_READ_OUT msgresponse */ #define MC_CMD_NVRAM_READ_OUT_LENMIN 1 #define MC_CMD_NVRAM_READ_OUT_LENMAX 252 @@ -3895,6 +4042,8 @@ #define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39 /* enum: CCOM AVREG 1v8 supply (external ADC): mV */ #define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a +/* enum: CCOM RTS temperature: degC */ +#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b /* enum: Not a sensor: reserved for the next page flag */ #define MC_CMD_SENSOR_PAGE1_NEXT 0x3f /* enum: controller internal temperature sensor voltage on master core @@ -3931,6 +4080,12 @@ #define MC_CMD_SENSOR_PHY0_VCC 0x4c /* enum: Voltage supplied to the QSFP #1 from their power supply: mV */ #define MC_CMD_SENSOR_PHY1_VCC 0x4d +/* enum: Controller die temperature (TDIODE): degC */ +#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e +/* enum: Board temperature (front): degC */ +#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f +/* enum: Board temperature (back): degC */ +#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50 /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */ #define MC_CMD_SENSOR_ENTRY_OFST 4 #define MC_CMD_SENSOR_ENTRY_LEN 8 @@ -4007,7 +4162,7 @@ /* MC_CMD_READ_SENSORS_EXT_IN msgrequest */ #define MC_CMD_READ_SENSORS_EXT_IN_LEN 12 -/* DMA address of host buffer for sensor readings */ +/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */ #define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0 #define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8 #define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0 @@ -4608,6 +4763,10 @@ * operations */ #define MC_CMD_MUM_OP_QSFP 0xc +/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage + * level) from MUM + */ +#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd /* MC_CMD_MUM_IN_NULL msgrequest */ #define MC_CMD_MUM_IN_NULL_LEN 4 @@ -4793,6 +4952,10 @@ #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0 #define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2 +#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1 /* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */ #define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8 @@ -4862,6 +5025,11 @@ #define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4 #define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8 +/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */ +#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4 +/* MUM cmd header */ +/* MC_CMD_MUM_IN_CMD_OFST 0 */ + /* MC_CMD_MUM_OUT msgresponse */ #define MC_CMD_MUM_OUT_LEN 0 @@ -5004,6 +5172,69 @@ #define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4 #define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0 +/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num)) +/* Discrete (soldered) DDR resistor strap info */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16 +/* Number of SODIMM info records */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4 +/* Array of SODIMM info records */ +#define 
MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8 +/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0 +/* enum: SODIMM bank 2 (Bottom SODIMM for Sorrento) */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1 +/* enum: Total number of SODIMM banks */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */ +/* enum: Values 5-15 are reserved for future usage */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4 +/* enum: No module present */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0 +/* enum: Module present supported and powered on */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1 +/* enum: Module present but bad type */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2 +/* enum: Module present but incompatible voltage */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3 +/* enum: Module present but unknown SPD */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4 +/* enum: Module present but slot cannot support it */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5 +/* enum: Modules may or may not be present, but cannot establish contact by I2C + */ +#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52 +#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12 + /* MC_CMD_RESOURCE_SPECIFIER enum */ /* enum: Any */ #define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff @@ -5076,6 +5307,8 @@ #define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500 /* enum: Expansion ROM configuration data for port 0 */ #define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600 +/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */ +#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600 /* enum: Expansion ROM configuration data for port 1 */ #define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601 /* enum: Expansion ROM configuration data for port 2 */ @@ -5084,6 +5317,8 @@ #define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603 /* enum: Non-volatile log output partition */ #define NVRAM_PARTITION_TYPE_LOG 0x700 +/* enum: Non-volatile log output of second core on dual-core device */ +#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701 /* enum: Device state dump output partition */ #define NVRAM_PARTITION_TYPE_DUMP 
0x800 /* enum: Application license key storage partition */ @@ -5116,6 +5351,20 @@ #define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05 /* enum: MUM fuses and lockbits partition. */ #define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06 +/* enum: UEFI expansion ROM if separate from PXE */ +#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00 +/* enum: Spare partition 0 */ +#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000 +/* enum: Spare partition 1 */ +#define NVRAM_PARTITION_TYPE_SPARE_1 0x1100 +/* enum: Spare partition 2 */ +#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200 +/* enum: Spare partition 3 */ +#define NVRAM_PARTITION_TYPE_SPARE_3 0x1300 +/* enum: Spare partition 4 */ +#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400 +/* enum: Spare partition 5 */ +#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500 /* enum: Start of reserved value range (firmware may use for any purpose) */ #define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00 /* enum: End of reserved value range (firmware may use for any purpose) */ @@ -5149,6 +5398,90 @@ #define LICENSED_APP_ID_ID_LBN 0 #define LICENSED_APP_ID_ID_WIDTH 32 +/* LICENSED_FEATURES structuredef */ +#define LICENSED_FEATURES_LEN 8 +/* Bitmask of licensed firmware features */ +#define LICENSED_FEATURES_MASK_OFST 0 +#define LICENSED_FEATURES_MASK_LEN 8 +#define LICENSED_FEATURES_MASK_LO_OFST 0 +#define LICENSED_FEATURES_MASK_HI_OFST 4 +#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0 +#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1 +#define LICENSED_FEATURES_PIO_LBN 1 +#define LICENSED_FEATURES_PIO_WIDTH 1 +#define LICENSED_FEATURES_EVQ_TIMER_LBN 2 +#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1 +#define LICENSED_FEATURES_CLOCK_LBN 3 +#define LICENSED_FEATURES_CLOCK_WIDTH 1 +#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4 +#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1 +#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5 +#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1 +#define LICENSED_FEATURES_RX_SNIFF_LBN 6 +#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1 +#define LICENSED_FEATURES_TX_SNIFF_LBN 7 +#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1 +#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8 +#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1 +#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9 +#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1 +#define LICENSED_FEATURES_MASK_LBN 0 +#define LICENSED_FEATURES_MASK_WIDTH 64 + +/* LICENSED_V3_APPS structuredef */ +#define LICENSED_V3_APPS_LEN 8 +/* Bitmask of licensed applications */ +#define LICENSED_V3_APPS_MASK_OFST 0 +#define LICENSED_V3_APPS_MASK_LEN 8 +#define LICENSED_V3_APPS_MASK_LO_OFST 0 +#define LICENSED_V3_APPS_MASK_HI_OFST 4 +#define LICENSED_V3_APPS_ONLOAD_LBN 0 +#define LICENSED_V3_APPS_ONLOAD_WIDTH 1 +#define LICENSED_V3_APPS_PTP_LBN 1 +#define LICENSED_V3_APPS_PTP_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2 +#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1 +#define LICENSED_V3_APPS_SOLARSECURE_LBN 3 +#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1 +#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4 +#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1 +#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5 +#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6 +#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1 +#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7 +#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1 +#define LICENSED_V3_APPS_MASK_LBN 0 +#define LICENSED_V3_APPS_MASK_WIDTH 64 + +/* LICENSED_V3_FEATURES structuredef */ +#define LICENSED_V3_FEATURES_LEN 8 +/* Bitmask of 
licensed firmware features */ +#define LICENSED_V3_FEATURES_MASK_OFST 0 +#define LICENSED_V3_FEATURES_MASK_LEN 8 +#define LICENSED_V3_FEATURES_MASK_LO_OFST 0 +#define LICENSED_V3_FEATURES_MASK_HI_OFST 4 +#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0 +#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1 +#define LICENSED_V3_FEATURES_PIO_LBN 1 +#define LICENSED_V3_FEATURES_PIO_WIDTH 1 +#define LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2 +#define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1 +#define LICENSED_V3_FEATURES_CLOCK_LBN 3 +#define LICENSED_V3_FEATURES_CLOCK_WIDTH 1 +#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4 +#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1 +#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5 +#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1 +#define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6 +#define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1 +#define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7 +#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1 +#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8 +#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1 +#define LICENSED_V3_FEATURES_MASK_LBN 0 +#define LICENSED_V3_FEATURES_MASK_WIDTH 64 + /* TX_TIMESTAMP_EVENT structuredef */ #define TX_TIMESTAMP_EVENT_LEN 6 /* lower 16 bits of timestamp data */ @@ -5258,6 +5591,8 @@ #define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1 #define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5 #define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1 +#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6 +#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1 #define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20 /* enum: Disabled */ #define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0 @@ -5362,6 +5697,8 @@ #define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1 #define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9 #define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1 +#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_LBN 10 +#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ @@ -5422,6 +5759,8 @@ #define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */ #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18 #define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19 +#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20 /* The port ID associated with the v-adaptor which should contain this DMAQ. */ @@ -5535,6 +5874,8 @@ #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11 #define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12 +#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1 /* Owner ID to use if in buffer mode (zero if physical) */ #define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20 /* The port ID associated with the v-adaptor which should contain this DMAQ. 
*/
@@ -5747,6 +6088,46 @@
 #define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
 #define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
 
+/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE. This buffer is only needed if
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+
 /* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
 #define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
@@ -6323,6 +6704,15 @@
  * client
  */
 #define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
+/* enum: read properties relating to security rules (Medford-only; for use by
+ * SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
+/* enum: read the list of supported RX filter matches for VXLAN/NVGRE
+ * encapsulated frames, which follow a different match sequence to normal
+ * frames (Medford only)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
 
 /* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
 #define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -6356,7 +6746,10 @@
 
 /***********************************/
 /* MC_CMD_PARSER_DISP_RW
- * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging
+ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
+ * Please note that this interface is only of use to debug tools which have
+ * knowledge of firmware and hardware data structures; nothing here is intended
+ * for use by normal driver code.
*/ #define MC_CMD_PARSER_DISP_RW 0xe5 @@ -6374,6 +6767,12 @@ #define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2 /* enum: Lookup engine (with requested metadata format) */ #define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3 +/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */ +#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0 +/* enum: RX1 dispatcher CPU (only valid for Medford) */ +#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4 +/* enum: Miscellaneous other state (only valid for Medford) */ +#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5 /* identifies the type of operation requested */ #define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4 /* enum: read a word of DICPU DMEM or a LUE entry */ @@ -6382,8 +6781,12 @@ #define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1 /* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */ #define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2 -/* data memory address or LUE index */ +/* data memory address (DICPU targets) or LUE index (LUE targets) */ #define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8 +/* selector (for MISC_STATE target) */ +#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8 +/* enum: Port to datapath mapping */ +#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1 /* value to write (for DMEM writes) */ #define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12 /* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */ @@ -6408,6 +6811,12 @@ */ #define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20 #define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32 +/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */ +#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0 +#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4 +#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4 +#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */ +#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */ /***********************************/ @@ -7071,6 +7480,24 @@ #define MC_CMD_GET_CAPABILITIES_OUT_LEN 20 /* First word of flags. 
*/ #define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 #define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 #define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13 @@ -7138,6 +7565,8 @@ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 /* enum: RXDP Test firmware image 8 */ #define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b /* TxDPCPU firmware id. */ #define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6 #define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2 @@ -7153,6 +7582,8 @@ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 /* enum: TXDP Test firmware image 2 */ #define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2 #define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0 @@ -7227,6 +7658,258 @@ /* Licensed capabilities */ #define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16 +/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */ +#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0 + +/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72 +/* First word of flags. 
*/ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1 +#define 
MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1 +/* RxDPCPU firmware id. */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2 +/* enum: Standard RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0 +/* enum: Low latency RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1 +/* enum: Packed stream RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2 +/* enum: BIST RXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a +/* enum: RXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101 +/* enum: RXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102 +/* enum: RXDP Test firmware image 3 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103 +/* enum: RXDP Test firmware image 4 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104 +/* enum: RXDP Test firmware image 5 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105 +/* enum: RXDP Test firmware image 6 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106 +/* enum: RXDP Test firmware image 7 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107 +/* enum: RXDP Test firmware image 8 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108 +/* enum: RXDP Test firmware image 9 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b +/* TxDPCPU firmware id. 
*/ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2 +/* enum: Standard TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0 +/* enum: Low latency TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1 +/* enum: High packet rate TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3 +/* enum: BIST TXDP firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d +/* enum: TXDP Test firmware image 1 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101 +/* enum: TXDP Test firmware image 2 */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102 +/* enum: TXDP CSR bus test firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial RX PD firmware for early Huntington development (Huntington + * development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 +/* enum: RX PD firmware with approximately Siena-compatible behaviour + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 +/* enum: Virtual switching (full feature) RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3 +/* enum: siena_compat variant RX PD firmware using PM rather than MAC + * (Huntington development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 +/* enum: Low latency RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 +/* enum: Packed stream RX PD production firmware */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6 +/* enum: RX PD firmware handling layer 2 only for high packet rate performance + * tests (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7 +/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe +/* enum: RX PD firmware parsing but not filtering network overlay tunnel + * encapsulations (Medford development only) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12 +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4 +/* enum: reserved value - do not use (may indicate alternative interpretation + * of REV field in future) + */ +#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0 +/* enum: Trivial TX PD firmware for early Huntington development (Huntington + * development 
only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
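The V2 request is issued with the same MC_CMD_GET_CAPABILITIES command code; older firmware simply answers with the shorter V1 response, so a driver has to gate every V2 field on the response length it actually received, as the FLAGS2 comment above says. A minimal sketch, assuming the sfc driver's efx_mcdi_rpc() and MCDI_* buffer helpers (the function name is illustrative):

static int example_get_caps_flags2(struct efx_nic *efx, u32 *flags2)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
        size_t outlen;
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;

        /* Older firmware returns the short V1 layout: FLAGS2 and all
         * later fields are simply absent, not zeroed.
         */
        if (outlen < MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST + 4)
                *flags2 = 0;
        else
                *flags2 = MCDI_DWORD(outbuf, GET_CAPABILITIES_V2_OUT_FLAGS2);
        return 0;
}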
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
+
 /***********************************/
 /* MC_CMD_V2_EXTN
@@ -7475,6 +8158,25 @@
 
 /***********************************/
+/* MC_CMD_VSWITCH_QUERY
+ * read some config of v-switch. For now this command is an empty placeholder.
+ * It may be used to check if a v-switch is connected to a given EVB port (if
+ * not, then the command returns ENOENT).
+ */
+#define MC_CMD_VSWITCH_QUERY 0x63
+
+#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
+#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
+#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
+
+
+/***********************************/
 /* MC_CMD_VPORT_ALLOC
  * allocate a v-port.
  */
@@ -7510,6 +8212,8 @@
 #define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
 #define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1
 /* The number of VLAN tags to insert/remove. An error will be returned if
  * incompatible with the number of VLAN tags specified for the upstream
  * v-switch.
@@ -7561,6 +8265,8 @@
 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
 #define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
 /* The number of VLAN tags to strip on receive */
 #define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
 /* The number of VLAN tags to transparently insert/remove. */
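The new PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED allocation flag pairs with the VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED capability bit above. A rough sketch of an allocation guarded by that capability, assuming the sfc MCDI helpers (the function name and the caller-supplied capability flag are illustrative; UPSTREAM_PORT_ID is the v-adaptor's port handle as elsewhere in this header):

static int example_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id,
                                  bool fw_permits_set_mac)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);

        MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
        /* Only request the new behaviour when GET_CAPABILITIES advertised
         * it; older firmware may not accept the flag bit.
         */
        if (fw_permits_set_mac)
                MCDI_POPULATE_DWORD_1(inbuf, VADAPTOR_ALLOC_IN_FLAGS,
                                      VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
                                      1);
        return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
                            NULL, 0, NULL);
}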
@@ -7639,6 +8345,29 @@
 
 /***********************************/
+/* MC_CMD_VADAPTOR_QUERY
+ * read some config of v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_QUERY 0x61
+
+#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */
+#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
+#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+/* The number of VLAN tags that may still be added */
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+
+
+/***********************************/
 /* MC_CMD_EVB_PORT_ASSIGN
  * assign a port to a PCI function.
  */
@@ -7875,10 +8604,17 @@
 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
 /* The handle of the RSS context */
 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
-/* Hash control flags. The _EN bits are always supported. The _MODE bits only
- * work when the firmware reports ADDITIONAL_RSS_MODES in
- * MC_CMD_GET_CAPABILITIES and override the _EN bits if any of them are not 0.
- * See the RSS_MODE structure for the meaning of the mode bits.
+/* Hash control flags. The _EN bits are always supported, but new modes are
+ * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
+ * in this case, the MODE fields may be set to non-zero values, and will take
+ * effect regardless of the settings of the _EN flags. See the RSS_MODE
+ * structure for the meaning of the mode bits. Drivers must check the
+ * capability before trying to set any _MODE fields, as older firmware will
+ * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In
+ * the case where all the _MODE flags are zero, the _EN flags take effect,
+ * providing backward compatibility for existing drivers. (Setting all _MODE
+ * *and* all _EN flags to zero is valid, to disable RSS spreading for that
+ * particular packet type.)
  */
 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
 #define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
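A sketch of what the paragraph above implies for a driver: set the _MODE fields only when ADDITIONAL_RSS_MODES was reported, and otherwise stay within the low eight _EN bits so old firmware does not fail the request with EINVAL. The UDP_IPV4_RSS_MODE field name and the 0xf value (hash on source/destination address and port, per the RSS_MODE structure) follow the full header and are not shown in this excerpt:

static int example_set_rss_flags(struct efx_nic *efx, u32 rss_context,
                                 bool additional_rss_modes)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);

        MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
                       rss_context);
        if (additional_rss_modes) {
                /* 4-tuple hashing for UDP/IPv4; non-zero _MODE fields
                 * take effect regardless of the _EN bits.
                 */
                MCDI_POPULATE_DWORD_1(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
                                      RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE,
                                      0xf);
        } else {
                /* Legacy firmware: only the _EN bits, keeping FLAGS <= 0xff */
                MCDI_POPULATE_DWORD_1(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
                                      RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN,
                                      1);
        }
        return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf,
                            sizeof(inbuf), NULL, 0, NULL);
}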
@@ -7923,11 +8659,18 @@
 
 /* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
-/* Hash control flags. If any _MODE bits are non-zero (which will only be true
- * when the firmware reports ADDITIONAL_RSS_MODES) then the _EN bits should be
- * disregarded (but are guaranteed to be consistent with the _MODE bits if
- * RSS_CONTEXT_SET_FLAGS has never been called for this context since it was
- * allocated).
+/* Hash control flags. If all _MODE bits are zero (which will always be true
+ * for older firmware which does not report the ADDITIONAL_RSS_MODES
+ * capability), the _EN bits report the state. If any _MODE bits are non-zero
+ * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES)
+ * then the _EN bits should be disregarded, although the _MODE flags are
+ * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS
+ * context and in the case where the _EN flags were used in the SET. This
+ * provides backward compatibility: old drivers will not be attempting to
+ * derive any meaning from the _MODE bits (and can never set them to any value
+ * not representable by the _EN bits); new drivers can always determine the
+ * mode by looking only at the _MODE bits; the value returned by a GET can
+ * always be used for a SET regardless of old/new driver vs. old/new firmware.
  */
 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
 #define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
@@ -8155,6 +8898,74 @@
 
 /***********************************/
+/* MC_CMD_VPORT_RECONFIGURE
+ * Replace VLAN tags and/or MAC addresses of an existing v-port. If the v-port
+ * has already been passed to another function (v-port's user), then that
+ * function will be reset before applying the changes.
+ */
+#define MC_CMD_VPORT_RECONFIGURE 0xeb
+
+#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */
+#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
+/* The handle of the v-port */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+/* Flags requesting what should be changed. */
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
+/* The number of MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+/* MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4
+
+/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
+#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_QUERY
+ * read some config of v-port.
+ */
+#define MC_CMD_EVB_PORT_QUERY 0x62
+
+#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
+#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+
+/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
+#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+/* The number of VLAN tags that may be used on a v-adaptor connected to this
+ * EVB port.
+ */
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+
+
+/***********************************/
 /* MC_CMD_DUMP_BUFTBL_ENTRIES
  * Dump buffer table entries, mainly for command client debug use. Dumps
  * absolute entries, and does not use chunk handles.
All entries must be in @@ -8196,6 +9007,14 @@ #define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0 #define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1 +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2 +/* enum: pad to 64 bytes */ +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0 +/* enum: pad to 128 bytes (Medford only) */ +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1 +/* enum: pad to 256 bytes (Medford only) */ +#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2 /* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */ #define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0 @@ -8217,6 +9036,10 @@ #define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0 #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0 #define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1 +#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1 +#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2 +/* Enum values, see field(s): */ +/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */ /***********************************/ @@ -8788,32 +9611,38 @@ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8 -/* enum: Attenuation (0-15, TBD for Medford) */ +/* enum: Attenuation (0-15, Huntington) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0 -/* enum: CTLE Boost (0-15, TBD for Medford) */ +/* enum: CTLE Boost (0-15, Huntington) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1 -/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive, TBD - * for Medford) +/* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max + * positive, Medford - 0-31) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2 -/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive, TBD for - * Medford) +/* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max + * positive, Medford - 0-31) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3 -/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive, TBD for - * Medford) +/* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max + * positive, Medford - 0-16) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4 -/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive, TBD for - * Medford) +/* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max + * positive, Medford - 0-16) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5 -/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive, TBD for - * Medford) +/* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max + * positive, Medford - 0-16) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6 -/* enum: Edge DFE DLEV (TBD for Medford) */ +/* enum: Edge DFE DLEV (0-128 for Medford) */ #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7 +/* enum: Variable Gain Amplifier (0-15, Medford) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8 +/* enum: CTLE EQ Capacitor (0-15, Medford) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 +/* enum: CTLE EQ Resistor (0-7, Medford) */ +#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3 #define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ @@ -8885,26 +9714,32 @@ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0 
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8 -/* enum: TX Amplitude */ +/* enum: TX Amplitude (Huntington, Medford) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0 -/* enum: De-Emphasis Tap1 Magnitude (0-7) */ +/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1 /* enum: De-Emphasis Tap1 Fine */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2 -/* enum: De-Emphasis Tap2 Magnitude (0-6) */ +/* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3 -/* enum: De-Emphasis Tap2 Fine */ +/* enum: De-Emphasis Tap2 Fine (Huntington) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4 -/* enum: Pre-Emphasis Magnitude */ +/* enum: Pre-Emphasis Magnitude (Huntington) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5 -/* enum: Pre-Emphasis Fine */ +/* enum: Pre-Emphasis Fine (Huntington) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6 -/* enum: TX Slew Rate Coarse control */ +/* enum: TX Slew Rate Coarse control (Huntington) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7 -/* enum: TX Slew Rate Fine control */ +/* enum: TX Slew Rate Fine control (Huntington) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8 -/* enum: TX Termination Impedance control */ +/* enum: TX Termination Impedance control (Huntington) */ #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9 +/* enum: TX Amplitude Fine control (Medford) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa +/* enum: Pre-shoot Tap (Medford) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb +/* enum: De-emphasis Tap (Medford) */ +#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3 #define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */ @@ -9086,8 +9921,16 @@ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5 /* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6 +/* enum: DFE DLev */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7 +/* enum: Figure of Merit */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8 +/* enum: CTLE EQ Capacitor (HF Gain) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9 +/* enum: CTLE EQ Resistor (DC Gain) */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 4 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */ @@ -9096,12 +9939,57 @@ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */ #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x8 /* enum */ -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12 -#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 12 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */ +#define 
MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */ +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14 +#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24 #define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8 +/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num)) +/* Requested operation */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1 +/* Align the arguments to 32 bits */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3 +/* RXEQ Parameter */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8 +/* Enum values, see field(s): */ +/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5 +/* Enum values, see field(s): */ +/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24 +#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8 + +/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */ +#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0 + /* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */ #define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4 /* Requested operation */ @@ -9176,6 +10064,7 @@ /***********************************/ /* MC_CMD_LICENSING * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition + * - not used for V3 licensing */ #define MC_CMD_LICENSING 0xf3 @@ -9220,6 +10109,93 @@ /***********************************/ +/* MC_CMD_LICENSING_V3 + * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition + * - V3 licensing (Medford) + */ +#define MC_CMD_LICENSING_V3 0xd0 + +#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LICENSING_V3_IN msgrequest */ +#define MC_CMD_LICENSING_V3_IN_LEN 4 +/* identifies the type of operation requested */ +#define MC_CMD_LICENSING_V3_IN_OP_OFST 0 +/* enum: re-read and apply licenses after a license key partition update; note + * that this operation returns a zero-length response + */ +#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0 +/* enum: report counts of installed licenses */ +#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1 + +/* MC_CMD_LICENSING_V3_OUT msgresponse */ +#define MC_CMD_LICENSING_V3_OUT_LEN 88 +/* count of keys which are valid */ +#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0 +/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with + * MC_CMD_FC_OP_LICENSE) + */ +#define 
MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+/* count of keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+/* count of keys which are invalid due to being for the wrong node */
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
+/* bitmask of licensed applications */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
+
+
+/***********************************/
+/* MC_CMD_LICENSING_GET_ID_V3
+ * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
+ * partition - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_GET_ID_V3 0xd1
+
+#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
+#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
+
+/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
+/* type of license (eg 3) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+/* length of the license ID (in bytes) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+/* the unique license ID of the adapter */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
+
+
+/***********************************/
 /* MC_CMD_MC2MC_PROXY
  * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
  * This will fail on a single-core system.
@@ -9239,7 +10215,7 @@
 /* MC_CMD_GET_LICENSED_APP_STATE
  * Query the state of an individual licensed application. (Note that the actual
  * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
- * or a reboot of the MC.)
+ * or a reboot of the MC.) Not used for V3 licensing
  */
 #define MC_CMD_GET_LICENSED_APP_STATE 0xf5
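The GET_ID_V3 response above is variable length, so the number of valid LICENSE_ID bytes comes from both LICENSE_ID_LENGTH and the length actually returned by the MCDI transport. A minimal read sketch, assuming the sfc MCDI helpers (function name and error handling are illustrative):

static int example_read_license_id(struct efx_nic *efx, u8 *id, size_t *id_len)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX);
        size_t outlen;
        int rc;

        rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING_GET_ID_V3, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN)
                return -EIO;

        /* Trust neither length on its own: clamp the reported ID length
         * to the bytes actually returned after the 8-byte fixed part.
         */
        *id_len = min_t(size_t,
                        MCDI_DWORD(outbuf, LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH),
                        outlen - MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST);
        memcpy(id, MCDI_PTR(outbuf, LICENSING_GET_ID_V3_OUT_LICENSE_ID), *id_len);
        return 0;
}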
@@ -9261,8 +10237,68 @@
 
 /***********************************/
+/* MC_CMD_GET_LICENSED_V3_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
+
+#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
+/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
+ * mask
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
+ * Query the state of one or more licensed features. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
+
+#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
+/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
+ * more bits set
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
+/* states of these features - bit set for licensed, clear for not licensed */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
+
+
+/***********************************/
 /* MC_CMD_LICENSED_APP_OP
- * Perform an action for an individual licensed application.
+ * Perform an action for an individual licensed application - not used for V3
+ * licensing.
*/ #define MC_CMD_LICENSED_APP_OP 0xf6 @@ -9328,6 +10364,67 @@ /***********************************/ +/* MC_CMD_LICENSED_V3_VALIDATE_APP + * Perform validation for an individual licensed application - V3 licensing + * (Medford) + */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4 + +#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 72 +/* application ID expressed as a single bit mask */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 0 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 0 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 4 +/* challenge for validation */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 8 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 64 + +/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 72 +/* application expiry time */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 0 +/* application expiry units */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 4 +/* enum: expiry units are accounting units */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0 +/* enum: expiry units are calendar days */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1 +/* validation response to challenge */ +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 8 +#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 64 + + +/***********************************/ +/* MC_CMD_LICENSED_V3_MASK_FEATURES + * Mask features - V3 licensing (Medford) + */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5 + +#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL + +/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12 +/* mask to be applied to features to be changed */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0 +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4 +/* whether to turn on or turn off the masked features */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8 +/* enum: turn the features off */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0 +/* enum: turn the features back on */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1 + +/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */ +#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0 + + +/***********************************/ /* MC_CMD_SET_PORT_SNIFF_CONFIG * Configure RX port sniffing for the physical port associated with the calling * function. Only a privileged function may change the port sniffing @@ -9696,12 +10793,27 @@ #define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */ #define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */ #define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */ -#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 /* enum */ +/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. 
*/
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
 #define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
 #define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
 #define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
 #define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
 #define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Allows setting the TX packets' source MAC address to any arbitrary
+ * MAC address.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
+/* enum: Privilege that allows a Function to change the MAC address configured
+ * in its associated vAdapter/vPort.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
+/* enum: Privilege that allows a Function to install filters that specify VLANs
+ * that are not in the permit list for the associated vPort. This privilege is
+ * primarily to support ESX where vPorts are created that restrict traffic to
+ * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
 /* enum: Set this bit to indicate that a new privilege mask is to be set,
  * otherwise the command will only read the existing mask.
  */
@@ -9951,7 +11063,7 @@
 /* Sector type */
 #define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
 /* Enum values, see field(s): */
-/* MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
+/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
 /* Sector size */
 #define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
 /* Sector data */
@@ -10067,4 +11179,123 @@
 #define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
 
+
+/***********************************/
+/* MC_CMD_EXEC_SIGNED
+ * Check the CMAC of the contents of IMEM and DMEM against the value supplied
+ * and if correct begin execution from the start of IMEM. The caller supplies a
+ * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC
+ * computation runs from the start of IMEM, and from the start of DMEM + 16k,
+ * to match flash booting. The command will respond with EINVAL if the CMAC
+ * does not match, otherwise it will respond with success before it jumps to
+ * IMEM.
+ */
+#define MC_CMD_EXEC_SIGNED 0x10c
+
+#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_EXEC_SIGNED_IN msgrequest */
+#define MC_CMD_EXEC_SIGNED_IN_LEN 28
+/* the length of code to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+/* the length of data to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+/* the XPM sector containing the key to use */
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+/* the expected CMAC value */
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
+
+/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
+#define MC_CMD_EXEC_SIGNED_OUT_LEN 0
+
+
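A sketch of issuing EXEC_SIGNED once IMEM and DMEM have been loaded (see MC_CMD_PREPARE_SIGNED below for the scrub step that precedes the upload). The 16-byte CMAC is assumed to come from the caller's signing tooling; the sfc MCDI helpers are assumed and the function name is illustrative:

static int example_exec_signed(struct efx_nic *efx, u32 codelen, u32 datalen,
                               u32 keysector, const u8 *cmac)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_EXEC_SIGNED_IN_LEN);

        MCDI_SET_DWORD(inbuf, EXEC_SIGNED_IN_CODELEN, codelen);
        MCDI_SET_DWORD(inbuf, EXEC_SIGNED_IN_DATALEN, datalen);
        MCDI_SET_DWORD(inbuf, EXEC_SIGNED_IN_KEYSECTOR, keysector);
        memcpy(MCDI_PTR(inbuf, EXEC_SIGNED_IN_CMAC), cmac,
               MC_CMD_EXEC_SIGNED_IN_CMAC_LEN);
        /* Success means the MC is about to jump into IMEM, so nothing is
         * expected beyond the zero-length response itself.
         */
        return efx_mcdi_rpc(efx, MC_CMD_EXEC_SIGNED, inbuf, sizeof(inbuf),
                            NULL, 0, NULL);
}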
+ */ +#define MC_CMD_PREPARE_SIGNED 0x10d + +#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_PREPARE_SIGNED_IN msgrequest */ +#define MC_CMD_PREPARE_SIGNED_IN_LEN 4 +/* the length of data area to clear */ +#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0 + +/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */ +#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0 + + +/***********************************/ +/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS + * Configure UDP ports for tunnel encapsulation hardware acceleration. The + * parser-dispatcher will attempt to parse traffic on these ports as tunnel + * encapsulation PDUs and filter them using the tunnel encapsulation filter + * chain rather than the standard filter chain. Note that this command can + * cause all functions to see a reset. (Available on Medford only.) + */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117 + +#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num)) +/* Flags */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1 +/* The number of entries in the ENTRIES array */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2 +/* Entries defining the UDP port to protocol mapping, each laid out as a + * TUNNEL_ENCAP_UDP_PORT_ENTRY + */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16 + +/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2 +/* Flags */ +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0 +#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1 + + +/***********************************/ +/* MC_CMD_RX_BALANCING + * Configure a port upconverter to distribute the packets on both RX engines. + * Packets are distributed based on a table with the destination vFIFO. The + * index of the table is a hash of source and destination of IPV4 and VLAN + * priority. 
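Before the RX_BALANCING definitions continue below, a quick illustration of the variable-length SET_TUNNEL_ENCAP_UDP_PORTS request defined above: the length is 4 + 4*num, with a 16-bit flags word at offset 0, a 16-bit entry count at offset 2, and 4-byte entries from offset 4. The internal TUNNEL_ENCAP_UDP_PORT_ENTRY layout is not expanded here, so entries are treated as opaque 32-bit words; all tunnel_ports_* names are local to this sketch:

#include <stddef.h>
#include <stdint.h>

#define TUNNEL_PORTS_IN_LEN(num) (4 + 4 * (num)) /* LENMIN 4, LENMAX 68 */
#define TUNNEL_PORTS_MAXNUM 16

/* Returns the request length in bytes, or 0 if too many entries. */
static size_t tunnel_ports_build(uint8_t *req, uint16_t flags,
				 const uint32_t *entries, uint16_t num)
{
	size_t i;

	if (num > TUNNEL_PORTS_MAXNUM)
		return 0;

	req[0] = flags & 0xff;	/* FLAGS_OFST 0, FLAGS_LEN 2 */
	req[1] = flags >> 8;
	req[2] = num & 0xff;	/* NUM_ENTRIES_OFST 2, NUM_ENTRIES_LEN 2 */
	req[3] = num >> 8;

	for (i = 0; i < num; i++) {
		/* ENTRIES_OFST 4, one 4-byte little-endian entry each */
		req[4 + 4 * i + 0] = entries[i] & 0xff;
		req[4 + 4 * i + 1] = (entries[i] >> 8) & 0xff;
		req[4 + 4 * i + 2] = (entries[i] >> 16) & 0xff;
		req[4 + 4 * i + 3] = (entries[i] >> 24) & 0xff;
	}
	return TUNNEL_PORTS_IN_LEN(num);
}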
+ */ +#define MC_CMD_RX_BALANCING 0x118 + +#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN + +/* MC_CMD_RX_BALANCING_IN msgrequest */ +#define MC_CMD_RX_BALANCING_IN_LEN 4 +/* The RX port whose upconverter table will be modified */ +#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0 +#define MC_CMD_RX_BALANCING_IN_PORT_LEN 1 +/* The VLAN priority associated to the table index and vFIFO */ +#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 1 +#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 1 +/* The resulting bit of SRC^DST for indexing the table */ +#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 2 +#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 1 +/* The RX engine to which the vFIFO in the table entry will point */ +#define MC_CMD_RX_BALANCING_IN_ENG_OFST 3 +#define MC_CMD_RX_BALANCING_IN_ENG_LEN 1 + +/* MC_CMD_RX_BALANCING_OUT msgresponse */ +#define MC_CMD_RX_BALANCING_OUT_LEN 0 + + #endif /* MCDI_PCOL_H */ diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index d13ddf970..9ff062a36 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -868,6 +868,7 @@ struct vfdi_status; * be held to modify it. * @port_initialized: Port initialized? * @net_dev: Operating system network device. Consider holding the rtnl lock + * @fixed_features: Features which cannot be turned off * @stats_buffer: DMA buffer for statistics * @phy_type: PHY type * @phy_op: PHY interface @@ -916,7 +917,6 @@ struct vfdi_status; * @stats_lock: Statistics update lock. Must be held when calling * efx_nic_type::{update,start,stop}_stats. * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb - * @mc_promisc: Whether in multicast promiscuous mode when last changed * * This is stored in the private area of the &struct net_device. */ @@ -1008,6 +1008,8 @@ struct efx_nic { bool port_initialized; struct net_device *net_dev; + netdev_features_t fixed_features; + struct efx_buffer stats_buffer; u64 rx_nodesc_drops_total; u64 rx_nodesc_drops_while_down; @@ -1065,7 +1067,6 @@ struct efx_nic { int last_irq_cpu; spinlock_t stats_lock; atomic_t n_rx_noskb_drops; - bool mc_promisc; }; static inline int efx_dev_registered(struct efx_nic *efx) @@ -1333,6 +1334,8 @@ struct efx_nic_type { int (*ptp_set_ts_config)(struct efx_nic *efx, struct hwtstamp_config *init); int (*sriov_configure)(struct efx_nic *efx, int num_vfs); + int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid); + int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid); int (*sriov_init)(struct efx_nic *efx); void (*sriov_fini)(struct efx_nic *efx); bool (*sriov_wanted)(struct efx_nic *efx); @@ -1521,4 +1524,16 @@ static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; } +/* Get all supported features. + * If a feature is not fixed, it is present in hw_features. + * If a feature is fixed, it is not present in hw_features, but + * is always present in features. 
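Before the helper itself, which follows, the split this comment describes can be modelled with plain bitmasks; a standalone sketch with arbitrary bits standing in for NETIF_F_* flags:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t fixed_features = UINT64_C(1) << 0; /* forced on, never user-toggleable */
	uint64_t hw_features = UINT64_C(1) << 1 | UINT64_C(1) << 2; /* toggleable */
	uint64_t features = fixed_features | hw_features; /* currently enabled */

	/* As in efx_supported_features(): supported = features | hw_features */
	uint64_t supported = features | hw_features;

	assert(supported & fixed_features);	 /* fixed implies supported */
	assert(!(hw_features & fixed_features)); /* fixed is absent from hw_features */
	return 0;
}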
+ */ +static inline netdev_features_t efx_supported_features(const struct efx_nic *efx) +{ + const struct net_device *net_dev = efx->net_dev; + + return net_dev->features | net_dev->hw_features; +} + #endif /* EFX_NET_DRIVER_H */ diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 0b536e27d..96944c3c9 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -519,6 +519,9 @@ enum { #ifdef CONFIG_SFC_SRIOV * @vf: Pointer to VF data structure #endif + * @vport_mac: The MAC address on the vport, only for PFs; VFs will be zero + * @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock. + * @vlan_lock: Lock to serialize access to vlan_list. */ struct efx_ef10_nic_data { struct efx_buffer mcdi_buf; @@ -550,6 +553,8 @@ struct efx_ef10_nic_data { struct ef10_vf *vf; #endif u8 vport_mac[ETH_ALEN]; + struct list_head vlan_list; + struct mutex vlan_lock; }; int efx_init_sriov(void); diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index b69d0e1e8..503a3b6dc 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2195,6 +2195,12 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device * } } +static const struct acpi_device_id smc91x_acpi_match[] = { + { "LNRO0003", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, smc91x_acpi_match); + #if IS_BUILTIN(CONFIG_OF) static const struct of_device_id smc91x_match[] = { { .compatible = "smsc,lan91c94", }, @@ -2281,7 +2287,6 @@ static int smc_drv_probe(struct platform_device *pdev) #if IS_BUILTIN(CONFIG_OF) match = of_match_device(of_match_ptr(smc91x_match), &pdev->dev); if (match) { - struct device_node *np = pdev->dev.of_node; u32 val; /* Optional pwrdwn GPIO configured? */ @@ -2307,7 +2312,8 @@ static int smc_drv_probe(struct platform_device *pdev) usleep_range(750, 1000); /* Combination of IO widths supported, default to 16-bit */ - if (!of_property_read_u32(np, "reg-io-width", &val)) { + if (!device_property_read_u32(&pdev->dev, "reg-io-width", + &val)) { if (val & 1) lp->cfg.flags |= SMC91X_USE_8BIT; if ((val == 0) || (val & 2)) @@ -2485,7 +2491,8 @@ static struct platform_driver smc_driver = { .driver = { .name = CARDNAME, .pm = &smc_drv_pm_ops, - .of_match_table = of_match_ptr(smc91x_match), + .of_match_table = of_match_ptr(smc91x_match), + .acpi_match_table = smc91x_acpi_match, }, }; diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index e17671c9d..ea8465467 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -470,7 +470,9 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma, #endif #if ! 
SMC_CAN_USE_8BIT +#undef SMC_inb #define SMC_inb(ioaddr, reg) ({ BUG(); 0; }) +#undef SMC_outb #define SMC_outb(x, ioaddr, reg) BUG() #define SMC_insb(a, r, p, l) BUG() #define SMC_outsb(a, r, p, l) BUG() diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index b5ab5e120..4f8910b7d 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -114,7 +114,6 @@ struct smsc911x_data { /* spinlock to ensure register accesses are serialised */ spinlock_t dev_lock; - struct phy_device *phy_dev; struct mii_bus *mii_bus; unsigned int using_extphy; int last_duplex; @@ -833,7 +832,7 @@ static int smsc911x_phy_reset(struct smsc911x_data *pdata) static int smsc911x_phy_loopbacktest(struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); - struct phy_device *phy_dev = pdata->phy_dev; + struct phy_device *phy_dev = dev->phydev; int result = -EIO; unsigned int i, val; unsigned long flags; @@ -903,7 +902,8 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev) static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata) { - struct phy_device *phy_dev = pdata->phy_dev; + struct net_device *ndev = pdata->dev; + struct phy_device *phy_dev = ndev->phydev; u32 afc = smsc911x_reg_read(pdata, AFC_CFG); u32 flow; unsigned long flags; @@ -944,7 +944,7 @@ static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata) static void smsc911x_phy_adjust_link(struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); - struct phy_device *phy_dev = pdata->phy_dev; + struct phy_device *phy_dev = dev->phydev; unsigned long flags; int carrier; @@ -1037,7 +1037,6 @@ static int smsc911x_mii_probe(struct net_device *dev) SUPPORTED_Asym_Pause); phydev->advertising = phydev->supported; - pdata->phy_dev = phydev; pdata->last_duplex = -1; pdata->last_carrier = -1; @@ -1100,15 +1099,8 @@ static int smsc911x_mii_init(struct platform_device *pdev, goto err_out_free_bus_2; } - if (smsc911x_mii_probe(dev) < 0) { - SMSC_WARN(pdata, probe, "Error registering mii bus"); - goto err_out_unregister_bus_3; - } - return 0; -err_out_unregister_bus_3: - mdiobus_unregister(pdata->mii_bus); err_out_free_bus_2: mdiobus_free(pdata->mii_bus); err_out_1: @@ -1338,9 +1330,11 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata) static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata) { + struct net_device *ndev = pdata->dev; + struct phy_device *phy_dev = ndev->phydev; int rc = 0; - if (!pdata->phy_dev) + if (!phy_dev) return rc; /* If the internal PHY is in General Power-Down mode, all, except the @@ -1350,7 +1344,7 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata) * In that case, clear the bit 0.11, so the PHY powers up and we can * access to the phy registers. */ - rc = phy_read(pdata->phy_dev, MII_BMCR); + rc = phy_read(phy_dev, MII_BMCR); if (rc < 0) { SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); return rc; @@ -1360,7 +1354,7 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata) * disable the general power down-mode. 
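 *
 * (Illustrative aside: "bit 0.11" is MII register 0 (BMCR), bit 11, the
 * IEEE 802.3 power-down control, BMCR_PDOWN in linux/mii.h. The generic
 * phylib read-modify-write pattern is:
 *
 *	rc = phy_read(phy_dev, MII_BMCR);
 *	if (rc >= 0 && (rc & BMCR_PDOWN))
 *		phy_write(phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
 *
 * which is what the code below does, with error reporting added.)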
*/ if (rc & BMCR_PDOWN) { - rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN); + rc = phy_write(phy_dev, MII_BMCR, rc & ~BMCR_PDOWN); if (rc < 0) { SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); return rc; @@ -1374,12 +1368,14 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata) static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) { + struct net_device *ndev = pdata->dev; + struct phy_device *phy_dev = ndev->phydev; int rc = 0; - if (!pdata->phy_dev) + if (!phy_dev) return rc; - rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS); + rc = phy_read(phy_dev, MII_LAN83C185_CTRL_STATUS); if (rc < 0) { SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); @@ -1389,7 +1385,7 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) /* Only disable if energy detect mode is already enabled */ if (rc & MII_LAN83C185_EDPWRDOWN) { /* Disable energy detect mode for this SMSC Transceivers */ - rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, + rc = phy_write(phy_dev, MII_LAN83C185_CTRL_STATUS, rc & (~MII_LAN83C185_EDPWRDOWN)); if (rc < 0) { @@ -1405,12 +1401,14 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata) { + struct net_device *ndev = pdata->dev; + struct phy_device *phy_dev = ndev->phydev; int rc = 0; - if (!pdata->phy_dev) + if (!phy_dev) return rc; - rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS); + rc = phy_read(phy_dev, MII_LAN83C185_CTRL_STATUS); if (rc < 0) { SMSC_WARN(pdata, drv, "Failed reading PHY control reg"); @@ -1420,7 +1418,7 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata) /* Only enable if energy detect mode is already disabled */ if (!(rc & MII_LAN83C185_EDPWRDOWN)) { /* Enable energy detect mode for this SMSC Transceivers */ - rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, + rc = phy_write(phy_dev, MII_LAN83C185_CTRL_STATUS, rc | MII_LAN83C185_EDPWRDOWN); if (rc < 0) { @@ -1509,23 +1507,90 @@ static void smsc911x_disable_irq_chip(struct net_device *dev) smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); } +static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct smsc911x_data *pdata = netdev_priv(dev); + u32 intsts = smsc911x_reg_read(pdata, INT_STS); + u32 inten = smsc911x_reg_read(pdata, INT_EN); + int serviced = IRQ_NONE; + u32 temp; + + if (unlikely(intsts & inten & INT_STS_SW_INT_)) { + temp = smsc911x_reg_read(pdata, INT_EN); + temp &= (~INT_EN_SW_INT_EN_); + smsc911x_reg_write(pdata, INT_EN, temp); + smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_); + pdata->software_irq_signal = 1; + smp_wmb(); + serviced = IRQ_HANDLED; + } + + if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) { + /* Called when there is a multicast update scheduled and + * it is now safe to complete the update */ + SMSC_TRACE(pdata, intr, "RX Stop interrupt"); + smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); + if (pdata->multicast_update_pending) + smsc911x_rx_multicast_update_workaround(pdata); + serviced = IRQ_HANDLED; + } + + if (intsts & inten & INT_STS_TDFA_) { + temp = smsc911x_reg_read(pdata, FIFO_INT); + temp |= FIFO_INT_TX_AVAIL_LEVEL_; + smsc911x_reg_write(pdata, FIFO_INT, temp); + smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_); + netif_wake_queue(dev); + serviced = IRQ_HANDLED; + } + + if (unlikely(intsts & inten & INT_STS_RXE_)) { + SMSC_TRACE(pdata, intr, "RX Error interrupt"); + 
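+		/* Note: INT_STS bits on the LAN911x are write-one-to-clear,
+		 * so writing the handled bit back (INT_STS_RXE_ below)
+		 * acknowledges the interrupt.
+		 */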
smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_); + serviced = IRQ_HANDLED; + } + + if (likely(intsts & inten & INT_STS_RSFL_)) { + if (likely(napi_schedule_prep(&pdata->napi))) { + /* Disable Rx interrupts */ + temp = smsc911x_reg_read(pdata, INT_EN); + temp &= (~INT_EN_RSFL_EN_); + smsc911x_reg_write(pdata, INT_EN, temp); + /* Schedule a NAPI poll */ + __napi_schedule(&pdata->napi); + } else { + SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed"); + } + serviced = IRQ_HANDLED; + } + + return serviced; +} + static int smsc911x_open(struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); unsigned int timeout; unsigned int temp; unsigned int intcfg; + int retval; + int irq_flags; - /* if the phy is not yet registered, retry later*/ - if (!pdata->phy_dev) { - SMSC_WARN(pdata, hw, "phy_dev is NULL"); - return -EAGAIN; + /* find and start the given phy */ + if (!dev->phydev) { + retval = smsc911x_mii_probe(dev); + if (retval < 0) { + SMSC_WARN(pdata, probe, "Error starting phy"); + goto out; + } } /* Reset the LAN911x */ - if (smsc911x_soft_reset(pdata)) { + retval = smsc911x_soft_reset(pdata); + if (retval) { SMSC_WARN(pdata, hw, "soft reset failed"); - return -EIO; + goto mii_free_out; } smsc911x_reg_write(pdata, HW_CFG, 0x00050000); @@ -1581,6 +1646,15 @@ static int smsc911x_open(struct net_device *dev) pdata->software_irq_signal = 0; smp_wmb(); + irq_flags = irq_get_trigger_type(dev->irq); + retval = request_irq(dev->irq, smsc911x_irqhandler, + irq_flags | IRQF_SHARED, dev->name, dev); + if (retval) { + SMSC_WARN(pdata, probe, + "Unable to claim requested irq: %d", dev->irq); + goto mii_free_out; + } + temp = smsc911x_reg_read(pdata, INT_EN); temp |= INT_EN_SW_INT_EN_; smsc911x_reg_write(pdata, INT_EN, temp); @@ -1595,7 +1669,8 @@ static int smsc911x_open(struct net_device *dev) if (!pdata->software_irq_signal) { netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n", dev->irq); - return -ENODEV; + retval = -ENODEV; + goto irq_stop_out; } SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d", dev->irq); @@ -1608,7 +1683,7 @@ static int smsc911x_open(struct net_device *dev) pdata->last_carrier = -1; /* Bring the PHY up */ - phy_start(pdata->phy_dev); + phy_start(dev->phydev); temp = smsc911x_reg_read(pdata, HW_CFG); /* Preserve TX FIFO size and external PHY configuration */ @@ -1641,6 +1716,14 @@ static int smsc911x_open(struct net_device *dev) netif_start_queue(dev); return 0; + +irq_stop_out: + free_irq(dev->irq, dev); +mii_free_out: + phy_disconnect(dev->phydev); + dev->phydev = NULL; +out: + return retval; } /* Entry point for stopping the interface */ @@ -1662,9 +1745,15 @@ static int smsc911x_stop(struct net_device *dev) dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP); smsc911x_tx_update_txcounters(dev); + free_irq(dev->irq, dev); + /* Bring the PHY down */ - if (pdata->phy_dev) - phy_stop(pdata->phy_dev); + if (dev->phydev) { + phy_stop(dev->phydev); + phy_disconnect(dev->phydev); + dev->phydev = NULL; + } + netif_carrier_off(dev); SMSC_TRACE(pdata, ifdown, "Interface stopped"); return 0; @@ -1806,67 +1895,6 @@ static void smsc911x_set_multicast_list(struct net_device *dev) spin_unlock_irqrestore(&pdata->mac_lock, flags); } -static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct smsc911x_data *pdata = netdev_priv(dev); - u32 intsts = smsc911x_reg_read(pdata, INT_STS); - u32 inten = smsc911x_reg_read(pdata, INT_EN); - int serviced = IRQ_NONE; - u32 temp; - - if (unlikely(intsts & inten & 
INT_STS_SW_INT_)) { - temp = smsc911x_reg_read(pdata, INT_EN); - temp &= (~INT_EN_SW_INT_EN_); - smsc911x_reg_write(pdata, INT_EN, temp); - smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_); - pdata->software_irq_signal = 1; - smp_wmb(); - serviced = IRQ_HANDLED; - } - - if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) { - /* Called when there is a multicast update scheduled and - * it is now safe to complete the update */ - SMSC_TRACE(pdata, intr, "RX Stop interrupt"); - smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_); - if (pdata->multicast_update_pending) - smsc911x_rx_multicast_update_workaround(pdata); - serviced = IRQ_HANDLED; - } - - if (intsts & inten & INT_STS_TDFA_) { - temp = smsc911x_reg_read(pdata, FIFO_INT); - temp |= FIFO_INT_TX_AVAIL_LEVEL_; - smsc911x_reg_write(pdata, FIFO_INT, temp); - smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_); - netif_wake_queue(dev); - serviced = IRQ_HANDLED; - } - - if (unlikely(intsts & inten & INT_STS_RXE_)) { - SMSC_TRACE(pdata, intr, "RX Error interrupt"); - smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_); - serviced = IRQ_HANDLED; - } - - if (likely(intsts & inten & INT_STS_RSFL_)) { - if (likely(napi_schedule_prep(&pdata->napi))) { - /* Disable Rx interrupts */ - temp = smsc911x_reg_read(pdata, INT_EN); - temp &= (~INT_EN_RSFL_EN_); - smsc911x_reg_write(pdata, INT_EN, temp); - /* Schedule a NAPI poll */ - __napi_schedule(&pdata->napi); - } else { - SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed"); - } - serviced = IRQ_HANDLED; - } - - return serviced; -} - #ifdef CONFIG_NET_POLL_CONTROLLER static void smsc911x_poll_controller(struct net_device *dev) { @@ -1904,30 +1932,10 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p) /* Standard ioctls for mii-tool */ static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct smsc911x_data *pdata = netdev_priv(dev); - - if (!netif_running(dev) || !pdata->phy_dev) + if (!netif_running(dev) || !dev->phydev) return -EINVAL; - return phy_mii_ioctl(pdata->phy_dev, ifr, cmd); -} - -static int -smsc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - - cmd->maxtxpkt = 1; - cmd->maxrxpkt = 1; - return phy_ethtool_gset(pdata->phy_dev, cmd); -} - -static int -smsc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - - return phy_ethtool_sset(pdata->phy_dev, cmd); + return phy_mii_ioctl(dev->phydev, ifr, cmd); } static void smsc911x_ethtool_getdrvinfo(struct net_device *dev, @@ -1941,9 +1949,7 @@ static void smsc911x_ethtool_getdrvinfo(struct net_device *dev, static int smsc911x_ethtool_nwayreset(struct net_device *dev) { - struct smsc911x_data *pdata = netdev_priv(dev); - - return phy_start_aneg(pdata->phy_dev); + return phy_start_aneg(dev->phydev); } static u32 smsc911x_ethtool_getmsglevel(struct net_device *dev) @@ -1969,7 +1975,7 @@ smsc911x_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, void *buf) { struct smsc911x_data *pdata = netdev_priv(dev); - struct phy_device *phy_dev = pdata->phy_dev; + struct phy_device *phy_dev = dev->phydev; unsigned long flags; unsigned int i; unsigned int j = 0; @@ -2115,8 +2121,6 @@ static int smsc911x_ethtool_set_eeprom(struct net_device *dev, } static const struct ethtool_ops smsc911x_ethtool_ops = { - .get_settings = smsc911x_ethtool_getsettings, - .set_settings = smsc911x_ethtool_setsettings, .get_link = ethtool_op_get_link, 
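	/* The legacy get_settings/set_settings hooks removed above are
	 * superseded by the phylib-backed get_link_ksettings and
	 * set_link_ksettings helpers added at the end of this table.
	 */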
.get_drvinfo = smsc911x_ethtool_getdrvinfo, .nway_reset = smsc911x_ethtool_nwayreset, @@ -2128,6 +2132,8 @@ static const struct ethtool_ops smsc911x_ethtool_ops = { .get_eeprom = smsc911x_ethtool_get_eeprom, .set_eeprom = smsc911x_ethtool_set_eeprom, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static const struct net_device_ops smsc911x_netdev_ops = { @@ -2308,17 +2314,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev) pdata = netdev_priv(dev); BUG_ON(!pdata); BUG_ON(!pdata->ioaddr); - BUG_ON(!pdata->phy_dev); + WARN_ON(dev->phydev); SMSC_TRACE(pdata, ifdown, "Stopping driver"); - phy_disconnect(pdata->phy_dev); - pdata->phy_dev = NULL; mdiobus_unregister(pdata->mii_bus); mdiobus_free(pdata->mii_bus); unregister_netdev(dev); - free_irq(dev->irq, dev); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smsc911x-memory"); if (!res) @@ -2403,8 +2406,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev) struct smsc911x_data *pdata; struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev); struct resource *res; - unsigned int intcfg = 0; - int res_size, irq, irq_flags; + int res_size, irq; int retval; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, @@ -2443,7 +2445,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev) pdata = netdev_priv(dev); dev->irq = irq; - irq_flags = irq_get_trigger_type(irq); pdata->ioaddr = ioremap_nocache(res->start, res_size); pdata->dev = dev; @@ -2490,43 +2491,23 @@ static int smsc911x_drv_probe(struct platform_device *pdev) if (retval < 0) goto out_disable_resources; - /* configure irq polarity and type before connecting isr */ - if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH) - intcfg |= INT_CFG_IRQ_POL_; - - if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL) - intcfg |= INT_CFG_IRQ_TYPE_; - - smsc911x_reg_write(pdata, INT_CFG, intcfg); - - /* Ensure interrupts are globally disabled before connecting ISR */ - smsc911x_disable_irq_chip(dev); + netif_carrier_off(dev); - retval = request_irq(dev->irq, smsc911x_irqhandler, - irq_flags | IRQF_SHARED, dev->name, dev); + retval = smsc911x_mii_init(pdev, dev); if (retval) { - SMSC_WARN(pdata, probe, - "Unable to claim requested irq: %d", dev->irq); + SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); goto out_disable_resources; } - netif_carrier_off(dev); - retval = register_netdev(dev); if (retval) { SMSC_WARN(pdata, probe, "Error %i registering device", retval); - goto out_free_irq; + goto out_disable_resources; } else { SMSC_TRACE(pdata, probe, "Network interface: \"%s\"", dev->name); } - retval = smsc911x_mii_init(pdev, dev); - if (retval) { - SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); - goto out_unregister_netdev_5; - } - spin_lock_irq(&pdata->mac_lock); /* Check if mac address has been specified when bringing interface up */ @@ -2562,10 +2543,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev) return 0; -out_unregister_netdev_5: - unregister_netdev(dev); -out_free_irq: - free_irq(dev->irq, dev); out_disable_resources: pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 8594b9e8b..b7bfed4bc 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -76,7 +76,6 @@ struct smsc9420_pdata { bool rx_csum; u32 msg_enable; - struct 
phy_device *phy_dev; struct mii_bus *mii_bus; int last_duplex; int last_carrier; @@ -226,36 +225,10 @@ static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd) /* Standard ioctls for mii-tool */ static int smsc9420_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct smsc9420_pdata *pd = netdev_priv(dev); - - if (!netif_running(dev) || !pd->phy_dev) + if (!netif_running(dev) || !dev->phydev) return -EINVAL; - return phy_mii_ioctl(pd->phy_dev, ifr, cmd); -} - -static int smsc9420_ethtool_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - - if (!pd->phy_dev) - return -ENODEV; - - cmd->maxtxpkt = 1; - cmd->maxrxpkt = 1; - return phy_ethtool_gset(pd->phy_dev, cmd); -} - -static int smsc9420_ethtool_set_settings(struct net_device *dev, - struct ethtool_cmd *cmd) -{ - struct smsc9420_pdata *pd = netdev_priv(dev); - - if (!pd->phy_dev) - return -ENODEV; - - return phy_ethtool_sset(pd->phy_dev, cmd); + return phy_mii_ioctl(dev->phydev, ifr, cmd); } static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev, @@ -283,12 +256,10 @@ static void smsc9420_ethtool_set_msglevel(struct net_device *netdev, u32 data) static int smsc9420_ethtool_nway_reset(struct net_device *netdev) { - struct smsc9420_pdata *pd = netdev_priv(netdev); - - if (!pd->phy_dev) + if (!netdev->phydev) return -ENODEV; - return phy_start_aneg(pd->phy_dev); + return phy_start_aneg(netdev->phydev); } static int smsc9420_ethtool_getregslen(struct net_device *dev) @@ -302,7 +273,7 @@ smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs, void *buf) { struct smsc9420_pdata *pd = netdev_priv(dev); - struct phy_device *phy_dev = pd->phy_dev; + struct phy_device *phy_dev = dev->phydev; unsigned int i, j = 0; u32 *data = buf; @@ -443,8 +414,6 @@ static int smsc9420_ethtool_set_eeprom(struct net_device *dev, } static const struct ethtool_ops smsc9420_ethtool_ops = { - .get_settings = smsc9420_ethtool_get_settings, - .set_settings = smsc9420_ethtool_set_settings, .get_drvinfo = smsc9420_ethtool_get_drvinfo, .get_msglevel = smsc9420_ethtool_get_msglevel, .set_msglevel = smsc9420_ethtool_set_msglevel, @@ -456,6 +425,8 @@ static const struct ethtool_ops smsc9420_ethtool_ops = { .get_regs_len = smsc9420_ethtool_getregslen, .get_regs = smsc9420_ethtool_getregs, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; /* Sets the device MAC address to dev_addr */ @@ -736,7 +707,7 @@ static int smsc9420_stop(struct net_device *dev) ulong flags; BUG_ON(!pd); - BUG_ON(!pd->phy_dev); + BUG_ON(!dev->phydev); /* disable master interrupt */ spin_lock_irqsave(&pd->int_lock, flags); @@ -757,10 +728,9 @@ static int smsc9420_stop(struct net_device *dev) smsc9420_dmac_soft_reset(pd); - phy_stop(pd->phy_dev); + phy_stop(dev->phydev); - phy_disconnect(pd->phy_dev); - pd->phy_dev = NULL; + phy_disconnect(dev->phydev); mdiobus_unregister(pd->mii_bus); mdiobus_free(pd->mii_bus); @@ -1093,7 +1063,8 @@ static void smsc9420_set_multicast_list(struct net_device *dev) static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd) { - struct phy_device *phy_dev = pd->phy_dev; + struct net_device *dev = pd->dev; + struct phy_device *phy_dev = dev->phydev; u32 flow; if (phy_dev->duplex == DUPLEX_FULL) { @@ -1122,7 +1093,7 @@ static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd) static void smsc9420_phy_adjust_link(struct net_device *dev) { 
struct smsc9420_pdata *pd = netdev_priv(dev); - struct phy_device *phy_dev = pd->phy_dev; + struct phy_device *phy_dev = dev->phydev; int carrier; if (phy_dev->duplex != pd->last_duplex) { @@ -1155,7 +1126,7 @@ static int smsc9420_mii_probe(struct net_device *dev) struct smsc9420_pdata *pd = netdev_priv(dev); struct phy_device *phydev = NULL; - BUG_ON(pd->phy_dev); + BUG_ON(dev->phydev); /* Device only supports internal PHY at address 1 */ phydev = mdiobus_get_phy(pd->mii_bus, 1); @@ -1179,7 +1150,6 @@ static int smsc9420_mii_probe(struct net_device *dev) phy_attached_info(phydev); - pd->phy_dev = phydev; pd->last_duplex = -1; pd->last_carrier = -1; @@ -1440,7 +1410,7 @@ static int smsc9420_open(struct net_device *dev) } /* Bring the PHY up */ - phy_start(pd->phy_dev); + phy_start(dev->phydev); napi_enable(&pd->napi); diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index cec147d1d..8f06a6621 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -40,7 +40,7 @@ config DWMAC_GENERIC config DWMAC_IPQ806X tristate "QCA IPQ806x DWMAC support" default ARCH_QCOM - depends on OF + depends on OF && (ARCH_QCOM || COMPILE_TEST) select MFD_SYSCON help Support for QCA IPQ806X DWMAC Ethernet. @@ -53,7 +53,7 @@ config DWMAC_IPQ806X config DWMAC_LPC18XX tristate "NXP LPC18xx/43xx DWMAC support" default ARCH_LPC18XX - depends on OF + depends on OF && (ARCH_LPC18XX || COMPILE_TEST) select MFD_SYSCON ---help--- Support for NXP LPC18xx/43xx DWMAC Ethernet. @@ -61,7 +61,7 @@ config DWMAC_LPC18XX config DWMAC_MESON tristate "Amlogic Meson dwmac support" default ARCH_MESON - depends on OF + depends on OF && (ARCH_MESON || COMPILE_TEST) help Support for Ethernet controller on Amlogic Meson SoCs. @@ -72,7 +72,7 @@ config DWMAC_MESON config DWMAC_ROCKCHIP tristate "Rockchip dwmac support" default ARCH_ROCKCHIP - depends on OF + depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST) select MFD_SYSCON help Support for Ethernet controller on Rockchip RK3288 SoC. @@ -83,7 +83,7 @@ config DWMAC_ROCKCHIP config DWMAC_SOCFPGA tristate "SOCFPGA dwmac support" default ARCH_SOCFPGA - depends on OF + depends on OF && (ARCH_SOCFPGA || COMPILE_TEST) select MFD_SYSCON help Support for ethernet controller on Altera SOCFPGA @@ -95,7 +95,7 @@ config DWMAC_SOCFPGA config DWMAC_STI tristate "STi GMAC support" default ARCH_STI - depends on OF + depends on OF && (ARCH_STI || COMPILE_TEST) select MFD_SYSCON ---help--- Support for ethernet controller on STi SOCs. @@ -107,7 +107,7 @@ config DWMAC_STI config DWMAC_SUNXI tristate "Allwinner GMAC support" default ARCH_SUNXI - depends on OF + depends on OF && (ARCH_SUNXI || COMPILE_TEST) ---help--- Support for Allwinner A20/A31 GMAC ethernet controllers. 
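Both smsc911x and smsc9420 recompute pause settings from the PHY advertisement once a full-duplex link is up (the *_phy_update_flowcontrol paths above). As a self-contained illustration, here is the standard full-duplex pause resolution, the same truth table the kernel's mii_resolve_flowctrl_fdx() helper implements; the constants mirror linux/mii.h and the function name is local to the example:

#include <stdint.h>
#include <stdio.h>

#define ADVERTISE_PAUSE_CAP  0x0400	/* symmetric pause */
#define ADVERTISE_PAUSE_ASYM 0x0800	/* asymmetric pause */
#define FLOW_CTRL_TX 0x01		/* we may send pause frames */
#define FLOW_CTRL_RX 0x02		/* we honour received pause frames */

static uint8_t resolve_flowctrl_fdx(uint16_t lcladv, uint16_t rmtadv)
{
	uint8_t cap = 0;

	if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) {
		if (lcladv & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_RX;
		else if (rmtadv & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_TX;
	}
	return cap;
}

int main(void)
{
	/* Local advertises sym+asym, partner asym only: resolves to
	 * receive-only pause on our side (we honour the partner's pause
	 * frames but do not transmit pause ourselves). */
	printf("cap=%#x\n",
	       resolve_flowctrl_fdx(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
				    ADVERTISE_PAUSE_ASYM));
	return 0;
}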
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 0fb362d5a..44b630cd1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -11,11 +11,12 @@ obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o -obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o +obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o stmmac-platform-objs:= stmmac_platform.o +dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o stmmac-pci-objs:= stmmac_pci.o diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c new file mode 100644 index 000000000..2920e2ee3 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c @@ -0,0 +1,274 @@ +/* Copyright Altera Corporation (C) 2016. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * Author: Tien Hock Loh <thloh@altera.com> + */ + +#include <linux/mfd/syscon.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_net.h> +#include <linux/phy.h> +#include <linux/regmap.h> +#include <linux/reset.h> +#include <linux/stmmac.h> + +#include "stmmac.h" +#include "stmmac_platform.h" +#include "altr_tse_pcs.h" + +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0 +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII BIT(1) +#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII BIT(2) +#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2 +#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK GENMASK(1, 0) + +#define TSE_PCS_CONTROL_AN_EN_MASK BIT(12) +#define TSE_PCS_CONTROL_REG 0x00 +#define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9) +#define TSE_PCS_IF_MODE_REG 0x28 +#define TSE_PCS_LINK_TIMER_0_REG 0x24 +#define TSE_PCS_LINK_TIMER_1_REG 0x26 +#define TSE_PCS_SIZE 0x40 +#define TSE_PCS_STATUS_AN_COMPLETED_MASK BIT(5) +#define TSE_PCS_STATUS_LINK_MASK 0x0004 +#define TSE_PCS_STATUS_REG 0x02 +#define TSE_PCS_SGMII_SPEED_1000 BIT(3) +#define TSE_PCS_SGMII_SPEED_100 BIT(2) +#define TSE_PCS_SGMII_SPEED_10 0x0 +#define TSE_PCS_SW_RST_MASK 0x8000 +#define TSE_PCS_PARTNER_ABILITY_REG 0x0A +#define TSE_PCS_PARTNER_DUPLEX_FULL 0x1000 +#define TSE_PCS_PARTNER_DUPLEX_HALF 0x0000 +#define TSE_PCS_PARTNER_DUPLEX_MASK 0x1000 +#define TSE_PCS_PARTNER_SPEED_MASK GENMASK(11, 10) +#define TSE_PCS_PARTNER_SPEED_1000 BIT(11) +#define TSE_PCS_PARTNER_SPEED_100 BIT(10) +#define TSE_PCS_PARTNER_SPEED_10 0x0000 +#define TSE_PCS_SGMII_SPEED_MASK GENMASK(3, 2) +#define TSE_PCS_SGMII_LINK_TIMER_0 0x0D40 +#define TSE_PCS_SGMII_LINK_TIMER_1 0x0003 +#define 
TSE_PCS_SW_RESET_TIMEOUT 100 +#define TSE_PCS_USE_SGMII_AN_MASK BIT(2) +#define TSE_PCS_USE_SGMII_ENA BIT(1) + +#define SGMII_ADAPTER_CTRL_REG 0x00 +#define SGMII_ADAPTER_DISABLE 0x0001 +#define SGMII_ADAPTER_ENABLE 0x0000 + +#define AUTONEGO_LINK_TIMER 20 + +static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs) +{ + int counter = 0; + u16 val; + + val = readw(base + TSE_PCS_CONTROL_REG); + val |= TSE_PCS_SW_RST_MASK; + writew(val, base + TSE_PCS_CONTROL_REG); + + while (counter < TSE_PCS_SW_RESET_TIMEOUT) { + val = readw(base + TSE_PCS_CONTROL_REG); + val &= TSE_PCS_SW_RST_MASK; + if (val == 0) + break; + counter++; + udelay(1); + } + if (counter >= TSE_PCS_SW_RESET_TIMEOUT) { + dev_err(pcs->dev, "PCS could not get out of sw reset\n"); + return -ETIMEDOUT; + } + + return 0; +} + +int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs) +{ + int ret = 0; + + writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG); + + writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG); + writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG); + + ret = tse_pcs_reset(base, pcs); + if (ret == 0) + writew(SGMII_ADAPTER_ENABLE, + pcs->sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + + return ret; +} + +static void pcs_link_timer_callback(unsigned long data) +{ + u16 val = 0; + struct tse_pcs *pcs = (struct tse_pcs *)data; + void __iomem *tse_pcs_base = pcs->tse_pcs_base; + void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base; + + val = readw(tse_pcs_base + TSE_PCS_STATUS_REG); + val &= TSE_PCS_STATUS_LINK_MASK; + + if (val != 0) { + dev_dbg(pcs->dev, "Adapter: Link is established\n"); + writew(SGMII_ADAPTER_ENABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + } else { + mod_timer(&pcs->aneg_link_timer, jiffies + + msecs_to_jiffies(AUTONEGO_LINK_TIMER)); + } +} + +static void auto_nego_timer_callback(unsigned long data) +{ + u16 val = 0; + u16 speed = 0; + u16 duplex = 0; + struct tse_pcs *pcs = (struct tse_pcs *)data; + void __iomem *tse_pcs_base = pcs->tse_pcs_base; + void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base; + + val = readw(tse_pcs_base + TSE_PCS_STATUS_REG); + val &= TSE_PCS_STATUS_AN_COMPLETED_MASK; + + if (val != 0) { + dev_dbg(pcs->dev, "Adapter: Auto Negotiation is completed\n"); + val = readw(tse_pcs_base + TSE_PCS_PARTNER_ABILITY_REG); + speed = val & TSE_PCS_PARTNER_SPEED_MASK; + duplex = val & TSE_PCS_PARTNER_DUPLEX_MASK; + + if (speed == TSE_PCS_PARTNER_SPEED_10 && + duplex == TSE_PCS_PARTNER_DUPLEX_FULL) + dev_dbg(pcs->dev, + "Adapter: Link Partner is Up - 10/Full\n"); + else if (speed == TSE_PCS_PARTNER_SPEED_100 && + duplex == TSE_PCS_PARTNER_DUPLEX_FULL) + dev_dbg(pcs->dev, + "Adapter: Link Partner is Up - 100/Full\n"); + else if (speed == TSE_PCS_PARTNER_SPEED_1000 && + duplex == TSE_PCS_PARTNER_DUPLEX_FULL) + dev_dbg(pcs->dev, + "Adapter: Link Partner is Up - 1000/Full\n"); + else if (speed == TSE_PCS_PARTNER_SPEED_10 && + duplex == TSE_PCS_PARTNER_DUPLEX_HALF) + dev_err(pcs->dev, + "Adapter does not support Half Duplex\n"); + else if (speed == TSE_PCS_PARTNER_SPEED_100 && + duplex == TSE_PCS_PARTNER_DUPLEX_HALF) + dev_err(pcs->dev, + "Adapter does not support Half Duplex\n"); + else if (speed == TSE_PCS_PARTNER_SPEED_1000 && + duplex == TSE_PCS_PARTNER_DUPLEX_HALF) + dev_err(pcs->dev, + "Adapter does not support Half Duplex\n"); + else + dev_err(pcs->dev, + "Adapter: Invalid Partner Speed and Duplex\n"); + + if (duplex == TSE_PCS_PARTNER_DUPLEX_FULL && + (speed == TSE_PCS_PARTNER_SPEED_10 || + speed == 
TSE_PCS_PARTNER_SPEED_100 || + speed == TSE_PCS_PARTNER_SPEED_1000)) + writew(SGMII_ADAPTER_ENABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + } else { + val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG); + val |= TSE_PCS_CONTROL_RESTART_AN_MASK; + writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG); + + tse_pcs_reset(tse_pcs_base, pcs); + mod_timer(&pcs->aneg_link_timer, jiffies + + msecs_to_jiffies(AUTONEGO_LINK_TIMER)); + } +} + +static void aneg_link_timer_callback(unsigned long data) +{ + struct tse_pcs *pcs = (struct tse_pcs *)data; + + if (pcs->autoneg == AUTONEG_ENABLE) + auto_nego_timer_callback(data); + else if (pcs->autoneg == AUTONEG_DISABLE) + pcs_link_timer_callback(data); +} + +void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev, + unsigned int speed) +{ + void __iomem *tse_pcs_base = pcs->tse_pcs_base; + void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base; + u32 val; + + writew(SGMII_ADAPTER_ENABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + + pcs->autoneg = phy_dev->autoneg; + + if (phy_dev->autoneg == AUTONEG_ENABLE) { + val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG); + val |= TSE_PCS_CONTROL_AN_EN_MASK; + writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG); + + val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG); + val |= TSE_PCS_USE_SGMII_AN_MASK; + writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG); + + val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG); + val |= TSE_PCS_CONTROL_RESTART_AN_MASK; + writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG); + + tse_pcs_reset(tse_pcs_base, pcs); + + setup_timer(&pcs->aneg_link_timer, + aneg_link_timer_callback, (unsigned long)pcs); + mod_timer(&pcs->aneg_link_timer, jiffies + + msecs_to_jiffies(AUTONEGO_LINK_TIMER)); + } else if (phy_dev->autoneg == AUTONEG_DISABLE) { + val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG); + val &= ~TSE_PCS_CONTROL_AN_EN_MASK; + writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG); + + val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG); + val &= ~TSE_PCS_USE_SGMII_AN_MASK; + writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG); + + val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG); + val &= ~TSE_PCS_SGMII_SPEED_MASK; + + switch (speed) { + case 1000: + val |= TSE_PCS_SGMII_SPEED_1000; + break; + case 100: + val |= TSE_PCS_SGMII_SPEED_100; + break; + case 10: + val |= TSE_PCS_SGMII_SPEED_10; + break; + default: + return; + } + writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG); + + tse_pcs_reset(tse_pcs_base, pcs); + + setup_timer(&pcs->aneg_link_timer, + aneg_link_timer_callback, (unsigned long)pcs); + mod_timer(&pcs->aneg_link_timer, jiffies + + msecs_to_jiffies(AUTONEGO_LINK_TIMER)); + } +} diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h new file mode 100644 index 000000000..2f5882450 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h @@ -0,0 +1,36 @@ +/* Copyright Altera Corporation (C) 2016. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
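The tse_pcs code above polls for AN/link completion from a self-rearming kernel timer rather than an interrupt. A minimal sketch of that pattern, assuming the same pre-4.15 timer API the patch itself uses (setup_timer with an unsigned long cookie); struct poller, check_ready and on_ready are hypothetical names:

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/types.h>

#define POLL_INTERVAL_MS 20	/* mirrors AUTONEGO_LINK_TIMER above */

struct poller {
	struct timer_list timer;
	bool (*check_ready)(struct poller *p);	/* stand-in for a status read */
	void (*on_ready)(struct poller *p);	/* e.g. enable the SGMII adapter */
};

/* Timer callback: act once ready, otherwise re-arm and poll again. */
static void poll_cb(unsigned long data)
{
	struct poller *p = (struct poller *)data;

	if (p->check_ready(p))
		p->on_ready(p);
	else
		mod_timer(&p->timer,
			  jiffies + msecs_to_jiffies(POLL_INTERVAL_MS));
}

static void poll_start(struct poller *p)
{
	setup_timer(&p->timer, poll_cb, (unsigned long)p);
	mod_timer(&p->timer, jiffies + msecs_to_jiffies(POLL_INTERVAL_MS));
}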
+ * + * Author: Tien Hock Loh <thloh@altera.com> + */ + +#ifndef __TSE_PCS_H__ +#define __TSE_PCS_H__ + +#include <linux/phy.h> +#include <linux/timer.h> + +struct tse_pcs { + struct device *dev; + void __iomem *tse_pcs_base; + void __iomem *sgmii_adapter_base; + struct timer_list aneg_link_timer; + int autoneg; +}; + +int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs); +void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev, + unsigned int speed); + +#endif /* __TSE_PCS_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index fc60368df..2533b91f1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -232,6 +232,11 @@ struct stmmac_extra_stats { #define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */ #define DEFAULT_DMA_PBL 8 +/* PCS status and mask defines */ +#define PCS_ANE_IRQ BIT(2) /* PCS Auto-Negotiation */ +#define PCS_LINK_IRQ BIT(1) /* PCS Link */ +#define PCS_RGSMIIIS_IRQ BIT(0) /* RGMII or SMII Interrupt */ + /* Max/Min RI Watchdog Timer count value */ #define MAX_DMA_RIWT 0xff #define MIN_DMA_RIWT 0x20 @@ -272,9 +277,6 @@ enum dma_irq_status { #define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2) #define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3) -#define CORE_PCS_ANE_COMPLETE (1 << 5) -#define CORE_PCS_LINK_STATUS (1 << 6) -#define CORE_RGMII_IRQ (1 << 7) #define CORE_IRQ_MTL_RX_OVERFLOW BIT(8) /* Physical Coding Sublayer */ @@ -469,9 +471,12 @@ struct stmmac_ops { void (*reset_eee_mode)(struct mac_device_info *hw); void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw); void (*set_eee_pls)(struct mac_device_info *hw, int link); - void (*ctrl_ane)(struct mac_device_info *hw, bool restart); - void (*get_adv)(struct mac_device_info *hw, struct rgmii_adv *adv); void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x); + /* PCS calls */ + void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral, + bool loopback); + void (*pcs_rane)(void __iomem *ioaddr, bool restart); + void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv); }; /* PTP and HW Timer helpers */ @@ -524,6 +529,9 @@ struct mac_device_info { int unicast_filter_entries; int mcast_bits_log2; unsigned int rx_csum; + unsigned int pcs; + unsigned int pmt; + unsigned int ps; }; struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, @@ -546,6 +554,7 @@ void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable); void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); + extern const struct stmmac_mode_ops ring_mode_ops; extern const struct stmmac_mode_ops chain_mode_ops; extern const struct stmmac_desc_ops dwmac4_desc_ops; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 0cd3ecff7..92105916e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -46,6 +46,7 @@ struct rk_priv_data { struct platform_device *pdev; int phy_iface; struct regulator *regulator; + bool suspended; const struct rk_gmac_ops *ops; bool clk_enabled; @@ -72,6 +73,122 @@ struct rk_priv_data { #define GRF_BIT(nr) (BIT(nr) | BIT(nr+16)) #define GRF_CLR_BIT(nr) (BIT(nr+16)) +#define RK3228_GRF_MAC_CON0 0x0900 +#define RK3228_GRF_MAC_CON1 0x0904 + +/* RK3228_GRF_MAC_CON0 */ +#define RK3228_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7) +#define 
RK3228_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0) + +/* RK3228_GRF_MAC_CON1 */ +#define RK3228_GMAC_PHY_INTF_SEL_RGMII \ + (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6)) +#define RK3228_GMAC_PHY_INTF_SEL_RMII \ + (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6)) +#define RK3228_GMAC_FLOW_CTRL GRF_BIT(3) +#define RK3228_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3) +#define RK3228_GMAC_SPEED_10M GRF_CLR_BIT(2) +#define RK3228_GMAC_SPEED_100M GRF_BIT(2) +#define RK3228_GMAC_RMII_CLK_25M GRF_BIT(7) +#define RK3228_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7) +#define RK3228_GMAC_CLK_125M (GRF_CLR_BIT(8) | GRF_CLR_BIT(9)) +#define RK3228_GMAC_CLK_25M (GRF_BIT(8) | GRF_BIT(9)) +#define RK3228_GMAC_CLK_2_5M (GRF_CLR_BIT(8) | GRF_BIT(9)) +#define RK3228_GMAC_RMII_MODE GRF_BIT(10) +#define RK3228_GMAC_RMII_MODE_CLR GRF_CLR_BIT(10) +#define RK3228_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0) +#define RK3228_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0) +#define RK3228_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1) +#define RK3228_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(1) + +static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv, + int tx_delay, int rx_delay) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_PHY_INTF_SEL_RGMII | + RK3228_GMAC_RMII_MODE_CLR | + RK3228_GMAC_RXCLK_DLY_ENABLE | + RK3228_GMAC_TXCLK_DLY_ENABLE); + + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0, + RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) | + RK3228_GMAC_CLK_TX_DL_CFG(tx_delay)); +} + +static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_PHY_INTF_SEL_RMII | + RK3228_GMAC_RMII_MODE); + + /* set MAC to RMII mode */ + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, GRF_BIT(11)); +} + +static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + if (speed == 10) + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_CLK_2_5M); + else if (speed == 100) + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_CLK_25M); + else if (speed == 1000) + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_CLK_125M); + else + dev_err(dev, "unknown speed value for RGMII! speed=%d", speed); +} + +static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "Missing rockchip,grf property\n"); + return; + } + + if (speed == 10) + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_RMII_CLK_2_5M | + RK3228_GMAC_SPEED_10M); + else if (speed == 100) + regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, + RK3228_GMAC_RMII_CLK_25M | + RK3228_GMAC_SPEED_100M); + else + dev_err(dev, "unknown speed value for RMII! 
speed=%d", speed); +} + +static const struct rk_gmac_ops rk3228_ops = { + .set_to_rgmii = rk3228_set_to_rgmii, + .set_to_rmii = rk3228_set_to_rmii, + .set_rgmii_speed = rk3228_set_rgmii_speed, + .set_rmii_speed = rk3228_set_rmii_speed, +}; + #define RK3288_GRF_SOC_CON1 0x0248 #define RK3288_GRF_SOC_CON3 0x0250 @@ -529,9 +646,8 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev, return bsp_priv; } -static int rk_gmac_init(struct platform_device *pdev, void *priv) +static int rk_gmac_powerup(struct rk_priv_data *bsp_priv) { - struct rk_priv_data *bsp_priv = priv; int ret; ret = phy_power_on(bsp_priv, true); @@ -545,14 +661,50 @@ static int rk_gmac_init(struct platform_device *pdev, void *priv) return 0; } -static void rk_gmac_exit(struct platform_device *pdev, void *priv) +static void rk_gmac_powerdown(struct rk_priv_data *gmac) { - struct rk_priv_data *gmac = priv; - phy_power_on(gmac, false); gmac_clk_enable(gmac, false); } +static int rk_gmac_init(struct platform_device *pdev, void *priv) +{ + struct rk_priv_data *bsp_priv = priv; + + return rk_gmac_powerup(bsp_priv); +} + +static void rk_gmac_exit(struct platform_device *pdev, void *priv) +{ + struct rk_priv_data *bsp_priv = priv; + + rk_gmac_powerdown(bsp_priv); +} + +static void rk_gmac_suspend(struct platform_device *pdev, void *priv) +{ + struct rk_priv_data *bsp_priv = priv; + + /* Keep the PHY up if we use Wake-on-Lan. */ + if (device_may_wakeup(&pdev->dev)) + return; + + rk_gmac_powerdown(bsp_priv); + bsp_priv->suspended = true; +} + +static void rk_gmac_resume(struct platform_device *pdev, void *priv) +{ + struct rk_priv_data *bsp_priv = priv; + + /* The PHY was up for Wake-on-Lan. */ + if (!bsp_priv->suspended) + return; + + rk_gmac_powerup(bsp_priv); + bsp_priv->suspended = false; +} + static void rk_fix_speed(void *priv, unsigned int speed) { struct rk_priv_data *bsp_priv = priv; @@ -591,6 +743,8 @@ static int rk_gmac_probe(struct platform_device *pdev) plat_dat->init = rk_gmac_init; plat_dat->exit = rk_gmac_exit; plat_dat->fix_mac_speed = rk_fix_speed; + plat_dat->suspend = rk_gmac_suspend; + plat_dat->resume = rk_gmac_resume; plat_dat->bsp_priv = rk_gmac_setup(pdev, data); if (IS_ERR(plat_dat->bsp_priv)) @@ -604,6 +758,7 @@ static int rk_gmac_probe(struct platform_device *pdev) } static const struct of_device_id rk_gmac_dwmac_match[] = { + { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops }, { } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index f13499fa1..bec6963ac 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -27,6 +27,11 @@ #include "stmmac.h" #include "stmmac_platform.h" +#include "altr_tse_pcs.h" + +#define SGMII_ADAPTER_CTRL_REG 0x00 +#define SGMII_ADAPTER_DISABLE 0x0001 + #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2 @@ -52,35 +57,46 @@ struct socfpga_dwmac { struct reset_control *stmmac_rst; void __iomem *splitter_base; bool f2h_ptp_ref_clk; + struct tse_pcs pcs; }; static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed) { struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv; void __iomem *splitter_base = dwmac->splitter_base; + void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base; + 
void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base; + struct device *dev = dwmac->dev; + struct net_device *ndev = dev_get_drvdata(dev); + struct phy_device *phy_dev = ndev->phydev; u32 val; - if (!splitter_base) - return; - - val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG); - val &= ~EMAC_SPLITTER_CTRL_SPEED_MASK; - - switch (speed) { - case 1000: - val |= EMAC_SPLITTER_CTRL_SPEED_1000; - break; - case 100: - val |= EMAC_SPLITTER_CTRL_SPEED_100; - break; - case 10: - val |= EMAC_SPLITTER_CTRL_SPEED_10; - break; - default: - return; + if ((tse_pcs_base) && (sgmii_adapter_base)) + writew(SGMII_ADAPTER_DISABLE, + sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG); + + if (splitter_base) { + val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG); + val &= ~EMAC_SPLITTER_CTRL_SPEED_MASK; + + switch (speed) { + case 1000: + val |= EMAC_SPLITTER_CTRL_SPEED_1000; + break; + case 100: + val |= EMAC_SPLITTER_CTRL_SPEED_100; + break; + case 10: + val |= EMAC_SPLITTER_CTRL_SPEED_10; + break; + default: + return; + } + writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG); } - writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG); + if (tse_pcs_base && sgmii_adapter_base) + tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed); } static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev) @@ -88,9 +104,12 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * struct device_node *np = dev->of_node; struct regmap *sys_mgr_base_addr; u32 reg_offset, reg_shift; - int ret; - struct device_node *np_splitter; + int ret, index; + struct device_node *np_splitter = NULL; + struct device_node *np_sgmii_adapter = NULL; struct resource res_splitter; + struct resource res_tse_pcs; + struct resource res_sgmii_adapter; dwmac->interface = of_get_phy_mode(np); @@ -116,7 +135,9 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * np_splitter = of_parse_phandle(np, "altr,emac-splitter", 0); if (np_splitter) { - if (of_address_to_resource(np_splitter, 0, &res_splitter)) { + ret = of_address_to_resource(np_splitter, 0, &res_splitter); + of_node_put(np_splitter); + if (ret) { dev_info(dev, "Missing emac splitter address\n"); return -EINVAL; } @@ -128,12 +149,86 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * } } + np_sgmii_adapter = of_parse_phandle(np, + "altr,gmii-to-sgmii-converter", 0); + if (np_sgmii_adapter) { + index = of_property_match_string(np_sgmii_adapter, "reg-names", + "hps_emac_interface_splitter_avalon_slave"); + + if (index >= 0) { + if (of_address_to_resource(np_sgmii_adapter, index, + &res_splitter)) { + dev_err(dev, + "%s: ERROR: missing emac splitter address\n", + __func__); + ret = -EINVAL; + goto err_node_put; + } + + dwmac->splitter_base = + devm_ioremap_resource(dev, &res_splitter); + + if (IS_ERR(dwmac->splitter_base)) { + ret = PTR_ERR(dwmac->splitter_base); + goto err_node_put; + } + } + + index = of_property_match_string(np_sgmii_adapter, "reg-names", + "gmii_to_sgmii_adapter_avalon_slave"); + + if (index >= 0) { + if (of_address_to_resource(np_sgmii_adapter, index, + &res_sgmii_adapter)) { + dev_err(dev, + "%s: ERROR: failed mapping adapter\n", + __func__); + ret = -EINVAL; + goto err_node_put; + } + + dwmac->pcs.sgmii_adapter_base = + devm_ioremap_resource(dev, &res_sgmii_adapter); + + if (IS_ERR(dwmac->pcs.sgmii_adapter_base)) { + ret = PTR_ERR(dwmac->pcs.sgmii_adapter_base); + goto err_node_put; + } + } + + index = of_property_match_string(np_sgmii_adapter, "reg-names", 
+ "eth_tse_control_port"); + + if (index >= 0) { + if (of_address_to_resource(np_sgmii_adapter, index, + &res_tse_pcs)) { + dev_err(dev, + "%s: ERROR: failed mapping tse control port\n", + __func__); + ret = -EINVAL; + goto err_node_put; + } + + dwmac->pcs.tse_pcs_base = + devm_ioremap_resource(dev, &res_tse_pcs); + + if (IS_ERR(dwmac->pcs.tse_pcs_base)) { + ret = PTR_ERR(dwmac->pcs.tse_pcs_base); + goto err_node_put; + } + } + } dwmac->reg_offset = reg_offset; dwmac->reg_shift = reg_shift; dwmac->sys_mgr_base_addr = sys_mgr_base_addr; dwmac->dev = dev; + of_node_put(np_sgmii_adapter); return 0; + +err_node_put: + of_node_put(np_sgmii_adapter); + return ret; } static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac) @@ -151,6 +246,7 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac) break; case PHY_INTERFACE_MODE_MII: case PHY_INTERFACE_MODE_GMII: + case PHY_INTERFACE_MODE_SGMII: val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII; break; default: @@ -191,6 +287,12 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac) */ if (dwmac->stmmac_rst) reset_control_deassert(dwmac->stmmac_rst); + if (phymode == PHY_INTERFACE_MODE_SGMII) { + if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) { + dev_err(dwmac->dev, "Unable to initialize TSE PCS"); + return -EINVAL; + } + } return 0; } @@ -225,6 +327,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (!ret) { struct net_device *ndev = platform_get_drvdata(pdev); struct stmmac_priv *stpriv = netdev_priv(ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index b0593a426..ff3e5ab39 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -38,19 +38,26 @@ #define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ #define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ -enum dwmac1000_irq_status { - lpiis_irq = 0x400, - time_stamp_irq = 0x0200, - mmc_rx_csum_offload_irq = 0x0080, - mmc_tx_irq = 0x0040, - mmc_rx_irq = 0x0020, - mmc_irq = 0x0010, - pmt_irq = 0x0008, - pcs_ane_irq = 0x0004, - pcs_link_irq = 0x0002, - rgmii_irq = 0x0001, -}; -#define GMAC_INT_MASK 0x0000003c /* interrupt mask register */ +#define GMAC_INT_STATUS_PMT BIT(3) +#define GMAC_INT_STATUS_MMCIS BIT(4) +#define GMAC_INT_STATUS_MMCRIS BIT(5) +#define GMAC_INT_STATUS_MMCTIS BIT(6) +#define GMAC_INT_STATUS_MMCCSUM BIT(7) +#define GMAC_INT_STATUS_TSTAMP BIT(9) +#define GMAC_INT_STATUS_LPIIS BIT(10) + +/* interrupt mask register */ +#define GMAC_INT_MASK 0x0000003c +#define GMAC_INT_DISABLE_RGMII BIT(0) +#define GMAC_INT_DISABLE_PCSLINK BIT(1) +#define GMAC_INT_DISABLE_PCSAN BIT(2) +#define GMAC_INT_DISABLE_PMT BIT(3) +#define GMAC_INT_DISABLE_TIMESTAMP BIT(9) +#define GMAC_INT_DISABLE_PCS (GMAC_INT_DISABLE_RGMII | \ + GMAC_INT_DISABLE_PCSLINK | \ + GMAC_INT_DISABLE_PCSAN) +#define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_TIMESTAMP | \ + GMAC_INT_DISABLE_PCS) /* PMT Control and Status */ #define GMAC_PMT 0x0000002c @@ -90,42 +97,23 @@ enum power_event { (reg * 8)) #define GMAC_MAX_PERFECT_ADDRESSES 1 -/* PCS registers (AN/TBI/SGMII/RGMII) offset */ -#define GMAC_AN_CTRL 0x000000c0 /* AN control */ -#define GMAC_AN_STATUS 0x000000c4 /* AN status */ -#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */ -#define GMAC_ANE_LPA 0x000000cc /* Auto-Neg. 
link partener ability */ -#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */ -#define GMAC_TBI 0x000000d4 /* TBI extend status */ -#define GMAC_S_R_GMII 0x000000d8 /* SGMII RGMII status */ - -/* AN Configuration defines */ -#define GMAC_AN_CTRL_RAN 0x00000200 /* Restart Auto-Negotiation */ -#define GMAC_AN_CTRL_ANE 0x00001000 /* Auto-Negotiation Enable */ -#define GMAC_AN_CTRL_ELE 0x00004000 /* External Loopback Enable */ -#define GMAC_AN_CTRL_ECD 0x00010000 /* Enable Comma Detect */ -#define GMAC_AN_CTRL_LR 0x00020000 /* Lock to Reference */ -#define GMAC_AN_CTRL_SGMRAL 0x00040000 /* SGMII RAL Control */ - -/* AN Status defines */ -#define GMAC_AN_STATUS_LS 0x00000004 /* Link Status 0:down 1:up */ -#define GMAC_AN_STATUS_ANA 0x00000008 /* Auto-Negotiation Ability */ -#define GMAC_AN_STATUS_ANC 0x00000020 /* Auto-Negotiation Complete */ -#define GMAC_AN_STATUS_ES 0x00000100 /* Extended Status */ - -/* Register 54 (SGMII/RGMII status register) */ -#define GMAC_S_R_GMII_LINK 0x8 -#define GMAC_S_R_GMII_SPEED 0x5 -#define GMAC_S_R_GMII_SPEED_SHIFT 0x1 -#define GMAC_S_R_GMII_MODE 0x1 -#define GMAC_S_R_GMII_SPEED_125 2 -#define GMAC_S_R_GMII_SPEED_25 1 - -/* Common ADV and LPA defines */ -#define GMAC_ANE_FD (1 << 5) -#define GMAC_ANE_HD (1 << 6) -#define GMAC_ANE_PSE (3 << 7) -#define GMAC_ANE_PSE_SHIFT 7 +#define GMAC_PCS_BASE 0x000000c0 /* PCS register base */ +#define GMAC_RGSMIIIS 0x000000d8 /* RGMII/SMII status */ + +/* SGMII/RGMII status register */ +#define GMAC_RGSMIIIS_LNKMODE BIT(0) +#define GMAC_RGSMIIIS_SPEED GENMASK(2, 1) +#define GMAC_RGSMIIIS_SPEED_SHIFT 1 +#define GMAC_RGSMIIIS_LNKSTS BIT(3) +#define GMAC_RGSMIIIS_JABTO BIT(4) +#define GMAC_RGSMIIIS_FALSECARDET BIT(5) +#define GMAC_RGSMIIIS_SMIDRXS BIT(16) +/* LNKMOD */ +#define GMAC_RGSMIIIS_LNKMOD_MASK 0x1 +/* LNKSPEED */ +#define GMAC_RGSMIIIS_SPEED_125 0x2 +#define GMAC_RGSMIIIS_SPEED_25 0x1 +#define GMAC_RGSMIIIS_SPEED_2_5 0x0 /* GMAC Configuration defines */ #define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index fb1eb578e..885a5e645 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -30,22 +30,48 @@ #include <linux/slab.h> #include <linux/ethtool.h> #include <asm/io.h> +#include "stmmac_pcs.h" #include "dwmac1000.h" static void dwmac1000_core_init(struct mac_device_info *hw, int mtu) { void __iomem *ioaddr = hw->pcsr; u32 value = readl(ioaddr + GMAC_CONTROL); + + /* Configure GMAC core */ value |= GMAC_CORE_INIT; + if (mtu > 1500) value |= GMAC_CONTROL_2K; if (mtu > 2000) value |= GMAC_CONTROL_JE; + if (hw->ps) { + value |= GMAC_CONTROL_TE; + + if (hw->ps == SPEED_1000) { + value &= ~GMAC_CONTROL_PS; + } else { + value |= GMAC_CONTROL_PS; + + if (hw->ps == SPEED_10) + value &= ~GMAC_CONTROL_FES; + else + value |= GMAC_CONTROL_FES; + } + } + writel(value, ioaddr + GMAC_CONTROL); /* Mask GMAC interrupts */ - writel(0x207, ioaddr + GMAC_INT_MASK); + value = GMAC_INT_DEFAULT_MASK; + + if (hw->pmt) + value &= ~GMAC_INT_DISABLE_PMT; + if (hw->pcs) + value &= ~GMAC_INT_DISABLE_PCS; + + writel(value, ioaddr + GMAC_INT_MASK); #ifdef STMMAC_VLAN_TAG_USED /* Tag detection without filtering */ @@ -235,12 +261,45 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode) } if (mode & WAKE_UCAST) { pr_debug("GMAC: WOL on global unicast\n"); - pmt |= global_unicast; + pmt |= power_down | global_unicast | 
wake_up_frame_en; } writel(pmt, ioaddr + GMAC_PMT); } +/* RGMII or SMII interface */ +static void dwmac1000_rgsmii(void __iomem *ioaddr, struct stmmac_extra_stats *x) +{ + u32 status; + + status = readl(ioaddr + GMAC_RGSMIIIS); + x->irq_rgmii_n++; + + /* Check the link status */ + if (status & GMAC_RGSMIIIS_LNKSTS) { + int speed_value; + + x->pcs_link = 1; + + speed_value = ((status & GMAC_RGSMIIIS_SPEED) >> + GMAC_RGSMIIIS_SPEED_SHIFT); + if (speed_value == GMAC_RGSMIIIS_SPEED_125) + x->pcs_speed = SPEED_1000; + else if (speed_value == GMAC_RGSMIIIS_SPEED_25) + x->pcs_speed = SPEED_100; + else + x->pcs_speed = SPEED_10; + + x->pcs_duplex = (status & GMAC_RGSMIIIS_LNKMOD_MASK); + + pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed, + x->pcs_duplex ? "Full" : "Half"); + } else { + x->pcs_link = 0; + pr_info("Link is Down\n"); + } +} + static int dwmac1000_irq_status(struct mac_device_info *hw, struct stmmac_extra_stats *x) { @@ -249,19 +308,20 @@ static int dwmac1000_irq_status(struct mac_device_info *hw, int ret = 0; /* Not used events (e.g. MMC interrupts) are not handled. */ - if ((intr_status & mmc_tx_irq)) + if ((intr_status & GMAC_INT_STATUS_MMCTIS)) x->mmc_tx_irq_n++; - if (unlikely(intr_status & mmc_rx_irq)) + if (unlikely(intr_status & GMAC_INT_STATUS_MMCRIS)) x->mmc_rx_irq_n++; - if (unlikely(intr_status & mmc_rx_csum_offload_irq)) + if (unlikely(intr_status & GMAC_INT_STATUS_MMCCSUM)) x->mmc_rx_csum_offload_irq_n++; - if (unlikely(intr_status & pmt_irq)) { + if (unlikely(intr_status & GMAC_INT_DISABLE_PMT)) { /* clear the PMT bits 5 and 6 by reading the PMT status reg */ readl(ioaddr + GMAC_PMT); x->irq_receive_pmt_irq_n++; } - /* MAC trx/rx EEE LPI entry/exit interrupts */ - if (intr_status & lpiis_irq) { + + /* MAC tx/rx EEE LPI entry/exit interrupts */ + if (intr_status & GMAC_INT_STATUS_LPIIS) { /* Clean LPI interrupt by reading the Reg 12 */ ret = readl(ioaddr + LPI_CTRL_STATUS); @@ -275,36 +335,10 @@ static int dwmac1000_irq_status(struct mac_device_info *hw, x->irq_rx_path_exit_lpi_mode_n++; } - if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) { - readl(ioaddr + GMAC_AN_STATUS); - x->irq_pcs_ane_n++; - } - if (intr_status & rgmii_irq) { - u32 status = readl(ioaddr + GMAC_S_R_GMII); - x->irq_rgmii_n++; - - /* Save and dump the link status. */ - if (status & GMAC_S_R_GMII_LINK) { - int speed_value = (status & GMAC_S_R_GMII_SPEED) >> - GMAC_S_R_GMII_SPEED_SHIFT; - x->pcs_duplex = (status & GMAC_S_R_GMII_MODE); - - if (speed_value == GMAC_S_R_GMII_SPEED_125) - x->pcs_speed = SPEED_1000; - else if (speed_value == GMAC_S_R_GMII_SPEED_25) - x->pcs_speed = SPEED_100; - else - x->pcs_speed = SPEED_10; + dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); - x->pcs_link = 1; - pr_debug("%s: Link is Up - %d/%s\n", __func__, - (int)x->pcs_speed, - x->pcs_duplex ? 
"Full" : "Half"); - } else { - x->pcs_link = 0; - pr_debug("%s: Link is Down\n", __func__); - } - } + if (intr_status & PCS_RGSMIIIS_IRQ) + dwmac1000_rgsmii(ioaddr, x); return ret; } @@ -363,38 +397,20 @@ static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw) writel(value, ioaddr + LPI_TIMER_CTRL); } -static void dwmac1000_ctrl_ane(struct mac_device_info *hw, bool restart) +static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral, + bool loopback) { - void __iomem *ioaddr = hw->pcsr; - /* auto negotiation enable and External Loopback enable */ - u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE; - - if (restart) - value |= GMAC_AN_CTRL_RAN; - - writel(value, ioaddr + GMAC_AN_CTRL); + dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); } -static void dwmac1000_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv) +static void dwmac1000_rane(void __iomem *ioaddr, bool restart) { - void __iomem *ioaddr = hw->pcsr; - u32 value = readl(ioaddr + GMAC_ANE_ADV); - - if (value & GMAC_ANE_FD) - adv->duplex = DUPLEX_FULL; - if (value & GMAC_ANE_HD) - adv->duplex |= DUPLEX_HALF; - - adv->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; - - value = readl(ioaddr + GMAC_ANE_LPA); - - if (value & GMAC_ANE_FD) - adv->lp_duplex = DUPLEX_FULL; - if (value & GMAC_ANE_HD) - adv->lp_duplex = DUPLEX_HALF; + dwmac_rane(ioaddr, GMAC_PCS_BASE, restart); +} - adv->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; +static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv) +{ + dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); } static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x) @@ -485,9 +501,10 @@ static const struct stmmac_ops dwmac1000_ops = { .reset_eee_mode = dwmac1000_reset_eee_mode, .set_eee_timer = dwmac1000_set_eee_timer, .set_eee_pls = dwmac1000_set_eee_pls, - .ctrl_ane = dwmac1000_ctrl_ane, - .get_adv = dwmac1000_get_adv, .debug = dwmac1000_debug, + .pcs_ctrl_ane = dwmac1000_ctrl_ane, + .pcs_rane = dwmac1000_rane, + .pcs_get_adv_lp = dwmac1000_get_adv_lp, }; struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index bc50952a1..6f4f5ce25 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -24,10 +24,8 @@ #define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) #define GMAC_INT_STATUS 0x000000b0 #define GMAC_INT_EN 0x000000b4 -#define GMAC_AN_CTRL 0x000000e0 -#define GMAC_AN_STATUS 0x000000e4 -#define GMAC_AN_ADV 0x000000e8 -#define GMAC_AN_LPA 0x000000ec +#define GMAC_PCS_BASE 0x000000e0 +#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8 #define GMAC_PMT 0x000000c0 #define GMAC_VERSION 0x00000110 #define GMAC_DEBUG 0x00000114 @@ -54,9 +52,18 @@ #define GMAC_TX_FLOW_CTRL_PT_SHIFT 16 /* MAC Interrupt bitmap*/ +#define GMAC_INT_RGSMIIS BIT(0) +#define GMAC_INT_PCS_LINK BIT(1) +#define GMAC_INT_PCS_ANE BIT(2) +#define GMAC_INT_PCS_PHYIS BIT(3) #define GMAC_INT_PMT_EN BIT(4) #define GMAC_INT_LPI_EN BIT(5) +#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \ + GMAC_INT_PCS_ANE) + +#define GMAC_INT_DEFAULT_MASK GMAC_INT_PMT_EN + enum dwmac4_irq_status { time_stamp_irq = 0x00001000, mmc_rx_csum_offload_irq = 0x00000800, @@ -64,19 +71,8 @@ enum dwmac4_irq_status { mmc_rx_irq = 0x00000200, mmc_irq = 0x00000100, pmt_irq = 0x00000010, - pcs_ane_irq = 0x00000004, - pcs_link_irq = 0x00000002, }; -/* MAC Auto-Neg 
bitmap*/ -#define GMAC_AN_CTRL_RAN BIT(9) -#define GMAC_AN_CTRL_ANE BIT(12) -#define GMAC_AN_CTRL_ELE BIT(14) -#define GMAC_AN_FD BIT(5) -#define GMAC_AN_HD BIT(6) -#define GMAC_AN_PSE_MASK GENMASK(8, 7) -#define GMAC_AN_PSE_SHIFT 7 - /* MAC PMT bitmap */ enum power_event { pointer_reset = 0x80000000, @@ -250,6 +246,23 @@ enum power_event { #define MTL_DEBUG_RRCSTS_FLUSH 3 #define MTL_DEBUG_RWCSTS BIT(0) +/* SGMII/RGMII status register */ +#define GMAC_PHYIF_CTRLSTATUS_TC BIT(0) +#define GMAC_PHYIF_CTRLSTATUS_LUD BIT(1) +#define GMAC_PHYIF_CTRLSTATUS_SMIDRXS BIT(4) +#define GMAC_PHYIF_CTRLSTATUS_LNKMOD BIT(16) +#define GMAC_PHYIF_CTRLSTATUS_SPEED GENMASK(18, 17) +#define GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT 17 +#define GMAC_PHYIF_CTRLSTATUS_LNKSTS BIT(19) +#define GMAC_PHYIF_CTRLSTATUS_JABTO BIT(20) +#define GMAC_PHYIF_CTRLSTATUS_FALSECARDET BIT(21) +/* LNKMOD */ +#define GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK 0x1 +/* LNKSPEED */ +#define GMAC_PHYIF_CTRLSTATUS_SPEED_125 0x2 +#define GMAC_PHYIF_CTRLSTATUS_SPEED_25 0x1 +#define GMAC_PHYIF_CTRLSTATUS_SPEED_2_5 0x0 + extern const struct stmmac_dma_ops dwmac4_dma_ops; extern const struct stmmac_dma_ops dwmac410_dma_ops; #endif /* __DWMAC4_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 44da877d2..51019b794 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -17,6 +17,7 @@ #include <linux/slab.h> #include <linux/ethtool.h> #include <linux/io.h> +#include "stmmac_pcs.h" #include "dwmac4.h" static void dwmac4_core_init(struct mac_device_info *hw, int mtu) @@ -31,10 +32,31 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu) if (mtu > 2000) value |= GMAC_CONFIG_JE; + if (hw->ps) { + value |= GMAC_CONFIG_TE; + + if (hw->ps == SPEED_1000) { + value &= ~GMAC_CONFIG_PS; + } else { + value |= GMAC_CONFIG_PS; + + if (hw->ps == SPEED_10) + value &= ~GMAC_CONFIG_FES; + else + value |= GMAC_CONFIG_FES; + } + } + writel(value, ioaddr + GMAC_CONFIG); /* Mask GMAC interrupts */ - writel(GMAC_INT_PMT_EN, ioaddr + GMAC_INT_EN); + value = GMAC_INT_DEFAULT_MASK; + if (hw->pmt) + value |= GMAC_INT_PMT_EN; + if (hw->pcs) + value |= GMAC_PCS_IRQ_DEFAULT; + + writel(value, ioaddr + GMAC_INT_EN); } static void dwmac4_dump_regs(struct mac_device_info *hw) @@ -80,7 +102,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode) } if (mode & WAKE_UCAST) { pr_debug("GMAC: WOL on global unicast\n"); - pmt |= global_unicast; + pmt |= power_down | global_unicast | wake_up_frame_en; } writel(pmt, ioaddr + GMAC_PMT); @@ -190,39 +212,53 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex, } } -static void dwmac4_ctrl_ane(struct mac_device_info *hw, bool restart) +static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral, + bool loopback) { - void __iomem *ioaddr = hw->pcsr; - - /* auto negotiation enable and External Loopback enable */ - u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE; + dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); +} - if (restart) - value |= GMAC_AN_CTRL_RAN; +static void dwmac4_rane(void __iomem *ioaddr, bool restart) +{ + dwmac_rane(ioaddr, GMAC_PCS_BASE, restart); +} - writel(value, ioaddr + GMAC_AN_CTRL); +static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv) +{ + dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); } -static void dwmac4_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv) +/* RGMII 
or SMII interface */ +static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x) { - void __iomem *ioaddr = hw->pcsr; - u32 value = readl(ioaddr + GMAC_AN_ADV); + u32 status; - if (value & GMAC_AN_FD) - adv->duplex = DUPLEX_FULL; - if (value & GMAC_AN_HD) - adv->duplex |= DUPLEX_HALF; + status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS); + x->irq_rgmii_n++; - adv->pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT; + /* Check the link status */ + if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) { + int speed_value; - value = readl(ioaddr + GMAC_AN_LPA); + x->pcs_link = 1; + + speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >> + GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT); + if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125) + x->pcs_speed = SPEED_1000; + else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25) + x->pcs_speed = SPEED_100; + else + x->pcs_speed = SPEED_10; - if (value & GMAC_AN_FD) - adv->lp_duplex = DUPLEX_FULL; - if (value & GMAC_AN_HD) - adv->lp_duplex = DUPLEX_HALF; + x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK); - adv->lp_pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT; + pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed, + x->pcs_duplex ? "Full" : "Half"); + } else { + x->pcs_link = 0; + pr_info("Link is Down\n"); + } } static int dwmac4_irq_status(struct mac_device_info *hw, @@ -248,11 +284,6 @@ static int dwmac4_irq_status(struct mac_device_info *hw, x->irq_receive_pmt_irq_n++; } - if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) { - readl(ioaddr + GMAC_AN_STATUS); - x->irq_pcs_ane_n++; - } - mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS); /* Check MTL Interrupt: Currently only one queue is used: Q0. */ if (mtl_int_qx_status & MTL_INT_Q0) { @@ -267,6 +298,10 @@ static int dwmac4_irq_status(struct mac_device_info *hw, } } + dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); + if (intr_status & PCS_RGSMIIIS_IRQ) + dwmac4_phystatus(ioaddr, x); + return ret; } @@ -363,8 +398,9 @@ static const struct stmmac_ops dwmac4_ops = { .pmt = dwmac4_pmt, .set_umac_addr = dwmac4_set_umac_addr, .get_umac_addr = dwmac4_get_umac_addr, - .ctrl_ane = dwmac4_ctrl_ane, - .get_adv = dwmac4_get_adv, + .pcs_ctrl_ane = dwmac4_ctrl_ane, + .pcs_rane = dwmac4_rane, + .pcs_get_adv_lp = dwmac4_get_adv_lp, .debug = dwmac4_debug, .set_filter = dwmac4_set_filter, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 59ae6088c..8dc9056c1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -117,7 +117,6 @@ struct stmmac_priv { int eee_enabled; int eee_active; int tx_lpi_timer; - int pcs; unsigned int mode; int extend_desc; struct ptp_clock *ptp_clock; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index e2b98b016..1e06173fc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -276,7 +276,8 @@ static int stmmac_ethtool_getsettings(struct net_device *dev, struct phy_device *phy = priv->phydev; int rc; - if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) { + if (priv->hw->pcs & STMMAC_PCS_RGMII || + priv->hw->pcs & STMMAC_PCS_SGMII) { struct rgmii_adv adv; if (!priv->xstats.pcs_link) { @@ -289,10 +290,10 @@ static int stmmac_ethtool_getsettings(struct net_device *dev, ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed); /* Get and convert ADV/LP_ADV from the HW 
AN registers */ - if (!priv->hw->mac->get_adv) + if (!priv->hw->mac->pcs_get_adv_lp) return -EOPNOTSUPP; /* should never happen indeed */ - priv->hw->mac->get_adv(priv->hw, &adv); + priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv); /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */ @@ -361,7 +362,8 @@ static int stmmac_ethtool_setsettings(struct net_device *dev, struct phy_device *phy = priv->phydev; int rc; - if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) { + if (priv->hw->pcs & STMMAC_PCS_RGMII || + priv->hw->pcs & STMMAC_PCS_SGMII) { u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause; /* Only support ANE */ @@ -376,8 +378,11 @@ static int stmmac_ethtool_setsettings(struct net_device *dev, ADVERTISED_10baseT_Full); spin_lock(&priv->lock); - if (priv->hw->mac->ctrl_ane) - priv->hw->mac->ctrl_ane(priv->hw, 1); + + if (priv->hw->mac->pcs_ctrl_ane) + priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1, + priv->hw->ps, 0); + spin_unlock(&priv->lock); return 0; @@ -452,11 +457,22 @@ stmmac_get_pauseparam(struct net_device *netdev, { struct stmmac_priv *priv = netdev_priv(netdev); - if (priv->pcs) /* FIXME */ - return; - pause->rx_pause = 0; pause->tx_pause = 0; + + if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) { + struct rgmii_adv adv_lp; + + pause->autoneg = 1; + priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp); + if (!adv_lp.pause) + return; + } else { + if (!(priv->phydev->supported & SUPPORTED_Pause) || + !(priv->phydev->supported & SUPPORTED_Asym_Pause)) + return; + } + pause->autoneg = priv->phydev->autoneg; if (priv->flow_ctrl & FLOW_RX) @@ -473,10 +489,19 @@ stmmac_set_pauseparam(struct net_device *netdev, struct stmmac_priv *priv = netdev_priv(netdev); struct phy_device *phy = priv->phydev; int new_pause = FLOW_OFF; - int ret = 0; - if (priv->pcs) /* FIXME */ - return -EOPNOTSUPP; + if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) { + struct rgmii_adv adv_lp; + + pause->autoneg = 1; + priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp); + if (!adv_lp.pause) + return -EOPNOTSUPP; + } else { + if (!(phy->supported & SUPPORTED_Pause) || + !(phy->supported & SUPPORTED_Asym_Pause)) + return -EOPNOTSUPP; + } if (pause->rx_pause) new_pause |= FLOW_RX; @@ -488,11 +513,12 @@ stmmac_set_pauseparam(struct net_device *netdev, if (phy->autoneg) { if (netif_running(netdev)) - ret = phy_start_aneg(phy); - } else - priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, - priv->flow_ctrl, priv->pause); - return ret; + return phy_start_aneg(phy); + } + + priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl, + priv->pause); + return 0; } static void stmmac_get_ethtool_stats(struct net_device *dev, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index e4071265b..4c8c60af7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -285,8 +285,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv) /* Using PCS we cannot dial with the phy registers at this stage * so we do not support extra feature like EEE. */ - if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) || - (priv->pcs == STMMAC_PCS_RTBI)) + if ((priv->hw->pcs == STMMAC_PCS_RGMII) || + (priv->hw->pcs == STMMAC_PCS_TBI) || + (priv->hw->pcs == STMMAC_PCS_RTBI)) goto out; /* MAC core supports the EEE feature. 
*/ @@ -799,10 +800,10 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv) (interface == PHY_INTERFACE_MODE_RGMII_RXID) || (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { pr_debug("STMMAC: PCS RGMII support enable\n"); - priv->pcs = STMMAC_PCS_RGMII; + priv->hw->pcs = STMMAC_PCS_RGMII; } else if (interface == PHY_INTERFACE_MODE_SGMII) { pr_debug("STMMAC: PCS SGMII support enable\n"); - priv->pcs = STMMAC_PCS_SGMII; + priv->hw->pcs = STMMAC_PCS_SGMII; } } } @@ -1665,6 +1666,19 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) if (priv->plat->bus_setup) priv->plat->bus_setup(priv->ioaddr); + /* PS and related bits will be programmed according to the speed */ + if (priv->hw->pcs) { + int speed = priv->plat->mac_port_sel_speed; + + if ((speed == SPEED_10) || (speed == SPEED_100) || + (speed == SPEED_1000)) { + priv->hw->ps = speed; + } else { + dev_warn(priv->device, "invalid port speed\n"); + priv->hw->ps = 0; + } + } + /* Initialize the MAC Core */ priv->hw->mac->core_init(priv->hw, dev->mtu); @@ -1714,8 +1728,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT); } - if (priv->pcs && priv->hw->mac->ctrl_ane) - priv->hw->mac->ctrl_ane(priv->hw, 0); + if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane) + priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0); /* set TX ring length */ if (priv->hw->dma->set_tx_ring_len) @@ -1748,8 +1762,9 @@ static int stmmac_open(struct net_device *dev) stmmac_check_ether_addr(priv); - if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && - priv->pcs != STMMAC_PCS_RTBI) { + if (priv->hw->pcs != STMMAC_PCS_RGMII && + priv->hw->pcs != STMMAC_PCS_TBI && + priv->hw->pcs != STMMAC_PCS_RTBI) { ret = stmmac_init_phy(dev); if (ret) { pr_err("%s: Cannot attach to PHY (error: %d)\n", @@ -2809,6 +2824,14 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) priv->rx_tail_addr, STMMAC_CHAN0); } + + /* PCS link status */ + if (priv->hw->pcs) { + if (priv->xstats.pcs_link) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + } } /* To handle DMA interrupts */ @@ -3130,6 +3153,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv) */ priv->plat->enh_desc = priv->dma_cap.enh_desc; priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; + priv->hw->pmt = priv->plat->pmt; /* TXCOE doesn't work in thresh DMA mode */ if (priv->plat->force_thresh_dma_mode) @@ -3325,8 +3349,9 @@ int stmmac_dvr_probe(struct device *device, stmmac_check_pcs_mode(priv); - if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && - priv->pcs != STMMAC_PCS_RTBI) { + if (priv->hw->pcs != STMMAC_PCS_RGMII && + priv->hw->pcs != STMMAC_PCS_TBI && + priv->hw->pcs != STMMAC_PCS_RTBI) { /* MDIO bus Registration */ ret = stmmac_mdio_register(ndev); if (ret < 0) { @@ -3372,12 +3397,14 @@ int stmmac_dvr_remove(struct device *dev) stmmac_set_mac(priv->ioaddr, false); netif_carrier_off(ndev); unregister_netdev(ndev); + of_node_put(priv->plat->phy_node); if (priv->stmmac_rst) reset_control_assert(priv->stmmac_rst); clk_disable_unprepare(priv->pclk); clk_disable_unprepare(priv->stmmac_clk); - if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI && - priv->pcs != STMMAC_PCS_RTBI) + if (priv->hw->pcs != STMMAC_PCS_RGMII && + priv->hw->pcs != STMMAC_PCS_TBI && + priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); free_netdev(ndev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h new file mode 
100644 index 000000000..eba41c24b --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h @@ -0,0 +1,159 @@ +/* + * stmmac_pcs.h: Physical Coding Sublayer Header File + * + * Copyright (C) 2016 STMicroelectronics (R&D) Limited + * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __STMMAC_PCS_H__ +#define __STMMAC_PCS_H__ + +#include <linux/slab.h> +#include <linux/io.h> +#include "common.h" + +/* PCS registers (AN/TBI/SGMII/RGMII) offsets */ +#define GMAC_AN_CTRL(x) (x) /* AN control */ +#define GMAC_AN_STATUS(x) (x + 0x4) /* AN status */ +#define GMAC_ANE_ADV(x) (x + 0x8) /* ANE Advertisement */ +#define GMAC_ANE_LPA(x) (x + 0xc) /* ANE link partener ability */ +#define GMAC_ANE_EXP(x) (x + 0x10) /* ANE expansion */ +#define GMAC_TBI(x) (x + 0x14) /* TBI extend status */ + +/* AN Configuration defines */ +#define GMAC_AN_CTRL_RAN BIT(9) /* Restart Auto-Negotiation */ +#define GMAC_AN_CTRL_ANE BIT(12) /* Auto-Negotiation Enable */ +#define GMAC_AN_CTRL_ELE BIT(14) /* External Loopback Enable */ +#define GMAC_AN_CTRL_ECD BIT(16) /* Enable Comma Detect */ +#define GMAC_AN_CTRL_LR BIT(17) /* Lock to Reference */ +#define GMAC_AN_CTRL_SGMRAL BIT(18) /* SGMII RAL Control */ + +/* AN Status defines */ +#define GMAC_AN_STATUS_LS BIT(2) /* Link Status 0:down 1:up */ +#define GMAC_AN_STATUS_ANA BIT(3) /* Auto-Negotiation Ability */ +#define GMAC_AN_STATUS_ANC BIT(5) /* Auto-Negotiation Complete */ +#define GMAC_AN_STATUS_ES BIT(8) /* Extended Status */ + +/* ADV and LPA defines */ +#define GMAC_ANE_FD BIT(5) +#define GMAC_ANE_HD BIT(6) +#define GMAC_ANE_PSE GENMASK(8, 7) +#define GMAC_ANE_PSE_SHIFT 7 +#define GMAC_ANE_RFE GENMASK(13, 12) +#define GMAC_ANE_RFE_SHIFT 12 +#define GMAC_ANE_ACK BIT(14) + +/** + * dwmac_pcs_isr - TBI, RTBI, or SGMII PHY ISR + * @ioaddr: IO registers pointer + * @reg: Base address of the AN Control Register. + * @intr_status: GMAC core interrupt status + * @x: pointer to log these events as stats + * Description: it is the ISR for PCS events: Auto-Negotiation Completed and + * Link status. + */ +static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg, + unsigned int intr_status, + struct stmmac_extra_stats *x) +{ + u32 val = readl(ioaddr + GMAC_AN_STATUS(reg)); + + if (intr_status & PCS_ANE_IRQ) { + x->irq_pcs_ane_n++; + if (val & GMAC_AN_STATUS_ANC) + pr_info("stmmac_pcs: ANE process completed\n"); + } + + if (intr_status & PCS_LINK_IRQ) { + x->irq_pcs_link_n++; + if (val & GMAC_AN_STATUS_LS) + pr_info("stmmac_pcs: Link Up\n"); + else + pr_info("stmmac_pcs: Link Down\n"); + } +} + +/** + * dwmac_rane - To restart ANE + * @ioaddr: IO registers pointer + * @reg: Base address of the AN Control Register. + * @restart: to restart ANE + * Description: this is to just restart the Auto-Negotiation. + */ +static inline void dwmac_rane(void __iomem *ioaddr, u32 reg, bool restart) +{ + u32 value = readl(ioaddr + GMAC_AN_CTRL(reg)); + + if (restart) + value |= GMAC_AN_CTRL_RAN; + + writel(value, ioaddr + GMAC_AN_CTRL(reg)); +} + +/** + * dwmac_ctrl_ane - To program the AN Control Register. + * @ioaddr: IO registers pointer + * @reg: Base address of the AN Control Register. + * @ane: to enable the auto-negotiation + * @srgmi_ral: to manage MAC-2-MAC SGMII connections. 
+ * @loopback: to cause the PHY to loopback tx data into rx path. + * Description: this is the main function to configure the AN control register + * and init the ANE, select loopback (usually for debugging purpose) and + * configure SGMII RAL. + */ +static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane, + bool srgmi_ral, bool loopback) +{ + u32 value = readl(ioaddr + GMAC_AN_CTRL(reg)); + + /* Enable and restart the Auto-Negotiation */ + if (ane) + value |= GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_RAN; + + /* In case of MAC-2-MAC connection, block is configured to operate + * according to MAC conf register. + */ + if (srgmi_ral) + value |= GMAC_AN_CTRL_SGMRAL; + + if (loopback) + value |= GMAC_AN_CTRL_ELE; + + writel(value, ioaddr + GMAC_AN_CTRL(reg)); +} + +/** + * dwmac_get_adv_lp - Get ADV and LP cap + * @ioaddr: IO registers pointer + * @reg: Base address of the AN Control Register. + * @adv_lp: structure to store the adv,lp status + * Description: this is to expose the ANE advertisement and Link partner ability + * status to ethtool support. + */ +static inline void dwmac_get_adv_lp(void __iomem *ioaddr, u32 reg, + struct rgmii_adv *adv_lp) +{ + u32 value = readl(ioaddr + GMAC_ANE_ADV(reg)); + + if (value & GMAC_ANE_FD) + adv_lp->duplex = DUPLEX_FULL; + if (value & GMAC_ANE_HD) + adv_lp->duplex |= DUPLEX_HALF; + + adv_lp->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; + + value = readl(ioaddr + GMAC_ANE_LPA(reg)); + + if (value & GMAC_ANE_FD) + adv_lp->lp_duplex = DUPLEX_FULL; + if (value & GMAC_ANE_HD) + adv_lp->lp_duplex = DUPLEX_HALF; + + adv_lp->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT; +} +#endif /* __STMMAC_PCS_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 409db913b..756bb548e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -113,8 +113,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) return NULL; axi = kzalloc(sizeof(*axi), GFP_KERNEL); - if (!axi) + if (!axi) { + of_node_put(np); return ERR_PTR(-ENOMEM); + } axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en"); axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm"); @@ -127,6 +129,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt); of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt); of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN); + of_node_put(np); return axi; } @@ -302,7 +305,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); if (!dma_cfg) { - of_node_put(np); + of_node_put(plat->phy_node); return ERR_PTR(-ENOMEM); } plat->dma_cfg = dma_cfg; @@ -319,6 +322,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set."); } + of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed); + plat->axi = stmmac_axi_setup(pdev); return plat; @@ -411,7 +416,9 @@ static int stmmac_pltfr_suspend(struct device *dev) struct platform_device *pdev = to_platform_device(dev); ret = stmmac_suspend(dev); - if (priv->plat->exit) + if (priv->plat->suspend) + priv->plat->suspend(pdev, priv->plat->bsp_priv); + else if (priv->plat->exit) priv->plat->exit(pdev, priv->plat->bsp_priv); return ret; @@ 
-430,7 +437,9 @@ static int stmmac_pltfr_resume(struct device *dev) struct stmmac_priv *priv = netdev_priv(ndev); struct platform_device *pdev = to_platform_device(dev); - if (priv->plat->init) + if (priv->plat->resume) + priv->plat->resume(pdev, priv->plat->bsp_priv); + else if (priv->plat->init) priv->plat->init(pdev, priv->plat->bsp_priv); return stmmac_resume(dev); diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index 158213cd6..4490ebaed 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@ -46,7 +46,6 @@ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/vmalloc.h> -#include <linux/version.h> #include <linux/device.h> #include <linux/bitrev.h> @@ -598,7 +597,6 @@ struct net_local { struct work_struct txtimeout_reinit; phy_interface_t phy_interface; - struct phy_device *phy_dev; struct mii_bus *mii_bus; unsigned int link; @@ -816,7 +814,7 @@ static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg, static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) { struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = lp->phy_dev; + struct phy_device *phydev = ndev->phydev; if (!netif_running(ndev)) return -EINVAL; @@ -850,6 +848,7 @@ static void dwceqos_link_down(struct net_local *lp) static void dwceqos_link_up(struct net_local *lp) { + struct net_device *ndev = lp->ndev; u32 regval; unsigned long flags; @@ -860,7 +859,7 @@ static void dwceqos_link_up(struct net_local *lp) dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); spin_unlock_irqrestore(&lp->hw_lock, flags); - lp->eee_active = !phy_init_eee(lp->phy_dev, 0); + lp->eee_active = !phy_init_eee(ndev->phydev, 0); /* Check for changed EEE capability */ if (!lp->eee_active && lp->eee_enabled) { @@ -876,7 +875,8 @@ static void dwceqos_link_up(struct net_local *lp) static void dwceqos_set_speed(struct net_local *lp) { - struct phy_device *phydev = lp->phy_dev; + struct net_device *ndev = lp->ndev; + struct phy_device *phydev = ndev->phydev; u32 regval; regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); @@ -903,7 +903,7 @@ static void dwceqos_set_speed(struct net_local *lp) static void dwceqos_adjust_link(struct net_device *ndev) { struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = lp->phy_dev; + struct phy_device *phydev = ndev->phydev; int status_change = 0; if (lp->phy_defer) @@ -987,7 +987,6 @@ static int dwceqos_mii_probe(struct net_device *ndev) lp->link = 0; lp->speed = 0; lp->duplex = DUPLEX_UNKNOWN; - lp->phy_dev = phydev; return 0; } @@ -1247,7 +1246,7 @@ static int dwceqos_mii_init(struct net_local *lp) lp->mii_bus->read = &dwceqos_mdio_read; lp->mii_bus->write = &dwceqos_mdio_write; lp->mii_bus->priv = lp; - lp->mii_bus->parent = &lp->ndev->dev; + lp->mii_bus->parent = &lp->pdev->dev; of_address_to_resource(lp->pdev->dev.of_node, 0, &res); snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx", @@ -1531,6 +1530,7 @@ static void dwceqos_configure_bus(struct net_local *lp) static void dwceqos_init_hw(struct net_local *lp) { + struct net_device *ndev = lp->ndev; u32 regval; u32 buswidth; u32 dma_skip; @@ -1622,13 +1622,7 @@ static void dwceqos_init_hw(struct net_local *lp) DWCEQOS_MMC_CTRL_RSTONRD); dwceqos_enable_mmc_interrupt(lp); - /* Enable Interrupts */ - dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, - DWCEQOS_DMA_CH0_IE_NIE | - DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE | - DWCEQOS_DMA_CH0_IE_AIE | - 
DWCEQOS_DMA_CH0_IE_FBEE); - + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0); dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0); dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC | @@ -1645,10 +1639,10 @@ static void dwceqos_init_hw(struct net_local *lp) regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); lp->phy_defer = false; - mutex_lock(&lp->phy_dev->lock); - phy_read_status(lp->phy_dev); + mutex_lock(&ndev->phydev->lock); + phy_read_status(ndev->phydev); dwceqos_adjust_link(lp->ndev); - mutex_unlock(&lp->phy_dev->lock); + mutex_unlock(&ndev->phydev->lock); } static void dwceqos_tx_reclaim(unsigned long data) @@ -1898,13 +1892,22 @@ static int dwceqos_open(struct net_device *ndev) * hence the unusual init order with phy_start first. */ lp->phy_defer = true; - phy_start(lp->phy_dev); + phy_start(ndev->phydev); dwceqos_init_hw(lp); napi_enable(&lp->napi); netif_start_queue(ndev); tasklet_enable(&lp->tx_bdreclaim_tasklet); + /* Enable Interrupts -- do this only after we enable NAPI and the + * tasklet. + */ + dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, + DWCEQOS_DMA_CH0_IE_NIE | + DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE | + DWCEQOS_DMA_CH0_IE_AIE | + DWCEQOS_DMA_CH0_IE_FBEE); + return 0; } @@ -1943,7 +1946,7 @@ static int dwceqos_stop(struct net_device *ndev) dwceqos_drain_dma(lp); dwceqos_reset_hw(lp); - phy_stop(lp->phy_dev); + phy_stop(ndev->phydev); dwceqos_descriptor_free(lp); @@ -2523,30 +2526,6 @@ dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s) return s; } -static int -dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) -{ - struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = lp->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_gset(phydev, ecmd); -} - -static int -dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) -{ - struct net_local *lp = netdev_priv(ndev); - struct phy_device *phydev = lp->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_sset(phydev, ecmd); -} - static void dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed) { @@ -2574,17 +2553,17 @@ static int dwceqos_set_pauseparam(struct net_device *ndev, lp->flowcontrol.autoneg = pp->autoneg; if (pp->autoneg) { - lp->phy_dev->advertising |= ADVERTISED_Pause; - lp->phy_dev->advertising |= ADVERTISED_Asym_Pause; + ndev->phydev->advertising |= ADVERTISED_Pause; + ndev->phydev->advertising |= ADVERTISED_Asym_Pause; } else { - lp->phy_dev->advertising &= ~ADVERTISED_Pause; - lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause; + ndev->phydev->advertising &= ~ADVERTISED_Pause; + ndev->phydev->advertising &= ~ADVERTISED_Asym_Pause; lp->flowcontrol.rx = pp->rx_pause; lp->flowcontrol.tx = pp->tx_pause; } if (netif_running(ndev)) - ret = phy_start_aneg(lp->phy_dev); + ret = phy_start_aneg(ndev->phydev); return ret; } @@ -2705,7 +2684,7 @@ static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata) dwceqos_get_tx_lpi_state(regval)); } - return phy_ethtool_get_eee(lp->phy_dev, edata); + return phy_ethtool_get_eee(ndev->phydev, edata); } static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata) @@ -2747,7 +2726,7 @@ static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata) spin_unlock_irqrestore(&lp->hw_lock, flags); } - return phy_ethtool_set_eee(lp->phy_dev, edata); + return phy_ethtool_set_eee(ndev->phydev, edata); } static u32 dwceqos_get_msglevel(struct net_device *ndev) @@ -2765,8 +2744,6 @@ static void 
dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel) } static struct ethtool_ops dwceqos_ethtool_ops = { - .get_settings = dwceqos_get_settings, - .set_settings = dwceqos_set_settings, .get_drvinfo = dwceqos_get_drvinfo, .get_link = ethtool_op_get_link, .get_pauseparam = dwceqos_get_pauseparam, @@ -2780,6 +2757,8 @@ static struct ethtool_ops dwceqos_ethtool_ops = { .set_eee = dwceqos_set_eee, .get_msglevel = dwceqos_get_msglevel, .set_msglevel = dwceqos_set_msglevel, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static struct net_device_ops netdev_ops = { @@ -2874,25 +2853,17 @@ static int dwceqos_probe(struct platform_device *pdev) ndev->features = ndev->hw_features; - netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT); - - ret = register_netdev(ndev); - if (ret) { - dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); - goto err_out_clk_dis_aper; - } - lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk"); if (IS_ERR(lp->phy_ref_clk)) { dev_err(&pdev->dev, "phy_ref_clk clock not found.\n"); ret = PTR_ERR(lp->phy_ref_clk); - goto err_out_unregister_netdev; + goto err_out_clk_dis_aper; } ret = clk_prepare_enable(lp->phy_ref_clk); if (ret) { dev_err(&pdev->dev, "Unable to enable device clock.\n"); - goto err_out_unregister_netdev; + goto err_out_clk_dis_aper; } lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node, @@ -2901,7 +2872,7 @@ static int dwceqos_probe(struct platform_device *pdev) ret = of_phy_register_fixed_link(lp->pdev->dev.of_node); if (ret < 0) { dev_err(&pdev->dev, "invalid fixed-link"); - goto err_out_unregister_netdev; + goto err_out_clk_dis_phy; } lp->phy_node = of_node_get(lp->pdev->dev.of_node); @@ -2910,7 +2881,7 @@ static int dwceqos_probe(struct platform_device *pdev) ret = of_get_phy_mode(lp->pdev->dev.of_node); if (ret < 0) { dev_err(&lp->pdev->dev, "error in getting phy i/f\n"); - goto err_out_unregister_clk_notifier; + goto err_out_clk_dis_phy; } lp->phy_interface = ret; @@ -2918,14 +2889,14 @@ static int dwceqos_probe(struct platform_device *pdev) ret = dwceqos_mii_init(lp); if (ret) { dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n"); - goto err_out_unregister_clk_notifier; + goto err_out_clk_dis_phy; } ret = dwceqos_mii_probe(ndev); if (ret != 0) { netdev_err(ndev, "mii_probe fail.\n"); ret = -ENXIO; - goto err_out_unregister_clk_notifier; + goto err_out_clk_dis_phy; } dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); @@ -2934,7 +2905,8 @@ static int dwceqos_probe(struct platform_device *pdev) (unsigned long)ndev); tasklet_disable(&lp->tx_bdreclaim_tasklet); - lp->txtimeout_handler_wq = create_singlethread_workqueue(DRIVER_NAME); + lp->txtimeout_handler_wq = alloc_workqueue(DRIVER_NAME, + WQ_MEM_RECLAIM, 0); INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout); platform_set_drvdata(pdev, ndev); @@ -2942,7 +2914,7 @@ static int dwceqos_probe(struct platform_device *pdev) if (ret) { dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n", ret); - goto err_out_unregister_clk_notifier; + goto err_out_clk_dis_phy; } dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n", pdev->id, ndev->base_addr, ndev->irq); @@ -2952,18 +2924,24 @@ static int dwceqos_probe(struct platform_device *pdev) if (ret) { dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n", ndev->irq, ret); - goto err_out_unregister_clk_notifier; + goto err_out_clk_dis_phy; } if (netif_msg_probe(lp)) netdev_dbg(ndev, "net_local@%p\n", lp); + 
netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT); + + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); + goto err_out_clk_dis_phy; + } + return 0; -err_out_unregister_clk_notifier: +err_out_clk_dis_phy: clk_disable_unprepare(lp->phy_ref_clk); -err_out_unregister_netdev: - unregister_netdev(ndev); err_out_clk_dis_aper: clk_disable_unprepare(lp->apb_pclk); err_out_free_netdev: @@ -2981,8 +2959,8 @@ static int dwceqos_remove(struct platform_device *pdev) if (ndev) { lp = netdev_priv(ndev); - if (lp->phy_dev) - phy_disconnect(lp->phy_dev); + if (ndev->phydev) + phy_disconnect(ndev->phydev); mdiobus_unregister(lp->mii_bus); mdiobus_free(lp->mii_bus); diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index f7540dd6b..f5e931e50 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1987,7 +1987,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) { err = pci_enable_msi(pdev); if (err) - pr_err("Can't eneble msi. error is %d\n", err); + pr_err("Can't enable msi. error is %d\n", err); else nic->irq_type = IRQ_MSI; } else diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index e7f0b7d95..9904d740d 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -48,8 +48,7 @@ config TI_DAVINCI_CPDMA will be called davinci_cpdma. This is recommended. config TI_CPSW_PHY_SEL - bool "TI CPSW Switch Phy sel Support" - depends on TI_CPSW + bool ---help--- This driver supports configuring of the phy mode connected to the CPSW. diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 7eef45e6d..d300d536d 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -205,7 +205,6 @@ struct cpmac_priv { dma_addr_t dma_ring; void __iomem *regs; struct mii_bus *mii_bus; - struct phy_device *phy; char phy_name[MII_BUS_ID_SIZE + 3]; int oldlink, oldspeed, oldduplex; u32 msg_enable; @@ -830,37 +829,12 @@ static void cpmac_tx_timeout(struct net_device *dev) static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { - struct cpmac_priv *priv = netdev_priv(dev); - if (!(netif_running(dev))) return -EINVAL; - if (!priv->phy) + if (!dev->phydev) return -EINVAL; - return phy_mii_ioctl(priv->phy, ifr, cmd); -} - -static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct cpmac_priv *priv = netdev_priv(dev); - - if (priv->phy) - return phy_ethtool_gset(priv->phy, cmd); - - return -EINVAL; -} - -static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct cpmac_priv *priv = netdev_priv(dev); - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (priv->phy) - return phy_ethtool_sset(priv->phy, cmd); - - return -EINVAL; + return phy_mii_ioctl(dev->phydev, ifr, cmd); } static void cpmac_get_ringparam(struct net_device *dev, @@ -900,12 +874,12 @@ static void cpmac_get_drvinfo(struct net_device *dev, } static const struct ethtool_ops cpmac_ethtool_ops = { - .get_settings = cpmac_get_settings, - .set_settings = cpmac_set_settings, .get_drvinfo = cpmac_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = cpmac_get_ringparam, .set_ringparam = cpmac_set_ringparam, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static void cpmac_adjust_link(struct 
net_device *dev) @@ -914,16 +888,16 @@ static void cpmac_adjust_link(struct net_device *dev) int new_state = 0; spin_lock(&priv->lock); - if (priv->phy->link) { + if (dev->phydev->link) { netif_tx_start_all_queues(dev); - if (priv->phy->duplex != priv->oldduplex) { + if (dev->phydev->duplex != priv->oldduplex) { new_state = 1; - priv->oldduplex = priv->phy->duplex; + priv->oldduplex = dev->phydev->duplex; } - if (priv->phy->speed != priv->oldspeed) { + if (dev->phydev->speed != priv->oldspeed) { new_state = 1; - priv->oldspeed = priv->phy->speed; + priv->oldspeed = dev->phydev->speed; } if (!priv->oldlink) { @@ -938,7 +912,7 @@ static void cpmac_adjust_link(struct net_device *dev) } if (new_state && netif_msg_link(priv) && net_ratelimit()) - phy_print_status(priv->phy); + phy_print_status(dev->phydev); spin_unlock(&priv->lock); } @@ -1016,8 +990,8 @@ static int cpmac_open(struct net_device *dev) cpmac_hw_start(dev); napi_enable(&priv->napi); - priv->phy->state = PHY_CHANGELINK; - phy_start(priv->phy); + dev->phydev->state = PHY_CHANGELINK; + phy_start(dev->phydev); return 0; @@ -1032,8 +1006,10 @@ fail_desc: kfree_skb(priv->rx_head[i].skb); } } + dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size, + priv->desc_ring, priv->dma_ring); + fail_alloc: - kfree(priv->desc_ring); iounmap(priv->regs); fail_remap: @@ -1053,7 +1029,7 @@ static int cpmac_stop(struct net_device *dev) cancel_work_sync(&priv->reset_work); napi_disable(&priv->napi); - phy_stop(priv->phy); + phy_stop(dev->phydev); cpmac_hw_stop(dev); @@ -1106,6 +1082,7 @@ static int cpmac_probe(struct platform_device *pdev) struct cpmac_priv *priv; struct net_device *dev; struct plat_cpmac_data *pdata; + struct phy_device *phydev = NULL; pdata = dev_get_platdata(&pdev->dev); @@ -1142,7 +1119,7 @@ static int cpmac_probe(struct platform_device *pdev) mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); if (!mem) { rc = -ENODEV; - goto out; + goto fail; } dev->irq = platform_get_irq_byname(pdev, "irq"); @@ -1162,15 +1139,15 @@ static int cpmac_probe(struct platform_device *pdev) snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); - priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, - PHY_INTERFACE_MODE_MII); + phydev = phy_connect(dev, priv->phy_name, cpmac_adjust_link, + PHY_INTERFACE_MODE_MII); - if (IS_ERR(priv->phy)) { + if (IS_ERR(phydev)) { if (netif_msg_drv(priv)) dev_err(&pdev->dev, "Could not attach to PHY\n"); - rc = PTR_ERR(priv->phy); - goto out; + rc = PTR_ERR(phydev); + goto fail; } rc = register_netdev(dev); @@ -1189,7 +1166,6 @@ static int cpmac_probe(struct platform_device *pdev) fail: free_netdev(dev); -out: return rc; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 53190894f..f85d605e4 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -364,7 +364,6 @@ static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset) } struct cpsw_priv { - spinlock_t lock; struct platform_device *pdev; struct net_device *ndev; struct napi_struct napi_rx; @@ -735,6 +734,7 @@ static void cpsw_rx_handler(void *token, int len, int status) netif_receive_skb(skb); ndev->stats.rx_bytes += len; ndev->stats.rx_packets++; + kmemleak_not_leak(new_skb); } else { ndev->stats.rx_dropped++; new_skb = skb; @@ -1244,6 +1244,7 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) slave->phy = NULL; cpsw_ale_control_set(priv->ale, slave_port, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); + 
soft_reset_slave(slave); } static int cpsw_ndo_open(struct net_device *ndev) @@ -1252,7 +1253,11 @@ static int cpsw_ndo_open(struct net_device *ndev) int i, ret; u32 reg; - pm_runtime_get_sync(&priv->pdev->dev); + ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + return ret; + } if (!cpsw_common_res_usage_state(priv)) cpsw_intr_disable(priv); @@ -1278,6 +1283,7 @@ static int cpsw_ndo_open(struct net_device *ndev) if (!cpsw_common_res_usage_state(priv)) { struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0); + int buf_num; /* setup tx dma to fixed prio and zero offset */ cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1); @@ -1305,10 +1311,8 @@ static int cpsw_ndo_open(struct net_device *ndev) enable_irq(priv->irqs_table[0]); } - if (WARN_ON(!priv->data.rx_descs)) - priv->data.rx_descs = 128; - - for (i = 0; i < priv->data.rx_descs; i++) { + buf_num = cpdma_chan_get_rx_buf_num(priv->dma); + for (i = 0; i < buf_num; i++) { struct sk_buff *skb; ret = -ENOMEM; @@ -1322,6 +1326,7 @@ static int cpsw_ndo_open(struct net_device *ndev) kfree_skb(skb); goto err_cleanup; } + kmemleak_not_leak(skb); } /* continue even if we didn't manage to submit all * receive descs @@ -1611,10 +1616,17 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) struct sockaddr *addr = (struct sockaddr *)p; int flags = 0; u16 vid = 0; + int ret; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; + ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + return ret; + } + if (priv->data.dual_emac) { vid = priv->slaves[priv->emac_port].port_vlan; flags = ALE_VLAN; @@ -1629,6 +1641,8 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN); for_each_slave(priv, cpsw_set_slave_mac, priv); + pm_runtime_put(&priv->pdev->dev); + return 0; } @@ -1693,10 +1707,17 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) { struct cpsw_priv *priv = netdev_priv(ndev); + int ret; if (vid == priv->data.default_vlan) return 0; + ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + return ret; + } + if (priv->data.dual_emac) { /* In dual EMAC, reserved VLAN id should not be used for * creating VLAN interfaces as this can break the dual @@ -1711,7 +1732,10 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, } dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid); - return cpsw_add_vlan_ale_entry(priv, vid); + ret = cpsw_add_vlan_ale_entry(priv, vid); + + pm_runtime_put(&priv->pdev->dev); + return ret; } static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, @@ -1723,6 +1747,12 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, if (vid == priv->data.default_vlan) return 0; + ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + return ret; + } + if (priv->data.dual_emac) { int i; @@ -1742,8 +1772,10 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, if (ret != 0) return ret; - return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast, - 0, ALE_VLAN, vid); + ret = cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast, + 0, ALE_VLAN, vid); + pm_runtime_put(&priv->pdev->dev); + return ret; } static const struct net_device_ops cpsw_netdev_ops = { @@ -1902,10 +1934,33 @@ static int cpsw_set_pauseparam(struct net_device *ndev, priv->tx_pause = 
pause->tx_pause ? true : false; for_each_slave(priv, _cpsw_adjust_link, priv, &link); - return 0; } +static int cpsw_ethtool_op_begin(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + cpsw_err(priv, drv, "ethtool begin failed %d\n", ret); + pm_runtime_put_noidle(&priv->pdev->dev); + } + + return ret; +} + +static void cpsw_ethtool_op_complete(struct net_device *ndev) +{ + struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + ret = pm_runtime_put(&priv->pdev->dev); + if (ret < 0) + cpsw_err(priv, drv, "ethtool complete failed %d\n", ret); +} + static const struct ethtool_ops cpsw_ethtool_ops = { .get_drvinfo = cpsw_get_drvinfo, .get_msglevel = cpsw_get_msglevel, @@ -1925,6 +1980,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = { .set_wol = cpsw_set_wol, .get_regs_len = cpsw_get_regs_len, .get_regs = cpsw_get_regs, + .begin = cpsw_ethtool_op_begin, + .complete = cpsw_ethtool_op_complete, }; static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, @@ -1999,12 +2056,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, } data->bd_ram_size = prop; - if (of_property_read_u32(node, "rx_descs", &prop)) { - dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n"); - return -EINVAL; - } - data->rx_descs = prop; - if (of_property_read_u32(node, "mac_control", &prop)) { dev_err(&pdev->dev, "Missing mac_control property in the DT.\n"); return -EINVAL; @@ -2022,7 +2073,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, if (ret) dev_warn(&pdev->dev, "Doesn't have any child node\n"); - for_each_child_of_node(node, slave_node) { + for_each_available_child_of_node(node, slave_node) { struct cpsw_slave_data *slave_data = data->slave_data + i; const void *mac_addr = NULL; int lenp; @@ -2124,7 +2175,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev, } priv_sl2 = netdev_priv(ndev); - spin_lock_init(&priv_sl2->lock); priv_sl2->data = *data; priv_sl2->pdev = pdev; priv_sl2->ndev = ndev; @@ -2243,7 +2293,6 @@ static int cpsw_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); priv = netdev_priv(ndev); - spin_lock_init(&priv->lock); priv->pdev = pdev; priv->ndev = ndev; priv->dev = &ndev->dev; @@ -2321,7 +2370,11 @@ static int cpsw_probe(struct platform_device *pdev) /* Need to enable clocks with runtime PM api to access module * registers */ - pm_runtime_get_sync(&pdev->dev); + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); + goto clean_runtime_disable_ret; + } priv->version = readl(&priv->regs->id_ver); pm_runtime_put_sync(&pdev->dev); @@ -2513,19 +2566,17 @@ clean_ndev_ret: return ret; } -static int cpsw_remove_child_device(struct device *dev, void *c) -{ - struct platform_device *pdev = to_platform_device(dev); - - of_device_unregister(pdev); - - return 0; -} - static int cpsw_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct cpsw_priv *priv = netdev_priv(ndev); + int ret; + + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&pdev->dev); + return ret; + } if (priv->data.dual_emac) unregister_netdev(cpsw_get_slave_ndev(priv, 1)); @@ -2533,8 +2584,9 @@ static int cpsw_remove(struct platform_device *pdev) cpsw_ale_destroy(priv->ale); cpdma_ctlr_destroy(priv->dma); + of_platform_depopulate(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); - 
device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device); if (priv->data.dual_emac) free_netdev(cpsw_get_slave_ndev(priv, 1)); free_netdev(ndev); @@ -2554,16 +2606,12 @@ static int cpsw_suspend(struct device *dev) for (i = 0; i < priv->data.slaves; i++) { if (netif_running(priv->slaves[i].ndev)) cpsw_ndo_stop(priv->slaves[i].ndev); - soft_reset_slave(priv->slaves + i); } } else { if (netif_running(ndev)) cpsw_ndo_stop(ndev); - for_each_slave(priv, soft_reset_slave); } - pm_runtime_put_sync(&pdev->dev); - /* Select sleep pin state */ pinctrl_pm_select_sleep_state(&pdev->dev); @@ -2576,8 +2624,6 @@ static int cpsw_resume(struct device *dev) struct net_device *ndev = platform_get_drvdata(pdev); struct cpsw_priv *priv = netdev_priv(ndev); - pm_runtime_get_sync(&pdev->dev); - /* Select default pin state */ pinctrl_pm_select_default_state(&pdev->dev); diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h index e50afd1b2..16b54c6f3 100644 --- a/drivers/net/ethernet/ti/cpsw.h +++ b/drivers/net/ethernet/ti/cpsw.h @@ -35,7 +35,6 @@ struct cpsw_platform_data { u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */ u32 ale_entries; /* ale table size */ u32 bd_ram_size; /*buffer descriptor ram size */ - u32 rx_descs; /* Number of Rx Descriptios */ u32 mac_control; /* Mac control register */ u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/ bool dual_emac; /* Enable Dual EMAC mode */ diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 18bf3a8fd..19e5f32a8 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -21,7 +21,7 @@ #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/delay.h> - +#include <linux/genalloc.h> #include "davinci_cpdma.h" /* DMA Registers */ @@ -87,9 +87,8 @@ struct cpdma_desc_pool { void *cpumap; /* dma_alloc map */ int desc_size, mem_size; int num_desc, used_desc; - unsigned long *bitmap; struct device *dev; - spinlock_t lock; + struct gen_pool *gen_pool; }; enum cpdma_state { @@ -98,8 +97,6 @@ enum cpdma_state { CPDMA_STATE_TEARDOWN, }; -static const char *cpdma_state_str[] = { "idle", "active", "teardown" }; - struct cpdma_ctlr { enum cpdma_state state; struct cpdma_params params; @@ -117,6 +114,7 @@ struct cpdma_chan { int chan_num; spinlock_t lock; int count; + u32 desc_num; u32 mask; cpdma_handler_fn handler; enum dma_data_direction dir; @@ -145,6 +143,19 @@ struct cpdma_chan { (directed << CPDMA_TO_PORT_SHIFT)); \ } while (0) +static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) +{ + if (!pool) + return; + + WARN_ON(pool->used_desc); + if (pool->cpumap) + dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, + pool->phys); + else + iounmap(pool->iomap); +} + /* * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci * emac) have dedicated on-chip memory for these descriptors. 
Some other @@ -155,24 +166,25 @@ static struct cpdma_desc_pool * cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr, int size, int align) { - int bitmap_size; struct cpdma_desc_pool *pool; + int ret; pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL); if (!pool) - goto fail; - - spin_lock_init(&pool->lock); + goto gen_pool_create_fail; pool->dev = dev; pool->mem_size = size; pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align); pool->num_desc = size / pool->desc_size; - bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long); - pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL); - if (!pool->bitmap) - goto fail; + pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1, + "cpdma"); + if (IS_ERR(pool->gen_pool)) { + dev_err(dev, "pool create failed %ld\n", + PTR_ERR(pool->gen_pool)); + goto gen_pool_create_fail; + } if (phys) { pool->phys = phys; @@ -185,24 +197,22 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr, pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */ } - if (pool->iomap) - return pool; -fail: - return NULL; -} + if (!pool->iomap) + goto gen_pool_create_fail; -static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) -{ - if (!pool) - return; - - WARN_ON(pool->used_desc); - if (pool->cpumap) { - dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, - pool->phys); - } else { - iounmap(pool->iomap); + ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap, + pool->phys, pool->mem_size, -1); + if (ret < 0) { + dev_err(dev, "pool add failed %d\n", ret); + goto gen_pool_add_virt_fail; } + + return pool; + +gen_pool_add_virt_fail: + cpdma_desc_pool_destroy(pool); +gen_pool_create_fail: + return NULL; } static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool, @@ -220,47 +230,23 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) } static struct cpdma_desc __iomem * -cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx) +cpdma_desc_alloc(struct cpdma_desc_pool *pool) { - unsigned long flags; - int index; - int desc_start; - int desc_end; struct cpdma_desc __iomem *desc = NULL; - spin_lock_irqsave(&pool->lock, flags); - - if (is_rx) { - desc_start = 0; - desc_end = pool->num_desc/2; - } else { - desc_start = pool->num_desc/2; - desc_end = pool->num_desc; - } - - index = bitmap_find_next_zero_area(pool->bitmap, - desc_end, desc_start, num_desc, 0); - if (index < desc_end) { - bitmap_set(pool->bitmap, index, num_desc); - desc = pool->iomap + pool->desc_size * index; + desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool, + pool->desc_size); + if (desc) pool->used_desc++; - } - spin_unlock_irqrestore(&pool->lock, flags); return desc; } static void cpdma_desc_free(struct cpdma_desc_pool *pool, struct cpdma_desc __iomem *desc, int num_desc) { - unsigned long flags, index; - - index = ((unsigned long)desc - (unsigned long)pool->iomap) / - pool->desc_size; - spin_lock_irqsave(&pool->lock, flags); - bitmap_clear(pool->bitmap, index, num_desc); + gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size); pool->used_desc--; - spin_unlock_irqrestore(&pool->lock, flags); } struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) @@ -369,86 +355,13 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) } EXPORT_SYMBOL_GPL(cpdma_ctlr_stop); -int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr) -{ - struct device *dev = ctlr->dev; - unsigned long flags; - int i; - - spin_lock_irqsave(&ctlr->lock, flags); - - dev_info(dev, 
"CPDMA: state: %s", cpdma_state_str[ctlr->state]); - - dev_info(dev, "CPDMA: txidver: %x", - dma_reg_read(ctlr, CPDMA_TXIDVER)); - dev_info(dev, "CPDMA: txcontrol: %x", - dma_reg_read(ctlr, CPDMA_TXCONTROL)); - dev_info(dev, "CPDMA: txteardown: %x", - dma_reg_read(ctlr, CPDMA_TXTEARDOWN)); - dev_info(dev, "CPDMA: rxidver: %x", - dma_reg_read(ctlr, CPDMA_RXIDVER)); - dev_info(dev, "CPDMA: rxcontrol: %x", - dma_reg_read(ctlr, CPDMA_RXCONTROL)); - dev_info(dev, "CPDMA: softreset: %x", - dma_reg_read(ctlr, CPDMA_SOFTRESET)); - dev_info(dev, "CPDMA: rxteardown: %x", - dma_reg_read(ctlr, CPDMA_RXTEARDOWN)); - dev_info(dev, "CPDMA: txintstatraw: %x", - dma_reg_read(ctlr, CPDMA_TXINTSTATRAW)); - dev_info(dev, "CPDMA: txintstatmasked: %x", - dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED)); - dev_info(dev, "CPDMA: txintmaskset: %x", - dma_reg_read(ctlr, CPDMA_TXINTMASKSET)); - dev_info(dev, "CPDMA: txintmaskclear: %x", - dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR)); - dev_info(dev, "CPDMA: macinvector: %x", - dma_reg_read(ctlr, CPDMA_MACINVECTOR)); - dev_info(dev, "CPDMA: maceoivector: %x", - dma_reg_read(ctlr, CPDMA_MACEOIVECTOR)); - dev_info(dev, "CPDMA: rxintstatraw: %x", - dma_reg_read(ctlr, CPDMA_RXINTSTATRAW)); - dev_info(dev, "CPDMA: rxintstatmasked: %x", - dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED)); - dev_info(dev, "CPDMA: rxintmaskset: %x", - dma_reg_read(ctlr, CPDMA_RXINTMASKSET)); - dev_info(dev, "CPDMA: rxintmaskclear: %x", - dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR)); - dev_info(dev, "CPDMA: dmaintstatraw: %x", - dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW)); - dev_info(dev, "CPDMA: dmaintstatmasked: %x", - dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED)); - dev_info(dev, "CPDMA: dmaintmaskset: %x", - dma_reg_read(ctlr, CPDMA_DMAINTMASKSET)); - dev_info(dev, "CPDMA: dmaintmaskclear: %x", - dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR)); - - if (!ctlr->params.has_ext_regs) { - dev_info(dev, "CPDMA: dmacontrol: %x", - dma_reg_read(ctlr, CPDMA_DMACONTROL)); - dev_info(dev, "CPDMA: dmastatus: %x", - dma_reg_read(ctlr, CPDMA_DMASTATUS)); - dev_info(dev, "CPDMA: rxbuffofs: %x", - dma_reg_read(ctlr, CPDMA_RXBUFFOFS)); - } - - for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) - if (ctlr->channels[i]) - cpdma_chan_dump(ctlr->channels[i]); - - spin_unlock_irqrestore(&ctlr->lock, flags); - return 0; -} -EXPORT_SYMBOL_GPL(cpdma_ctlr_dump); - int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) { - unsigned long flags; int ret = 0, i; if (!ctlr) return -EINVAL; - spin_lock_irqsave(&ctlr->lock, flags); if (ctlr->state != CPDMA_STATE_IDLE) cpdma_ctlr_stop(ctlr); @@ -456,7 +369,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) cpdma_chan_destroy(ctlr->channels[i]); cpdma_desc_pool_destroy(ctlr->pool); - spin_unlock_irqrestore(&ctlr->lock, flags); return ret; } EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy); @@ -516,6 +428,7 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, chan->state = CPDMA_STATE_IDLE; chan->chan_num = chan_num; chan->handler = handler; + chan->desc_num = ctlr->pool->num_desc / 2; if (is_rx_chan(chan)) { chan->hdp = ctlr->params.rxhdp + offset; @@ -543,6 +456,12 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, } EXPORT_SYMBOL_GPL(cpdma_chan_create); +int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr) +{ + return ctlr->pool->num_desc / 2; +} +EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num); + int cpdma_chan_destroy(struct cpdma_chan *chan) { struct cpdma_ctlr *ctlr; @@ -574,54 +493,6 @@ int cpdma_chan_get_stats(struct cpdma_chan *chan, } 
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats); -int cpdma_chan_dump(struct cpdma_chan *chan) -{ - unsigned long flags; - struct device *dev = chan->ctlr->dev; - - spin_lock_irqsave(&chan->lock, flags); - - dev_info(dev, "channel %d (%s %d) state %s", - chan->chan_num, is_rx_chan(chan) ? "rx" : "tx", - chan_linear(chan), cpdma_state_str[chan->state]); - dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp)); - dev_info(dev, "\tcp: %x\n", chan_read(chan, cp)); - if (chan->rxfree) { - dev_info(dev, "\trxfree: %x\n", - chan_read(chan, rxfree)); - } - - dev_info(dev, "\tstats head_enqueue: %d\n", - chan->stats.head_enqueue); - dev_info(dev, "\tstats tail_enqueue: %d\n", - chan->stats.tail_enqueue); - dev_info(dev, "\tstats pad_enqueue: %d\n", - chan->stats.pad_enqueue); - dev_info(dev, "\tstats misqueued: %d\n", - chan->stats.misqueued); - dev_info(dev, "\tstats desc_alloc_fail: %d\n", - chan->stats.desc_alloc_fail); - dev_info(dev, "\tstats pad_alloc_fail: %d\n", - chan->stats.pad_alloc_fail); - dev_info(dev, "\tstats runt_receive_buff: %d\n", - chan->stats.runt_receive_buff); - dev_info(dev, "\tstats runt_transmit_buff: %d\n", - chan->stats.runt_transmit_buff); - dev_info(dev, "\tstats empty_dequeue: %d\n", - chan->stats.empty_dequeue); - dev_info(dev, "\tstats busy_dequeue: %d\n", - chan->stats.busy_dequeue); - dev_info(dev, "\tstats good_dequeue: %d\n", - chan->stats.good_dequeue); - dev_info(dev, "\tstats requeue: %d\n", - chan->stats.requeue); - dev_info(dev, "\tstats teardown_dequeue: %d\n", - chan->stats.teardown_dequeue); - - spin_unlock_irqrestore(&chan->lock, flags); - return 0; -} - static void __cpdma_chan_submit(struct cpdma_chan *chan, struct cpdma_desc __iomem *desc) { @@ -675,7 +546,13 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, goto unlock_ret; } - desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan)); + if (chan->count >= chan->desc_num) { + chan->stats.desc_alloc_fail++; + ret = -ENOMEM; + goto unlock_ret; + } + + desc = cpdma_desc_alloc(ctlr->pool); if (!desc) { chan->stats.desc_alloc_fail++; ret = -ENOMEM; @@ -721,24 +598,16 @@ EXPORT_SYMBOL_GPL(cpdma_chan_submit); bool cpdma_check_free_tx_desc(struct cpdma_chan *chan) { - unsigned long flags; - int index; - bool ret; struct cpdma_ctlr *ctlr = chan->ctlr; struct cpdma_desc_pool *pool = ctlr->pool; + bool free_tx_desc; + unsigned long flags; - spin_lock_irqsave(&pool->lock, flags); - - index = bitmap_find_next_zero_area(pool->bitmap, - pool->num_desc, pool->num_desc/2, 1, 0); - - if (index < pool->num_desc) - ret = true; - else - ret = false; - - spin_unlock_irqrestore(&pool->lock, flags); - return ret; + spin_lock_irqsave(&chan->lock, flags); + free_tx_desc = (chan->count < chan->desc_num) && + gen_pool_avail(pool->gen_pool); + spin_unlock_irqrestore(&chan->lock, flags); + return free_tx_desc; } EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc); diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h index 86dee487f..4b46cd6e9 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.h +++ b/drivers/net/ethernet/ti/davinci_cpdma.h @@ -77,14 +77,13 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params); int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr); int cpdma_ctlr_start(struct cpdma_ctlr *ctlr); int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr); -int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr); struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, cpdma_handler_fn handler); +int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr); int 
cpdma_chan_destroy(struct cpdma_chan *chan); int cpdma_chan_start(struct cpdma_chan *chan); int cpdma_chan_stop(struct cpdma_chan *chan); -int cpdma_chan_dump(struct cpdma_chan *chan); int cpdma_chan_get_stats(struct cpdma_chan *chan, struct cpdma_chan_stats *stats); diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index f56d66e6e..727a79f3c 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -348,7 +348,6 @@ struct emac_priv { u32 rx_addr_type; const char *phy_id; struct device_node *phy_node; - struct phy_device *phydev; spinlock_t lock; /*platform specific members*/ void (*int_enable) (void); @@ -380,97 +379,6 @@ static char *emac_rxhost_errcodes[16] = { #define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg))) /** - * emac_dump_regs - Dump important EMAC registers to debug terminal - * @priv: The DaVinci EMAC private adapter structure - * - * Executes ethtool set cmd & sets phy mode - * - */ -static void emac_dump_regs(struct emac_priv *priv) -{ - struct device *emac_dev = &priv->ndev->dev; - - /* Print important registers in EMAC */ - dev_info(emac_dev, "EMAC Basic registers\n"); - if (priv->version == EMAC_VERSION_1) { - dev_info(emac_dev, "EMAC: EWCTL: %08X, EWINTTCNT: %08X\n", - emac_ctrl_read(EMAC_CTRL_EWCTL), - emac_ctrl_read(EMAC_CTRL_EWINTTCNT)); - } - dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n", - emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL)); - dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\ - "RXMaxLen=%08X\n", emac_read(EMAC_RXMBPENABLE), - emac_read(EMAC_RXUNICASTSET), emac_read(EMAC_RXMAXLEN)); - dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\ - "MacConfig=%08X\n", emac_read(EMAC_MACCONTROL), - emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG)); - dev_info(emac_dev, "EMAC Statistics\n"); - dev_info(emac_dev, "EMAC: rx_good_frames:%d\n", - emac_read(EMAC_RXGOODFRAMES)); - dev_info(emac_dev, "EMAC: rx_broadcast_frames:%d\n", - emac_read(EMAC_RXBCASTFRAMES)); - dev_info(emac_dev, "EMAC: rx_multicast_frames:%d\n", - emac_read(EMAC_RXMCASTFRAMES)); - dev_info(emac_dev, "EMAC: rx_pause_frames:%d\n", - emac_read(EMAC_RXPAUSEFRAMES)); - dev_info(emac_dev, "EMAC: rx_crcerrors:%d\n", - emac_read(EMAC_RXCRCERRORS)); - dev_info(emac_dev, "EMAC: rx_align_code_errors:%d\n", - emac_read(EMAC_RXALIGNCODEERRORS)); - dev_info(emac_dev, "EMAC: rx_oversized_frames:%d\n", - emac_read(EMAC_RXOVERSIZED)); - dev_info(emac_dev, "EMAC: rx_jabber_frames:%d\n", - emac_read(EMAC_RXJABBER)); - dev_info(emac_dev, "EMAC: rx_undersized_frames:%d\n", - emac_read(EMAC_RXUNDERSIZED)); - dev_info(emac_dev, "EMAC: rx_fragments:%d\n", - emac_read(EMAC_RXFRAGMENTS)); - dev_info(emac_dev, "EMAC: rx_filtered_frames:%d\n", - emac_read(EMAC_RXFILTERED)); - dev_info(emac_dev, "EMAC: rx_qos_filtered_frames:%d\n", - emac_read(EMAC_RXQOSFILTERED)); - dev_info(emac_dev, "EMAC: rx_octets:%d\n", - emac_read(EMAC_RXOCTETS)); - dev_info(emac_dev, "EMAC: tx_goodframes:%d\n", - emac_read(EMAC_TXGOODFRAMES)); - dev_info(emac_dev, "EMAC: tx_bcastframes:%d\n", - emac_read(EMAC_TXBCASTFRAMES)); - dev_info(emac_dev, "EMAC: tx_mcastframes:%d\n", - emac_read(EMAC_TXMCASTFRAMES)); - dev_info(emac_dev, "EMAC: tx_pause_frames:%d\n", - emac_read(EMAC_TXPAUSEFRAMES)); - dev_info(emac_dev, "EMAC: tx_deferred_frames:%d\n", - emac_read(EMAC_TXDEFERRED)); - dev_info(emac_dev, "EMAC: tx_collision_frames:%d\n", - emac_read(EMAC_TXCOLLISION)); - dev_info(emac_dev, "EMAC: 
tx_single_coll_frames:%d\n", - emac_read(EMAC_TXSINGLECOLL)); - dev_info(emac_dev, "EMAC: tx_mult_coll_frames:%d\n", - emac_read(EMAC_TXMULTICOLL)); - dev_info(emac_dev, "EMAC: tx_excessive_collisions:%d\n", - emac_read(EMAC_TXEXCESSIVECOLL)); - dev_info(emac_dev, "EMAC: tx_late_collisions:%d\n", - emac_read(EMAC_TXLATECOLL)); - dev_info(emac_dev, "EMAC: tx_underrun:%d\n", - emac_read(EMAC_TXUNDERRUN)); - dev_info(emac_dev, "EMAC: tx_carrier_sense_errors:%d\n", - emac_read(EMAC_TXCARRIERSENSE)); - dev_info(emac_dev, "EMAC: tx_octets:%d\n", - emac_read(EMAC_TXOCTETS)); - dev_info(emac_dev, "EMAC: net_octets:%d\n", - emac_read(EMAC_NETOCTETS)); - dev_info(emac_dev, "EMAC: rx_sof_overruns:%d\n", - emac_read(EMAC_RXSOFOVERRUNS)); - dev_info(emac_dev, "EMAC: rx_mof_overruns:%d\n", - emac_read(EMAC_RXMOFOVERRUNS)); - dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n", - emac_read(EMAC_RXDMAOVERRUNS)); - - cpdma_ctlr_dump(priv->dma); -} - -/** * emac_get_drvinfo - Get EMAC driver information * @ndev: The DaVinci EMAC network adapter * @info: ethtool info structure containing name and version @@ -486,43 +394,6 @@ static void emac_get_drvinfo(struct net_device *ndev, } /** - * emac_get_settings - Get EMAC settings - * @ndev: The DaVinci EMAC network adapter - * @ecmd: ethtool command - * - * Executes ethool get command - * - */ -static int emac_get_settings(struct net_device *ndev, - struct ethtool_cmd *ecmd) -{ - struct emac_priv *priv = netdev_priv(ndev); - if (priv->phydev) - return phy_ethtool_gset(priv->phydev, ecmd); - else - return -EOPNOTSUPP; - -} - -/** - * emac_set_settings - Set EMAC settings - * @ndev: The DaVinci EMAC network adapter - * @ecmd: ethtool command - * - * Executes ethool set command - * - */ -static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) -{ - struct emac_priv *priv = netdev_priv(ndev); - if (priv->phydev) - return phy_ethtool_sset(priv->phydev, ecmd); - else - return -EOPNOTSUPP; - -} - -/** * emac_get_coalesce - Get interrupt coalesce settings for this device * @ndev : The DaVinci EMAC network adapter * @coal : ethtool coalesce settings structure @@ -625,12 +496,12 @@ static int emac_set_coalesce(struct net_device *ndev, */ static const struct ethtool_ops ethtool_ops = { .get_drvinfo = emac_get_drvinfo, - .get_settings = emac_get_settings, - .set_settings = emac_set_settings, .get_link = ethtool_op_get_link, .get_coalesce = emac_get_coalesce, .set_coalesce = emac_set_coalesce, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; /** @@ -651,8 +522,8 @@ static void emac_update_phystatus(struct emac_priv *priv) mac_control = emac_read(EMAC_MACCONTROL); cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ? 
DUPLEX_FULL : DUPLEX_HALF; - if (priv->phydev) - new_duplex = priv->phydev->duplex; + if (ndev->phydev) + new_duplex = ndev->phydev->duplex; else new_duplex = DUPLEX_FULL; @@ -1134,8 +1005,6 @@ static void emac_dev_tx_timeout(struct net_device *ndev) if (netif_msg_tx_err(priv)) dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX"); - emac_dump_regs(priv); - ndev->stats.tx_errors++; emac_int_disable(priv); cpdma_chan_stop(priv->txchan); @@ -1454,7 +1323,7 @@ static void emac_poll_controller(struct net_device *ndev) static void emac_adjust_link(struct net_device *ndev) { struct emac_priv *priv = netdev_priv(ndev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = ndev->phydev; unsigned long flags; int new_state = 0; @@ -1483,7 +1352,7 @@ static void emac_adjust_link(struct net_device *ndev) } if (new_state) { emac_update_phystatus(priv); - phy_print_status(priv->phydev); + phy_print_status(ndev->phydev); } spin_unlock_irqrestore(&priv->lock, flags); @@ -1505,15 +1374,13 @@ static void emac_adjust_link(struct net_device *ndev) */ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd) { - struct emac_priv *priv = netdev_priv(ndev); - if (!(netif_running(ndev))) return -EINVAL; /* TODO: Add phy read and write and private statistics get feature */ - if (priv->phydev) - return phy_mii_ioctl(priv->phydev, ifrq, cmd); + if (ndev->phydev) + return phy_mii_ioctl(ndev->phydev, ifrq, cmd); else return -EOPNOTSUPP; } @@ -1542,6 +1409,7 @@ static int emac_dev_open(struct net_device *ndev) int res_num = 0, irq_num = 0; int i = 0; struct emac_priv *priv = netdev_priv(ndev); + struct phy_device *phydev = NULL; ret = pm_runtime_get_sync(&priv->pdev->dev); if (ret < 0) { @@ -1607,12 +1475,10 @@ static int emac_dev_open(struct net_device *ndev) cpdma_ctlr_start(priv->dma); - priv->phydev = NULL; - if (priv->phy_node) { - priv->phydev = of_phy_connect(ndev, priv->phy_node, - &emac_adjust_link, 0, 0); - if (!priv->phydev) { + phydev = of_phy_connect(ndev, priv->phy_node, + &emac_adjust_link, 0, 0); + if (!phydev) { dev_err(emac_dev, "could not connect to phy %s\n", priv->phy_node->full_name); ret = -ENODEV; @@ -1621,7 +1487,7 @@ static int emac_dev_open(struct net_device *ndev) } /* use the first phy on the bus if pdata did not give us a phy id */ - if (!priv->phydev && !priv->phy_id) { + if (!phydev && !priv->phy_id) { struct device *phy; phy = bus_find_device(&mdio_bus_type, NULL, NULL, @@ -1630,16 +1496,15 @@ static int emac_dev_open(struct net_device *ndev) priv->phy_id = dev_name(phy); } - if (!priv->phydev && priv->phy_id && *priv->phy_id) { - priv->phydev = phy_connect(ndev, priv->phy_id, - &emac_adjust_link, - PHY_INTERFACE_MODE_MII); + if (!phydev && priv->phy_id && *priv->phy_id) { + phydev = phy_connect(ndev, priv->phy_id, + &emac_adjust_link, + PHY_INTERFACE_MODE_MII); - if (IS_ERR(priv->phydev)) { + if (IS_ERR(phydev)) { dev_err(emac_dev, "could not connect to phy %s\n", priv->phy_id); - ret = PTR_ERR(priv->phydev); - priv->phydev = NULL; + ret = PTR_ERR(phydev); goto err; } @@ -1647,10 +1512,10 @@ static int emac_dev_open(struct net_device *ndev) priv->speed = 0; priv->duplex = ~0; - phy_attached_info(priv->phydev); + phy_attached_info(phydev); } - if (!priv->phydev) { + if (!phydev) { /* No PHY , fix the link, speed and duplex settings */ dev_notice(emac_dev, "no phy, defaulting to 100/full\n"); priv->link = 1; @@ -1659,14 +1524,11 @@ static int emac_dev_open(struct net_device *ndev) emac_update_phystatus(priv); } - if (!netif_running(ndev)) 
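
/*
 * Aside: an illustrative sketch (not part of the patch) of the ethtool
 * conversion applied to davinci_emac and the other drivers below. The
 * generic phylib helpers operate on ndev->phydev, so the driver-private
 * phy_dev pointer and the get_settings/set_settings wrappers become dead
 * code. The "foo_" name is an assumption.
 */
#include <linux/ethtool.h>
#include <linux/phy.h>

static const struct ethtool_ops foo_ethtool_ops = {
	.get_link	    = ethtool_op_get_link,
	/* both helpers return -ENODEV themselves when ndev->phydev is NULL */
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
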
/* debug only - to avoid compiler warning */ - emac_dump_regs(priv); - if (netif_msg_drv(priv)) dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name); - if (priv->phydev) - phy_start(priv->phydev); + if (phydev) + phy_start(phydev); return 0; @@ -1717,8 +1579,8 @@ static int emac_dev_stop(struct net_device *ndev) cpdma_ctlr_stop(priv->dma); emac_write(EMAC_SOFTRESET, 1); - if (priv->phydev) - phy_disconnect(priv->phydev); + if (ndev->phydev) + phy_disconnect(ndev->phydev); /* Free IRQ */ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) { @@ -2102,6 +1964,7 @@ static int davinci_emac_remove(struct platform_device *pdev) cpdma_ctlr_destroy(priv->dma); unregister_netdev(ndev); + of_node_put(priv->phy_node); pm_runtime_disable(&pdev->dev); free_netdev(ndev); diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 4e7c9b9b0..33df340db 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -53,6 +53,10 @@ #define DEF_OUT_FREQ 2200000 /* 2.2 MHz */ +struct davinci_mdio_of_param { + int autosuspend_delay_ms; +}; + struct davinci_mdio_regs { u32 version; u32 control; @@ -90,19 +94,19 @@ static const struct mdio_platform_data default_pdata = { struct davinci_mdio_data { struct mdio_platform_data pdata; struct davinci_mdio_regs __iomem *regs; - spinlock_t lock; struct clk *clk; struct device *dev; struct mii_bus *bus; - bool suspended; + bool active_in_suspend; unsigned long access_time; /* jiffies */ /* Indicates that driver shouldn't modify phy_mask in case * if MDIO bus is registered from DT. */ bool skip_scan; + u32 clk_div; }; -static void __davinci_mdio_reset(struct davinci_mdio_data *data) +static void davinci_mdio_init_clk(struct davinci_mdio_data *data) { u32 mdio_in, div, mdio_out_khz, access_time; @@ -111,9 +115,7 @@ static void __davinci_mdio_reset(struct davinci_mdio_data *data) if (div > CONTROL_MAX_DIV) div = CONTROL_MAX_DIV; - /* set enable and clock divider */ - __raw_writel(div | CONTROL_ENABLE, &data->regs->control); - + data->clk_div = div; /* * One mdio transaction consists of: * 32 bits of preamble @@ -134,12 +136,23 @@ static void __davinci_mdio_reset(struct davinci_mdio_data *data) data->access_time = 1; } +static void davinci_mdio_enable(struct davinci_mdio_data *data) +{ + /* set enable and clock divider */ + __raw_writel(data->clk_div | CONTROL_ENABLE, &data->regs->control); +} + static int davinci_mdio_reset(struct mii_bus *bus) { struct davinci_mdio_data *data = bus->priv; u32 phy_mask, ver; + int ret; - __davinci_mdio_reset(data); + ret = pm_runtime_get_sync(data->dev); + if (ret < 0) { + pm_runtime_put_noidle(data->dev); + return ret; + } /* wait for scan logic to settle */ msleep(PHY_MAX_ADDR * data->access_time); @@ -150,7 +163,7 @@ static int davinci_mdio_reset(struct mii_bus *bus) (ver >> 8) & 0xff, ver & 0xff); if (data->skip_scan) - return 0; + goto done; /* get phy mask from the alive register */ phy_mask = __raw_readl(&data->regs->alive); @@ -165,6 +178,10 @@ static int davinci_mdio_reset(struct mii_bus *bus) } data->bus->phy_mask = phy_mask; +done: + pm_runtime_mark_last_busy(data->dev); + pm_runtime_put_autosuspend(data->dev); + return 0; } @@ -190,7 +207,7 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data) * operation */ dev_warn(data->dev, "resetting idled controller\n"); - __davinci_mdio_reset(data); + davinci_mdio_enable(data); return -EAGAIN; } @@ -225,11 +242,10 @@ static int davinci_mdio_read(struct mii_bus *bus, int 
phy_id, int phy_reg) if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) return -EINVAL; - spin_lock(&data->lock); - - if (data->suspended) { - spin_unlock(&data->lock); - return -ENODEV; + ret = pm_runtime_get_sync(data->dev); + if (ret < 0) { + pm_runtime_put_noidle(data->dev); + return ret; } reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) | @@ -255,8 +271,8 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg) break; } - spin_unlock(&data->lock); - + pm_runtime_mark_last_busy(data->dev); + pm_runtime_put_autosuspend(data->dev); return ret; } @@ -270,11 +286,10 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) return -EINVAL; - spin_lock(&data->lock); - - if (data->suspended) { - spin_unlock(&data->lock); - return -ENODEV; + ret = pm_runtime_get_sync(data->dev); + if (ret < 0) { + pm_runtime_put_noidle(data->dev); + return ret; } reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) | @@ -295,9 +310,10 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, break; } - spin_unlock(&data->lock); + pm_runtime_mark_last_busy(data->dev); + pm_runtime_put_autosuspend(data->dev); - return 0; + return ret; } #if IS_ENABLED(CONFIG_OF) @@ -320,6 +336,19 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data, } #endif +#if IS_ENABLED(CONFIG_OF) +static const struct davinci_mdio_of_param of_cpsw_mdio_data = { + .autosuspend_delay_ms = 100, +}; + +static const struct of_device_id davinci_mdio_of_mtable[] = { + { .compatible = "ti,davinci_mdio", }, + { .compatible = "ti,cpsw-mdio", .data = &of_cpsw_mdio_data}, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable); +#endif + static int davinci_mdio_probe(struct platform_device *pdev) { struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev); @@ -328,6 +357,7 @@ static int davinci_mdio_probe(struct platform_device *pdev) struct resource *res; struct phy_device *phy; int ret, addr; + int autosuspend_delay_ms = -1; data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) @@ -340,9 +370,22 @@ static int davinci_mdio_probe(struct platform_device *pdev) } if (dev->of_node) { - if (davinci_mdio_probe_dt(&data->pdata, pdev)) - data->pdata = default_pdata; + const struct of_device_id *of_id; + + ret = davinci_mdio_probe_dt(&data->pdata, pdev); + if (ret) + return ret; snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); + + of_id = of_match_device(davinci_mdio_of_mtable, &pdev->dev); + if (of_id) { + const struct davinci_mdio_of_param *of_mdio_data; + + of_mdio_data = of_id->data; + if (of_mdio_data) + autosuspend_delay_ms = + of_mdio_data->autosuspend_delay_ms; + } } else { data->pdata = pdata ? 
(*pdata) : default_pdata; snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x", @@ -356,26 +399,25 @@ static int davinci_mdio_probe(struct platform_device *pdev) data->bus->parent = dev; data->bus->priv = data; - pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); data->clk = devm_clk_get(dev, "fck"); if (IS_ERR(data->clk)) { dev_err(dev, "failed to get device clock\n"); - ret = PTR_ERR(data->clk); - data->clk = NULL; - goto bail_out; + return PTR_ERR(data->clk); } dev_set_drvdata(dev, data); data->dev = dev; - spin_lock_init(&data->lock); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); data->regs = devm_ioremap_resource(dev, res); - if (IS_ERR(data->regs)) { - ret = PTR_ERR(data->regs); - goto bail_out; - } + if (IS_ERR(data->regs)) + return PTR_ERR(data->regs); + + davinci_mdio_init_clk(data); + + pm_runtime_set_autosuspend_delay(&pdev->dev, autosuspend_delay_ms); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); /* register the mii bus * Create PHYs from DT only in case if PHY child nodes are explicitly @@ -404,9 +446,8 @@ static int davinci_mdio_probe(struct platform_device *pdev) return 0; bail_out: - pm_runtime_put_sync(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_disable(&pdev->dev); - return ret; } @@ -417,29 +458,47 @@ static int davinci_mdio_remove(struct platform_device *pdev) if (data->bus) mdiobus_unregister(data->bus); - pm_runtime_put_sync(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } -#ifdef CONFIG_PM_SLEEP -static int davinci_mdio_suspend(struct device *dev) +#ifdef CONFIG_PM +static int davinci_mdio_runtime_suspend(struct device *dev) { struct davinci_mdio_data *data = dev_get_drvdata(dev); u32 ctrl; - spin_lock(&data->lock); - /* shutdown the scan state machine */ ctrl = __raw_readl(&data->regs->control); ctrl &= ~CONTROL_ENABLE; __raw_writel(ctrl, &data->regs->control); wait_for_idle(data); - data->suspended = true; - spin_unlock(&data->lock); - pm_runtime_put_sync(data->dev); + return 0; +} + +static int davinci_mdio_runtime_resume(struct device *dev) +{ + struct davinci_mdio_data *data = dev_get_drvdata(dev); + + davinci_mdio_enable(data); + return 0; +} +#endif + +#ifdef CONFIG_PM_SLEEP +static int davinci_mdio_suspend(struct device *dev) +{ + struct davinci_mdio_data *data = dev_get_drvdata(dev); + int ret = 0; + + data->active_in_suspend = !pm_runtime_status_suspended(dev); + if (data->active_in_suspend) + ret = pm_runtime_force_suspend(dev); + if (ret < 0) + return ret; /* Select sleep pin state */ pinctrl_pm_select_sleep_state(dev); @@ -454,31 +513,19 @@ static int davinci_mdio_resume(struct device *dev) /* Select default pin state */ pinctrl_pm_select_default_state(dev); - pm_runtime_get_sync(data->dev); - - spin_lock(&data->lock); - /* restart the scan state machine */ - __davinci_mdio_reset(data); - - data->suspended = false; - spin_unlock(&data->lock); + if (data->active_in_suspend) + pm_runtime_force_resume(dev); return 0; } #endif static const struct dev_pm_ops davinci_mdio_pm_ops = { + SET_RUNTIME_PM_OPS(davinci_mdio_runtime_suspend, + davinci_mdio_runtime_resume, NULL) SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume) }; -#if IS_ENABLED(CONFIG_OF) -static const struct of_device_id davinci_mdio_of_mtable[] = { - { .compatible = "ti,davinci_mdio", }, - { /* sentinel */ }, -}; -MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable); -#endif - static struct platform_driver davinci_mdio_driver = { .driver = { .name = 
"davinci_mdio", diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 561703317..ece0ea0f6 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -1651,7 +1651,6 @@ static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int) dma_addr_t head_list_phys; u32 ack = 1; - host_int = 0; if (priv->tlan_rev < 0x30) { TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n", diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 922a443e3..4ef605a90 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -588,7 +588,7 @@ static bool tile_net_lepp_free_comps(struct net_device *dev, bool all) static void tile_net_schedule_egress_timer(struct tile_net_cpu *info) { if (!info->egress_timer_scheduled) { - mod_timer_pinned(&info->egress_timer, jiffies + 1); + mod_timer(&info->egress_timer, jiffies + 1); info->egress_timer_scheduled = true; } } @@ -1004,7 +1004,7 @@ static void tile_net_register(void *dev_ptr) BUG(); /* Initialize the egress timer. */ - init_timer(&info->egress_timer); + init_timer_pinned(&info->egress_timer); info->egress_timer.data = (long)info; info->egress_timer.function = tile_net_handle_egress_timer; diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 548747834..5b01b3fa9 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -280,7 +280,7 @@ struct tc35815_regs { * Descriptors */ -/* Frame descripter */ +/* Frame descriptor */ struct FDesc { volatile __u32 FDNext; volatile __u32 FDSystem; @@ -288,7 +288,7 @@ struct FDesc { volatile __u32 FDCtl; }; -/* Buffer descripter */ +/* Buffer descriptor */ struct BDesc { volatile __u32 BuffData; volatile __u32 BDCtl; @@ -296,7 +296,7 @@ struct BDesc { #define FD_ALIGN 16 -/* Frame Descripter bit assign ---------------------------------------------- */ +/* Frame Descriptor bit assign ---------------------------------------------- */ #define FD_FDLength_MASK 0x0000FFFF /* Length MASK */ #define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */ #define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */ @@ -309,7 +309,7 @@ struct BDesc { #define FD_Next_EOL 0x00000001 /* FD EOL indicator */ #define FD_BDCnt_SHIFT 16 -/* Buffer Descripter bit assign --------------------------------------------- */ +/* Buffer Descriptor bit assign --------------------------------------------- */ #define BD_BuffLength_MASK 0x0000FFFF /* Receive Data Size */ #define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */ #define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */ @@ -405,7 +405,6 @@ struct tc35815_local { spinlock_t rx_lock; struct mii_bus *mii_bus; - struct phy_device *phy_dev; int duplex; int speed; int link; @@ -539,7 +538,7 @@ static int tc_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 val) static void tc_handle_link_change(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); - struct phy_device *phydev = lp->phy_dev; + struct phy_device *phydev = dev->phydev; unsigned long flags; int status_change = 0; @@ -645,7 +644,6 @@ static int tc_mii_probe(struct net_device *dev) lp->link = 0; lp->speed = 0; lp->duplex = -1; - lp->phy_dev = phydev; return 0; } @@ -853,7 +851,7 @@ static void tc35815_remove_one(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct tc35815_local *lp = netdev_priv(dev); - phy_disconnect(lp->phy_dev); + 
phy_disconnect(dev->phydev); mdiobus_unregister(lp->mii_bus); mdiobus_free(lp->mii_bus); unregister_netdev(dev); @@ -1143,8 +1141,8 @@ static void tc35815_restart(struct net_device *dev) struct tc35815_local *lp = netdev_priv(dev); int ret; - if (lp->phy_dev) { - ret = phy_init_hw(lp->phy_dev); + if (dev->phydev) { + ret = phy_init_hw(dev->phydev); if (ret) printk(KERN_ERR "%s: PHY init failed.\n", dev->name); } @@ -1236,7 +1234,7 @@ tc35815_open(struct net_device *dev) netif_carrier_off(dev); /* schedule a link state check */ - phy_start(lp->phy_dev); + phy_start(dev->phydev); /* We are now ready to accept transmit requeusts from * the queueing layer of the networking. @@ -1387,7 +1385,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit) if (status & Int_IntExBD) { if (netif_msg_rx_err(lp)) dev_warn(&dev->dev, - "Excessive Buffer Descriptiors (%#x).\n", + "Excessive Buffer Descriptors (%#x).\n", status); dev->stats.rx_length_errors++; ret = 0; @@ -1819,8 +1817,8 @@ tc35815_close(struct net_device *dev) netif_stop_queue(dev); napi_disable(&lp->napi); - if (lp->phy_dev) - phy_stop(lp->phy_dev); + if (dev->phydev) + phy_stop(dev->phydev); cancel_work_sync(&lp->restart_work); /* Flush the Tx and disable Rx here. */ @@ -1946,24 +1944,6 @@ static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo * strlcpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info)); } -static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct tc35815_local *lp = netdev_priv(dev); - - if (!lp->phy_dev) - return -ENODEV; - return phy_ethtool_gset(lp->phy_dev, cmd); -} - -static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct tc35815_local *lp = netdev_priv(dev); - - if (!lp->phy_dev) - return -ENODEV; - return phy_ethtool_sset(lp->phy_dev, cmd); -} - static u32 tc35815_get_msglevel(struct net_device *dev) { struct tc35815_local *lp = netdev_priv(dev); @@ -2013,25 +1993,23 @@ static void tc35815_get_strings(struct net_device *dev, u32 stringset, u8 *data) static const struct ethtool_ops tc35815_ethtool_ops = { .get_drvinfo = tc35815_get_drvinfo, - .get_settings = tc35815_get_settings, - .set_settings = tc35815_set_settings, .get_link = ethtool_op_get_link, .get_msglevel = tc35815_get_msglevel, .set_msglevel = tc35815_set_msglevel, .get_strings = tc35815_get_strings, .get_sset_count = tc35815_get_sset_count, .get_ethtool_stats = tc35815_get_ethtool_stats, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct tc35815_local *lp = netdev_priv(dev); - if (!netif_running(dev)) return -EINVAL; - if (!lp->phy_dev) + if (!dev->phydev) return -ENODEV; - return phy_mii_ioctl(lp->phy_dev, rq, cmd); + return phy_mii_ioctl(dev->phydev, rq, cmd); } static void tc35815_chip_reset(struct net_device *dev) @@ -2116,7 +2094,7 @@ static void tc35815_chip_init(struct net_device *dev) if (lp->chiptype == TC35815_TX4939) txctl &= ~Tx_EnLCarr; /* WORKAROUND: ignore LostCrS in full duplex operation */ - if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL) + if (!dev->phydev || !lp->link || lp->duplex == DUPLEX_FULL) txctl &= ~Tx_EnLCarr; tc_writel(txctl, &tr->Tx_Ctl); } @@ -2132,8 +2110,8 @@ static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state) if (!netif_running(dev)) return 0; netif_device_detach(dev); - if (lp->phy_dev) - phy_stop(lp->phy_dev); 
+ if (dev->phydev) + phy_stop(dev->phydev); spin_lock_irqsave(&lp->lock, flags); tc35815_chip_reset(dev); spin_unlock_irqrestore(&lp->lock, flags); @@ -2144,7 +2122,6 @@ static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state) static int tc35815_resume(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); - struct tc35815_local *lp = netdev_priv(dev); pci_restore_state(pdev); if (!netif_running(dev)) @@ -2152,8 +2129,8 @@ static int tc35815_resume(struct pci_dev *pdev) pci_set_power_state(pdev, PCI_D0); tc35815_restart(dev); netif_carrier_off(dev); - if (lp->phy_dev) - phy_start(lp->phy_dev); + if (dev->phydev) + phy_start(dev->phydev); netif_device_attach(dev); return 0; } diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 01a77145a..8fd131207 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -166,6 +166,7 @@ static struct platform_driver tsi_eth_driver = { static void tsi108_timed_checker(unsigned long dev_ptr); +#ifdef DEBUG static void dump_eth_one(struct net_device *dev) { struct tsi108_prv_data *data = netdev_priv(dev); @@ -190,6 +191,7 @@ static void dump_eth_one(struct net_device *dev) TSI_READ(TSI108_EC_RXESTAT), TSI_READ(TSI108_EC_RXERR), data->rxpending); } +#endif /* Synchronization is needed between the thread and up/down events. * Note that the PHY is accessed through the same registers for both diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 4f6255cf6..37ab46cdb 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -1154,7 +1154,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops, if (err < 0) goto err_register; - priv->xfer_wq = create_workqueue(netdev_name(ndev)); + priv->xfer_wq = alloc_workqueue(netdev_name(ndev), WQ_MEM_RECLAIM, 0); if (!priv->xfer_wq) { err = -ENOMEM; goto err_wq; @@ -1233,7 +1233,6 @@ int w5100_remove(struct device *dev) flush_work(&priv->setrx_work); flush_work(&priv->restart_work); - flush_workqueue(priv->xfer_wq); destroy_workqueue(priv->xfer_wq); unregister_netdev(ndev); diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h index 902457e43..7d06e3e1a 100644 --- a/drivers/net/ethernet/xilinx/ll_temac.h +++ b/drivers/net/ethernet/xilinx/ll_temac.h @@ -332,7 +332,6 @@ struct temac_local { struct device *dev; /* Connection to PHY device */ - struct phy_device *phy_dev; /* Pointer to PHY device */ struct device_node *phy_node; /* MDIO bus data */ diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 739708712..a9bd665fd 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -590,7 +590,7 @@ static void temac_device_reset(struct net_device *ndev) static void temac_adjust_link(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); - struct phy_device *phy = lp->phy_dev; + struct phy_device *phy = ndev->phydev; u32 mii_speed; int link_state; @@ -843,19 +843,20 @@ static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev) static int temac_open(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); + struct phy_device *phydev = NULL; int rc; dev_dbg(&ndev->dev, "temac_open()\n"); if (lp->phy_node) { - lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node, - temac_adjust_link, 0, 0); - if (!lp->phy_dev) { + phydev = of_phy_connect(lp->ndev, 
lp->phy_node, + temac_adjust_link, 0, 0); + if (!phydev) { dev_err(lp->dev, "of_phy_connect() failed\n"); return -ENODEV; } - phy_start(lp->phy_dev); + phy_start(phydev); } temac_device_reset(ndev); @@ -872,9 +873,8 @@ static int temac_open(struct net_device *ndev) err_rx_irq: free_irq(lp->tx_irq, ndev); err_tx_irq: - if (lp->phy_dev) - phy_disconnect(lp->phy_dev); - lp->phy_dev = NULL; + if (phydev) + phy_disconnect(phydev); dev_err(lp->dev, "request_irq() failed\n"); return rc; } @@ -882,15 +882,15 @@ static int temac_open(struct net_device *ndev) static int temac_stop(struct net_device *ndev) { struct temac_local *lp = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; dev_dbg(&ndev->dev, "temac_close()\n"); free_irq(lp->tx_irq, ndev); free_irq(lp->rx_irq, ndev); - if (lp->phy_dev) - phy_disconnect(lp->phy_dev); - lp->phy_dev = NULL; + if (phydev) + phy_disconnect(phydev); temac_dma_bd_release(ndev); @@ -916,15 +916,13 @@ temac_poll_controller(struct net_device *ndev) static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) { - struct temac_local *lp = netdev_priv(ndev); - if (!netif_running(ndev)) return -EINVAL; - if (!lp->phy_dev) + if (!ndev->phydev) return -EINVAL; - return phy_mii_ioctl(lp->phy_dev, rq, cmd); + return phy_mii_ioctl(ndev->phydev, rq, cmd); } static const struct net_device_ops temac_netdev_ops = { @@ -969,30 +967,17 @@ static const struct attribute_group temac_attr_group = { }; /* ethtool support */ -static int temac_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd) -{ - struct temac_local *lp = netdev_priv(ndev); - return phy_ethtool_gset(lp->phy_dev, cmd); -} - -static int temac_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd) -{ - struct temac_local *lp = netdev_priv(ndev); - return phy_ethtool_sset(lp->phy_dev, cmd); -} - static int temac_nway_reset(struct net_device *ndev) { - struct temac_local *lp = netdev_priv(ndev); - return phy_start_aneg(lp->phy_dev); + return phy_start_aneg(ndev->phydev); } static const struct ethtool_ops temac_ethtool_ops = { - .get_settings = temac_get_settings, - .set_settings = temac_set_settings, .nway_reset = temac_nway_reset, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int temac_of_probe(struct platform_device *op) diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index 9ead4e269..af27f7d1c 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -382,7 +382,6 @@ struct axidma_bd { * struct axienet_local - axienet private per device data * @ndev: Pointer for net_device to which it will be attached. 
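
/*
 * Aside: a sketch (illustrative only) of the open-path PHY bring-up the
 * temac/axienet hunks converge on. of_phy_connect() already stores the
 * attachment in ndev->phydev, so a short-lived local replaces the cached
 * lp->phy_dev and teardown reads ndev->phydev instead. The "foo_" names
 * and the adjust-link callback are assumptions.
 */
#include <linux/of_mdio.h>
#include <linux/phy.h>

static void foo_adjust_link(struct net_device *ndev);

static int foo_connect_phy(struct net_device *ndev,
			   struct device_node *phy_node)
{
	struct phy_device *phydev;

	phydev = of_phy_connect(ndev, phy_node, foo_adjust_link, 0,
				PHY_INTERFACE_MODE_GMII);
	if (!phydev)
		return -ENODEV;

	phy_start(phydev);	/* link state arrives via foo_adjust_link() */
	return 0;
}
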
* @dev: Pointer to device structure - * @phy_dev: Pointer to PHY device structure attached to the axienet_local * @phy_node: Pointer to device node structure * @mii_bus: Pointer to MII bus structure * @regs: Base address for the axienet_local device address space @@ -420,7 +419,6 @@ struct axienet_local { struct device *dev; /* Connection to PHY device */ - struct phy_device *phy_dev; /* Pointer to PHY device */ struct device_node *phy_node; /* MDIO bus data */ diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 8c7f5be51..36ee7ab30 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -525,7 +525,7 @@ static void axienet_adjust_link(struct net_device *ndev) u32 link_state; u32 setspeed = 1; struct axienet_local *lp = netdev_priv(ndev); - struct phy_device *phy = lp->phy_dev; + struct phy_device *phy = ndev->phydev; link_state = phy->speed | (phy->duplex << 1) | phy->link; if (lp->last_link != link_state) { @@ -911,6 +911,7 @@ static int axienet_open(struct net_device *ndev) { int ret, mdio_mcreg; struct axienet_local *lp = netdev_priv(ndev); + struct phy_device *phydev = NULL; dev_dbg(&ndev->dev, "axienet_open()\n"); @@ -934,19 +935,19 @@ static int axienet_open(struct net_device *ndev) if (lp->phy_node) { if (lp->phy_type == XAE_PHY_TYPE_GMII) { - lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node, - axienet_adjust_link, 0, - PHY_INTERFACE_MODE_GMII); + phydev = of_phy_connect(lp->ndev, lp->phy_node, + axienet_adjust_link, 0, + PHY_INTERFACE_MODE_GMII); } else if (lp->phy_type == XAE_PHY_TYPE_RGMII_2_0) { - lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node, - axienet_adjust_link, 0, - PHY_INTERFACE_MODE_RGMII_ID); + phydev = of_phy_connect(lp->ndev, lp->phy_node, + axienet_adjust_link, 0, + PHY_INTERFACE_MODE_RGMII_ID); } - if (!lp->phy_dev) + if (!phydev) dev_err(lp->dev, "of_phy_connect() failed\n"); else - phy_start(lp->phy_dev); + phy_start(phydev); } /* Enable tasklets for Axi DMA error handling */ @@ -967,9 +968,8 @@ static int axienet_open(struct net_device *ndev) err_rx_irq: free_irq(lp->tx_irq, ndev); err_tx_irq: - if (lp->phy_dev) - phy_disconnect(lp->phy_dev); - lp->phy_dev = NULL; + if (phydev) + phy_disconnect(phydev); tasklet_kill(&lp->dma_err_tasklet); dev_err(lp->dev, "request_irq() failed\n"); return ret; @@ -1006,9 +1006,8 @@ static int axienet_stop(struct net_device *ndev) free_irq(lp->tx_irq, ndev); free_irq(lp->rx_irq, ndev); - if (lp->phy_dev) - phy_disconnect(lp->phy_dev); - lp->phy_dev = NULL; + if (ndev->phydev) + phy_disconnect(ndev->phydev); axienet_dma_bd_release(ndev); return 0; @@ -1078,51 +1077,6 @@ static const struct net_device_ops axienet_netdev_ops = { }; /** - * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY. - * @ndev: Pointer to net_device structure - * @ecmd: Pointer to ethtool_cmd structure - * - * This implements ethtool command for getting PHY settings. If PHY could - * not be found, the function returns -ENODEV. This function calls the - * relevant PHY ethtool API to get the PHY settings. - * Issue "ethtool ethX" under linux prompt to execute this function. 
- * - * Return: 0 on success, -ENODEV if PHY doesn't exist - */ -static int axienet_ethtools_get_settings(struct net_device *ndev, - struct ethtool_cmd *ecmd) -{ - struct axienet_local *lp = netdev_priv(ndev); - struct phy_device *phydev = lp->phy_dev; - if (!phydev) - return -ENODEV; - return phy_ethtool_gset(phydev, ecmd); -} - -/** - * axienet_ethtools_set_settings - Set PHY settings as passed in the argument. - * @ndev: Pointer to net_device structure - * @ecmd: Pointer to ethtool_cmd structure - * - * This implements ethtool command for setting various PHY settings. If PHY - * could not be found, the function returns -ENODEV. This function calls the - * relevant PHY ethtool API to set the PHY. - * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this - * function. - * - * Return: 0 on success, -ENODEV if PHY doesn't exist - */ -static int axienet_ethtools_set_settings(struct net_device *ndev, - struct ethtool_cmd *ecmd) -{ - struct axienet_local *lp = netdev_priv(ndev); - struct phy_device *phydev = lp->phy_dev; - if (!phydev) - return -ENODEV; - return phy_ethtool_sset(phydev, ecmd); -} - -/** * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information. * @ndev: Pointer to net_device structure * @ed: Pointer to ethtool_drvinfo structure @@ -1344,8 +1298,6 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev, } static struct ethtool_ops axienet_ethtool_ops = { - .get_settings = axienet_ethtools_get_settings, - .set_settings = axienet_ethtools_set_settings, .get_drvinfo = axienet_ethtools_get_drvinfo, .get_regs_len = axienet_ethtools_get_regs_len, .get_regs = axienet_ethtools_get_regs, @@ -1354,6 +1306,8 @@ static struct ethtool_ops axienet_ethtool_ops = { .set_pauseparam = axienet_ethtools_set_pauseparam, .get_coalesce = axienet_ethtools_get_coalesce, .set_coalesce = axienet_ethtools_set_coalesce, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; /** @@ -1587,9 +1541,9 @@ static int axienet_probe(struct platform_device *pdev) /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); - if (IS_ERR(np)) { + if (!np) { dev_err(&pdev->dev, "could not find DMA node\n"); - ret = PTR_ERR(np); + ret = -ENODEV; goto free_netdev; } ret = of_address_to_resource(np, 0, &dmares); diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 3cee84a24..93dc10b10 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1131,11 +1131,13 @@ static int xemaclite_of_probe(struct platform_device *ofdev) lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong"); mac_address = of_get_mac_address(ofdev->dev.of_node); - if (mac_address) + if (mac_address) { /* Set the MAC address. 
*/ memcpy(ndev->dev_addr, mac_address, ETH_ALEN); - else - dev_warn(dev, "No MAC address found\n"); + } else { + dev_warn(dev, "No MAC address found, using random\n"); + eth_hw_addr_random(ndev); + } /* Clear the Tx CSR's in case this is a restart */ __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET); diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index 7b44968e0..ddced28e8 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1144,8 +1144,8 @@ xirc2ps_interrupt(int irq, void *dev_id) dev->stats.tx_packets += lp->last_ptr_value - n; netif_wake_queue(dev); } - if (tx_status & 0x0002) { /* Execessive collissions */ - pr_debug("tx restarted due to execssive collissions\n"); + if (tx_status & 0x0002) { /* Excessive collisions */ + pr_debug("tx restarted due to excessive collisions\n"); PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */ } if (tx_status & 0x0040) diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 513840794..7f127dc1b 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -171,7 +171,6 @@ struct port { struct npe *npe; struct net_device *netdev; struct napi_struct napi; - struct phy_device *phydev; struct eth_plat_info *plat; buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS]; struct desc *desc_tab; /* coherent */ @@ -562,7 +561,7 @@ static void ixp4xx_mdio_remove(void) static void ixp4xx_adjust_link(struct net_device *dev) { struct port *port = netdev_priv(dev); - struct phy_device *phydev = port->phydev; + struct phy_device *phydev = dev->phydev; if (!phydev->link) { if (port->speed) { @@ -976,8 +975,6 @@ static void eth_set_mcast_list(struct net_device *dev) static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { - struct port *port = netdev_priv(dev); - if (!netif_running(dev)) return -EINVAL; @@ -988,7 +985,7 @@ static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd) return hwtstamp_get(dev, req); } - return phy_mii_ioctl(port->phydev, req, cmd); + return phy_mii_ioctl(dev->phydev, req, cmd); } /* ethtool support */ @@ -1005,22 +1002,9 @@ static void ixp4xx_get_drvinfo(struct net_device *dev, strlcpy(info->bus_info, "internal", sizeof(info->bus_info)); } -static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct port *port = netdev_priv(dev); - return phy_ethtool_gset(port->phydev, cmd); -} - -static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct port *port = netdev_priv(dev); - return phy_ethtool_sset(port->phydev, cmd); -} - static int ixp4xx_nway_reset(struct net_device *dev) { - struct port *port = netdev_priv(dev); - return phy_start_aneg(port->phydev); + return phy_start_aneg(dev->phydev); } int ixp46x_phc_index = -1; @@ -1054,11 +1038,11 @@ static int ixp4xx_get_ts_info(struct net_device *dev, static const struct ethtool_ops ixp4xx_ethtool_ops = { .get_drvinfo = ixp4xx_get_drvinfo, - .get_settings = ixp4xx_get_settings, - .set_settings = ixp4xx_set_settings, .nway_reset = ixp4xx_nway_reset, .get_link = ethtool_op_get_link, .get_ts_info = ixp4xx_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; @@ -1259,7 +1243,7 @@ static int eth_open(struct net_device *dev) } port->speed = 0; /* force "link up" message */ - phy_start(port->phydev); + phy_start(dev->phydev); for (i = 0; 
i < ETH_ALEN; i++) __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]); @@ -1380,7 +1364,7 @@ static int eth_close(struct net_device *dev) printk(KERN_CRIT "%s: unable to disable loopback\n", dev->name); - phy_stop(port->phydev); + phy_stop(dev->phydev); if (!ports_open) qmgr_disable_irq(TXDONE_QUEUE); @@ -1405,6 +1389,7 @@ static int eth_init_one(struct platform_device *pdev) struct port *port; struct net_device *dev; struct eth_plat_info *plat = dev_get_platdata(&pdev->dev); + struct phy_device *phydev = NULL; u32 regs_phys; char phy_id[MII_BUS_ID_SIZE + 3]; int err; @@ -1466,14 +1451,14 @@ static int eth_init_one(struct platform_device *pdev) snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, mdio_bus->id, plat->phy); - port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, - PHY_INTERFACE_MODE_MII); - if (IS_ERR(port->phydev)) { - err = PTR_ERR(port->phydev); + phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, + PHY_INTERFACE_MODE_MII); + if (IS_ERR(phydev)) { + err = PTR_ERR(phydev); goto err_free_mem; } - port->phydev->irq = PHY_POLL; + phydev->irq = PHY_POLL; if ((err = register_netdev(dev))) goto err_phy_dis; @@ -1484,7 +1469,7 @@ static int eth_init_one(struct platform_device *pdev) return 0; err_phy_dis: - phy_disconnect(port->phydev); + phy_disconnect(phydev); err_free_mem: npe_port_tab[NPE_ID(port->id)] = NULL; release_resource(port->mem_res); @@ -1498,10 +1483,11 @@ err_free: static int eth_remove_one(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); + struct phy_device *phydev = dev->phydev; struct port *port = netdev_priv(dev); unregister_netdev(dev); - phy_disconnect(port->phydev); + phy_disconnect(phydev); npe_port_tab[NPE_ID(port->id)] = NULL; npe_release(port->npe); release_resource(port->mem_res); diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index 86c331bb5..9006877c5 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c @@ -1187,8 +1187,9 @@ static int fjes_probe(struct platform_device *plat_dev) adapter->force_reset = false; adapter->open_guard = false; - adapter->txrx_wq = create_workqueue(DRV_NAME "/txrx"); - adapter->control_wq = create_workqueue(DRV_NAME "/control"); + adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0); + adapter->control_wq = alloc_workqueue(DRV_NAME "/control", + WQ_MEM_RECLAIM, 0); INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task); INIT_WORK(&adapter->raise_intr_rxdata_task, diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 9b3dc3c61..3c20e87bb 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -12,7 +12,6 @@ #include <linux/kernel.h> #include <linux/module.h> -#include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/hash.h> #include <net/dst_metadata.h> @@ -397,23 +396,6 @@ static struct socket *geneve_create_sock(struct net *net, bool ipv6, return sock; } -static void geneve_notify_add_rx_port(struct geneve_sock *gs) -{ - struct net_device *dev; - struct sock *sk = gs->sock->sk; - struct net *net = sock_net(sk); - sa_family_t sa_family = geneve_get_sk_family(gs); - __be16 port = inet_sk(sk)->inet_sport; - - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { - if (dev->netdev_ops->ndo_add_geneve_port) - dev->netdev_ops->ndo_add_geneve_port(dev, sa_family, - port); - } - rcu_read_unlock(); -} - static int geneve_hlen(struct genevehdr *gh) { return sizeof(*gh) + gh->opt_len * 4; @@ -533,7 +515,7 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port, 
INIT_HLIST_HEAD(&gs->vni_list[h]); /* Initialize the geneve udp offloads structure */ - geneve_notify_add_rx_port(gs); + udp_tunnel_notify_add_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE); /* Mark socket as an encapsulation socket */ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg)); @@ -548,31 +530,13 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port, return gs; } -static void geneve_notify_del_rx_port(struct geneve_sock *gs) -{ - struct net_device *dev; - struct sock *sk = gs->sock->sk; - struct net *net = sock_net(sk); - sa_family_t sa_family = geneve_get_sk_family(gs); - __be16 port = inet_sk(sk)->inet_sport; - - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { - if (dev->netdev_ops->ndo_del_geneve_port) - dev->netdev_ops->ndo_del_geneve_port(dev, sa_family, - port); - } - - rcu_read_unlock(); -} - static void __geneve_sock_release(struct geneve_sock *gs) { if (!gs || --gs->refcnt) return; list_del(&gs->list); - geneve_notify_del_rx_port(gs); + udp_tunnel_notify_del_rx_port(gs->sock, UDP_TUNNEL_TYPE_GENEVE); udp_tunnel_sock_release(gs->sock); kfree_rcu(gs, rcu); } @@ -1170,29 +1134,20 @@ static struct device_type geneve_type = { .name = "geneve", }; -/* Calls the ndo_add_geneve_port of the caller in order to +/* Calls the ndo_udp_tunnel_add of the caller in order to * supply the listening GENEVE udp ports. Callers are expected - * to implement the ndo_add_geneve_port. + * to implement the ndo_udp_tunnel_add. */ static void geneve_push_rx_ports(struct net_device *dev) { struct net *net = dev_net(dev); struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_sock *gs; - sa_family_t sa_family; - struct sock *sk; - __be16 port; - - if (!dev->netdev_ops->ndo_add_geneve_port) - return; rcu_read_lock(); - list_for_each_entry_rcu(gs, &gn->sock_list, list) { - sk = gs->sock->sk; - sa_family = sk->sk_family; - port = inet_sk(sk)->inet_sport; - dev->netdev_ops->ndo_add_geneve_port(dev, sa_family, port); - } + list_for_each_entry_rcu(gs, &gn->sock_list, list) + udp_tunnel_push_rx_port(dev, gs->sock, + UDP_TUNNEL_TYPE_GENEVE); rcu_read_unlock(); } @@ -1555,7 +1510,7 @@ static int geneve_netdevice_event(struct notifier_block *unused, { struct net_device *dev = netdev_notifier_info_to_dev(ptr); - if (event == NETDEV_OFFLOAD_PUSH_GENEVE) + if (event == NETDEV_UDP_TUNNEL_PUSH_INFO) geneve_push_rx_ports(dev); return NOTIFY_DONE; diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 4e976a0d5..97e0cbca0 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -16,7 +16,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> -#include <linux/version.h> #include <linux/skbuff.h> #include <linux/udp.h> #include <linux/rculist.h> diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c index acb636963..072cddce9 100644 --- a/drivers/net/hamradio/baycom_par.c +++ b/drivers/net/hamradio/baycom_par.c @@ -156,7 +156,7 @@ struct baycom_state { /* --------------------------------------------------------------------- */ -static void __inline__ baycom_int_freq(struct baycom_state *bc) +static inline void baycom_int_freq(struct baycom_state *bc) { #ifdef BAYCOM_DEBUG unsigned long cur_jiffies = jiffies; @@ -192,7 +192,7 @@ static void __inline__ baycom_int_freq(struct baycom_state *bc) /* --------------------------------------------------------------------- */ -static __inline__ void par96_tx(struct net_device *dev, struct baycom_state *bc) +static inline void par96_tx(struct net_device *dev, struct baycom_state *bc) { int i; 
unsigned int data = hdlcdrv_getbits(&bc->hdrv); @@ -216,7 +216,7 @@ static __inline__ void par96_tx(struct net_device *dev, struct baycom_state *bc) /* --------------------------------------------------------------------- */ -static __inline__ void par96_rx(struct net_device *dev, struct baycom_state *bc) +static inline void par96_rx(struct net_device *dev, struct baycom_state *bc) { int i; unsigned int data, mask, mask2, descx; diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index c270c5a54..591af71ea 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -173,6 +173,7 @@ struct rndis_device { /* Interface */ struct rndis_message; +struct netvsc_device; int netvsc_device_add(struct hv_device *device, void *additional_info); int netvsc_device_remove(struct hv_device *device); int netvsc_send(struct hv_device *device, @@ -189,8 +190,8 @@ int netvsc_recv_callback(struct hv_device *device_obj, struct vmbus_channel *channel, u16 vlan_tci); void netvsc_channel_cb(void *context); -int rndis_filter_open(struct hv_device *dev); -int rndis_filter_close(struct hv_device *dev); +int rndis_filter_open(struct netvsc_device *nvdev); +int rndis_filter_close(struct netvsc_device *nvdev); int rndis_filter_device_add(struct hv_device *dev, void *additional_info); void rndis_filter_device_remove(struct hv_device *dev); @@ -200,7 +201,7 @@ int rndis_filter_receive(struct hv_device *dev, struct vmbus_channel *channel); int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter); -int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac); +int rndis_filter_set_device_mac(struct net_device *ndev, char *mac); void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); @@ -643,12 +644,6 @@ struct netvsc_reconfig { u32 event; }; -struct garp_wrk { - struct work_struct dwrk; - struct net_device *netdev; - struct netvsc_device *netvsc_dev; -}; - /* The context of the netvsc device */ struct net_device_context { /* point back to our device context */ @@ -666,7 +661,6 @@ struct net_device_context { struct work_struct work; u32 msg_enable; /* debug level */ - struct garp_wrk gwrk; struct netvsc_stats __percpu *tx_stats; struct netvsc_stats __percpu *rx_stats; @@ -677,6 +671,15 @@ struct net_device_context { /* the device is going away */ bool start_remove; + + /* State to manage the associated VF interface. */ + struct net_device *vf_netdev; + bool vf_inject; + atomic_t vf_use_cnt; + /* 1: allocated, serial number is valid. 0: not allocated */ + u32 vf_alloc; + /* Serial number of the VF to team with */ + u32 vf_serial; }; /* Per netvsc device */ @@ -732,17 +735,21 @@ struct netvsc_device { u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ u32 pkt_align; /* alignment bytes, e.g. 8 */ - /* 1: allocated, serial number is valid. 0: not allocated */ - u32 vf_alloc; - /* Serial number of the VF to team with */ - u32 vf_serial; atomic_t open_cnt; - /* State to manage the associated VF interface. 
*/ - bool vf_inject; - struct net_device *vf_netdev; - atomic_t vf_use_cnt; }; +static inline struct netvsc_device * +net_device_to_netvsc_device(struct net_device *ndev) +{ + return ((struct net_device_context *)netdev_priv(ndev))->nvdev; +} + +static inline struct netvsc_device * +hv_device_to_netvsc_device(struct hv_device *device) +{ + return net_device_to_netvsc_device(hv_get_drvdata(device)); +} + /* NdisInitialize message */ struct rndis_initialize_request { u32 req_id; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 719cb3578..410fb8e81 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -77,13 +77,9 @@ static struct netvsc_device *alloc_net_device(void) init_waitqueue_head(&net_device->wait_drain); net_device->destroy = false; atomic_set(&net_device->open_cnt, 0); - atomic_set(&net_device->vf_use_cnt, 0); net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; - net_device->vf_netdev = NULL; - net_device->vf_inject = false; - return net_device; } @@ -95,9 +91,7 @@ static void free_netvsc_device(struct netvsc_device *nvdev) static struct netvsc_device *get_outbound_net_device(struct hv_device *device) { - struct net_device *ndev = hv_get_drvdata(device); - struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct netvsc_device *net_device = net_device_ctx->nvdev; + struct netvsc_device *net_device = hv_device_to_netvsc_device(device); if (net_device && net_device->destroy) net_device = NULL; @@ -107,9 +101,7 @@ static struct netvsc_device *get_outbound_net_device(struct hv_device *device) static struct netvsc_device *get_inbound_net_device(struct hv_device *device) { - struct net_device *ndev = hv_get_drvdata(device); - struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct netvsc_device *net_device = net_device_ctx->nvdev; + struct netvsc_device *net_device = hv_device_to_netvsc_device(device); if (!net_device) goto get_in_err; @@ -128,8 +120,7 @@ static int netvsc_destroy_buf(struct hv_device *device) struct nvsp_message *revoke_packet; int ret = 0; struct net_device *ndev = hv_get_drvdata(device); - struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct netvsc_device *net_device = net_device_ctx->nvdev; + struct netvsc_device *net_device = net_device_to_netvsc_device(ndev); /* * If we got a section count, it means we received a @@ -249,7 +240,6 @@ static int netvsc_destroy_buf(struct hv_device *device) static int netvsc_init_buf(struct hv_device *device) { int ret = 0; - unsigned long t; struct netvsc_device *net_device; struct nvsp_message *init_packet; struct net_device *ndev; @@ -310,9 +300,7 @@ static int netvsc_init_buf(struct hv_device *device) goto cleanup; } - t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ); - BUG_ON(t == 0); - + wait_for_completion(&net_device->channel_init_wait); /* Check the response */ if (init_packet->msg.v1_msg. @@ -395,8 +383,7 @@ static int netvsc_init_buf(struct hv_device *device) goto cleanup; } - t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ); - BUG_ON(t == 0); + wait_for_completion(&net_device->channel_init_wait); /* Check the response */ if (init_packet->msg.v1_msg. 
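The netvsc hunks above (and the rndis_filter ones further down) systematically replace wait_for_completion_timeout(..., 5*HZ) plus BUG_ON()/-ETIMEDOUT handling with a plain wait_for_completion(): the host is expected to answer eventually, and giving up early left requests that could still receive a send-completion (hence the old "can't put_rndis_request" comments). As a rough sketch of the two idioms only — the demo_* names are hypothetical and not taken from this driver:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Hypothetical request context; not a structure from the driver above. */
struct demo_req {
	struct completion done;
};

static void demo_req_init(struct demo_req *req)
{
	init_completion(&req->done);
}

/* Called from the channel callback when the reply lands. */
static void demo_reply_arrived(struct demo_req *req)
{
	complete(&req->done);
}

/* Old idiom: bounded wait; every caller must cope with a timeout. */
static int demo_wait_bounded(struct demo_req *req)
{
	unsigned long t = wait_for_completion_timeout(&req->done, 5 * HZ);

	return t ? 0 : -ETIMEDOUT;
}

/* New idiom: sleep until the host actually answers. */
static void demo_wait_unbounded(struct demo_req *req)
{
	wait_for_completion(&req->done);
}

The cost is that a wedged host now blocks the calling task indefinitely; the series evidently prefers an unbounded-but-correct wait over acting on a request whose completion may still arrive later.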
@@ -450,7 +437,6 @@ static int negotiate_nvsp_ver(struct hv_device *device, { struct net_device *ndev = hv_get_drvdata(device); int ret; - unsigned long t; memset(init_packet, 0, sizeof(struct nvsp_message)); init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT; @@ -467,10 +453,7 @@ static int negotiate_nvsp_ver(struct hv_device *device, if (ret != 0) return ret; - t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ); - - if (t == 0) - return -ETIMEDOUT; + wait_for_completion(&net_device->channel_init_wait); if (init_packet->msg.init_msg.init_complete.status != NVSP_STAT_SUCCESS) @@ -1119,16 +1102,16 @@ static void netvsc_send_table(struct hv_device *hdev, nvscdev->send_table[i] = tab[i]; } -static void netvsc_send_vf(struct netvsc_device *nvdev, +static void netvsc_send_vf(struct net_device_context *net_device_ctx, struct nvsp_message *nvmsg) { - nvdev->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; - nvdev->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; + net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; + net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; } static inline void netvsc_receive_inband(struct hv_device *hdev, - struct netvsc_device *nvdev, - struct nvsp_message *nvmsg) + struct net_device_context *net_device_ctx, + struct nvsp_message *nvmsg) { switch (nvmsg->hdr.msg_type) { case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: @@ -1136,11 +1119,45 @@ static inline void netvsc_receive_inband(struct hv_device *hdev, break; case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: - netvsc_send_vf(nvdev, nvmsg); + netvsc_send_vf(net_device_ctx, nvmsg); break; } } +static void netvsc_process_raw_pkt(struct hv_device *device, + struct vmbus_channel *channel, + struct netvsc_device *net_device, + struct net_device *ndev, + u64 request_id, + struct vmpacket_descriptor *desc) +{ + struct nvsp_message *nvmsg; + struct net_device_context *net_device_ctx = netdev_priv(ndev); + + nvmsg = (struct nvsp_message *)((unsigned long) + desc + (desc->offset8 << 3)); + + switch (desc->type) { + case VM_PKT_COMP: + netvsc_send_completion(net_device, channel, device, desc); + break; + + case VM_PKT_DATA_USING_XFER_PAGES: + netvsc_receive(net_device, channel, device, desc); + break; + + case VM_PKT_DATA_INBAND: + netvsc_receive_inband(device, net_device_ctx, nvmsg); + break; + + default: + netdev_err(ndev, "unhandled packet type %d, tid %llx\n", + desc->type, request_id); + break; + } +} + + void netvsc_channel_cb(void *context) { int ret; @@ -1153,7 +1170,7 @@ void netvsc_channel_cb(void *context) unsigned char *buffer; int bufferlen = NETVSC_PACKET_SIZE; struct net_device *ndev; - struct nvsp_message *nvmsg; + bool need_to_commit = false; if (channel->primary_channel != NULL) device = channel->primary_channel->device_obj; @@ -1167,39 +1184,36 @@ void netvsc_channel_cb(void *context) buffer = get_per_channel_state(channel); do { + desc = get_next_pkt_raw(channel); + if (desc != NULL) { + netvsc_process_raw_pkt(device, + channel, + net_device, + ndev, + desc->trans_id, + desc); + + put_pkt_raw(channel, desc); + need_to_commit = true; + continue; + } + if (need_to_commit) { + need_to_commit = false; + commit_rd_index(channel); + } + ret = vmbus_recvpacket_raw(channel, buffer, bufferlen, &bytes_recvd, &request_id); if (ret == 0) { if (bytes_recvd > 0) { desc = (struct vmpacket_descriptor *)buffer; - nvmsg = (struct nvsp_message *)((unsigned long) - desc + (desc->offset8 << 3)); - switch (desc->type) { - case VM_PKT_COMP: - netvsc_send_completion(net_device, - channel, - device, desc); - 
break; - - case VM_PKT_DATA_USING_XFER_PAGES: - netvsc_receive(net_device, channel, - device, desc); - break; - - case VM_PKT_DATA_INBAND: - netvsc_receive_inband(device, - net_device, - nvmsg); - break; - - default: - netdev_err(ndev, - "unhandled packet type %d, " - "tid %llx len %d\n", - desc->type, request_id, - bytes_recvd); - break; - } + netvsc_process_raw_pkt(device, + channel, + net_device, + ndev, + request_id, + desc); + } else { /* diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 6a69b5cc9..3ba29fc80 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -98,16 +98,14 @@ static void netvsc_set_multicast_list(struct net_device *net) static int netvsc_open(struct net_device *net) { - struct net_device_context *net_device_ctx = netdev_priv(net); - struct hv_device *device_obj = net_device_ctx->device_ctx; - struct netvsc_device *nvdev = net_device_ctx->nvdev; + struct netvsc_device *nvdev = net_device_to_netvsc_device(net); struct rndis_device *rdev; int ret = 0; netif_carrier_off(net); /* Open up the device */ - ret = rndis_filter_open(device_obj); + ret = rndis_filter_open(nvdev); if (ret != 0) { netdev_err(net, "unable to open device (ret %d).\n", ret); return ret; @@ -125,7 +123,6 @@ static int netvsc_open(struct net_device *net) static int netvsc_close(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); - struct hv_device *device_obj = net_device_ctx->device_ctx; struct netvsc_device *nvdev = net_device_ctx->nvdev; int ret; u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20; @@ -135,7 +132,7 @@ static int netvsc_close(struct net_device *net) /* Make sure netvsc_set_multicast_list doesn't re-enable filter! */ cancel_work_sync(&net_device_ctx->work); - ret = rndis_filter_close(device_obj); + ret = rndis_filter_close(nvdev); if (ret != 0) { netdev_err(net, "unable to close device (ret %d).\n", ret); return ret; @@ -661,20 +658,19 @@ int netvsc_recv_callback(struct hv_device *device_obj, struct sk_buff *skb; struct sk_buff *vf_skb; struct netvsc_stats *rx_stats; - struct netvsc_device *netvsc_dev = net_device_ctx->nvdev; u32 bytes_recvd = packet->total_data_buflen; int ret = 0; if (!net || net->reg_state != NETREG_REGISTERED) return NVSP_STAT_FAIL; - if (READ_ONCE(netvsc_dev->vf_inject)) { - atomic_inc(&netvsc_dev->vf_use_cnt); - if (!READ_ONCE(netvsc_dev->vf_inject)) { + if (READ_ONCE(net_device_ctx->vf_inject)) { + atomic_inc(&net_device_ctx->vf_use_cnt); + if (!READ_ONCE(net_device_ctx->vf_inject)) { /* * We raced; just move on. */ - atomic_dec(&netvsc_dev->vf_use_cnt); + atomic_dec(&net_device_ctx->vf_use_cnt); goto vf_injection_done; } @@ -686,22 +682,23 @@ int netvsc_recv_callback(struct hv_device *device_obj, * the host). Deliver these via the VF interface * in the guest. 
*/ - vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet, - csum_info, *data, vlan_tci); + vf_skb = netvsc_alloc_recv_skb(net_device_ctx->vf_netdev, + packet, csum_info, *data, + vlan_tci); if (vf_skb != NULL) { - ++netvsc_dev->vf_netdev->stats.rx_packets; - netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd; + ++net_device_ctx->vf_netdev->stats.rx_packets; + net_device_ctx->vf_netdev->stats.rx_bytes += + bytes_recvd; netif_receive_skb(vf_skb); } else { ++net->stats.rx_dropped; ret = NVSP_STAT_FAIL; } - atomic_dec(&netvsc_dev->vf_use_cnt); + atomic_dec(&net_device_ctx->vf_use_cnt); return ret; } vf_injection_done: - net_device_ctx = netdev_priv(net); rx_stats = this_cpu_ptr(net_device_ctx->rx_stats); /* Allocate a skb - TODO direct I/O to pages? */ @@ -986,8 +983,6 @@ static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net, static int netvsc_set_mac_addr(struct net_device *ndev, void *p) { - struct net_device_context *ndevctx = netdev_priv(ndev); - struct hv_device *hdev = ndevctx->device_ctx; struct sockaddr *addr = p; char save_adr[ETH_ALEN]; unsigned char save_aatype; @@ -1000,7 +995,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p) if (err != 0) return err; - err = rndis_filter_set_device_mac(hdev, addr->sa_data); + err = rndis_filter_set_device_mac(ndev, addr->sa_data); if (err != 0) { /* roll back to saved MAC */ memcpy(ndev->dev_addr, save_adr, ETH_ALEN); @@ -1156,17 +1151,6 @@ static void netvsc_free_netdev(struct net_device *netdev) free_netdev(netdev); } -static void netvsc_notify_peers(struct work_struct *wrk) -{ - struct garp_wrk *gwrk; - - gwrk = container_of(wrk, struct garp_wrk, dwrk); - - netdev_notify_peers(gwrk->netdev); - - atomic_dec(&gwrk->netvsc_dev->vf_use_cnt); -} - static struct net_device *get_netvsc_net_device(char *mac) { struct net_device *dev, *found = NULL; @@ -1209,7 +1193,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev) net_device_ctx = netdev_priv(ndev); netvsc_dev = net_device_ctx->nvdev; - if (netvsc_dev == NULL) + if (!netvsc_dev || net_device_ctx->vf_netdev) return NOTIFY_DONE; netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); @@ -1217,10 +1201,23 @@ static int netvsc_register_vf(struct net_device *vf_netdev) * Take a reference on the module. */ try_module_get(THIS_MODULE); - netvsc_dev->vf_netdev = vf_netdev; + net_device_ctx->vf_netdev = vf_netdev; return NOTIFY_OK; } +static void netvsc_inject_enable(struct net_device_context *net_device_ctx) +{ + net_device_ctx->vf_inject = true; +} + +static void netvsc_inject_disable(struct net_device_context *net_device_ctx) +{ + net_device_ctx->vf_inject = false; + + /* Wait for currently active users to drain out. */ + while (atomic_read(&net_device_ctx->vf_use_cnt) != 0) + udelay(50); +} static int netvsc_vf_up(struct net_device *vf_netdev) { @@ -1239,16 +1236,16 @@ static int netvsc_vf_up(struct net_device *vf_netdev) net_device_ctx = netdev_priv(ndev); netvsc_dev = net_device_ctx->nvdev; - if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) + if (!netvsc_dev || !net_device_ctx->vf_netdev) return NOTIFY_DONE; netdev_info(ndev, "VF up: %s\n", vf_netdev->name); - netvsc_dev->vf_inject = true; + netvsc_inject_enable(net_device_ctx); /* * Open the device before switching data path. */ - rndis_filter_open(net_device_ctx->device_ctx); + rndis_filter_open(netvsc_dev); /* * notify the host to switch the data path. 
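The vf_inject/vf_use_cnt handling above — the READ_ONCE() test, atomic_inc(), re-test sequence in the receive path, and the udelay() drain loop added as netvsc_inject_disable() — is a small enter/drain protocol guarding the VF fast path. A condensed sketch of the same pattern, using hypothetical demo_gate names (the real driver keeps the flag and counter in net_device_context and uses a plain store rather than WRITE_ONCE()):

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/delay.h>

/* Hypothetical gate, mirroring vf_inject/vf_use_cnt above. */
struct demo_gate {
	bool enabled;
	atomic_t users;
};

static bool demo_gate_enter(struct demo_gate *g)
{
	if (!READ_ONCE(g->enabled))
		return false;
	atomic_inc(&g->users);
	/* Re-check: disable may have run between the test and the inc. */
	if (!READ_ONCE(g->enabled)) {
		atomic_dec(&g->users);
		return false;
	}
	return true;
}

static void demo_gate_leave(struct demo_gate *g)
{
	atomic_dec(&g->users);
}

static void demo_gate_disable(struct demo_gate *g)
{
	WRITE_ONCE(g->enabled, false);
	/* Drain: busy-wait until every in-flight user has left. */
	while (atomic_read(&g->users) != 0)
		udelay(50);
}

Heavier-weight alternatives would be RCU or a percpu_ref; the busy-wait is tolerable here because disable only runs from slow-path netdev notifier callbacks (VF down/unregister), never from the packet path itself.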
@@ -1258,15 +1255,8 @@ static int netvsc_vf_up(struct net_device *vf_netdev) netif_carrier_off(ndev); - /* - * Now notify peers. We are scheduling work to - * notify peers; take a reference to prevent - * the VF interface from vanishing. - */ - atomic_inc(&netvsc_dev->vf_use_cnt); - net_device_ctx->gwrk.netdev = vf_netdev; - net_device_ctx->gwrk.netvsc_dev = netvsc_dev; - schedule_work(&net_device_ctx->gwrk.dwrk); + /* Now notify peers through VF device. */ + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev); return NOTIFY_OK; } @@ -1289,29 +1279,18 @@ static int netvsc_vf_down(struct net_device *vf_netdev) net_device_ctx = netdev_priv(ndev); netvsc_dev = net_device_ctx->nvdev; - if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL)) + if (!netvsc_dev || !net_device_ctx->vf_netdev) return NOTIFY_DONE; netdev_info(ndev, "VF down: %s\n", vf_netdev->name); - netvsc_dev->vf_inject = false; - /* - * Wait for currently active users to - * drain out. - */ - - while (atomic_read(&netvsc_dev->vf_use_cnt) != 0) - udelay(50); + netvsc_inject_disable(net_device_ctx); netvsc_switch_datapath(ndev, false); netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name); - rndis_filter_close(net_device_ctx->device_ctx); + rndis_filter_close(netvsc_dev); netif_carrier_on(ndev); - /* - * Notify peers. - */ - atomic_inc(&netvsc_dev->vf_use_cnt); - net_device_ctx->gwrk.netdev = ndev; - net_device_ctx->gwrk.netvsc_dev = netvsc_dev; - schedule_work(&net_device_ctx->gwrk.dwrk); + + /* Now notify peers through netvsc device. */ + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev); return NOTIFY_OK; } @@ -1333,11 +1312,11 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) net_device_ctx = netdev_priv(ndev); netvsc_dev = net_device_ctx->nvdev; - if (netvsc_dev == NULL) + if (!netvsc_dev || !net_device_ctx->vf_netdev) return NOTIFY_DONE; netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); - - netvsc_dev->vf_netdev = NULL; + netvsc_inject_disable(net_device_ctx); + net_device_ctx->vf_netdev = NULL; module_put(THIS_MODULE); return NOTIFY_OK; } @@ -1383,11 +1362,14 @@ static int netvsc_probe(struct hv_device *dev, INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change); INIT_WORK(&net_device_ctx->work, do_set_multicast); - INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers); spin_lock_init(&net_device_ctx->lock); INIT_LIST_HEAD(&net_device_ctx->reconfig_events); + atomic_set(&net_device_ctx->vf_use_cnt, 0); + net_device_ctx->vf_netdev = NULL; + net_device_ctx->vf_inject = false; + net->netdev_ops = &device_ops; net->hw_features = NETVSC_HW_FEATURES; @@ -1500,6 +1482,15 @@ static int netvsc_netdev_event(struct notifier_block *this, { struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); + /* Avoid Vlan dev with same MAC registering as VF */ + if (event_dev->priv_flags & IFF_802_1Q_VLAN) + return NOTIFY_DONE; + + /* Avoid Bonding master dev with same MAC registering as VF */ + if (event_dev->priv_flags & IFF_BONDING && + event_dev->flags & IFF_MASTER) + return NOTIFY_DONE; + switch (event) { case NETDEV_REGISTER: return netvsc_register_vf(event_dev); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 97c292b7d..8e830f741 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -466,7 +466,6 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, struct rndis_query_request *query; struct rndis_query_complete *query_complete; int ret = 0; - unsigned long t; if 
(!result) return -EINVAL; @@ -503,11 +502,7 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, if (ret != 0) goto cleanup; - t = wait_for_completion_timeout(&request->wait_event, 5*HZ); - if (t == 0) { - ret = -ETIMEDOUT; - goto cleanup; - } + wait_for_completion(&request->wait_event); /* Copy the response back */ query_complete = &request->response_msg.msg.query_complete; @@ -543,11 +538,9 @@ static int rndis_filter_query_device_mac(struct rndis_device *dev) #define NWADR_STR "NetworkAddress" #define NWADR_STRLEN 14 -int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac) +int rndis_filter_set_device_mac(struct net_device *ndev, char *mac) { - struct net_device *ndev = hv_get_drvdata(hdev); - struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct netvsc_device *nvdev = net_device_ctx->nvdev; + struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev); struct rndis_device *rdev = nvdev->extension; struct rndis_request *request; struct rndis_set_request *set; @@ -558,7 +551,6 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac) u32 extlen = sizeof(struct rndis_config_parameter_info) + 2*NWADR_STRLEN + 4*ETH_ALEN; int ret; - unsigned long t; request = get_rndis_request(rdev, RNDIS_MSG_SET, RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen); @@ -599,21 +591,13 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac) if (ret != 0) goto cleanup; - t = wait_for_completion_timeout(&request->wait_event, 5*HZ); - if (t == 0) { - netdev_err(ndev, "timeout before we got a set response...\n"); - /* - * can't put_rndis_request, since we may still receive a - * send-completion. - */ - return -EBUSY; - } else { - set_complete = &request->response_msg.msg.set_complete; - if (set_complete->status != RNDIS_STATUS_SUCCESS) { - netdev_err(ndev, "Fail to set MAC on host side:0x%x\n", - set_complete->status); - ret = -EINVAL; - } + wait_for_completion(&request->wait_event); + + set_complete = &request->response_msg.msg.set_complete; + if (set_complete->status != RNDIS_STATUS_SUCCESS) { + netdev_err(ndev, "Fail to set MAC on host side:0x%x\n", + set_complete->status); + ret = -EINVAL; } cleanup: @@ -622,12 +606,10 @@ cleanup: } static int -rndis_filter_set_offload_params(struct hv_device *hdev, +rndis_filter_set_offload_params(struct net_device *ndev, struct ndis_offload_params *req_offloads) { - struct net_device *ndev = hv_get_drvdata(hdev); - struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct netvsc_device *nvdev = net_device_ctx->nvdev; + struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev); struct rndis_device *rdev = nvdev->extension; struct rndis_request *request; struct rndis_set_request *set; @@ -635,7 +617,6 @@ rndis_filter_set_offload_params(struct hv_device *hdev, struct rndis_set_complete *set_complete; u32 extlen = sizeof(struct ndis_offload_params); int ret; - unsigned long t; u32 vsp_version = nvdev->nvsp_version; if (vsp_version <= NVSP_PROTOCOL_VERSION_4) { @@ -669,20 +650,12 @@ rndis_filter_set_offload_params(struct hv_device *hdev, if (ret != 0) goto cleanup; - t = wait_for_completion_timeout(&request->wait_event, 5*HZ); - if (t == 0) { - netdev_err(ndev, "timeout before we got aOFFLOAD set response...\n"); - /* can't put_rndis_request, since we may still receive a - * send-completion. 
- */ - return -EBUSY; - } else { - set_complete = &request->response_msg.msg.set_complete; - if (set_complete->status != RNDIS_STATUS_SUCCESS) { - netdev_err(ndev, "Fail to set offload on host side:0x%x\n", - set_complete->status); - ret = -EINVAL; - } + wait_for_completion(&request->wait_event); + set_complete = &request->response_msg.msg.set_complete; + if (set_complete->status != RNDIS_STATUS_SUCCESS) { + netdev_err(ndev, "Fail to set offload on host side:0x%x\n", + set_complete->status); + ret = -EINVAL; } cleanup: @@ -710,7 +683,6 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) u32 *itab; u8 *keyp; int i, ret; - unsigned long t; request = get_rndis_request( rdev, RNDIS_MSG_SET, @@ -753,20 +725,12 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue) if (ret != 0) goto cleanup; - t = wait_for_completion_timeout(&request->wait_event, 5*HZ); - if (t == 0) { - netdev_err(ndev, "timeout before we got a set response...\n"); - /* can't put_rndis_request, since we may still receive a - * send-completion. - */ - return -ETIMEDOUT; - } else { - set_complete = &request->response_msg.msg.set_complete; - if (set_complete->status != RNDIS_STATUS_SUCCESS) { - netdev_err(ndev, "Fail to set RSS parameters:0x%x\n", - set_complete->status); - ret = -EINVAL; - } + wait_for_completion(&request->wait_event); + set_complete = &request->response_msg.msg.set_complete; + if (set_complete->status != RNDIS_STATUS_SUCCESS) { + netdev_err(ndev, "Fail to set RSS parameters:0x%x\n", + set_complete->status); + ret = -EINVAL; } cleanup: @@ -795,8 +759,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) struct rndis_set_complete *set_complete; u32 status; int ret; - unsigned long t; - struct net_device *ndev = dev->ndev; request = get_rndis_request(dev, RNDIS_MSG_SET, RNDIS_MESSAGE_SIZE(struct rndis_set_request) + @@ -819,26 +781,14 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter) if (ret != 0) goto cleanup; - t = wait_for_completion_timeout(&request->wait_event, 5*HZ); + wait_for_completion(&request->wait_event); - if (t == 0) { - netdev_err(ndev, - "timeout before we got a set response...\n"); - ret = -ETIMEDOUT; - /* - * We can't deallocate the request since we may still receive a - * send completion for it. 
- */ - goto exit; - } else { - set_complete = &request->response_msg.msg.set_complete; - status = set_complete->status; - } + set_complete = &request->response_msg.msg.set_complete; + status = set_complete->status; cleanup: if (request) put_rndis_request(dev, request); -exit: return ret; } @@ -850,9 +800,7 @@ static int rndis_filter_init_device(struct rndis_device *dev) struct rndis_initialize_complete *init_complete; u32 status; int ret; - unsigned long t; - struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); - struct netvsc_device *nvdev = net_device_ctx->nvdev; + struct netvsc_device *nvdev = net_device_to_netvsc_device(dev->ndev); request = get_rndis_request(dev, RNDIS_MSG_INIT, RNDIS_MESSAGE_SIZE(struct rndis_initialize_request)); @@ -875,12 +823,7 @@ static int rndis_filter_init_device(struct rndis_device *dev) goto cleanup; } - t = wait_for_completion_timeout(&request->wait_event, 5*HZ); - - if (t == 0) { - ret = -ETIMEDOUT; - goto cleanup; - } + wait_for_completion(&request->wait_event); init_complete = &request->response_msg.msg.init_complete; status = init_complete->status; @@ -977,8 +920,7 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) { struct net_device *ndev = hv_get_drvdata(new_sc->primary_channel->device_obj); - struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct netvsc_device *nvscdev = net_device_ctx->nvdev; + struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev); u16 chn_index = new_sc->offermsg.offer.sub_channel_index; int ret; unsigned long flags; @@ -1014,7 +956,6 @@ int rndis_filter_device_add(struct hv_device *dev, struct netvsc_device_info *device_info = additional_info; struct ndis_offload_params offloads; struct nvsp_message *init_packet; - unsigned long t; struct ndis_recv_scale_cap rsscap; u32 rsscap_size = sizeof(struct ndis_recv_scale_cap); u32 mtu, size; @@ -1088,7 +1029,7 @@ int rndis_filter_device_add(struct hv_device *dev, offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED; - ret = rndis_filter_set_offload_params(dev, &offloads); + ret = rndis_filter_set_offload_params(net, &offloads); if (ret) goto err_dev_remv; @@ -1157,11 +1098,8 @@ int rndis_filter_device_add(struct hv_device *dev, VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); if (ret) goto out; - t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ); - if (t == 0) { - ret = -ETIMEDOUT; - goto out; - } + wait_for_completion(&net_device->channel_init_wait); + if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { ret = -ENODEV; @@ -1196,21 +1134,14 @@ err_dev_remv: void rndis_filter_device_remove(struct hv_device *dev) { - struct net_device *ndev = hv_get_drvdata(dev); - struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct netvsc_device *net_dev = net_device_ctx->nvdev; + struct netvsc_device *net_dev = hv_device_to_netvsc_device(dev); struct rndis_device *rndis_dev = net_dev->extension; - unsigned long t; /* If not all subchannel offers are complete, wait for them until * completion to avoid race. 
*/ - while (net_dev->num_sc_offered > 0) { - t = wait_for_completion_timeout(&net_dev->channel_init_wait, - 10 * HZ); - if (t == 0) - WARN(1, "Netvsc: Waiting for sub-channel processing"); - } + if (net_dev->num_sc_offered > 0) + wait_for_completion(&net_dev->channel_init_wait); /* Halt and release the rndis device */ rndis_filter_halt_device(rndis_dev); @@ -1222,27 +1153,19 @@ void rndis_filter_device_remove(struct hv_device *dev) } -int rndis_filter_open(struct hv_device *dev) +int rndis_filter_open(struct netvsc_device *nvdev) { - struct net_device *ndev = hv_get_drvdata(dev); - struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct netvsc_device *net_device = net_device_ctx->nvdev; - - if (!net_device) + if (!nvdev) return -EINVAL; - if (atomic_inc_return(&net_device->open_cnt) != 1) + if (atomic_inc_return(&nvdev->open_cnt) != 1) return 0; - return rndis_filter_open_device(net_device->extension); + return rndis_filter_open_device(nvdev->extension); } -int rndis_filter_close(struct hv_device *dev) +int rndis_filter_close(struct netvsc_device *nvdev) { - struct net_device *ndev = hv_get_drvdata(dev); - struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct netvsc_device *nvdev = net_device_ctx->nvdev; - if (!nvdev) return -EINVAL; diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c index 52c9051f3..1056ed142 100644 --- a/drivers/net/ieee802154/atusb.c +++ b/drivers/net/ieee802154/atusb.c @@ -366,11 +366,7 @@ static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 channel) struct atusb *atusb = hw->priv; int ret; - /* This implicitly sets the CCA (Clear Channel Assessment) mode to 0, - * "Mode 3a, Carrier sense OR energy above threshold". - * We should probably make this configurable. @@@ - */ - ret = atusb_write_reg(atusb, RG_PHY_CC_CCA, channel); + ret = atusb_write_subreg(atusb, SR_CHANNEL, channel); if (ret < 0) return ret; msleep(1); /* @@@ ugly synchronization */ diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c index 860d4aed8..0becf0ac3 100644 --- a/drivers/net/ieee802154/fakelb.c +++ b/drivers/net/ieee802154/fakelb.c @@ -112,6 +112,12 @@ static void fakelb_hw_stop(struct ieee802154_hw *hw) write_unlock_bh(&fakelb_ifup_phys_lock); } +static int +fakelb_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on) +{ + return 0; +} + static const struct ieee802154_ops fakelb_ops = { .owner = THIS_MODULE, .xmit_async = fakelb_hw_xmit, @@ -119,6 +125,7 @@ static const struct ieee802154_ops fakelb_ops = { .set_channel = fakelb_hw_channel, .start = fakelb_hw_start, .stop = fakelb_hw_stop, + .set_promiscuous_mode = fakelb_set_promiscuous_mode, }; /* Number of dummy devices to be set up by this module. 
*/ @@ -174,6 +181,7 @@ static int fakelb_add_one(struct device *dev) hw->phy->current_channel = 13; phy->channel = hw->phy->current_channel; + hw->flags = IEEE802154_HW_PROMISCUOUS; hw->parent = dev; err = ieee802154_register_hw(hw); diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c index f446db828..7b131f8e4 100644 --- a/drivers/net/ieee802154/mrf24j40.c +++ b/drivers/net/ieee802154/mrf24j40.c @@ -1054,6 +1054,8 @@ static irqreturn_t mrf24j40_isr(int irq, void *data) disable_irq_nosync(irq); devrec->irq_buf[0] = MRF24J40_READSHORT(REG_INTSTAT); + devrec->irq_buf[1] = 0; + /* Read the interrupt status */ ret = spi_async(devrec->spi, &devrec->irq_msg); if (ret) { diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index d6d0524ee..b5f9511d8 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -254,6 +254,18 @@ acct: } } +static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev) +{ + bool xnet = true; + + if (dev) + xnet = !net_eq(dev_net(skb->dev), dev_net(dev)); + + skb_scrub_packet(skb, xnet); + if (dev) + skb->dev = dev; +} + static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb, bool local) { @@ -280,7 +292,7 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb, *pskb = skb; } - skb->dev = dev; + ipvlan_skb_crossing_ns(skb, dev); if (local) { skb->pkt_type = PACKET_HOST; @@ -347,7 +359,7 @@ static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, return addr; } -static int ipvlan_process_v4_outbound(struct sk_buff *skb, bool xnet) +static int ipvlan_process_v4_outbound(struct sk_buff *skb) { const struct iphdr *ip4h = ip_hdr(skb); struct net_device *dev = skb->dev; @@ -370,7 +382,6 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb, bool xnet) ip_rt_put(rt); goto err; } - skb_scrub_packet(skb, xnet); skb_dst_set(skb, &rt->dst); err = ip_local_out(net, skb->sk, skb); if (unlikely(net_xmit_eval(err))) @@ -385,7 +396,7 @@ out: return ret; } -static int ipvlan_process_v6_outbound(struct sk_buff *skb, bool xnet) +static int ipvlan_process_v6_outbound(struct sk_buff *skb) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); struct net_device *dev = skb->dev; @@ -408,7 +419,6 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb, bool xnet) dst_release(dst); goto err; } - skb_scrub_packet(skb, xnet); skb_dst_set(skb, dst); err = ip6_local_out(net, skb->sk, skb); if (unlikely(net_xmit_eval(err))) @@ -423,7 +433,7 @@ out: return ret; } -static int ipvlan_process_outbound(struct sk_buff *skb, bool xnet) +static int ipvlan_process_outbound(struct sk_buff *skb) { struct ethhdr *ethh = eth_hdr(skb); int ret = NET_XMIT_DROP; @@ -447,9 +457,9 @@ static int ipvlan_process_outbound(struct sk_buff *skb, bool xnet) } if (skb->protocol == htons(ETH_P_IPV6)) - ret = ipvlan_process_v6_outbound(skb, xnet); + ret = ipvlan_process_v6_outbound(skb); else if (skb->protocol == htons(ETH_P_IP)) - ret = ipvlan_process_v4_outbound(skb, xnet); + ret = ipvlan_process_v4_outbound(skb); else { pr_warn_ratelimited("Dropped outbound packet type=%x\n", ntohs(skb->protocol)); @@ -485,7 +495,6 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) void *lyr3h; struct ipvl_addr *addr; int addr_type; - bool xnet; lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); if (!lyr3h) @@ -496,9 +505,8 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) return ipvlan_rcv_frame(addr, &skb, true); out: - xnet = 
!net_eq(dev_net(skb->dev), dev_net(ipvlan->phy_dev)); - skb->dev = ipvlan->phy_dev; - return ipvlan_process_outbound(skb, xnet); + ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev); + return ipvlan_process_outbound(skb); } static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) @@ -528,11 +536,12 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) return dev_forward_skb(ipvlan->phy_dev, skb); } else if (is_multicast_ether_addr(eth->h_dest)) { + ipvlan_skb_crossing_ns(skb, NULL); ipvlan_multicast_enqueue(ipvlan->port, skb); return NET_XMIT_SUCCESS; } - skb->dev = ipvlan->phy_dev; + ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev); return dev_queue_xmit(skb); } @@ -622,8 +631,10 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb, * when work-queue processes this frame. This is * achieved by returning RX_HANDLER_PASS. */ - if (nskb) + if (nskb) { + ipvlan_skb_crossing_ns(nskb, NULL); ipvlan_multicast_enqueue(port, nskb); + } } } else { struct ipvl_addr *addr; diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 1c4d395fb..18b4e8c7f 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -80,13 +80,6 @@ static void ipvlan_port_destroy(struct net_device *dev) kfree_rcu(port, rcu); } -/* ipvlan network devices have devices nesting below it and are a special - * "super class" of normal network devices; split their locks off into a - * separate class since they always nest. - */ -static struct lock_class_key ipvlan_netdev_xmit_lock_key; -static struct lock_class_key ipvlan_netdev_addr_lock_key; - #define IPVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ @@ -96,19 +89,6 @@ static struct lock_class_key ipvlan_netdev_addr_lock_key; #define IPVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) -static void ipvlan_set_lockdep_class_one(struct net_device *dev, - struct netdev_queue *txq, - void *_unused) -{ - lockdep_set_class(&txq->_xmit_lock, &ipvlan_netdev_xmit_lock_key); -} - -static void ipvlan_set_lockdep_class(struct net_device *dev) -{ - lockdep_set_class(&dev->addr_list_lock, &ipvlan_netdev_addr_lock_key); - netdev_for_each_tx_queue(dev, ipvlan_set_lockdep_class_one, NULL); -} - static int ipvlan_init(struct net_device *dev) { struct ipvl_dev *ipvlan = netdev_priv(dev); @@ -123,7 +103,7 @@ static int ipvlan_init(struct net_device *dev) dev->gso_max_segs = phy_dev->gso_max_segs; dev->hard_header_len = phy_dev->hard_header_len; - ipvlan_set_lockdep_class(dev); + netdev_lockdep_set_classes(dev); ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats); if (!ipvlan->pcpu_stats) diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index a400288cb..6255973e3 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -169,10 +169,9 @@ static void loopback_setup(struct net_device *dev) dev->flags = IFF_LOOPBACK; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; netif_keep_dst(dev); - dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO; + dev->hw_features = NETIF_F_GSO_SOFTWARE; dev->features = NETIF_F_SG | NETIF_F_FRAGLIST - | NETIF_F_ALL_TSO - | NETIF_F_UFO + | NETIF_F_GSO_SOFTWARE | NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_SCTP_CRC diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index a70b6c460..351e701eb 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -18,6 +18,7 @@ #include <linux/rtnetlink.h> 
#include <net/genetlink.h> #include <net/sock.h> +#include <net/gro_cells.h> #include <uapi/linux/if_macsec.h> @@ -268,6 +269,8 @@ struct macsec_dev { struct net_device *real_dev; struct pcpu_secy_stats __percpu *stats; struct list_head secys; + struct gro_cells gro_cells; + unsigned int nest_level; }; /** @@ -342,7 +345,6 @@ static void free_rxsa(struct rcu_head *head) crypto_free_aead(sa->key.tfm); free_percpu(sa->stats); - macsec_rxsc_put(sa->sc); kfree(sa); } @@ -508,7 +510,7 @@ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len) } #define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true)) -#define MACSEC_NEEDED_TAILROOM MACSEC_MAX_ICV_LEN +#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn) { @@ -861,6 +863,7 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err) struct net_device *dev = skb->dev; struct macsec_dev *macsec = macsec_priv(dev); struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa; + struct macsec_rx_sc *rx_sc = rx_sa->sc; int len, ret; u32 pn; @@ -879,7 +882,7 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err) macsec_reset_skb(skb, macsec->secy.netdev); len = skb->len; - ret = netif_rx(skb); + ret = gro_cells_receive(&macsec->gro_cells, skb); if (ret == NET_RX_SUCCESS) count_rx(dev, len); else @@ -889,6 +892,7 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err) out: macsec_rxsa_put(rx_sa); + macsec_rxsc_put(rx_sc); dev_put(dev); } @@ -1051,6 +1055,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) struct pcpu_rx_sc_stats *rxsc_stats; struct pcpu_secy_stats *secy_stats; bool pulled_sci; + int ret; if (skb_headroom(skb) < ETH_HLEN) goto drop_direct; @@ -1103,6 +1108,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) list_for_each_entry_rcu(macsec, &rxd->secys, secys) { struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci); + sc = sc ? 
macsec_rxsc_get(sc) : NULL; if (sc) { secy = &macsec->secy; @@ -1177,8 +1183,10 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) if (IS_ERR(skb)) { /* the decrypt callback needs the reference */ - if (PTR_ERR(skb) != -EINPROGRESS) + if (PTR_ERR(skb) != -EINPROGRESS) { macsec_rxsa_put(rx_sa); + macsec_rxsc_put(rx_sc); + } rcu_read_unlock(); *pskb = NULL; return RX_HANDLER_CONSUMED; @@ -1194,16 +1202,23 @@ deliver: if (rx_sa) macsec_rxsa_put(rx_sa); - count_rx(dev, skb->len); + macsec_rxsc_put(rx_sc); + + ret = gro_cells_receive(&macsec->gro_cells, skb); + if (ret == NET_RX_SUCCESS) + count_rx(dev, skb->len); + else + macsec->secy.netdev->stats.rx_dropped++; rcu_read_unlock(); - *pskb = skb; - return RX_HANDLER_ANOTHER; + *pskb = NULL; + return RX_HANDLER_CONSUMED; drop: macsec_rxsa_put(rx_sa); drop_nosa: + macsec_rxsc_put(rx_sc); rcu_read_unlock(); drop_direct: kfree_skb(skb); @@ -1219,7 +1234,6 @@ nosci: list_for_each_entry_rcu(macsec, &rxd->secys, secys) { struct sk_buff *nskb; - int ret; secy_stats = this_cpu_ptr(macsec->stats); @@ -1264,22 +1278,22 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) int ret; tfm = crypto_alloc_aead("gcm(aes)", 0, 0); - if (!tfm || IS_ERR(tfm)) - return NULL; + + if (IS_ERR(tfm)) + return tfm; ret = crypto_aead_setkey(tfm, key, key_len); - if (ret < 0) { - crypto_free_aead(tfm); - return NULL; - } + if (ret < 0) + goto fail; ret = crypto_aead_setauthsize(tfm, icv_len); - if (ret < 0) { - crypto_free_aead(tfm); - return NULL; - } + if (ret < 0) + goto fail; return tfm; +fail: + crypto_free_aead(tfm); + return ERR_PTR(ret); } static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len, @@ -1287,12 +1301,12 @@ static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len, { rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats); if (!rx_sa->stats) - return -1; + return -ENOMEM; rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len); - if (!rx_sa->key.tfm) { + if (IS_ERR(rx_sa->key.tfm)) { free_percpu(rx_sa->stats); - return -1; + return PTR_ERR(rx_sa->key.tfm); } rx_sa->active = false; @@ -1385,12 +1399,12 @@ static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len, { tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats); if (!tx_sa->stats) - return -1; + return -ENOMEM; tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len); - if (!tx_sa->key.tfm) { + if (IS_ERR(tx_sa->key.tfm)) { free_percpu(tx_sa->stats); - return -1; + return PTR_ERR(tx_sa->key.tfm); } tx_sa->active = false; @@ -1623,6 +1637,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) unsigned char assoc_num; struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; + int err; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; @@ -1638,7 +1653,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) rtnl_lock(); rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); - if (IS_ERR(rx_sc) || !macsec_rxsc_get(rx_sc)) { + if (IS_ERR(rx_sc)) { rtnl_unlock(); return PTR_ERR(rx_sc); } @@ -1659,13 +1674,19 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) } rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL); - if (!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), - secy->key_len, secy->icv_len)) { - kfree(rx_sa); + if (!rx_sa) { rtnl_unlock(); return -ENOMEM; } + err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), + secy->key_len, secy->icv_len); + if (err < 0) { 
+ kfree(rx_sa); + rtnl_unlock(); + return err; + } + if (tb_sa[MACSEC_SA_ATTR_PN]) { spin_lock_bh(&rx_sa->lock); rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); @@ -1771,6 +1792,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) struct macsec_tx_sa *tx_sa; unsigned char assoc_num; struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; + int err; if (!attrs[MACSEC_ATTR_IFINDEX]) return -EINVAL; @@ -1807,13 +1829,19 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) } tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL); - if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), - secy->key_len, secy->icv_len)) { - kfree(tx_sa); + if (!tx_sa) { rtnl_unlock(); return -ENOMEM; } + err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), + secy->key_len, secy->icv_len); + if (err < 0) { + kfree(tx_sa); + rtnl_unlock(); + return err; + } + nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN); spin_lock_bh(&tx_sa->lock); @@ -2672,15 +2700,24 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, #define MACSEC_FEATURES \ (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) +static struct lock_class_key macsec_netdev_addr_lock_key; + static int macsec_dev_init(struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; + int err; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) return -ENOMEM; + err = gro_cells_init(&macsec->gro_cells, dev); + if (err) { + free_percpu(dev->tstats); + return err; + } + dev->features = real_dev->features & MACSEC_FEATURES; dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; @@ -2699,6 +2736,9 @@ static int macsec_dev_init(struct net_device *dev) static void macsec_dev_uninit(struct net_device *dev) { + struct macsec_dev *macsec = macsec_priv(dev); + + gro_cells_destroy(&macsec->gro_cells); free_percpu(dev->tstats); } @@ -2708,8 +2748,9 @@ static netdev_features_t macsec_fix_features(struct net_device *dev, struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; - features &= real_dev->features & MACSEC_FEATURES; - features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; + features &= (real_dev->features & MACSEC_FEATURES) | + NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; + features |= NETIF_F_LLTX; return features; } @@ -2872,6 +2913,13 @@ static int macsec_get_iflink(const struct net_device *dev) return macsec_priv(dev)->real_dev->ifindex; } + +static int macsec_get_nest_level(struct net_device *dev) +{ + return macsec_priv(dev)->nest_level; +} + + static const struct net_device_ops macsec_netdev_ops = { .ndo_init = macsec_dev_init, .ndo_uninit = macsec_dev_uninit, @@ -2885,6 +2933,7 @@ static const struct net_device_ops macsec_netdev_ops = { .ndo_start_xmit = macsec_start_xmit, .ndo_get_stats64 = macsec_get_stats64, .ndo_get_iflink = macsec_get_iflink, + .ndo_get_lock_subclass = macsec_get_nest_level, }; static const struct device_type macsec_type = { @@ -3009,22 +3058,31 @@ static void macsec_del_dev(struct macsec_dev *macsec) } } +static void macsec_common_dellink(struct net_device *dev, struct list_head *head) +{ + struct macsec_dev *macsec = macsec_priv(dev); + struct net_device *real_dev = macsec->real_dev; + + unregister_netdevice_queue(dev, head); + list_del_rcu(&macsec->secys); + macsec_del_dev(macsec); + netdev_upper_dev_unlink(real_dev, dev); + + macsec_generation++; +} + static void macsec_dellink(struct net_device *dev, struct list_head *head) { struct 
macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); - macsec_generation++; + macsec_common_dellink(dev, head); - unregister_netdevice_queue(dev, head); - list_del_rcu(&macsec->secys); if (list_empty(&rxd->secys)) { netdev_rx_handler_unregister(real_dev); kfree(rxd); } - - macsec_del_dev(macsec); } static int register_macsec_dev(struct net_device *real_dev, @@ -3141,6 +3199,18 @@ static int macsec_newlink(struct net *net, struct net_device *dev, if (err < 0) return err; + dev_hold(real_dev); + + macsec->nest_level = dev_get_nest_level(real_dev) + 1; + netdev_lockdep_set_classes(dev); + lockdep_set_class_and_subclass(&dev->addr_list_lock, + &macsec_netdev_addr_lock_key, + macsec_get_nest_level(dev)); + + err = netdev_upper_dev_link(real_dev, dev); + if (err < 0) + goto unregister; + /* need to be already registered so that ->init has run and * the MAC addr is set */ @@ -3153,12 +3223,12 @@ static int macsec_newlink(struct net *net, struct net_device *dev, if (rx_handler && sci_exists(real_dev, sci)) { err = -EBUSY; - goto unregister; + goto unlink; } err = macsec_add_dev(dev, sci, icv_len); if (err) - goto unregister; + goto unlink; if (data) macsec_changelink_common(dev, data); @@ -3169,12 +3239,12 @@ static int macsec_newlink(struct net *net, struct net_device *dev, macsec_generation++; - dev_hold(real_dev); - return 0; del_dev: macsec_del_dev(macsec); +unlink: + netdev_upper_dev_unlink(real_dev, dev); unregister: unregister_netdevice(dev); return err; @@ -3193,14 +3263,26 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[]) if (data[IFLA_MACSEC_CIPHER_SUITE]) csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]); - if (data[IFLA_MACSEC_ICV_LEN]) + if (data[IFLA_MACSEC_ICV_LEN]) { icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); + if (icv_len != DEFAULT_ICV_LEN) { + char dummy_key[DEFAULT_SAK_LEN] = { 0 }; + struct crypto_aead *dummy_tfm; + + dummy_tfm = macsec_alloc_tfm(dummy_key, + DEFAULT_SAK_LEN, + icv_len); + if (IS_ERR(dummy_tfm)) + return PTR_ERR(dummy_tfm); + crypto_free_aead(dummy_tfm); + } + } switch (csid) { case MACSEC_DEFAULT_CIPHER_ID: case MACSEC_DEFAULT_CIPHER_ALT: if (icv_len < MACSEC_MIN_ICV_LEN || - icv_len > MACSEC_MAX_ICV_LEN) + icv_len > MACSEC_STD_ICV_LEN) return -EINVAL; break; default: @@ -3332,8 +3414,12 @@ static int macsec_notify(struct notifier_block *this, unsigned long event, rxd = macsec_data_rtnl(real_dev); list_for_each_entry_safe(m, n, &rxd->secys, secys) { - macsec_dellink(m->secy.netdev, &head); + macsec_common_dellink(m->secy.netdev, &head); } + + netdev_rx_handler_unregister(real_dev); + kfree(rxd); + unregister_netdevice_many(&head); break; } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index cb01023ea..3234fcdea 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -49,6 +49,7 @@ struct macvlan_port { bool passthru; int count; struct hlist_head vlan_source_hash[MACVLAN_HASH_SIZE]; + DECLARE_BITMAP(mc_filter, MACVLAN_MC_FILTER_SZ); }; struct macvlan_source_entry { @@ -305,11 +306,14 @@ static void macvlan_process_broadcast(struct work_struct *w) rcu_read_unlock(); + if (src) + dev_put(src->dev); kfree_skb(skb); } } static void macvlan_broadcast_enqueue(struct macvlan_port *port, + const struct macvlan_dev *src, struct sk_buff *skb) { struct sk_buff *nskb; @@ -319,8 +323,12 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port, if (!nskb) goto err; + MACVLAN_SKB_CB(nskb)->src = src; + 
spin_lock(&port->bc_queue.lock); if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) { + if (src) + dev_hold(src->dev); __skb_queue_tail(&port->bc_queue, nskb); err = 0; } @@ -412,6 +420,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) port = macvlan_port_get_rcu(skb->dev); if (is_multicast_ether_addr(eth->h_dest)) { + unsigned int hash; + skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN); if (!skb) return RX_HANDLER_CONSUMED; @@ -429,8 +439,9 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) goto out; } - MACVLAN_SKB_CB(skb)->src = src; - macvlan_broadcast_enqueue(port, skb); + hash = mc_hash(NULL, eth->h_dest); + if (test_bit(hash, port->mc_filter)) + macvlan_broadcast_enqueue(port, src, skb); return RX_HANDLER_PASS; } @@ -716,12 +727,12 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change) } } -static void macvlan_set_mac_lists(struct net_device *dev) +static void macvlan_compute_filter(unsigned long *mc_filter, + struct net_device *dev, + struct macvlan_dev *vlan) { - struct macvlan_dev *vlan = netdev_priv(dev); - if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { - bitmap_fill(vlan->mc_filter, MACVLAN_MC_FILTER_SZ); + bitmap_fill(mc_filter, MACVLAN_MC_FILTER_SZ); } else { struct netdev_hw_addr *ha; DECLARE_BITMAP(filter, MACVLAN_MC_FILTER_SZ); @@ -733,10 +744,33 @@ static void macvlan_set_mac_lists(struct net_device *dev) __set_bit(mc_hash(vlan, dev->broadcast), filter); - bitmap_copy(vlan->mc_filter, filter, MACVLAN_MC_FILTER_SZ); + bitmap_copy(mc_filter, filter, MACVLAN_MC_FILTER_SZ); } +} + +static void macvlan_set_mac_lists(struct net_device *dev) +{ + struct macvlan_dev *vlan = netdev_priv(dev); + + macvlan_compute_filter(vlan->mc_filter, dev, vlan); + dev_uc_sync(vlan->lowerdev, dev); dev_mc_sync(vlan->lowerdev, dev); + + /* This is slightly inaccurate as we're including the subscription + * list of vlan->lowerdev too. + * + * Bug alert: This only works if everyone has the same broadcast + * address as lowerdev. As soon as someone changes theirs this + * will break. + * + * However, this is already broken as when you change your broadcast + * address we don't get called. + * + * The solution is to maintain a list of broadcast addresses like + * we do for uc/mc, if you care. + */ + macvlan_compute_filter(vlan->port->mc_filter, vlan->lowerdev, NULL); } static int macvlan_change_mtu(struct net_device *dev, int new_mtu) @@ -754,7 +788,6 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu) * "super class" of normal network devices; split their locks off into a * separate class since they always nest. 
*/ -static struct lock_class_key macvlan_netdev_xmit_lock_key; static struct lock_class_key macvlan_netdev_addr_lock_key; #define ALWAYS_ON_FEATURES \ @@ -775,20 +808,12 @@ static int macvlan_get_nest_level(struct net_device *dev) return ((struct macvlan_dev *)netdev_priv(dev))->nest_level; } -static void macvlan_set_lockdep_class_one(struct net_device *dev, - struct netdev_queue *txq, - void *_unused) -{ - lockdep_set_class(&txq->_xmit_lock, - &macvlan_netdev_xmit_lock_key); -} - static void macvlan_set_lockdep_class(struct net_device *dev) { + netdev_lockdep_set_classes(dev); lockdep_set_class_and_subclass(&dev->addr_list_lock, &macvlan_netdev_addr_lock_key, macvlan_get_nest_level(dev)); - netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL); } static int macvlan_init(struct net_device *dev) @@ -1290,7 +1315,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, vlan->dev = dev; vlan->port = port; vlan->set_features = MACVLAN_FEATURES; - vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1; + vlan->nest_level = dev_get_nest_level(lowerdev) + 1; vlan->mode = MACVLAN_MODE_VEPA; if (data && data[IFLA_MACVLAN_MODE]) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index bd6720962..070e3290a 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -21,6 +21,7 @@ #include <net/rtnetlink.h> #include <net/sock.h> #include <linux/virtio_net.h> +#include <linux/skb_array.h> /* * A macvtap queue is the central object of this driver, it connects @@ -43,6 +44,7 @@ struct macvtap_queue { u16 queue_index; bool enabled; struct list_head next; + struct skb_array skb_array; }; #define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE) @@ -299,6 +301,9 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev, if (!numvtaps) goto out; + if (numvtaps == 1) + goto single; + /* Check if we can use flow to select a queue */ rxq = skb_get_hash(skb); if (rxq) { @@ -316,6 +321,7 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev, goto out; } +single: tap = rcu_dereference(vlan->taps[0]); out: return tap; @@ -362,7 +368,7 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb) if (!q) return RX_HANDLER_PASS; - if (skb_queue_len(&q->sk.sk_receive_queue) >= dev->tx_queue_len) + if (__skb_array_full(&q->skb_array)) goto drop; skb_push(skb, ETH_HLEN); @@ -380,7 +386,8 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb) goto drop; if (!segs) { - skb_queue_tail(&q->sk.sk_receive_queue, skb); + if (skb_array_produce(&q->skb_array, skb)) + goto drop; goto wake_up; } @@ -389,7 +396,11 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb) struct sk_buff *nskb = segs->next; segs->next = NULL; - skb_queue_tail(&q->sk.sk_receive_queue, segs); + if (skb_array_produce(&q->skb_array, segs)) { + kfree_skb(segs); + kfree_skb_list(nskb); + break; + } segs = nskb; } } else { @@ -402,7 +413,8 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb) !(features & NETIF_F_CSUM_MASK) && skb_checksum_help(skb)) goto drop; - skb_queue_tail(&q->sk.sk_receive_queue, skb); + if (skb_array_produce(&q->skb_array, skb)) + goto drop; } wake_up: @@ -519,7 +531,9 @@ static void macvtap_sock_write_space(struct sock *sk) static void macvtap_sock_destruct(struct sock *sk) { - skb_queue_purge(&sk->sk_receive_queue); + struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk); + + skb_array_cleanup(&q->skb_array); } static int macvtap_open(struct inode *inode, 
struct file *file) @@ -532,13 +546,13 @@ static int macvtap_open(struct inode *inode, struct file *file) rtnl_lock(); dev = dev_get_by_macvtap_minor(iminor(inode)); if (!dev) - goto out; + goto err; err = -ENOMEM; q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &macvtap_proto, 0); if (!q) - goto out; + goto err; RCU_INIT_POINTER(q->sock.wq, &q->wq); init_waitqueue_head(&q->wq.wait); @@ -562,11 +576,24 @@ static int macvtap_open(struct inode *inode, struct file *file) if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG)) sock_set_flag(&q->sk, SOCK_ZEROCOPY); + err = -ENOMEM; + if (skb_array_init(&q->skb_array, dev->tx_queue_len, GFP_KERNEL)) + goto err_array; + err = macvtap_set_queue(dev, file, q); if (err) - sock_put(&q->sk); + goto err_queue; -out: + dev_put(dev); + + rtnl_unlock(); + return err; + +err_queue: + skb_array_cleanup(&q->skb_array); +err_array: + sock_put(&q->sk); +err: if (dev) dev_put(dev); @@ -592,7 +619,7 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait) mask = 0; poll_wait(file, &q->wq.wait, wait); - if (!skb_queue_empty(&q->sk.sk_receive_queue)) + if (!skb_array_empty(&q->skb_array)) mask |= POLLIN | POLLRDNORM; if (sock_writeable(&q->sk) || @@ -627,93 +654,6 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad, return skb; } -/* - * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should - * be shared with the tun/tap driver. - */ -static int macvtap_skb_from_vnet_hdr(struct macvtap_queue *q, - struct sk_buff *skb, - struct virtio_net_hdr *vnet_hdr) -{ - unsigned short gso_type = 0; - if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { - switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { - case VIRTIO_NET_HDR_GSO_TCPV4: - gso_type = SKB_GSO_TCPV4; - break; - case VIRTIO_NET_HDR_GSO_TCPV6: - gso_type = SKB_GSO_TCPV6; - break; - case VIRTIO_NET_HDR_GSO_UDP: - gso_type = SKB_GSO_UDP; - break; - default: - return -EINVAL; - } - - if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) - gso_type |= SKB_GSO_TCP_ECN; - - if (vnet_hdr->gso_size == 0) - return -EINVAL; - } - - if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { - if (!skb_partial_csum_set(skb, macvtap16_to_cpu(q, vnet_hdr->csum_start), - macvtap16_to_cpu(q, vnet_hdr->csum_offset))) - return -EINVAL; - } - - if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { - skb_shinfo(skb)->gso_size = macvtap16_to_cpu(q, vnet_hdr->gso_size); - skb_shinfo(skb)->gso_type = gso_type; - - /* Header must be checked, and gso_segs computed. */ - skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; - skb_shinfo(skb)->gso_segs = 0; - } - return 0; -} - -static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q, - const struct sk_buff *skb, - struct virtio_net_hdr *vnet_hdr) -{ - memset(vnet_hdr, 0, sizeof(*vnet_hdr)); - - if (skb_is_gso(skb)) { - struct skb_shared_info *sinfo = skb_shinfo(skb); - - /* This is a hint as to how much should be linear. 
*/ - vnet_hdr->hdr_len = cpu_to_macvtap16(q, skb_headlen(skb)); - vnet_hdr->gso_size = cpu_to_macvtap16(q, sinfo->gso_size); - if (sinfo->gso_type & SKB_GSO_TCPV4) - vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; - else if (sinfo->gso_type & SKB_GSO_TCPV6) - vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; - else if (sinfo->gso_type & SKB_GSO_UDP) - vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP; - else - BUG(); - if (sinfo->gso_type & SKB_GSO_TCP_ECN) - vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; - } else - vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE; - - if (skb->ip_summed == CHECKSUM_PARTIAL) { - vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; - if (skb_vlan_tag_present(skb)) - vnet_hdr->csum_start = cpu_to_macvtap16(q, - skb_checksum_start_offset(skb) + VLAN_HLEN); - else - vnet_hdr->csum_start = cpu_to_macvtap16(q, - skb_checksum_start_offset(skb)); - vnet_hdr->csum_offset = cpu_to_macvtap16(q, skb->csum_offset); - } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { - vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; - } /* else everything is zero */ -} - /* Neighbour code has some assumptions on HH_DATA_MOD alignment */ #define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN) @@ -812,7 +752,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, skb->protocol = eth_hdr(skb)->h_proto; if (vnet_hdr_len) { - err = macvtap_skb_from_vnet_hdr(q, skb, &vnet_hdr); + err = virtio_net_hdr_to_skb(skb, &vnet_hdr, + macvtap_is_little_endian(q)); if (err) goto err_kfree; } @@ -880,7 +821,10 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, if (iov_iter_count(iter) < vnet_hdr_len) return -EINVAL; - macvtap_skb_to_vnet_hdr(q, skb, &vnet_hdr); + ret = virtio_net_hdr_from_skb(skb, &vnet_hdr, + macvtap_is_little_endian(q)); + if (ret) + BUG(); if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != sizeof(vnet_hdr)) @@ -935,7 +879,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, TASK_INTERRUPTIBLE); /* Read frames from the queue */ - skb = skb_dequeue(&q->sk.sk_receive_queue); + skb = skb_array_consume(&q->skb_array); if (skb) break; if (noblock) { @@ -1259,10 +1203,18 @@ static int macvtap_recvmsg(struct socket *sock, struct msghdr *m, return ret; } +static int macvtap_peek_len(struct socket *sock) +{ + struct macvtap_queue *q = container_of(sock, struct macvtap_queue, + sock); + return skb_array_peek_len(&q->skb_array); +} + /* Ops structure to mimic raw sockets with tun */ static const struct proto_ops macvtap_socket_ops = { .sendmsg = macvtap_sendmsg, .recvmsg = macvtap_recvmsg, + .peek_len = macvtap_peek_len, }; /* Get an underlying socket object from tun file. 
Returns error unless file is @@ -1281,6 +1233,28 @@ struct socket *macvtap_get_socket(struct file *file) } EXPORT_SYMBOL_GPL(macvtap_get_socket); +static int macvtap_queue_resize(struct macvlan_dev *vlan) +{ + struct net_device *dev = vlan->dev; + struct macvtap_queue *q; + struct skb_array **arrays; + int n = vlan->numqueues; + int ret, i = 0; + + arrays = kmalloc(sizeof(*arrays) * n, GFP_KERNEL); + if (!arrays) + return -ENOMEM; + + list_for_each_entry(q, &vlan->queue_list, next) + arrays[i++] = &q->skb_array; + + ret = skb_array_resize_multiple(arrays, n, + dev->tx_queue_len, GFP_KERNEL); + + kfree(arrays); + return ret; +} + static int macvtap_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { @@ -1328,6 +1302,10 @@ static int macvtap_device_event(struct notifier_block *unused, device_destroy(&macvtap_class, devt); macvtap_free_minor(vlan); break; + case NETDEV_CHANGE_TX_QUEUE_LEN: + if (macvtap_queue_resize(vlan)) + return NOTIFY_BAD; + break; } return NOTIFY_DONE; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 6dad9a9c3..b4863e4e5 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -12,6 +12,9 @@ menuconfig PHYLIB if PHYLIB +config SWPHY + bool + comment "MII PHY device drivers" config AQUANTIA_PHY @@ -159,6 +162,7 @@ config MICROCHIP_PHY config FIXED_PHY tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" depends on PHYLIB + select SWPHY ---help--- Adds the platform "fixed" MDIO Bus to cover the boards that use PHYs that are not connected to the real MDIO bus. @@ -254,6 +258,17 @@ config MDIO_BUS_MUX_MMIOREG Currently, only 8-bit registers are supported. +config MDIO_BUS_MUX_BCM_IPROC + tristate "Support for iProc based MDIO bus multiplexers" + depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST) + select MDIO_BUS_MUX + default ARCH_BCM_IPROC + help + This module provides a driver for MDIO bus multiplexers found in + iProc based Broadcom SoCs. This multiplexer connects one of several + child MDIO buses to a parent bus. The child buses can be internal + as well as external, and the selection logic lives inside the + multiplexer itself. + config MDIO_BCM_UNIMAC tristate "Broadcom UniMAC MDIO bus controller" depends on HAS_IOMEM @@ -271,6 +286,28 @@ config MDIO_BCM_IPROC This module provides a driver for the MDIO busses found in the Broadcom iProc SoC's. +config INTEL_XWAY_PHY + tristate "Driver for Intel XWAY PHYs" + ---help--- + Supports the Intel XWAY (formerly Lantiq) 11G and 22E PHYs. + These PHYs are marked as standalone chips under the names + PEF 7061, PEF 7071 and PEF 7072 or integrated into the Intel + SoCs xRX200, xRX300, xRX330, xRX350 and xRX550. + +config MDIO_HISI_FEMAC + tristate "Hisilicon FEMAC MDIO bus controller" + depends on HAS_IOMEM && OF_MDIO + help + This module provides a driver for the MDIO busses found in + Hisilicon SoCs that have a Fast Ethernet MAC. + +config MDIO_XGENE + tristate "APM X-Gene SoC MDIO bus controller" + depends on ARCH_XGENE || COMPILE_TEST + help + This module provides a driver for the MDIO busses found in the + APM X-Gene SoCs.
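Stepping back to the macvtap hunk above: the key new pattern there is resizing every per-queue skb_array in one shot when the device's tx_queue_len changes. A condensed sketch of that wiring follows; struct demo_taps and demo_device_event are illustrative names only, not code from this patch, and a real driver would keep its own per-device ring bookkeeping.

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skb_array.h>

/* Hypothetical per-device state: one skb_array per queue, each sized
 * from dev->tx_queue_len, mirroring macvtap's queue_list above.
 */
struct demo_taps {
	struct notifier_block nb;
	struct skb_array **rings;	/* one ring per queue */
	int n;
	struct net_device *dev;
};

static int demo_device_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct demo_taps *t = container_of(nb, struct demo_taps, nb);
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event != NETDEV_CHANGE_TX_QUEUE_LEN || dev != t->dev)
		return NOTIFY_DONE;

	/* Resize all rings in a single call, as macvtap_queue_resize()
	 * does, so a partial failure cannot leave the queues with
	 * mismatched capacities.
	 */
	if (skb_array_resize_multiple(t->rings, t->n,
				      dev->tx_queue_len, GFP_KERNEL))
		return NOTIFY_BAD;

	return NOTIFY_DONE;
}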
+ endif # PHYLIB config MICREL_KS8995MA diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index fcdbb9299..534dfa74d 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -1,6 +1,7 @@ # Makefile for Linux PHY drivers -libphy-objs := phy.o phy_device.o mdio_bus.o mdio_device.o +libphy-y := phy.o phy_device.o mdio_bus.o mdio_device.o +libphy-$(CONFIG_SWPHY) += swphy.o obj-$(CONFIG_PHYLIB) += libphy.o obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o @@ -39,8 +40,12 @@ obj-$(CONFIG_AMD_PHY) += amd.o obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o +obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o obj-$(CONFIG_MICROCHIP_PHY) += microchip.o obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o +obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o +obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o +obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index 9ec7f7353..c649c101b 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -23,9 +23,10 @@ #include <linux/slab.h> #include <linux/of.h> #include <linux/gpio.h> +#include <linux/seqlock.h> #include <linux/idr.h> -#define MII_REGS_NUM 29 +#include "swphy.h" struct fixed_mdio_bus { struct mii_bus *mii_bus; @@ -34,8 +35,8 @@ struct fixed_mdio_bus { struct fixed_phy { int addr; - u16 regs[MII_REGS_NUM]; struct phy_device *phydev; + seqcount_t seqcount; struct fixed_phy_status status; int (*link_update)(struct net_device *, struct fixed_phy_status *); struct list_head node; @@ -47,103 +48,10 @@ static struct fixed_mdio_bus platform_fmb = { .phys = LIST_HEAD_INIT(platform_fmb.phys), }; -static int fixed_phy_update_regs(struct fixed_phy *fp) +static void fixed_phy_update(struct fixed_phy *fp) { - u16 bmsr = BMSR_ANEGCAPABLE; - u16 bmcr = 0; - u16 lpagb = 0; - u16 lpa = 0; - if (gpio_is_valid(fp->link_gpio)) fp->status.link = !!gpio_get_value_cansleep(fp->link_gpio); - - if (fp->status.duplex) { - switch (fp->status.speed) { - case 1000: - bmsr |= BMSR_ESTATEN; - break; - case 100: - bmsr |= BMSR_100FULL; - break; - case 10: - bmsr |= BMSR_10FULL; - break; - default: - break; - } - } else { - switch (fp->status.speed) { - case 1000: - bmsr |= BMSR_ESTATEN; - break; - case 100: - bmsr |= BMSR_100HALF; - break; - case 10: - bmsr |= BMSR_10HALF; - break; - default: - break; - } - } - - if (fp->status.link) { - bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; - - if (fp->status.duplex) { - bmcr |= BMCR_FULLDPLX; - - switch (fp->status.speed) { - case 1000: - bmcr |= BMCR_SPEED1000; - lpagb |= LPA_1000FULL; - break; - case 100: - bmcr |= BMCR_SPEED100; - lpa |= LPA_100FULL; - break; - case 10: - lpa |= LPA_10FULL; - break; - default: - pr_warn("fixed phy: unknown speed\n"); - return -EINVAL; - } - } else { - switch (fp->status.speed) { - case 1000: - bmcr |= BMCR_SPEED1000; - lpagb |= LPA_1000HALF; - break; - case 100: - bmcr |= BMCR_SPEED100; - lpa |= LPA_100HALF; - break; - case 10: - lpa |= LPA_10HALF; - break; - default: - pr_warn("fixed phy: unknown speed\n"); - return -EINVAL; - } - } - - if (fp->status.pause) - lpa |= LPA_PAUSE_CAP; - - if (fp->status.asym_pause) - lpa |= LPA_PAUSE_ASYM; - } - - fp->regs[MII_PHYSID1] = 0; - fp->regs[MII_PHYSID2] = 0; - - fp->regs[MII_BMSR] = bmsr; - fp->regs[MII_BMCR] = bmcr; - 
fp->regs[MII_LPA] = lpa; - fp->regs[MII_STAT1000] = lpagb; - - return 0; } static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) @@ -151,29 +59,23 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) struct fixed_mdio_bus *fmb = bus->priv; struct fixed_phy *fp; - if (reg_num >= MII_REGS_NUM) - return -1; - - /* We do not support emulating Clause 45 over Clause 22 register reads - * return an error instead of bogus data. - */ - switch (reg_num) { - case MII_MMD_CTRL: - case MII_MMD_DATA: - return -1; - default: - break; - } - list_for_each_entry(fp, &fmb->phys, node) { if (fp->addr == phy_addr) { - /* Issue callback if user registered it. */ - if (fp->link_update) { - fp->link_update(fp->phydev->attached_dev, - &fp->status); - fixed_phy_update_regs(fp); - } - return fp->regs[reg_num]; + struct fixed_phy_status state; + int s; + + do { + s = read_seqcount_begin(&fp->seqcount); + /* Issue callback if user registered it. */ + if (fp->link_update) { + fp->link_update(fp->phydev->attached_dev, + &fp->status); + fixed_phy_update(fp); + } + state = fp->status; + } while (read_seqcount_retry(&fp->seqcount, s)); + + return swphy_read_reg(reg_num, &state); } } @@ -225,6 +127,7 @@ int fixed_phy_update_state(struct phy_device *phydev, list_for_each_entry(fp, &fmb->phys, node) { if (fp->addr == phydev->mdio.addr) { + write_seqcount_begin(&fp->seqcount); #define _UPD(x) if (changed->x) \ fp->status.x = status->x _UPD(link); @@ -233,7 +136,8 @@ int fixed_phy_update_state(struct phy_device *phydev, _UPD(pause); _UPD(asym_pause); #undef _UPD - fixed_phy_update_regs(fp); + fixed_phy_update(fp); + write_seqcount_end(&fp->seqcount); return 0; } } @@ -250,11 +154,15 @@ int fixed_phy_add(unsigned int irq, int phy_addr, struct fixed_mdio_bus *fmb = &platform_fmb; struct fixed_phy *fp; + ret = swphy_validate_state(status); + if (ret < 0) + return ret; + fp = kzalloc(sizeof(*fp), GFP_KERNEL); if (!fp) return -ENOMEM; - memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM); + seqcount_init(&fp->seqcount); if (irq != PHY_POLL) fmb->mii_bus->irq[phy_addr] = irq; @@ -270,17 +178,12 @@ int fixed_phy_add(unsigned int irq, int phy_addr, goto err_regs; } - ret = fixed_phy_update_regs(fp); - if (ret) - goto err_gpio; + fixed_phy_update(fp); list_add_tail(&fp->node, &fmb->phys); return 0; -err_gpio: - if (gpio_is_valid(fp->link_gpio)) - gpio_free(fp->link_gpio); err_regs: kfree(fp); return ret; diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c new file mode 100644 index 000000000..c300ab558 --- /dev/null +++ b/drivers/net/phy/intel-xway.c @@ -0,0 +1,376 @@ +/* + * Copyright (C) 2012 Daniel Schwierzeck <daniel.schwierzeck@googlemail.com> + * Copyright (C) 2016 Hauke Mehrtens <hauke@hauke-m.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/mdio.h> +#include <linux/module.h> +#include <linux/phy.h> +#include <linux/of.h> + +#define XWAY_MDIO_IMASK 0x19 /* interrupt mask */ +#define XWAY_MDIO_ISTAT 0x1A /* interrupt status */ + +#define XWAY_MDIO_INIT_WOL BIT(15) /* Wake-On-LAN */ +#define XWAY_MDIO_INIT_MSRE BIT(14) +#define XWAY_MDIO_INIT_NPRX BIT(13) +#define XWAY_MDIO_INIT_NPTX BIT(12) +#define XWAY_MDIO_INIT_ANE BIT(11) /* Auto-Neg error */ +#define XWAY_MDIO_INIT_ANC BIT(10) /* Auto-Neg complete */ +#define XWAY_MDIO_INIT_ADSC BIT(5) /* Link auto-downspeed detect */ +#define XWAY_MDIO_INIT_MPIPC BIT(4) +#define XWAY_MDIO_INIT_MDIXC BIT(3) +#define XWAY_MDIO_INIT_DXMC BIT(2) /* Duplex mode change */ +#define XWAY_MDIO_INIT_LSPC BIT(1) /* Link speed change */ +#define XWAY_MDIO_INIT_LSTC BIT(0) /* Link state change */ +#define XWAY_MDIO_INIT_MASK (XWAY_MDIO_INIT_LSTC | \ + XWAY_MDIO_INIT_ADSC) + +#define ADVERTISED_MPD BIT(10) /* Multi-port device */ + +/* LED Configuration */ +#define XWAY_MMD_LEDCH 0x01E0 +/* Inverse of SCAN Function */ +#define XWAY_MMD_LEDCH_NACS_NONE 0x0000 +#define XWAY_MMD_LEDCH_NACS_LINK 0x0001 +#define XWAY_MMD_LEDCH_NACS_PDOWN 0x0002 +#define XWAY_MMD_LEDCH_NACS_EEE 0x0003 +#define XWAY_MMD_LEDCH_NACS_ANEG 0x0004 +#define XWAY_MMD_LEDCH_NACS_ABIST 0x0005 +#define XWAY_MMD_LEDCH_NACS_CDIAG 0x0006 +#define XWAY_MMD_LEDCH_NACS_TEST 0x0007 +/* Slow Blink Frequency */ +#define XWAY_MMD_LEDCH_SBF_F02HZ 0x0000 +#define XWAY_MMD_LEDCH_SBF_F04HZ 0x0010 +#define XWAY_MMD_LEDCH_SBF_F08HZ 0x0020 +#define XWAY_MMD_LEDCH_SBF_F16HZ 0x0030 +/* Fast Blink Frequency */ +#define XWAY_MMD_LEDCH_FBF_F02HZ 0x0000 +#define XWAY_MMD_LEDCH_FBF_F04HZ 0x0040 +#define XWAY_MMD_LEDCH_FBF_F08HZ 0x0080 +#define XWAY_MMD_LEDCH_FBF_F16HZ 0x00C0 +/* LED Configuration */ +#define XWAY_MMD_LEDCL 0x01E1 +/* Complex Blinking Configuration */ +#define XWAY_MMD_LEDCH_CBLINK_NONE 0x0000 +#define XWAY_MMD_LEDCH_CBLINK_LINK 0x0001 +#define XWAY_MMD_LEDCH_CBLINK_PDOWN 0x0002 +#define XWAY_MMD_LEDCH_CBLINK_EEE 0x0003 +#define XWAY_MMD_LEDCH_CBLINK_ANEG 0x0004 +#define XWAY_MMD_LEDCH_CBLINK_ABIST 0x0005 +#define XWAY_MMD_LEDCH_CBLINK_CDIAG 0x0006 +#define XWAY_MMD_LEDCH_CBLINK_TEST 0x0007 +/* Complex SCAN Configuration */ +#define XWAY_MMD_LEDCH_SCAN_NONE 0x0000 +#define XWAY_MMD_LEDCH_SCAN_LINK 0x0010 +#define XWAY_MMD_LEDCH_SCAN_PDOWN 0x0020 +#define XWAY_MMD_LEDCH_SCAN_EEE 0x0030 +#define XWAY_MMD_LEDCH_SCAN_ANEG 0x0040 +#define XWAY_MMD_LEDCH_SCAN_ABIST 0x0050 +#define XWAY_MMD_LEDCH_SCAN_CDIAG 0x0060 +#define XWAY_MMD_LEDCH_SCAN_TEST 0x0070 +/* Configuration for LED Pin x */ +#define XWAY_MMD_LED0H 0x01E2 +/* Fast Blinking Configuration */ +#define XWAY_MMD_LEDxH_BLINKF_MASK 0x000F +#define XWAY_MMD_LEDxH_BLINKF_NONE 0x0000 +#define XWAY_MMD_LEDxH_BLINKF_LINK10 0x0001 +#define XWAY_MMD_LEDxH_BLINKF_LINK100 0x0002 +#define XWAY_MMD_LEDxH_BLINKF_LINK10X 0x0003 +#define XWAY_MMD_LEDxH_BLINKF_LINK1000 0x0004 +#define XWAY_MMD_LEDxH_BLINKF_LINK10_0 0x0005 +#define XWAY_MMD_LEDxH_BLINKF_LINK100X 0x0006 +#define XWAY_MMD_LEDxH_BLINKF_LINK10XX 0x0007 +#define XWAY_MMD_LEDxH_BLINKF_PDOWN 0x0008 +#define XWAY_MMD_LEDxH_BLINKF_EEE 0x0009 +#define XWAY_MMD_LEDxH_BLINKF_ANEG 0x000A +#define XWAY_MMD_LEDxH_BLINKF_ABIST 0x000B +#define XWAY_MMD_LEDxH_BLINKF_CDIAG 0x000C +/* Constant On Configuration */ +#define XWAY_MMD_LEDxH_CON_MASK 0x00F0 +#define XWAY_MMD_LEDxH_CON_NONE 0x0000 +#define XWAY_MMD_LEDxH_CON_LINK10 0x0010 +#define XWAY_MMD_LEDxH_CON_LINK100 0x0020 +#define XWAY_MMD_LEDxH_CON_LINK10X 0x0030 +#define 
XWAY_MMD_LEDxH_CON_LINK1000 0x0040 +#define XWAY_MMD_LEDxH_CON_LINK10_0 0x0050 +#define XWAY_MMD_LEDxH_CON_LINK100X 0x0060 +#define XWAY_MMD_LEDxH_CON_LINK10XX 0x0070 +#define XWAY_MMD_LEDxH_CON_PDOWN 0x0080 +#define XWAY_MMD_LEDxH_CON_EEE 0x0090 +#define XWAY_MMD_LEDxH_CON_ANEG 0x00A0 +#define XWAY_MMD_LEDxH_CON_ABIST 0x00B0 +#define XWAY_MMD_LEDxH_CON_CDIAG 0x00C0 +#define XWAY_MMD_LEDxH_CON_COPPER 0x00D0 +#define XWAY_MMD_LEDxH_CON_FIBER 0x00E0 +/* Configuration for LED Pin x */ +#define XWAY_MMD_LED0L 0x01E3 +/* Pulsing Configuration */ +#define XWAY_MMD_LEDxL_PULSE_MASK 0x000F +#define XWAY_MMD_LEDxL_PULSE_NONE 0x0000 +#define XWAY_MMD_LEDxL_PULSE_TXACT 0x0001 +#define XWAY_MMD_LEDxL_PULSE_RXACT 0x0002 +#define XWAY_MMD_LEDxL_PULSE_COL 0x0004 +/* Slow Blinking Configuration */ +#define XWAY_MMD_LEDxL_BLINKS_MASK 0x00F0 +#define XWAY_MMD_LEDxL_BLINKS_NONE 0x0000 +#define XWAY_MMD_LEDxL_BLINKS_LINK10 0x0010 +#define XWAY_MMD_LEDxL_BLINKS_LINK100 0x0020 +#define XWAY_MMD_LEDxL_BLINKS_LINK10X 0x0030 +#define XWAY_MMD_LEDxL_BLINKS_LINK1000 0x0040 +#define XWAY_MMD_LEDxL_BLINKS_LINK10_0 0x0050 +#define XWAY_MMD_LEDxL_BLINKS_LINK100X 0x0060 +#define XWAY_MMD_LEDxL_BLINKS_LINK10XX 0x0070 +#define XWAY_MMD_LEDxL_BLINKS_PDOWN 0x0080 +#define XWAY_MMD_LEDxL_BLINKS_EEE 0x0090 +#define XWAY_MMD_LEDxL_BLINKS_ANEG 0x00A0 +#define XWAY_MMD_LEDxL_BLINKS_ABIST 0x00B0 +#define XWAY_MMD_LEDxL_BLINKS_CDIAG 0x00C0 +#define XWAY_MMD_LED1H 0x01E4 +#define XWAY_MMD_LED1L 0x01E5 +#define XWAY_MMD_LED2H 0x01E6 +#define XWAY_MMD_LED2L 0x01E7 +#define XWAY_MMD_LED3H 0x01E8 +#define XWAY_MMD_LED3L 0x01E9 + +#define PHY_ID_PHY11G_1_3 0x030260D1 +#define PHY_ID_PHY22F_1_3 0x030260E1 +#define PHY_ID_PHY11G_1_4 0xD565A400 +#define PHY_ID_PHY22F_1_4 0xD565A410 +#define PHY_ID_PHY11G_1_5 0xD565A401 +#define PHY_ID_PHY22F_1_5 0xD565A411 +#define PHY_ID_PHY11G_VR9 0xD565A409 +#define PHY_ID_PHY22F_VR9 0xD565A419 + +static int xway_gphy_config_init(struct phy_device *phydev) +{ + int err; + u32 ledxh; + u32 ledxl; + + /* Mask all interrupts */ + err = phy_write(phydev, XWAY_MDIO_IMASK, 0); + if (err) + return err; + + /* Clear all pending interrupts */ + phy_read(phydev, XWAY_MDIO_ISTAT); + + phy_write_mmd_indirect(phydev, XWAY_MMD_LEDCH, MDIO_MMD_VEND2, + XWAY_MMD_LEDCH_NACS_NONE | + XWAY_MMD_LEDCH_SBF_F02HZ | + XWAY_MMD_LEDCH_FBF_F16HZ); + phy_write_mmd_indirect(phydev, XWAY_MMD_LEDCL, MDIO_MMD_VEND2, + XWAY_MMD_LEDCH_CBLINK_NONE | + XWAY_MMD_LEDCH_SCAN_NONE); + + /** + * In most cases only one LED is connected to this phy, so + * configure them all to constant on and pulse mode. LED3 is + * only available in some packages, leave it in its reset + * configuration. 
+ */ + ledxh = XWAY_MMD_LEDxH_BLINKF_NONE | XWAY_MMD_LEDxH_CON_LINK10XX; + ledxl = XWAY_MMD_LEDxL_PULSE_TXACT | XWAY_MMD_LEDxL_PULSE_RXACT | + XWAY_MMD_LEDxL_BLINKS_NONE; + phy_write_mmd_indirect(phydev, XWAY_MMD_LED0H, MDIO_MMD_VEND2, ledxh); + phy_write_mmd_indirect(phydev, XWAY_MMD_LED0L, MDIO_MMD_VEND2, ledxl); + phy_write_mmd_indirect(phydev, XWAY_MMD_LED1H, MDIO_MMD_VEND2, ledxh); + phy_write_mmd_indirect(phydev, XWAY_MMD_LED1L, MDIO_MMD_VEND2, ledxl); + phy_write_mmd_indirect(phydev, XWAY_MMD_LED2H, MDIO_MMD_VEND2, ledxh); + phy_write_mmd_indirect(phydev, XWAY_MMD_LED2L, MDIO_MMD_VEND2, ledxl); + + return 0; +} + +static int xway_gphy14_config_aneg(struct phy_device *phydev) +{ + int reg, err; + + /* Advertise as multi-port device, see IEEE802.3-2002 40.5.1.1 */ + /* This is a workaround for an errata in rev < 1.5 devices */ + reg = phy_read(phydev, MII_CTRL1000); + reg |= ADVERTISED_MPD; + err = phy_write(phydev, MII_CTRL1000, reg); + if (err) + return err; + + return genphy_config_aneg(phydev); +} + +static int xway_gphy_ack_interrupt(struct phy_device *phydev) +{ + int reg; + + reg = phy_read(phydev, XWAY_MDIO_ISTAT); + return (reg < 0) ? reg : 0; +} + +static int xway_gphy_did_interrupt(struct phy_device *phydev) +{ + int reg; + + reg = phy_read(phydev, XWAY_MDIO_ISTAT); + return reg & XWAY_MDIO_INIT_MASK; +} + +static int xway_gphy_config_intr(struct phy_device *phydev) +{ + u16 mask = 0; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + mask = XWAY_MDIO_INIT_MASK; + + return phy_write(phydev, XWAY_MDIO_IMASK, mask); +} + +static struct phy_driver xway_gphy[] = { + { + .phy_id = PHY_ID_PHY11G_1_3, + .phy_id_mask = 0xffffffff, + .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.3", + .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .flags = PHY_HAS_INTERRUPT, + .config_init = xway_gphy_config_init, + .config_aneg = xway_gphy14_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = xway_gphy_ack_interrupt, + .did_interrupt = xway_gphy_did_interrupt, + .config_intr = xway_gphy_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { + .phy_id = PHY_ID_PHY22F_1_3, + .phy_id_mask = 0xffffffff, + .name = "Intel XWAY PHY22F (PEF 7061) v1.3", + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .flags = PHY_HAS_INTERRUPT, + .config_init = xway_gphy_config_init, + .config_aneg = xway_gphy14_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = xway_gphy_ack_interrupt, + .did_interrupt = xway_gphy_did_interrupt, + .config_intr = xway_gphy_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { + .phy_id = PHY_ID_PHY11G_1_4, + .phy_id_mask = 0xffffffff, + .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.4", + .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .flags = PHY_HAS_INTERRUPT, + .config_init = xway_gphy_config_init, + .config_aneg = xway_gphy14_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = xway_gphy_ack_interrupt, + .did_interrupt = xway_gphy_did_interrupt, + .config_intr = xway_gphy_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { + .phy_id = PHY_ID_PHY22F_1_4, + .phy_id_mask = 0xffffffff, + .name = "Intel XWAY PHY22F (PEF 7061) v1.4", + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .flags = PHY_HAS_INTERRUPT, + .config_init = xway_gphy_config_init, + .config_aneg = xway_gphy14_config_aneg, + .read_status = genphy_read_status, + 
.ack_interrupt = xway_gphy_ack_interrupt, + .did_interrupt = xway_gphy_did_interrupt, + .config_intr = xway_gphy_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { + .phy_id = PHY_ID_PHY11G_1_5, + .phy_id_mask = 0xffffffff, + .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.5 / v1.6", + .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .flags = PHY_HAS_INTERRUPT, + .config_init = xway_gphy_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = xway_gphy_ack_interrupt, + .did_interrupt = xway_gphy_did_interrupt, + .config_intr = xway_gphy_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { + .phy_id = PHY_ID_PHY22F_1_5, + .phy_id_mask = 0xffffffff, + .name = "Intel XWAY PHY22F (PEF 7061) v1.5 / v1.6", + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .flags = PHY_HAS_INTERRUPT, + .config_init = xway_gphy_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = xway_gphy_ack_interrupt, + .did_interrupt = xway_gphy_did_interrupt, + .config_intr = xway_gphy_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { + .phy_id = PHY_ID_PHY11G_VR9, + .phy_id_mask = 0xffffffff, + .name = "Intel XWAY PHY11G (xRX integrated)", + .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .flags = PHY_HAS_INTERRUPT, + .config_init = xway_gphy_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = xway_gphy_ack_interrupt, + .did_interrupt = xway_gphy_did_interrupt, + .config_intr = xway_gphy_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { + .phy_id = PHY_ID_PHY22F_VR9, + .phy_id_mask = 0xffffffff, + .name = "Intel XWAY PHY22F (xRX integrated)", + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .flags = PHY_HAS_INTERRUPT, + .config_init = xway_gphy_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = xway_gphy_ack_interrupt, + .did_interrupt = xway_gphy_did_interrupt, + .config_intr = xway_gphy_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, +}; +module_phy_driver(xway_gphy); + +static struct mdio_device_id __maybe_unused xway_gphy_tbl[] = { + { PHY_ID_PHY11G_1_3, 0xffffffff }, + { PHY_ID_PHY22F_1_3, 0xffffffff }, + { PHY_ID_PHY11G_1_4, 0xffffffff }, + { PHY_ID_PHY22F_1_4, 0xffffffff }, + { PHY_ID_PHY11G_1_5, 0xffffffff }, + { PHY_ID_PHY22F_1_5, 0xffffffff }, + { PHY_ID_PHY11G_VR9, 0xffffffff }, + { PHY_ID_PHY22F_VR9, 0xffffffff }, + { } +}; +MODULE_DEVICE_TABLE(mdio, xway_gphy_tbl); + +MODULE_DESCRIPTION("Intel XWAY PHY driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index ec2c1eee6..c2dcf02df 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -138,6 +138,21 @@ #define MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII 0x1 /* SGMII to copper */ #define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */ +#define LPA_FIBER_1000HALF 0x40 +#define LPA_FIBER_1000FULL 0x20 + +#define LPA_PAUSE_FIBER 0x180 +#define LPA_PAUSE_ASYM_FIBER 0x100 + +#define ADVERTISE_FIBER_1000HALF 0x40 +#define ADVERTISE_FIBER_1000FULL 0x20 + +#define ADVERTISE_PAUSE_FIBER 0x180 +#define ADVERTISE_PAUSE_ASYM_FIBER 0x100 + +#define REGISTER_LINK_STATUS 0x400 +#define NB_FIBER_STATS 1 + MODULE_DESCRIPTION("Marvell PHY driver"); 
MODULE_AUTHOR("Andy Fleming"); MODULE_LICENSE("GPL"); @@ -150,8 +165,9 @@ struct marvell_hw_stat { }; static struct marvell_hw_stat marvell_hw_stats[] = { - { "phy_receive_errors", 0, 21, 16}, + { "phy_receive_errors_copper", 0, 21, 16}, { "phy_idle_errors", 0, 10, 8 }, + { "phy_receive_errors_fiber", 1, 21, 16}, }; struct marvell_priv { @@ -477,15 +493,122 @@ static int m88e1318_config_aneg(struct phy_device *phydev) return m88e1121_config_aneg(phydev); } +/** + * ethtool_adv_to_fiber_adv_t + * @ethadv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement + * settings to phy autonegotiation advertisements for the + * MII_ADV register for fiber link. + */ +static inline u32 ethtool_adv_to_fiber_adv_t(u32 ethadv) +{ + u32 result = 0; + + if (ethadv & ADVERTISED_1000baseT_Half) + result |= ADVERTISE_FIBER_1000HALF; + if (ethadv & ADVERTISED_1000baseT_Full) + result |= ADVERTISE_FIBER_1000FULL; + + if ((ethadv & ADVERTISE_PAUSE_ASYM) && (ethadv & ADVERTISE_PAUSE_CAP)) + result |= LPA_PAUSE_ASYM_FIBER; + else if (ethadv & ADVERTISE_PAUSE_CAP) + result |= (ADVERTISE_PAUSE_FIBER + & (~ADVERTISE_PAUSE_ASYM_FIBER)); + + return result; +} + +/** + * marvell_config_aneg_fiber - restart auto-negotiation or write BMCR + * @phydev: target phy_device struct + * + * Description: If auto-negotiation is enabled, we configure the + * advertising, and then restart auto-negotiation. If it is not + * enabled, then we write the BMCR. Adapted for fiber link in + * some Marvell's devices. + */ +static int marvell_config_aneg_fiber(struct phy_device *phydev) +{ + int changed = 0; + int err; + int adv, oldadv; + u32 advertise; + + if (phydev->autoneg != AUTONEG_ENABLE) + return genphy_setup_forced(phydev); + + /* Only allow advertising what this PHY supports */ + phydev->advertising &= phydev->supported; + advertise = phydev->advertising; + + /* Setup fiber advertisement */ + adv = phy_read(phydev, MII_ADVERTISE); + if (adv < 0) + return adv; + + oldadv = adv; + adv &= ~(ADVERTISE_FIBER_1000HALF | ADVERTISE_FIBER_1000FULL + | LPA_PAUSE_FIBER); + adv |= ethtool_adv_to_fiber_adv_t(advertise); + + if (adv != oldadv) { + err = phy_write(phydev, MII_ADVERTISE, adv); + if (err < 0) + return err; + + changed = 1; + } + + if (changed == 0) { + /* Advertisement hasn't changed, but maybe aneg was never on to + * begin with? Or maybe phy was isolated? + */ + int ctl = phy_read(phydev, MII_BMCR); + + if (ctl < 0) + return ctl; + + if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE)) + changed = 1; /* do restart aneg */ + } + + /* Only restart aneg if we are advertising something different + * than we were before. 
+ */ + if (changed > 0) + changed = genphy_restart_aneg(phydev); + + return changed; +} + static int m88e1510_config_aneg(struct phy_device *phydev) { int err; + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER); + if (err < 0) + goto error; + + /* Configure the copper link first */ err = m88e1318_config_aneg(phydev); if (err < 0) - return err; + goto error; - return 0; + /* Then the fiber link */ + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER); + if (err < 0) + goto error; + + err = marvell_config_aneg_fiber(phydev); + if (err < 0) + goto error; + + return phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER); + +error: + phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER); + return err; } static int marvell_config_init(struct phy_device *phydev) @@ -890,26 +1013,79 @@ static int m88e1145_config_init(struct phy_device *phydev) return 0; } -/* marvell_read_status +/** + * fiber_lpa_to_ethtool_lpa_t + * @lpa: value of the MII_LPA register for fiber link + * + * A small helper function that translates MII_LPA + * bits to ethtool LP advertisement settings. + */ +static u32 fiber_lpa_to_ethtool_lpa_t(u32 lpa) +{ + u32 result = 0; + + if (lpa & LPA_FIBER_1000HALF) + result |= ADVERTISED_1000baseT_Half; + if (lpa & LPA_FIBER_1000FULL) + result |= ADVERTISED_1000baseT_Full; + + return result; +} + +/** + * marvell_update_link - update link status in real time in @phydev + * @phydev: target phy_device struct + * + * Description: Update the value in phydev->link to reflect the + * current link value. + */ +static int marvell_update_link(struct phy_device *phydev, int fiber) +{ + int status; + + /* Use the generic register for copper link, or specific + * register for fiber case */ + if (fiber) { + status = phy_read(phydev, MII_M1011_PHY_STATUS); + if (status < 0) + return status; + + if ((status & REGISTER_LINK_STATUS) == 0) + phydev->link = 0; + else + phydev->link = 1; + } else { + return genphy_update_link(phydev); + } + + return 0; +} + +/* marvell_read_status_page * - * Generic status code does not detect Fiber correctly! * Description: * Check the link, then figure out the current state * by comparing what we advertise with what the link partner * advertises. Start by checking the gigabit possibilities, * then move on to 10/100. */ -static int marvell_read_status(struct phy_device *phydev) +static int marvell_read_status_page(struct phy_device *phydev, int page) { int adv; int err; int lpa; int lpagb; int status = 0; + int fiber; - /* Update the link, but return if there + /* Detect and update the link, but return if there * was an error */ - err = genphy_update_link(phydev); + if (page == MII_M1111_FIBER) + fiber = 1; + else + fiber = 0; + + err = marvell_update_link(phydev, fiber); if (err) return err; @@ -930,9 +1106,6 @@ static int marvell_read_status(struct phy_device *phydev) if (adv < 0) return adv; - phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) | - mii_lpa_to_ethtool_lpa_t(lpa); - lpa &= adv; if (status & MII_M1011_PHY_STATUS_FULLDUPLEX) @@ -957,9 +1130,30 @@ static int marvell_read_status(struct phy_device *phydev) break; } - if (phydev->duplex == DUPLEX_FULL) { - phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0; - phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0; + if (!fiber) { + phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) | + mii_lpa_to_ethtool_lpa_t(lpa); + + if (phydev->duplex == DUPLEX_FULL) { + phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0; + phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 
1 : 0; + } + } else { + /* The fiber link is only 1000M capable */ + phydev->lp_advertising = fiber_lpa_to_ethtool_lpa_t(lpa); + + if (phydev->duplex == DUPLEX_FULL) { + if (!(lpa & LPA_PAUSE_FIBER)) { + phydev->pause = 0; + phydev->asym_pause = 0; + } else if ((lpa & LPA_PAUSE_ASYM_FIBER)) { + phydev->pause = 1; + phydev->asym_pause = 1; + } else { + phydev->pause = 1; + phydev->asym_pause = 0; + } + } } } else { int bmcr = phy_read(phydev, MII_BMCR); @@ -986,6 +1180,119 @@ static int marvell_read_status(struct phy_device *phydev) return 0; } +/* marvell_read_status + * + * Some Marvell PHYs have two modes: fiber and copper. + * Both need their status checked. + * Description: + * First, check the fiber link and status. + * If the fiber link is down, check the copper link and status, which + * is the default when both links are down. + */ +static int marvell_read_status(struct phy_device *phydev) +{ + int err; + + /* Check the fiber mode first */ + if (phydev->supported & SUPPORTED_FIBRE) { + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER); + if (err < 0) + goto error; + + err = marvell_read_status_page(phydev, MII_M1111_FIBER); + if (err < 0) + goto error; + + /* If the fiber link is up, it is the selected and used link. + * In this case, we need to stay in the fiber page. + * Be careful about that: do not restore the copper page in + * other functions, as that would break the behaviour of some + * fiber PHYs such as the 88E1512. + */ + if (phydev->link) + return 0; + + /* If fiber link is down, check and save copper mode state */ + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER); + if (err < 0) + goto error; + } + + return marvell_read_status_page(phydev, MII_M1111_COPPER); + +error: + phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER); + return err; +} + +/* marvell_suspend + * + * Some Marvell PHYs have two modes: fiber and copper. + * Both need to be suspended. + */ +static int marvell_suspend(struct phy_device *phydev) +{ + int err; + + /* Suspend the fiber mode first */ + if (phydev->supported & SUPPORTED_FIBRE) { + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER); + if (err < 0) + goto error; + + /* With the page set, use the generic suspend */ + err = genphy_suspend(phydev); + if (err < 0) + goto error; + + /* Then, the copper link */ + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER); + if (err < 0) + goto error; + } + + /* With the page set, use the generic suspend */ + return genphy_suspend(phydev); + +error: + phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER); + return err; +} + +/* marvell_resume + * + * Some Marvell PHYs have two modes: fiber and copper.
+ * Both need to be resumed. + */ +static int marvell_resume(struct phy_device *phydev) +{ + int err; + + /* Resume the fiber mode first */ + if (phydev->supported & SUPPORTED_FIBRE) { + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER); + if (err < 0) + goto error; + + /* With the page set, use the generic resume */ + err = genphy_resume(phydev); + if (err < 0) + goto error; + + /* Then, the copper link */ + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER); + if (err < 0) + goto error; + } + + /* With the page set, use the generic resume */ + return genphy_resume(phydev); + +error: + phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_COPPER); + return err; +} + static int marvell_aneg_done(struct phy_device *phydev) { int retval = phy_read(phydev, MII_M1011_PHY_STATUS); @@ -1107,7 +1414,10 @@ static int m88e1318_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *w static int marvell_get_sset_count(struct phy_device *phydev) { - return ARRAY_SIZE(marvell_hw_stats); + if (phydev->supported & SUPPORTED_FIBRE) + return ARRAY_SIZE(marvell_hw_stats); + else + return ARRAY_SIZE(marvell_hw_stats) - NB_FIBER_STATS; } static void marvell_get_strings(struct phy_device *phydev, u8 *data) @@ -1361,7 +1671,7 @@ static struct phy_driver marvell_drivers[] = { .phy_id = MARVELL_PHY_ID_88E1510, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1510", - .features = PHY_GBIT_FEATURES, + .features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE, .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1510_config_init, @@ -1370,8 +1680,8 @@ static struct phy_driver marvell_drivers[] = { .ack_interrupt = &marvell_ack_interrupt, .config_intr = &marvell_config_intr, .did_interrupt = &m88e1121_did_interrupt, - .resume = &genphy_resume, - .suspend = &genphy_suspend, + .resume = &marvell_resume, + .suspend = &marvell_suspend, .get_sset_count = marvell_get_sset_count, .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, diff --git a/drivers/net/phy/mdio-hisi-femac.c b/drivers/net/phy/mdio-hisi-femac.c new file mode 100644 index 000000000..b03fedd6c --- /dev/null +++ b/drivers/net/phy/mdio-hisi-femac.c @@ -0,0 +1,166 @@ +/* + * Hisilicon Fast Ethernet MDIO Bus Driver + * + * Copyright (c) 2016 HiSilicon Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include <linux/clk.h> +#include <linux/iopoll.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_mdio.h> +#include <linux/platform_device.h> + +#define MDIO_RWCTRL 0x00 +#define MDIO_RO_DATA 0x04 +#define MDIO_WRITE BIT(13) +#define MDIO_RW_FINISH BIT(15) +#define BIT_PHY_ADDR_OFFSET 8 +#define BIT_WR_DATA_OFFSET 16 + +struct hisi_femac_mdio_data { + struct clk *clk; + void __iomem *membase; +}; + +static int hisi_femac_mdio_wait_ready(struct hisi_femac_mdio_data *data) +{ + u32 val; + + return readl_poll_timeout(data->membase + MDIO_RWCTRL, + val, val & MDIO_RW_FINISH, 20, 10000); +} + +static int hisi_femac_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct hisi_femac_mdio_data *data = bus->priv; + int ret; + + ret = hisi_femac_mdio_wait_ready(data); + if (ret) + return ret; + + writel((mii_id << BIT_PHY_ADDR_OFFSET) | regnum, + data->membase + MDIO_RWCTRL); + + ret = hisi_femac_mdio_wait_ready(data); + if (ret) + return ret; + + return readl(data->membase + MDIO_RO_DATA) & 0xFFFF; +} + +static int hisi_femac_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) +{ + struct hisi_femac_mdio_data *data = bus->priv; + int ret; + + ret = hisi_femac_mdio_wait_ready(data); + if (ret) + return ret; + + writel(MDIO_WRITE | (value << BIT_WR_DATA_OFFSET) | + (mii_id << BIT_PHY_ADDR_OFFSET) | regnum, + data->membase + MDIO_RWCTRL); + + return hisi_femac_mdio_wait_ready(data); +} + +static int hisi_femac_mdio_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct mii_bus *bus; + struct hisi_femac_mdio_data *data; + struct resource *res; + int ret; + + bus = mdiobus_alloc_size(sizeof(*data)); + if (!bus) + return -ENOMEM; + + bus->name = "hisi_femac_mii_bus"; + bus->read = &hisi_femac_mdio_read; + bus->write = &hisi_femac_mdio_write; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); + bus->parent = &pdev->dev; + + data = bus->priv; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->membase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->membase)) { + ret = PTR_ERR(data->membase); + goto err_out_free_mdiobus; + } + + data->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(data->clk)) { + ret = PTR_ERR(data->clk); + goto err_out_free_mdiobus; + } + + ret = clk_prepare_enable(data->clk); + if (ret) + goto err_out_free_mdiobus; + + ret = of_mdiobus_register(bus, np); + if (ret) + goto err_out_disable_clk; + + platform_set_drvdata(pdev, bus); + + return 0; + +err_out_disable_clk: + clk_disable_unprepare(data->clk); +err_out_free_mdiobus: + mdiobus_free(bus); + return ret; +} + +static int hisi_femac_mdio_remove(struct platform_device *pdev) +{ + struct mii_bus *bus = platform_get_drvdata(pdev); + struct hisi_femac_mdio_data *data = bus->priv; + + mdiobus_unregister(bus); + clk_disable_unprepare(data->clk); + mdiobus_free(bus); + + return 0; +} + +static const struct of_device_id hisi_femac_mdio_dt_ids[] = { + { .compatible = "hisilicon,hisi-femac-mdio" }, + { } +}; +MODULE_DEVICE_TABLE(of, hisi_femac_mdio_dt_ids); + +static struct platform_driver hisi_femac_mdio_driver = { + .probe = hisi_femac_mdio_probe, + .remove = hisi_femac_mdio_remove, + .driver = { + .name = "hisi-femac-mdio", + .of_match_table = hisi_femac_mdio_dt_ids, + }, +}; + +module_platform_driver(hisi_femac_mdio_driver); + +MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC MDIO interface driver"); +MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>"); +MODULE_LICENSE("GPL v2"); 
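The HiSilicon bus driver above drives the whole MDIO cycle through a single command word at MDIO_RWCTRL plus the MDIO_RW_FINISH completion bit that hisi_femac_mdio_wait_ready() polls. As a reading aid, the command encoding can be condensed into one helper; femac_mdio_cmd is illustrative only, and any field widths beyond the offsets documented in the driver are assumptions.

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_MDIO_WRITE		BIT(13)	/* matches MDIO_WRITE above */
#define DEMO_PHY_ADDR_OFFSET	8	/* matches BIT_PHY_ADDR_OFFSET */
#define DEMO_WR_DATA_OFFSET	16	/* matches BIT_WR_DATA_OFFSET */

/* Build the MDIO_RWCTRL command word: the register number sits in the
 * low bits, the PHY address at bit 8, the write data at bit 16, and
 * the direction flag at bit 13. Hardware sets MDIO_RW_FINISH (bit 15)
 * when the cycle completes; reads then fetch the result from
 * MDIO_RO_DATA.
 */
static u32 femac_mdio_cmd(bool is_write, u32 phy_addr, u32 regnum, u16 value)
{
	u32 cmd = (phy_addr << DEMO_PHY_ADDR_OFFSET) | regnum;

	if (is_write)
		cmd |= DEMO_MDIO_WRITE | ((u32)value << DEMO_WR_DATA_OFFSET);

	return cmd;	/* what the driver writes to membase + MDIO_RWCTRL */
}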
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c new file mode 100644 index 000000000..0a0412524 --- /dev/null +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c @@ -0,0 +1,248 @@ +/* + * Copyright 2016 Broadcom + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation (the "GPL"). + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 (GPLv2) for more details. + * + * You should have received a copy of the GNU General Public License + * version 2 (GPLv2) along with this source code. + */ + +#include <linux/platform_device.h> +#include <linux/device.h> +#include <linux/of_mdio.h> +#include <linux/module.h> +#include <linux/phy.h> +#include <linux/mdio-mux.h> +#include <linux/delay.h> + +#define MDIO_PARAM_OFFSET 0x00 +#define MDIO_PARAM_MIIM_CYCLE 29 +#define MDIO_PARAM_INTERNAL_SEL 25 +#define MDIO_PARAM_BUS_ID 22 +#define MDIO_PARAM_C45_SEL 21 +#define MDIO_PARAM_PHY_ID 16 +#define MDIO_PARAM_PHY_DATA 0 + +#define MDIO_READ_OFFSET 0x04 +#define MDIO_READ_DATA_MASK 0xffff +#define MDIO_ADDR_OFFSET 0x08 + +#define MDIO_CTRL_OFFSET 0x0C +#define MDIO_CTRL_WRITE_OP 0x1 +#define MDIO_CTRL_READ_OP 0x2 + +#define MDIO_STAT_OFFSET 0x10 +#define MDIO_STAT_DONE 1 + +#define BUS_MAX_ADDR 32 +#define EXT_BUS_START_ADDR 16 + +struct iproc_mdiomux_desc { + void *mux_handle; + void __iomem *base; + struct device *dev; + struct mii_bus *mii_bus; +}; + +static int iproc_mdio_wait_for_idle(void __iomem *base, bool result) +{ + unsigned int timeout = 1000; /* poll for roughly 1-2s */ + u32 val; + + do { + val = readl(base + MDIO_STAT_OFFSET); + if ((val & MDIO_STAT_DONE) == result) + return 0; + + usleep_range(1000, 2000); + } while (timeout--); + + return -ETIMEDOUT; +} + +/* start_miim_ops - program and start an MDIO transaction over the MDIO bus. + * @base: base address of the MDIO mux register block. + * @phyid: PHY ID on the selected bus. + * @reg: register offset to be read/written. + * @val: 0 for a read op, else the value to be written to @reg. + * @op: operation to be carried out. + * MDIO_CTRL_READ_OP: read transaction. + * MDIO_CTRL_WRITE_OP: write transaction. + * + * Return value: a successful read returns the register value and a + * successful write returns 0; on failure a negative error code is returned.
+ */ +static int start_miim_ops(void __iomem *base, + u16 phyid, u32 reg, u16 val, u32 op) +{ + u32 param; + int ret; + + writel(0, base + MDIO_CTRL_OFFSET); + ret = iproc_mdio_wait_for_idle(base, 0); + if (ret) + goto err; + + param = readl(base + MDIO_PARAM_OFFSET); + param |= phyid << MDIO_PARAM_PHY_ID; + param |= val << MDIO_PARAM_PHY_DATA; + if (reg & MII_ADDR_C45) + param |= BIT(MDIO_PARAM_C45_SEL); + + writel(param, base + MDIO_PARAM_OFFSET); + + writel(reg, base + MDIO_ADDR_OFFSET); + + writel(op, base + MDIO_CTRL_OFFSET); + + ret = iproc_mdio_wait_for_idle(base, 1); + if (ret) + goto err; + + if (op == MDIO_CTRL_READ_OP) + ret = readl(base + MDIO_READ_OFFSET) & MDIO_READ_DATA_MASK; +err: + return ret; +} + +static int iproc_mdiomux_read(struct mii_bus *bus, int phyid, int reg) +{ + struct iproc_mdiomux_desc *md = bus->priv; + int ret; + + ret = start_miim_ops(md->base, phyid, reg, 0, MDIO_CTRL_READ_OP); + if (ret < 0) + dev_err(&bus->dev, "mdiomux read operation failed!!!"); + + return ret; +} + +static int iproc_mdiomux_write(struct mii_bus *bus, + int phyid, int reg, u16 val) +{ + struct iproc_mdiomux_desc *md = bus->priv; + int ret; + + /* Write val at reg offset */ + ret = start_miim_ops(md->base, phyid, reg, val, MDIO_CTRL_WRITE_OP); + if (ret < 0) + dev_err(&bus->dev, "mdiomux write operation failed!!!"); + + return ret; +} + +static int mdio_mux_iproc_switch_fn(int current_child, int desired_child, + void *data) +{ + struct iproc_mdiomux_desc *md = data; + u32 param, bus_id; + bool bus_dir; + + /* select bus and its properties */ + bus_dir = (desired_child < EXT_BUS_START_ADDR); + bus_id = bus_dir ? desired_child : (desired_child - EXT_BUS_START_ADDR); + + param = (bus_dir ? 1 : 0) << MDIO_PARAM_INTERNAL_SEL; + param |= (bus_id << MDIO_PARAM_BUS_ID); + + writel(param, md->base + MDIO_PARAM_OFFSET); + return 0; +} + +static int mdio_mux_iproc_probe(struct platform_device *pdev) +{ + struct iproc_mdiomux_desc *md; + struct mii_bus *bus; + struct resource *res; + int rc; + + md = devm_kzalloc(&pdev->dev, sizeof(*md), GFP_KERNEL); + if (!md) + return -ENOMEM; + md->dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + md->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(md->base)) { + dev_err(&pdev->dev, "failed to ioremap register\n"); + return PTR_ERR(md->base); + } + + md->mii_bus = mdiobus_alloc(); + if (!md->mii_bus) { + dev_err(&pdev->dev, "mdiomux bus alloc failed\n"); + return -ENOMEM; + } + + bus = md->mii_bus; + bus->priv = md; + bus->name = "iProc MDIO mux bus"; + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id); + bus->parent = &pdev->dev; + bus->read = iproc_mdiomux_read; + bus->write = iproc_mdiomux_write; + + bus->phy_mask = ~0; + bus->dev.of_node = pdev->dev.of_node; + rc = mdiobus_register(bus); + if (rc) { + dev_err(&pdev->dev, "mdiomux registration failed\n"); + goto out; + } + + platform_set_drvdata(pdev, md); + + rc = mdio_mux_init(md->dev, mdio_mux_iproc_switch_fn, + &md->mux_handle, md, md->mii_bus); + if (rc) { + dev_info(md->dev, "mdiomux initialization failed\n"); + goto out; + } + + dev_info(md->dev, "iProc mdiomux registered\n"); + return 0; +out: + mdiobus_free(bus); + return rc; +} + +static int mdio_mux_iproc_remove(struct platform_device *pdev) +{ + struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev); + + mdio_mux_uninit(md->mux_handle); + mdiobus_unregister(md->mii_bus); + mdiobus_free(md->mii_bus); + + return 0; +} + +static const struct of_device_id mdio_mux_iproc_match[] = { + { + 
.compatible = "brcm,mdio-mux-iproc", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, mdio_mux_iproc_match); + +static struct platform_driver mdiomux_iproc_driver = { + .driver = { + .name = "mdio-mux-iproc", + .of_match_table = mdio_mux_iproc_match, + }, + .probe = mdio_mux_iproc_probe, + .remove = mdio_mux_iproc_remove, +}; + +module_platform_driver(mdiomux_iproc_driver); + +MODULE_DESCRIPTION("iProc MDIO Mux Bus Driver"); +MODULE_AUTHOR("Pramod Kumar <pramod.kumar@broadcom.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index 7ddb1ab70..919949960 100644 --- a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c @@ -55,7 +55,7 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev) return PTR_ERR(s->gpios); r = mdio_mux_init(&pdev->dev, - mdio_mux_gpio_switch_fn, &s->mux_handle, s); + mdio_mux_gpio_switch_fn, &s->mux_handle, s, NULL); if (r != 0) { gpiod_put_array(s->gpios); diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c index 7fde454fb..d0bed52c8 100644 --- a/drivers/net/phy/mdio-mux-mmioreg.c +++ b/drivers/net/phy/mdio-mux-mmioreg.c @@ -126,7 +126,7 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev) } ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn, - &s->mux_handle, s); + &s->mux_handle, s, NULL); if (ret) { dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n", np->full_name); diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index 5c81d6faf..963838d4f 100644 --- a/drivers/net/phy/mdio-mux.c +++ b/drivers/net/phy/mdio-mux.c @@ -89,7 +89,8 @@ static int parent_count; int mdio_mux_init(struct device *dev, int (*switch_fn)(int cur, int desired, void *data), void **mux_handle, - void *data) + void *data, + struct mii_bus *mux_bus) { struct device_node *parent_bus_node; struct device_node *child_bus_node; @@ -101,10 +102,22 @@ int mdio_mux_init(struct device *dev, if (!dev->of_node) return -ENODEV; - parent_bus_node = of_parse_phandle(dev->of_node, "mdio-parent-bus", 0); + if (!mux_bus) { + parent_bus_node = of_parse_phandle(dev->of_node, + "mdio-parent-bus", 0); - if (!parent_bus_node) - return -ENODEV; + if (!parent_bus_node) + return -ENODEV; + + parent_bus = of_mdio_find_bus(parent_bus_node); + if (!parent_bus) { + ret_val = -EPROBE_DEFER; + goto err_parent_bus; + } + } else { + parent_bus_node = NULL; + parent_bus = mux_bus; + } pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); if (pb == NULL) { @@ -112,11 +125,6 @@ int mdio_mux_init(struct device *dev, goto err_parent_bus; } - parent_bus = of_mdio_find_bus(parent_bus_node); - if (parent_bus == NULL) { - ret_val = -EPROBE_DEFER; - goto err_parent_bus; - } pb->switch_data = data; pb->switch_fn = switch_fn; diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/phy/mdio-xgene.c new file mode 100644 index 000000000..92af18295 --- /dev/null +++ b/drivers/net/phy/mdio-xgene.c @@ -0,0 +1,473 @@ +/* Applied Micro X-Gene SoC MDIO Driver + * + * Copyright (c) 2016, Applied Micro Circuits Corporation + * Author: Iyappan Subramanian <isubramanian@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/acpi.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/efi.h> +#include <linux/if_vlan.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> +#include <linux/prefetch.h> +#include <linux/phy.h> +#include <net/ip.h> +#include "mdio-xgene.h" + +static bool xgene_mdio_status; + +static u32 xgene_enet_rd_mac(void __iomem *base_addr, u32 rd_addr) +{ + void __iomem *addr, *rd, *cmd, *cmd_done; + u32 done, rd_data = BUSY_MASK; + u8 wait = 10; + + addr = base_addr + MAC_ADDR_REG_OFFSET; + rd = base_addr + MAC_READ_REG_OFFSET; + cmd = base_addr + MAC_COMMAND_REG_OFFSET; + cmd_done = base_addr + MAC_COMMAND_DONE_REG_OFFSET; + + iowrite32(rd_addr, addr); + iowrite32(XGENE_ENET_RD_CMD, cmd); + + while (wait--) { + done = ioread32(cmd_done); + if (done) + break; + udelay(1); + } + + if (!done) + return rd_data; + + rd_data = ioread32(rd); + iowrite32(0, cmd); + + return rd_data; +} + +static void xgene_enet_wr_mac(void __iomem *base_addr, u32 wr_addr, u32 wr_data) +{ + void __iomem *addr, *wr, *cmd, *cmd_done; + u8 wait = 10; + u32 done; + + addr = base_addr + MAC_ADDR_REG_OFFSET; + wr = base_addr + MAC_WRITE_REG_OFFSET; + cmd = base_addr + MAC_COMMAND_REG_OFFSET; + cmd_done = base_addr + MAC_COMMAND_DONE_REG_OFFSET; + + iowrite32(wr_addr, addr); + iowrite32(wr_data, wr); + iowrite32(XGENE_ENET_WR_CMD, cmd); + + while (wait--) { + done = ioread32(cmd_done); + if (done) + break; + udelay(1); + } + + if (!done) + pr_err("MCX mac write failed, addr: 0x%04x\n", wr_addr); + + iowrite32(0, cmd); +} + +int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg) +{ + void __iomem *addr = (void __iomem *)bus->priv; + u32 data, done; + u8 wait = 10; + + data = SET_VAL(PHY_ADDR, phy_id) | SET_VAL(REG_ADDR, reg); + xgene_enet_wr_mac(addr, MII_MGMT_ADDRESS_ADDR, data); + xgene_enet_wr_mac(addr, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK); + do { + usleep_range(5, 10); + done = xgene_enet_rd_mac(addr, MII_MGMT_INDICATORS_ADDR); + } while ((done & BUSY_MASK) && wait--); + + if (done & BUSY_MASK) { + dev_err(&bus->dev, "MII_MGMT read failed\n"); + return -EBUSY; + } + + data = xgene_enet_rd_mac(addr, MII_MGMT_STATUS_ADDR); + xgene_enet_wr_mac(addr, MII_MGMT_COMMAND_ADDR, 0); + + return data; +} +EXPORT_SYMBOL(xgene_mdio_rgmii_read); + +int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data) +{ + void __iomem *addr = (void __iomem *)bus->priv; + u32 val, done; + u8 wait = 10; + + val = SET_VAL(PHY_ADDR, phy_id) | SET_VAL(REG_ADDR, reg); + xgene_enet_wr_mac(addr, MII_MGMT_ADDRESS_ADDR, val); + + xgene_enet_wr_mac(addr, MII_MGMT_CONTROL_ADDR, data); + do { + usleep_range(5, 10); + done = xgene_enet_rd_mac(addr, MII_MGMT_INDICATORS_ADDR); + } while ((done & BUSY_MASK) && wait--); + + if (done & BUSY_MASK) { + dev_err(&bus->dev, "MII_MGMT write failed\n"); + return -EBUSY; + } + + return 0; +} +EXPORT_SYMBOL(xgene_mdio_rgmii_write); + +static u32 xgene_menet_rd_diag_csr(struct xgene_mdio_pdata *pdata, u32 offset) +{ + return ioread32(pdata->diag_csr_addr + offset); +} + 
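xgene_enet_rd_mac() and xgene_enet_wr_mac() above both open-code the same bounded busy-wait on the command-done register. The idiom reduces to a helper like the following sketch; poll_done() is hypothetical, and the driver deliberately inlines the loop at each call site.

#include <linux/io.h>
#include <linux/delay.h>

/* Poll @reg until any bit of @mask is set; give up after @tries probes. */
static u32 poll_done(void __iomem *reg, u32 mask, unsigned int tries)
{
	u32 val;

	while (tries--) {
		val = ioread32(reg);
		if (val & mask)
			return val;
		udelay(1);
	}

	return 0;	/* timed out */
}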
+static void xgene_menet_wr_diag_csr(struct xgene_mdio_pdata *pdata, + u32 offset, u32 val) +{ + iowrite32(val, pdata->diag_csr_addr + offset); +} + +static int xgene_enet_ecc_init(struct xgene_mdio_pdata *pdata) +{ + u32 data; + u8 wait = 10; + + xgene_menet_wr_diag_csr(pdata, MENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0); + do { + usleep_range(100, 110); + data = xgene_menet_rd_diag_csr(pdata, MENET_BLOCK_MEM_RDY_ADDR); + } while ((data != 0xffffffff) && wait--); + + if (data != 0xffffffff) { + dev_err(pdata->dev, "Failed to release memory from shutdown\n"); + return -ENODEV; + } + + return 0; +} + +static void xgene_gmac_reset(struct xgene_mdio_pdata *pdata) +{ + xgene_enet_wr_mac(pdata->mac_csr_addr, MAC_CONFIG_1_ADDR, SOFT_RESET); + xgene_enet_wr_mac(pdata->mac_csr_addr, MAC_CONFIG_1_ADDR, 0); +} + +static int xgene_mdio_reset(struct xgene_mdio_pdata *pdata) +{ + int ret; + + if (pdata->dev->of_node) { + clk_prepare_enable(pdata->clk); + udelay(5); + clk_disable_unprepare(pdata->clk); + udelay(5); + clk_prepare_enable(pdata->clk); + udelay(5); + } else { +#ifdef CONFIG_ACPI + acpi_evaluate_object(ACPI_HANDLE(pdata->dev), + "_RST", NULL, NULL); +#endif + } + + ret = xgene_enet_ecc_init(pdata); + if (ret) + return ret; + xgene_gmac_reset(pdata); + + return 0; +} + +static void xgene_enet_rd_mdio_csr(void __iomem *base_addr, + u32 offset, u32 *val) +{ + void __iomem *addr = base_addr + offset; + + *val = ioread32(addr); +} + +static void xgene_enet_wr_mdio_csr(void __iomem *base_addr, + u32 offset, u32 val) +{ + void __iomem *addr = base_addr + offset; + + iowrite32(val, addr); +} + +static int xgene_xfi_mdio_write(struct mii_bus *bus, int phy_id, + int reg, u16 data) +{ + void __iomem *addr = (void __iomem *)bus->priv; + int timeout = 100; + u32 status, val; + + val = SET_VAL(HSTPHYADX, phy_id) | SET_VAL(HSTREGADX, reg) | + SET_VAL(HSTMIIMWRDAT, data); + xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, data); + + val = HSTLDCMD | SET_VAL(HSTMIIMCMD, MIIM_CMD_LEGACY_WRITE); + xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, val); + + do { + usleep_range(5, 10); + xgene_enet_rd_mdio_csr(addr, MIIM_INDICATOR_ADDR, &status); + } while ((status & BUSY_MASK) && timeout--); + + xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, 0); + + return 0; +} + +static int xgene_xfi_mdio_read(struct mii_bus *bus, int phy_id, int reg) +{ + void __iomem *addr = (void __iomem *)bus->priv; + u32 data, status, val; + int timeout = 100; + + val = SET_VAL(HSTPHYADX, phy_id) | SET_VAL(HSTREGADX, reg); + xgene_enet_wr_mdio_csr(addr, MIIM_FIELD_ADDR, val); + + val = HSTLDCMD | SET_VAL(HSTMIIMCMD, MIIM_CMD_LEGACY_READ); + xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, val); + + do { + usleep_range(5, 10); + xgene_enet_rd_mdio_csr(addr, MIIM_INDICATOR_ADDR, &status); + } while ((status & BUSY_MASK) && timeout--); + + if (status & BUSY_MASK) { + pr_err("XGENET_MII_MGMT write failed\n"); + return -EBUSY; + } + + xgene_enet_rd_mdio_csr(addr, MIIMRD_FIELD_ADDR, &data); + xgene_enet_wr_mdio_csr(addr, MIIM_COMMAND_ADDR, 0); + + return data; +} + +struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr) +{ + struct phy_device *phy_dev; + + phy_dev = get_phy_device(bus, phy_addr, false); + if (!phy_dev || IS_ERR(phy_dev)) + return NULL; + + if (phy_device_register(phy_dev)) + phy_device_free(phy_dev); + + return phy_dev; +} +EXPORT_SYMBOL(xgene_enet_phy_register); + +#ifdef CONFIG_ACPI +static acpi_status acpi_register_phy(acpi_handle handle, u32 lvl, + void *context, void **ret) +{ + struct mii_bus *mdio = context; 
+ struct acpi_device *adev; + struct phy_device *phy_dev; + const union acpi_object *obj; + u32 phy_addr; + + if (acpi_bus_get_device(handle, &adev)) + return AE_OK; + + if (acpi_dev_get_property(adev, "phy-channel", ACPI_TYPE_INTEGER, &obj)) + return AE_OK; + phy_addr = obj->integer.value; + + phy_dev = xgene_enet_phy_register(mdio, phy_addr); + adev->driver_data = phy_dev; + + return AE_OK; +} +#endif + +static int xgene_mdio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mii_bus *mdio_bus; + const struct of_device_id *of_id; + struct resource *res; + struct xgene_mdio_pdata *pdata; + void __iomem *csr_base; + int mdio_id = 0, ret = 0; + + of_id = of_match_device(xgene_mdio_of_match, &pdev->dev); + if (of_id) { + mdio_id = (enum xgene_mdio_id)of_id->data; + } else { +#ifdef CONFIG_ACPI + const struct acpi_device_id *acpi_id; + + acpi_id = acpi_match_device(xgene_mdio_acpi_match, &pdev->dev); + if (acpi_id) + mdio_id = (enum xgene_mdio_id)acpi_id->driver_data; +#endif + } + + if (!mdio_id) + return -ENODEV; + + pdata = devm_kzalloc(dev, sizeof(struct xgene_mdio_pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + pdata->mdio_id = mdio_id; + pdata->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + csr_base = devm_ioremap_resource(dev, res); + if (IS_ERR(csr_base)) + return PTR_ERR(csr_base); + pdata->mac_csr_addr = csr_base; + pdata->mdio_csr_addr = csr_base + BLOCK_XG_MDIO_CSR_OFFSET; + pdata->diag_csr_addr = csr_base + BLOCK_DIAG_CSR_OFFSET; + + if (dev->of_node) { + pdata->clk = devm_clk_get(dev, NULL); + if (IS_ERR(pdata->clk)) { + dev_err(dev, "Unable to retrieve clk\n"); + return PTR_ERR(pdata->clk); + } + } + + ret = xgene_mdio_reset(pdata); + if (ret) + return ret; + + mdio_bus = mdiobus_alloc(); + if (!mdio_bus) + return -ENOMEM; + + mdio_bus->name = "APM X-Gene MDIO bus"; + + if (mdio_id == XGENE_MDIO_RGMII) { + mdio_bus->read = xgene_mdio_rgmii_read; + mdio_bus->write = xgene_mdio_rgmii_write; + mdio_bus->priv = (void __force *)pdata->mac_csr_addr; + snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s", + "xgene-mii-rgmii"); + } else { + mdio_bus->read = xgene_xfi_mdio_read; + mdio_bus->write = xgene_xfi_mdio_write; + mdio_bus->priv = (void __force *)pdata->mdio_csr_addr; + snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s", + "xgene-mii-xfi"); + } + + mdio_bus->parent = dev; + platform_set_drvdata(pdev, pdata); + + if (dev->of_node) { + ret = of_mdiobus_register(mdio_bus, dev->of_node); + } else { +#ifdef CONFIG_ACPI + /* Mask out all PHYs from auto probing. 
*/ + mdio_bus->phy_mask = ~0; + ret = mdiobus_register(mdio_bus); + if (ret) + goto out; + + acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_HANDLE(dev), 1, + acpi_register_phy, NULL, mdio_bus, NULL); +#endif + } + + if (ret) + goto out; + + pdata->mdio_bus = mdio_bus; + xgene_mdio_status = true; + + return 0; + +out: + mdiobus_free(mdio_bus); + + return ret; +} + +static int xgene_mdio_remove(struct platform_device *pdev) +{ + struct xgene_mdio_pdata *pdata = platform_get_drvdata(pdev); + struct mii_bus *mdio_bus = pdata->mdio_bus; + struct device *dev = &pdev->dev; + + mdiobus_unregister(mdio_bus); + mdiobus_free(mdio_bus); + + if (dev->of_node) + clk_disable_unprepare(pdata->clk); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id xgene_mdio_of_match[] = { + { + .compatible = "apm,xgene-mdio-rgmii", + .data = (void *)XGENE_MDIO_RGMII + }, + { + .compatible = "apm,xgene-mdio-xfi", + .data = (void *)XGENE_MDIO_XFI + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, xgene_mdio_of_match); +#endif + +#ifdef CONFIG_ACPI +static const struct acpi_device_id xgene_mdio_acpi_match[] = { + { "APMC0D65", XGENE_MDIO_RGMII }, + { "APMC0D66", XGENE_MDIO_XFI }, + { } +}; + +MODULE_DEVICE_TABLE(acpi, xgene_mdio_acpi_match); +#endif + +static struct platform_driver xgene_mdio_driver = { + .driver = { + .name = "xgene-mdio", + .of_match_table = of_match_ptr(xgene_mdio_of_match), + .acpi_match_table = ACPI_PTR(xgene_mdio_acpi_match), + }, + .probe = xgene_mdio_probe, + .remove = xgene_mdio_remove, +}; + +module_platform_driver(xgene_mdio_driver); + +MODULE_DESCRIPTION("APM X-Gene SoC MDIO driver"); +MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/mdio-xgene.h b/drivers/net/phy/mdio-xgene.h new file mode 100644 index 000000000..354241b53 --- /dev/null +++ b/drivers/net/phy/mdio-xgene.h @@ -0,0 +1,143 @@ +/* Applied Micro X-Gene SoC MDIO Driver + * + * Copyright (c) 2016, Applied Micro Circuits Corporation + * Author: Iyappan Subramanian <isubramanian@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __MDIO_XGENE_H__ +#define __MDIO_XGENE_H__ + +#define BLOCK_XG_MDIO_CSR_OFFSET 0x5000 +#define BLOCK_DIAG_CSR_OFFSET 0xd000 +#define XGENET_CONFIG_REG_ADDR 0x20 + +#define MAC_ADDR_REG_OFFSET 0x00 +#define MAC_COMMAND_REG_OFFSET 0x04 +#define MAC_WRITE_REG_OFFSET 0x08 +#define MAC_READ_REG_OFFSET 0x0c +#define MAC_COMMAND_DONE_REG_OFFSET 0x10 + +#define CLKEN_OFFSET 0x08 +#define SRST_OFFSET 0x00 + +#define MENET_CFG_MEM_RAM_SHUTDOWN_ADDR 0x70 +#define MENET_BLOCK_MEM_RDY_ADDR 0x74 + +#define MAC_CONFIG_1_ADDR 0x00 +#define MII_MGMT_COMMAND_ADDR 0x24 +#define MII_MGMT_ADDRESS_ADDR 0x28 +#define MII_MGMT_CONTROL_ADDR 0x2c +#define MII_MGMT_STATUS_ADDR 0x30 +#define MII_MGMT_INDICATORS_ADDR 0x34 +#define SOFT_RESET BIT(31) + +#define MII_MGMT_CONFIG_ADDR 0x20 +#define MII_MGMT_COMMAND_ADDR 0x24 +#define MII_MGMT_ADDRESS_ADDR 0x28 +#define MII_MGMT_CONTROL_ADDR 0x2c +#define MII_MGMT_STATUS_ADDR 0x30 +#define MII_MGMT_INDICATORS_ADDR 0x34 + +#define MIIM_COMMAND_ADDR 0x20 +#define MIIM_FIELD_ADDR 0x24 +#define MIIM_CONFIGURATION_ADDR 0x28 +#define MIIM_LINKFAILVECTOR_ADDR 0x2c +#define MIIM_INDICATOR_ADDR 0x30 +#define MIIMRD_FIELD_ADDR 0x34 + +#define MDIO_CSR_OFFSET 0x5000 + +#define REG_ADDR_POS 0 +#define REG_ADDR_LEN 5 +#define PHY_ADDR_POS 8 +#define PHY_ADDR_LEN 5 + +#define HSTMIIMWRDAT_POS 0 +#define HSTMIIMWRDAT_LEN 16 +#define HSTPHYADX_POS 23 +#define HSTPHYADX_LEN 5 +#define HSTREGADX_POS 18 +#define HSTREGADX_LEN 5 +#define HSTLDCMD BIT(3) +#define HSTMIIMCMD_POS 0 +#define HSTMIIMCMD_LEN 3 + +#define BUSY_MASK BIT(0) +#define READ_CYCLE_MASK BIT(0) + +enum xgene_enet_cmd { + XGENE_ENET_WR_CMD = BIT(31), + XGENE_ENET_RD_CMD = BIT(30) +}; + +enum { + MIIM_CMD_IDLE, + MIIM_CMD_LEGACY_WRITE, + MIIM_CMD_LEGACY_READ, +}; + +enum xgene_mdio_id { + XGENE_MDIO_RGMII = 1, + XGENE_MDIO_XFI +}; + +struct xgene_mdio_pdata { + struct clk *clk; + struct device *dev; + void __iomem *mac_csr_addr; + void __iomem *diag_csr_addr; + void __iomem *mdio_csr_addr; + struct mii_bus *mdio_bus; + int mdio_id; +}; + +/* Set the specified value into a bit-field defined by its starting position + * and length within a single u64. + */ +static inline u64 xgene_enet_set_field_value(int pos, int len, u64 val) +{ + return (val & ((1ULL << len) - 1)) << pos; +} + +#define SET_VAL(field, val) \ + xgene_enet_set_field_value(field ## _POS, field ## _LEN, val) + +#define SET_BIT(field) \ + xgene_enet_set_field_value(field ## _POS, 1, 1) + +/* Get the value from a bit-field defined by its starting position + * and length within the specified u64. 
+ */ +static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src) +{ + return (src >> pos) & ((1ULL << len) - 1); +} + +#define GET_VAL(field, src) \ + xgene_enet_get_field_value(field ## _POS, field ## _LEN, src) + +#define GET_BIT(field, src) \ + xgene_enet_get_field_value(field ## _POS, 1, src) + +static const struct of_device_id xgene_mdio_of_match[]; +#ifdef CONFIG_ACPI +static const struct acpi_device_id xgene_mdio_acpi_match[]; +#endif +int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg); +int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data); +struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr); + +#endif /* __MDIO_XGENE_H__ */ diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 5a8fefc25..885ac9cba 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -311,6 +311,36 @@ static int kszphy_config_init(struct phy_device *phydev) return 0; } +static int ksz8041_config_init(struct phy_device *phydev) +{ + struct device_node *of_node = phydev->mdio.dev.of_node; + + /* Limit supported and advertised modes in fiber mode */ + if (of_property_read_bool(of_node, "micrel,fiber-mode")) { + phydev->dev_flags |= MICREL_PHY_FXEN; + phydev->supported &= SUPPORTED_FIBRE | + SUPPORTED_100baseT_Full | + SUPPORTED_100baseT_Half; + phydev->advertising &= ADVERTISED_FIBRE | + ADVERTISED_100baseT_Full | + ADVERTISED_100baseT_Half; + phydev->autoneg = AUTONEG_DISABLE; + } + + return kszphy_config_init(phydev); +} + +static int ksz8041_config_aneg(struct phy_device *phydev) +{ + /* Skip auto-negotiation in fiber mode */ + if (phydev->dev_flags & MICREL_PHY_FXEN) { + phydev->speed = SPEED_100; + return 0; + } + + return genphy_config_aneg(phydev); +} + static int ksz9021_load_values_from_of(struct phy_device *phydev, const struct device_node *of_node, u16 reg, @@ -647,17 +677,28 @@ static void kszphy_get_stats(struct phy_device *phydev, data[i] = kszphy_get_stat(phydev, i); } -static int kszphy_resume(struct phy_device *phydev) +static int kszphy_suspend(struct phy_device *phydev) { - int value; + /* Disable PHY Interrupts */ + if (phy_interrupt_is_valid(phydev)) { + phydev->interrupts = PHY_INTERRUPT_DISABLED; + if (phydev->drv->config_intr) + phydev->drv->config_intr(phydev); + } - mutex_lock(&phydev->lock); + return genphy_suspend(phydev); +} - value = phy_read(phydev, MII_BMCR); - phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); +static int kszphy_resume(struct phy_device *phydev) +{ + genphy_resume(phydev); - kszphy_config_intr(phydev); - mutex_unlock(&phydev->lock); + /* Enable PHY Interrupts */ + if (phy_interrupt_is_valid(phydev)) { + phydev->interrupts = PHY_INTERRUPT_ENABLED; + if (phydev->drv->config_intr) + phydev->drv->config_intr(phydev); + } return 0; } @@ -788,8 +829,8 @@ static struct phy_driver ksphy_driver[] = { .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .driver_data = &ksz8041_type, .probe = kszphy_probe, - .config_init = kszphy_config_init, - .config_aneg = genphy_config_aneg, + .config_init = ksz8041_config_init, + .config_aneg = ksz8041_config_aneg, .read_status = genphy_read_status, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, @@ -839,7 +880,7 @@ static struct phy_driver ksphy_driver[] = { }, { .phy_id = PHY_ID_KSZ8001, .name = "Micrel KSZ8001 or KS8721", - .phy_id_mask = 0x00ffffff, + .phy_id_mask = 0x00fffffc, .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, .driver_data = 
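The SET_VAL()/GET_VAL() helpers in mdio-xgene.h above pack and unpack bit-fields by position and length. A standalone illustration using the HSTPHYADX (bits 27:23) and HSTREGADX (bits 22:18) field layouts; written in plain C with the helpers copied in so it compiles outside the kernel.

#include <stdint.h>
#include <stdio.h>

static inline uint64_t set_field(int pos, int len, uint64_t val)
{
	return (val & ((1ULL << len) - 1)) << pos;
}

static inline uint64_t get_field(int pos, int len, uint64_t src)
{
	return (src >> pos) & ((1ULL << len) - 1);
}

int main(void)
{
	/* equivalent of SET_VAL(HSTPHYADX, 31) | SET_VAL(HSTREGADX, 1) */
	uint64_t cmd = set_field(23, 5, 31) | set_field(18, 5, 1);

	/* prints "phy=31 reg=1" */
	printf("phy=%llu reg=%llu\n",
	       (unsigned long long)get_field(23, 5, cmd),
	       (unsigned long long)get_field(18, 5, cmd));
	return 0;
}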
&ksz8041_type, @@ -870,7 +911,7 @@ static struct phy_driver ksphy_driver[] = { .get_sset_count = kszphy_get_sset_count, .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, - .suspend = genphy_suspend, + .suspend = kszphy_suspend, .resume = kszphy_resume, }, { .phy_id = PHY_ID_KSZ8061, @@ -923,7 +964,7 @@ static struct phy_driver ksphy_driver[] = { .get_strings = kszphy_get_strings, .get_stats = kszphy_get_stats, .suspend = genphy_suspend, - .resume = genphy_resume, + .resume = kszphy_resume, }, { .phy_id = PHY_ID_KSZ8873MLL, .phy_id_mask = MICREL_PHY_ID_MASK, @@ -963,7 +1004,7 @@ MODULE_LICENSE("GPL"); static struct mdio_device_id __maybe_unused micrel_tbl[] = { { PHY_ID_KSZ9021, 0x000ffffe }, { PHY_ID_KSZ9031, MICREL_PHY_ID_MASK }, - { PHY_ID_KSZ8001, 0x00ffffff }, + { PHY_ID_KSZ8001, 0x00fffffc }, { PHY_ID_KS8737, MICREL_PHY_ID_MASK }, { PHY_ID_KSZ8021, 0x00ffffff }, { PHY_ID_KSZ8031, 0x00ffffff }, diff --git a/drivers/net/phy/swphy.c b/drivers/net/phy/swphy.c new file mode 100644 index 000000000..34f58f234 --- /dev/null +++ b/drivers/net/phy/swphy.c @@ -0,0 +1,179 @@ +/* + * Software PHY emulation + * + * Code taken from fixed_phy.c by Russell King <rmk+kernel@arm.linux.org.uk> + * + * Author: Vitaly Bordug <vbordug@ru.mvista.com> + * Anton Vorontsov <avorontsov@ru.mvista.com> + * + * Copyright (c) 2006-2007 MontaVista Software, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include <linux/export.h> +#include <linux/mii.h> +#include <linux/phy.h> +#include <linux/phy_fixed.h> + +#include "swphy.h" + +#define MII_REGS_NUM 29 + +struct swmii_regs { + u16 bmcr; + u16 bmsr; + u16 lpa; + u16 lpagb; +}; + +enum { + SWMII_SPEED_10 = 0, + SWMII_SPEED_100, + SWMII_SPEED_1000, + SWMII_DUPLEX_HALF = 0, + SWMII_DUPLEX_FULL, +}; + +/* + * These two tables get bitwise-anded together to produce the final result. + * This means the speed table must contain both duplex settings, and the + * duplex table must contain all speed settings. + */ +static const struct swmii_regs speed[] = { + [SWMII_SPEED_10] = { + .bmcr = BMCR_FULLDPLX, + .lpa = LPA_10FULL | LPA_10HALF, + }, + [SWMII_SPEED_100] = { + .bmcr = BMCR_FULLDPLX | BMCR_SPEED100, + .bmsr = BMSR_100FULL | BMSR_100HALF, + .lpa = LPA_100FULL | LPA_100HALF, + }, + [SWMII_SPEED_1000] = { + .bmcr = BMCR_FULLDPLX | BMCR_SPEED1000, + .bmsr = BMSR_ESTATEN, + .lpagb = LPA_1000FULL | LPA_1000HALF, + }, +}; + +static const struct swmii_regs duplex[] = { + [SWMII_DUPLEX_HALF] = { + .bmcr = ~BMCR_FULLDPLX, + .bmsr = BMSR_ESTATEN | BMSR_100HALF, + .lpa = LPA_10HALF | LPA_100HALF, + .lpagb = LPA_1000HALF, + }, + [SWMII_DUPLEX_FULL] = { + .bmcr = ~0, + .bmsr = BMSR_ESTATEN | BMSR_100FULL, + .lpa = LPA_10FULL | LPA_100FULL, + .lpagb = LPA_1000FULL, + }, +}; + +static int swphy_decode_speed(int speed) +{ + switch (speed) { + case 1000: + return SWMII_SPEED_1000; + case 100: + return SWMII_SPEED_100; + case 10: + return SWMII_SPEED_10; + default: + return -EINVAL; + } +} + +/** + * swphy_validate_state - validate the software phy status + * @state: software phy status + * + * This checks that we can represent the state stored in @state can be + * represented in the emulated MII registers. Returns 0 if it can, + * otherwise returns -EINVAL. 
+ */ +int swphy_validate_state(const struct fixed_phy_status *state) +{ + int err; + + if (state->link) { + err = swphy_decode_speed(state->speed); + if (err < 0) { + pr_warn("swphy: unknown speed\n"); + return -EINVAL; + } + } + return 0; +} +EXPORT_SYMBOL_GPL(swphy_validate_state); + +/** + * swphy_read_reg - return a MII register from the fixed phy state + * @reg: MII register + * @state: fixed phy status + * + * Return the MII @reg register generated from the fixed phy state @state. + */ +int swphy_read_reg(int reg, const struct fixed_phy_status *state) +{ + int speed_index, duplex_index; + u16 bmsr = BMSR_ANEGCAPABLE; + u16 bmcr = 0; + u16 lpagb = 0; + u16 lpa = 0; + + if (reg > MII_REGS_NUM) + return -1; + + speed_index = swphy_decode_speed(state->speed); + if (WARN_ON(speed_index < 0)) + return 0; + + duplex_index = state->duplex ? SWMII_DUPLEX_FULL : SWMII_DUPLEX_HALF; + + bmsr |= speed[speed_index].bmsr & duplex[duplex_index].bmsr; + + if (state->link) { + bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; + + bmcr |= speed[speed_index].bmcr & duplex[duplex_index].bmcr; + lpa |= speed[speed_index].lpa & duplex[duplex_index].lpa; + lpagb |= speed[speed_index].lpagb & duplex[duplex_index].lpagb; + + if (state->pause) + lpa |= LPA_PAUSE_CAP; + + if (state->asym_pause) + lpa |= LPA_PAUSE_ASYM; + } + + switch (reg) { + case MII_BMCR: + return bmcr; + case MII_BMSR: + return bmsr; + case MII_PHYSID1: + case MII_PHYSID2: + return 0; + case MII_LPA: + return lpa; + case MII_STAT1000: + return lpagb; + + /* + * We do not support emulating Clause 45 over Clause 22 register + * reads. Return an error instead of bogus data. + */ + case MII_MMD_CTRL: + case MII_MMD_DATA: + return -1; + + default: + return 0xffff; + } +} +EXPORT_SYMBOL_GPL(swphy_read_reg); diff --git a/drivers/net/phy/swphy.h b/drivers/net/phy/swphy.h new file mode 100644 index 000000000..2f09ac324 --- /dev/null +++ b/drivers/net/phy/swphy.h @@ -0,0 +1,9 @@ +#ifndef SWPHY_H +#define SWPHY_H + +struct fixed_phy_status; + +int swphy_validate_state(const struct fixed_phy_status *state); +int swphy_read_reg(int reg, const struct fixed_phy_status *state); + +#endif diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index a30ee427e..f226db461 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1312,10 +1312,9 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) return stats64; } -static struct lock_class_key ppp_tx_busylock; static int ppp_dev_init(struct net_device *dev) { - dev->qdisc_tx_busylock = &ppp_tx_busylock; + netdev_lockdep_set_classes(dev); return 0; } diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index fdee77207..a380649bf 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1576,23 +1576,6 @@ static const struct team_option team_options[] = { }, }; -static struct lock_class_key team_netdev_xmit_lock_key; -static struct lock_class_key team_netdev_addr_lock_key; -static struct lock_class_key team_tx_busylock_key; - -static void team_set_lockdep_class_one(struct net_device *dev, - struct netdev_queue *txq, - void *unused) -{ - lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key); -} - -static void team_set_lockdep_class(struct net_device *dev) -{ - lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key); - netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL); - dev->qdisc_tx_busylock = &team_tx_busylock_key; -} static int team_init(struct net_device *dev) { @@ -1628,7 +1611,7 @@ static 
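The speed[] and duplex[] tables in swphy.c above are built so that ANDing one row from each yields the register bits for exactly one speed/duplex combination, as the comment in the file promises. A worked example for 100 Mb/s full duplex, with the values copied from the tables; demo_bmsr_100full() is an illustrative name.

#include <linux/types.h>
#include <linux/mii.h>

static u16 demo_bmsr_100full(void)
{
	u16 speed_bmsr  = BMSR_100FULL | BMSR_100HALF;	/* SWMII_SPEED_100 */
	u16 duplex_bmsr = BMSR_ESTATEN | BMSR_100FULL;	/* SWMII_DUPLEX_FULL */

	/* Only the capability both rows agree on survives: BMSR_100FULL. */
	return speed_bmsr & duplex_bmsr;
}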
int team_init(struct net_device *dev) goto err_options_register; netif_carrier_off(dev); - team_set_lockdep_class(dev); + netdev_lockdep_set_classes(dev); return 0; @@ -2019,6 +2002,8 @@ static const struct net_device_ops team_netdev_ops = { .ndo_add_slave = team_add_slave, .ndo_del_slave = team_del_slave, .ndo_fix_features = team_fix_features, + .ndo_neigh_construct = netdev_default_l2upper_neigh_construct, + .ndo_neigh_destroy = netdev_default_l2upper_neigh_destroy, .ndo_change_carrier = team_change_carrier, .ndo_bridge_setlink = switchdev_port_bridge_setlink, .ndo_bridge_getlink = switchdev_port_bridge_getlink, diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c index cdb19b385..b228bea79 100644 --- a/drivers/net/team/team_mode_loadbalance.c +++ b/drivers/net/team/team_mode_loadbalance.c @@ -14,9 +14,23 @@ #include <linux/init.h> #include <linux/errno.h> #include <linux/netdevice.h> +#include <linux/etherdevice.h> #include <linux/filter.h> #include <linux/if_team.h> +static rx_handler_result_t lb_receive(struct team *team, struct team_port *port, + struct sk_buff *skb) +{ + if (unlikely(skb->protocol == htons(ETH_P_SLOW))) { + /* LACPDU packets should go to exact delivery */ + const unsigned char *dest = eth_hdr(skb)->h_dest; + + if (is_link_local_ether_addr(dest) && dest[5] == 0x02) + return RX_HANDLER_EXACT; + } + return RX_HANDLER_ANOTHER; +} + struct lb_priv; typedef struct team_port *lb_select_tx_port_func_t(struct team *, @@ -652,6 +666,7 @@ static const struct team_mode_ops lb_mode_ops = { .port_enter = lb_port_enter, .port_leave = lb_port_leave, .port_disabled = lb_port_disabled, + .receive = lb_receive, .transmit = lb_transmit, }; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 34259bd0a..6f9df375c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -71,6 +71,7 @@ #include <net/sock.h> #include <linux/seq_file.h> #include <linux/uio.h> +#include <linux/skb_array.h> #include <asm/uaccess.h> @@ -167,6 +168,7 @@ struct tun_file { }; struct list_head next; struct tun_struct *detached; + struct skb_array tx_array; }; struct tun_flow_entry { @@ -515,7 +517,11 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile) static void tun_queue_purge(struct tun_file *tfile) { - skb_queue_purge(&tfile->sk.sk_receive_queue); + struct sk_buff *skb; + + while ((skb = skb_array_consume(&tfile->tx_array)) != NULL) + kfree_skb(skb); + skb_queue_purge(&tfile->sk.sk_error_queue); } @@ -560,6 +566,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean) tun->dev->reg_state == NETREG_REGISTERED) unregister_netdevice(tun->dev); } + if (tun) + skb_array_cleanup(&tfile->tx_array); sock_put(&tfile->sk); } } @@ -613,6 +621,7 @@ static void tun_detach_all(struct net_device *dev) static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter) { struct tun_file *tfile = file->private_data; + struct net_device *dev = tun->dev; int err; err = security_tun_dev_attach(tfile->socket.sk, tun->security); @@ -642,6 +651,13 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte if (!err) goto out; } + + if (!tfile->detached && + skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) { + err = -ENOMEM; + goto out; + } + tfile->queue_index = tun->numqueues; tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN; rcu_assign_pointer(tfile->tun, tun); @@ -887,8 +903,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) nf_reset(skb); - /* Enqueue packet */ - 
skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb); + if (skb_array_produce(&tfile->tx_array, skb)) + goto drop; /* Notify and wake up reader process */ if (tfile->flags & TUN_FASYNC) @@ -1103,7 +1119,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait) poll_wait(file, sk_sleep(sk), wait); - if (!skb_queue_empty(&sk->sk_receive_queue)) + if (!skb_array_empty(&tfile->tx_array)) mask |= POLLIN | POLLRDNORM; if (sock_writeable(sk) || @@ -1250,13 +1266,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, return -EFAULT; } - if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { - if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start), - tun16_to_cpu(tun, gso.csum_offset))) { - this_cpu_inc(tun->pcpu_stats->rx_frame_errors); - kfree_skb(skb); - return -EINVAL; - } + err = virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun)); + if (err) { + this_cpu_inc(tun->pcpu_stats->rx_frame_errors); + kfree_skb(skb); + return -EINVAL; } switch (tun->flags & TUN_TYPE_MASK) { @@ -1285,39 +1299,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, break; } - if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { - pr_debug("GSO!\n"); - switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { - case VIRTIO_NET_HDR_GSO_TCPV4: - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; - break; - case VIRTIO_NET_HDR_GSO_TCPV6: - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; - break; - case VIRTIO_NET_HDR_GSO_UDP: - skb_shinfo(skb)->gso_type = SKB_GSO_UDP; - break; - default: - this_cpu_inc(tun->pcpu_stats->rx_frame_errors); - kfree_skb(skb); - return -EINVAL; - } - - if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN) - skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; - - skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size); - if (skb_shinfo(skb)->gso_size == 0) { - this_cpu_inc(tun->pcpu_stats->rx_frame_errors); - kfree_skb(skb); - return -EINVAL; - } - - /* Header must be checked, and gso_segs computed. */ - skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; - skb_shinfo(skb)->gso_segs = 0; - } - /* copy skb_ubuf_info for callback when skb has no error */ if (zerocopy) { skb_shinfo(skb)->destructor_arg = msg_control; @@ -1395,46 +1376,26 @@ static ssize_t tun_put_user(struct tun_struct *tun, if (vnet_hdr_sz) { struct virtio_net_hdr gso = { 0 }; /* no info leak */ + int ret; + if (iov_iter_count(iter) < vnet_hdr_sz) return -EINVAL; - if (skb_is_gso(skb)) { + ret = virtio_net_hdr_from_skb(skb, &gso, + tun_is_little_endian(tun)); + if (ret) { struct skb_shared_info *sinfo = skb_shinfo(skb); - - /* This is a hint as to how much should be linear. 
*/ - gso.hdr_len = cpu_to_tun16(tun, skb_headlen(skb)); - gso.gso_size = cpu_to_tun16(tun, sinfo->gso_size); - if (sinfo->gso_type & SKB_GSO_TCPV4) - gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; - else if (sinfo->gso_type & SKB_GSO_TCPV6) - gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; - else if (sinfo->gso_type & SKB_GSO_UDP) - gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; - else { - pr_err("unexpected GSO type: " - "0x%x, gso_size %d, hdr_len %d\n", - sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), - tun16_to_cpu(tun, gso.hdr_len)); - print_hex_dump(KERN_ERR, "tun: ", - DUMP_PREFIX_NONE, - 16, 1, skb->head, - min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); - WARN_ON_ONCE(1); - return -EINVAL; - } - if (sinfo->gso_type & SKB_GSO_TCP_ECN) - gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN; - } else - gso.gso_type = VIRTIO_NET_HDR_GSO_NONE; - - if (skb->ip_summed == CHECKSUM_PARTIAL) { - gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; - gso.csum_start = cpu_to_tun16(tun, skb_checksum_start_offset(skb) + - vlan_hlen); - gso.csum_offset = cpu_to_tun16(tun, skb->csum_offset); - } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { - gso.flags = VIRTIO_NET_HDR_F_DATA_VALID; - } /* else everything is zero */ + pr_err("unexpected GSO type: " + "0x%x, gso_size %d, hdr_len %d\n", + sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size), + tun16_to_cpu(tun, gso.hdr_len)); + print_hex_dump(KERN_ERR, "tun: ", + DUMP_PREFIX_NONE, + 16, 1, skb->head, + min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true); + WARN_ON_ONCE(1); + return -EINVAL; + } if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso)) return -EFAULT; @@ -1477,22 +1438,63 @@ done: return total; } +static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock, + int *err) +{ + DECLARE_WAITQUEUE(wait, current); + struct sk_buff *skb = NULL; + int error = 0; + + skb = skb_array_consume(&tfile->tx_array); + if (skb) + goto out; + if (noblock) { + error = -EAGAIN; + goto out; + } + + add_wait_queue(&tfile->wq.wait, &wait); + current->state = TASK_INTERRUPTIBLE; + + while (1) { + skb = skb_array_consume(&tfile->tx_array); + if (skb) + break; + if (signal_pending(current)) { + error = -ERESTARTSYS; + break; + } + if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) { + error = -EFAULT; + break; + } + + schedule(); + } + + current->state = TASK_RUNNING; + remove_wait_queue(&tfile->wq.wait, &wait); + +out: + *err = error; + return skb; +} + static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, struct iov_iter *to, int noblock) { struct sk_buff *skb; ssize_t ret; - int peeked, err, off = 0; + int err; tun_debug(KERN_INFO, tun, "tun_do_read\n"); if (!iov_iter_count(to)) return 0; - /* Read frames from queue */ - skb = __skb_recv_datagram(tfile->socket.sk, noblock ? 
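The tun.c hunks above delete the open-coded translation between skb_shared_info GSO state and struct virtio_net_hdr; both directions now go through virtio_net_hdr_to_skb() and virtio_net_hdr_from_skb(). Condensed from the removed transmit-side code, the mapping such a helper has to perform looks like this; demo_gso_type() is illustrative, not the helper's actual internals.

#include <linux/skbuff.h>
#include <linux/virtio_net.h>

static u8 demo_gso_type(const struct sk_buff *skb)
{
	unsigned int gso = skb_shinfo(skb)->gso_type;
	u8 type = VIRTIO_NET_HDR_GSO_NONE;

	if (gso & SKB_GSO_TCPV4)
		type = VIRTIO_NET_HDR_GSO_TCPV4;
	else if (gso & SKB_GSO_TCPV6)
		type = VIRTIO_NET_HDR_GSO_TCPV6;
	else if (gso & SKB_GSO_UDP)
		type = VIRTIO_NET_HDR_GSO_UDP;

	if (gso & SKB_GSO_TCP_ECN)
		type |= VIRTIO_NET_HDR_GSO_ECN;

	return type;
}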
MSG_DONTWAIT : 0, - &peeked, &off, &err); + /* Read frames from ring */ + skb = tun_ring_recv(tfile, noblock, &err); if (!skb) return err; @@ -1625,8 +1627,25 @@ out: return ret; } +static int tun_peek_len(struct socket *sock) +{ + struct tun_file *tfile = container_of(sock, struct tun_file, socket); + struct tun_struct *tun; + int ret = 0; + + tun = __tun_get(tfile); + if (!tun) + return 0; + + ret = skb_array_peek_len(&tfile->tx_array); + tun_put(tun); + + return ret; +} + /* Ops structure to mimic raw sockets with tun */ static const struct proto_ops tun_socket_ops = { + .peek_len = tun_peek_len, .sendmsg = tun_sendmsg, .recvmsg = tun_recvmsg, }; @@ -2448,6 +2467,56 @@ static const struct ethtool_ops tun_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; +static int tun_queue_resize(struct tun_struct *tun) +{ + struct net_device *dev = tun->dev; + struct tun_file *tfile; + struct skb_array **arrays; + int n = tun->numqueues + tun->numdisabled; + int ret, i; + + arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL); + if (!arrays) + return -ENOMEM; + + for (i = 0; i < tun->numqueues; i++) { + tfile = rtnl_dereference(tun->tfiles[i]); + arrays[i] = &tfile->tx_array; + } + list_for_each_entry(tfile, &tun->disabled, next) + arrays[i++] = &tfile->tx_array; + + ret = skb_array_resize_multiple(arrays, n, + dev->tx_queue_len, GFP_KERNEL); + + kfree(arrays); + return ret; +} + +static int tun_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct tun_struct *tun = netdev_priv(dev); + + if (dev->rtnl_link_ops != &tun_link_ops) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGE_TX_QUEUE_LEN: + if (tun_queue_resize(tun)) + return NOTIFY_BAD; + break; + default: + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block tun_notifier_block __read_mostly = { + .notifier_call = tun_device_event, +}; static int __init tun_init(void) { @@ -2467,6 +2536,8 @@ static int __init tun_init(void) pr_err("Can't register misc device %d\n", TUN_MINOR); goto err_misc; } + + register_netdevice_notifier(&tun_notifier_block); return 0; err_misc: rtnl_link_unregister(&tun_link_ops); @@ -2478,6 +2549,7 @@ static void tun_cleanup(void) { misc_deregister(&tun_miscdev); rtnl_link_unregister(&tun_link_ops); + unregister_netdevice_notifier(&tun_notifier_block); } /* Get an underlying socket object from tun file. 
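Above, tun replaces the per-queue sk_receive_queue with a struct skb_array: a fixed-size ring of skb pointers with separate producer and consumer locks, so tun_net_xmit() and the reader stop contending on a single queue spinlock. The core API in a self-contained sketch; demo() and the 256-slot size are arbitrary.

#include <linux/skb_array.h>

static int demo(struct sk_buff *skb)
{
	struct skb_array a;
	int err;

	err = skb_array_init(&a, 256, GFP_KERNEL);	/* 256-slot ring */
	if (err)
		return err;

	if (skb_array_produce(&a, skb))		/* nonzero: ring is full */
		kfree_skb(skb);

	skb = skb_array_consume(&a);		/* NULL when empty */
	if (skb)
		kfree_skb(skb);

	skb_array_cleanup(&a);
	return 0;
}

Because each ring is sized at attach time from dev->tx_queue_len, the notifier added further down resizes every queue's ring with skb_array_resize_multiple() when NETDEV_CHANGE_TX_QUEUE_LEN fires.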
Returns error unless file is diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c index cf77f2dff..163a2c576 100644 --- a/drivers/net/usb/ax88172a.c +++ b/drivers/net/usb/ax88172a.c @@ -149,24 +149,6 @@ static const struct net_device_ops ax88172a_netdev_ops = { .ndo_set_rx_mode = asix_set_multicast, }; -static int ax88172a_get_settings(struct net_device *net, - struct ethtool_cmd *cmd) -{ - if (!net->phydev) - return -ENODEV; - - return phy_ethtool_gset(net->phydev, cmd); -} - -static int ax88172a_set_settings(struct net_device *net, - struct ethtool_cmd *cmd) -{ - if (!net->phydev) - return -ENODEV; - - return phy_ethtool_sset(net->phydev, cmd); -} - static int ax88172a_nway_reset(struct net_device *net) { if (!net->phydev) @@ -185,9 +167,9 @@ static const struct ethtool_ops ax88172a_ethtool_ops = { .get_eeprom_len = asix_get_eeprom_len, .get_eeprom = asix_get_eeprom, .set_eeprom = asix_set_eeprom, - .get_settings = ax88172a_get_settings, - .set_settings = ax88172a_set_settings, .nway_reset = ax88172a_nway_reset, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int ax88172a_reset_phy(struct usbnet *dev, int embd_phy) diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 7cba2c375..c47ec0a04 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -388,6 +388,12 @@ void usbnet_cdc_status(struct usbnet *dev, struct urb *urb) case USB_CDC_NOTIFY_NETWORK_CONNECTION: netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n", event->wValue ? "on" : "off"); + + /* Work-around for devices with broken off-notifications */ + if (event->wValue && + !test_bit(__LINK_STATE_NOCARRIER, &dev->net->state)) + usbnet_link_change(dev, 0, 0); + usbnet_link_change(dev, !!event->wValue, 0); break; case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */ @@ -432,6 +438,34 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf) } EXPORT_SYMBOL_GPL(usbnet_cdc_bind); +static int usbnet_cdc_zte_bind(struct usbnet *dev, struct usb_interface *intf) +{ + int status = usbnet_cdc_bind(dev, intf); + + if (!status && (dev->net->dev_addr[0] & 0x02)) + eth_hw_addr_random(dev->net); + + return status; +} + +/* Make sure packets have correct destination MAC address + * + * A firmware bug observed on some devices (ZTE MF823/831/910) is that the + * device sends packets with a static, bogus, random MAC address (even if + * device MAC address has been updated). Always set MAC address to that of the + * device.
+ */ +static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb) +{ + if (skb->len < ETH_HLEN || !(skb->data[0] & 0x02)) + return 1; + + skb_reset_mac_header(skb); + ether_addr_copy(eth_hdr(skb)->h_dest, dev->net->dev_addr); + + return 1; +} + static const struct driver_info cdc_info = { .description = "CDC Ethernet Device", .flags = FLAG_ETHER | FLAG_POINTTOPOINT, @@ -442,6 +476,17 @@ static const struct driver_info cdc_info = { .manage_power = usbnet_manage_power, }; +static const struct driver_info zte_cdc_info = { + .description = "ZTE CDC Ethernet Device", + .flags = FLAG_ETHER | FLAG_POINTTOPOINT, + .bind = usbnet_cdc_zte_bind, + .unbind = usbnet_cdc_unbind, + .status = usbnet_cdc_status, + .set_rx_mode = usbnet_cdc_update_filter, + .manage_power = usbnet_manage_power, + .rx_fixup = usbnet_cdc_zte_rx_fixup, +}; + static const struct driver_info wwan_info = { .description = "Mobile Broadband Network Device", .flags = FLAG_WWAN, @@ -707,6 +752,12 @@ static const struct usb_device_id products[] = { USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (kernel_ulong_t)&wwan_info, }, { + /* ZTE modules */ + USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&zte_cdc_info, +}, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long) &cdc_info, diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index 52c72ed22..ff95b2857 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -1006,6 +1006,7 @@ static int kaweth_probe( struct net_device *netdev; const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; int result = 0; + int rv = -EIO; dev_dbg(dev, "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n", @@ -1026,6 +1027,7 @@ static int kaweth_probe( kaweth = netdev_priv(netdev); kaweth->dev = udev; kaweth->net = netdev; + kaweth->intf = intf; spin_lock_init(&kaweth->device_lock); init_waitqueue_head(&kaweth->term_wait); @@ -1045,6 +1047,10 @@ static int kaweth_probe( /* Download the firmware */ dev_info(dev, "Downloading firmware...\n"); kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL); + if (!kaweth->firmware_buf) { + rv = -ENOMEM; + goto err_free_netdev; + } if ((result = kaweth_download_firmware(kaweth, "/*(DEBLOBBED)*/", 100, @@ -1136,8 +1142,6 @@ err_fw: dev_dbg(dev, "Initializing net device.\n"); - kaweth->intf = intf; - kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!kaweth->tx_urb) goto err_free_netdev; @@ -1201,7 +1205,7 @@ err_only_tx: err_free_netdev: free_netdev(netdev); - return -EIO; + return rv; } /**************************************************************** diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index e9654a685..c25424886 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -32,7 +32,7 @@ #define NETNEXT_VERSION "08" /* Information for net */ -#define NET_VERSION "5" +#define NET_VERSION "6" #define DRIVER_VERSION "v1." NETNEXT_VERSION "." 
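Both ZTE work-arounds above key off the locally administered bit, bit 1 of the first address octet, which the bogus firmware-generated addresses have set. For reference, the open-coded addr[0] & 0x02 test has a kernel helper; demo_is_local_addr() is a hypothetical wrapper.

#include <linux/etherdevice.h>

/* True for addresses such as 02:xx:xx:xx:xx:xx. */
static bool demo_is_local_addr(const u8 *addr)
{
	return is_local_ether_addr(addr);	/* addr[0] & 0x02 */
}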
NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" @@ -613,7 +613,7 @@ struct r8152 { struct list_head rx_done, tx_free; struct sk_buff_head tx_queue, rx_queue; spinlock_t rx_lock, tx_lock; - struct delayed_work schedule; + struct delayed_work schedule, hw_phy_work; struct mii_if_info mii; struct mutex control; /* use for hw setting */ #ifdef CONFIG_PM_SLEEP @@ -630,6 +630,7 @@ struct r8152 { int (*eee_get)(struct r8152 *, struct ethtool_eee *); int (*eee_set)(struct r8152 *, struct ethtool_eee *); bool (*in_nway)(struct r8152 *); + void (*hw_phy_cfg)(struct r8152 *); void (*autosuspend_en)(struct r8152 *tp, bool enable); } rtl_ops; @@ -639,8 +640,11 @@ struct r8152 { u32 tx_qlen; u32 coalesce; u16 ocp_base; + u16 speed; u8 *intr_buff; u8 version; + u8 duplex; + u8 autoneg; }; enum rtl_version { @@ -1820,7 +1824,7 @@ static int rx_bottom(struct r8152 *tp, int budget) pkt_len -= CRC_SIZE; rx_data += sizeof(struct rx_desc); - skb = netdev_alloc_skb_ip_align(netdev, pkt_len); + skb = napi_alloc_skb(&tp->napi, pkt_len); if (!skb) { stats->rx_dropped++; goto find_next_rx; @@ -2512,27 +2516,6 @@ static void rtl8153_runtime_enable(struct r8152 *tp, bool enable) } } -static void rtl_phy_reset(struct r8152 *tp) -{ - u16 data; - int i; - - data = r8152_mdio_read(tp, MII_BMCR); - - /* don't reset again before the previous one complete */ - if (data & BMCR_RESET) - return; - - data |= BMCR_RESET; - r8152_mdio_write(tp, MII_BMCR, data); - - for (i = 0; i < 50; i++) { - msleep(20); - if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0) - break; - } -} - static void r8153_teredo_off(struct r8152 *tp) { u32 ocp_data; @@ -2569,6 +2552,77 @@ static void r8152_aldps_en(struct r8152 *tp, bool enable) } } +static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg) +{ + ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev); + ocp_reg_write(tp, OCP_EEE_DATA, reg); + ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev); +} + +static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg) +{ + u16 data; + + r8152_mmd_indirect(tp, dev, reg); + data = ocp_reg_read(tp, OCP_EEE_DATA); + ocp_reg_write(tp, OCP_EEE_AR, 0x0000); + + return data; +} + +static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data) +{ + r8152_mmd_indirect(tp, dev, reg); + ocp_reg_write(tp, OCP_EEE_DATA, data); + ocp_reg_write(tp, OCP_EEE_AR, 0x0000); +} + +static void r8152_eee_en(struct r8152 *tp, bool enable) +{ + u16 config1, config2, config3; + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); + config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask; + config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2); + config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask; + + if (enable) { + ocp_data |= EEE_RX_EN | EEE_TX_EN; + config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN; + config1 |= sd_rise_time(1); + config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN; + config3 |= fast_snr(42); + } else { + ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); + config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | + RX_QUIET_EN); + config1 |= sd_rise_time(7); + config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN); + config3 |= fast_snr(511); + } + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); + ocp_reg_write(tp, OCP_EEE_CONFIG1, config1); + ocp_reg_write(tp, OCP_EEE_CONFIG2, config2); + ocp_reg_write(tp, OCP_EEE_CONFIG3, config3); +} + +static void r8152b_enable_eee(struct r8152 *tp) +{ + r8152_eee_en(tp, true); + r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 
MDIO_EEE_100TX); +} + +static void r8152b_enable_fc(struct r8152 *tp) +{ + u16 anar; + + anar = r8152_mdio_read(tp, MII_ADVERTISE); + anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + r8152_mdio_write(tp, MII_ADVERTISE, anar); +} + static void rtl8152_disable(struct r8152 *tp) { r8152_aldps_en(tp, false); @@ -2578,13 +2632,9 @@ static void rtl8152_disable(struct r8152 *tp) static void r8152b_hw_phy_cfg(struct r8152 *tp) { - u16 data; - - data = r8152_mdio_read(tp, MII_BMCR); - if (data & BMCR_PDOWN) { - data &= ~BMCR_PDOWN; - r8152_mdio_write(tp, MII_BMCR, data); - } + r8152b_enable_eee(tp); + r8152_aldps_en(tp, true); + r8152b_enable_fc(tp); set_bit(PHY_RESET, &tp->flags); } @@ -2600,8 +2650,6 @@ static void r8152b_exit_oob(struct r8152 *tp) rxdy_gated_en(tp, true); r8153_teredo_off(tp); - r8152b_hw_phy_cfg(tp); - ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00); @@ -2720,20 +2768,52 @@ static void r8152b_enter_oob(struct r8152 *tp) ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); } +static void r8153_aldps_en(struct r8152 *tp, bool enable) +{ + u16 data; + + data = ocp_reg_read(tp, OCP_POWER_CFG); + if (enable) { + data |= EN_ALDPS; + ocp_reg_write(tp, OCP_POWER_CFG, data); + } else { + data &= ~EN_ALDPS; + ocp_reg_write(tp, OCP_POWER_CFG, data); + msleep(20); + } +} + +static void r8153_eee_en(struct r8152 *tp, bool enable) +{ + u32 ocp_data; + u16 config; + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); + config = ocp_reg_read(tp, OCP_EEE_CFG); + + if (enable) { + ocp_data |= EEE_RX_EN | EEE_TX_EN; + config |= EEE10_EN; + } else { + ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); + config &= ~EEE10_EN; + } + + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); + ocp_reg_write(tp, OCP_EEE_CFG, config); +} + static void r8153_hw_phy_cfg(struct r8152 *tp) { u32 ocp_data; u16 data; - if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 || - tp->version == RTL_VER_05) - ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L); + /* disable ALDPS before updating the PHY parameters */ + r8153_aldps_en(tp, false); - data = r8152_mdio_read(tp, MII_BMCR); - if (data & BMCR_PDOWN) { - data &= ~BMCR_PDOWN; - r8152_mdio_write(tp, MII_BMCR, data); - } + /* disable EEE before updating the PHY parameters */ + r8153_eee_en(tp, false); + ocp_reg_write(tp, OCP_EEE_ADV, 0); if (tp->version == RTL_VER_03) { data = ocp_reg_read(tp, OCP_EEE_CFG); @@ -2764,6 +2844,12 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) sram_write(tp, SRAM_10M_AMP1, 0x00af); sram_write(tp, SRAM_10M_AMP2, 0x0208); + r8153_eee_en(tp, true); + ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX); + + r8153_aldps_en(tp, true); + r8152b_enable_fc(tp); + set_bit(PHY_RESET, &tp->flags); } @@ -2779,8 +2865,6 @@ static void r8153_first_init(struct r8152 *tp) ocp_data &= ~RCR_ACPT_ALL; ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); - r8153_hw_phy_cfg(tp); - rtl8152_nic_reset(tp); rtl_reset_bmu(tp); @@ -2887,21 +2971,6 @@ static void r8153_enter_oob(struct r8152 *tp) ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); } -static void r8153_aldps_en(struct r8152 *tp, bool enable) -{ - u16 data; - - data = ocp_reg_read(tp, OCP_POWER_CFG); - if (enable) { - data |= EN_ALDPS; - ocp_reg_write(tp, OCP_POWER_CFG, data); - } else { - data &= ~EN_ALDPS; - ocp_reg_write(tp, OCP_POWER_CFG, data); - msleep(20); - } -} - static void rtl8153_disable(struct r8152 *tp) { r8153_aldps_en(tp, false); @@ -2916,7 +2985,6 @@ static int rtl8152_set_speed(struct 
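r8152_mmd_indirect(), r8152_mmd_read() and r8152_mmd_write(), moved earlier in the file above, tunnel Clause 45 MMD accesses through Realtek's OCP_EEE_AR/OCP_EEE_DATA pair. The same sequence over the standard Clause 22 indirection registers 13/14 looks like this; demo_mmd_read() is a sketch, and phylib provides comparable helpers of its own.

#include <linux/mii.h>
#include <linux/phy.h>

static int demo_mmd_read(struct phy_device *phydev, int devad, u16 regnum)
{
	/* Function = address, select the MMD device */
	phy_write(phydev, MII_MMD_CTRL, devad);
	/* Latch the register number */
	phy_write(phydev, MII_MMD_DATA, regnum);
	/* Function = data, no post-increment */
	phy_write(phydev, MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR);

	return phy_read(phydev, MII_MMD_DATA);
}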
r8152 *tp, u8 autoneg, u16 speed, u8 duplex) u16 bmcr, anar, gbcr; int ret = 0; - cancel_delayed_work_sync(&tp->schedule); anar = r8152_mdio_read(tp, MII_ADVERTISE); anar &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | ADVERTISE_100HALF | ADVERTISE_100FULL); @@ -2976,7 +3044,7 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) bmcr = BMCR_ANENABLE | BMCR_ANRESTART; } - if (test_bit(PHY_RESET, &tp->flags)) + if (test_and_clear_bit(PHY_RESET, &tp->flags)) bmcr |= BMCR_RESET; if (tp->mii.supports_gmii) @@ -2985,7 +3053,7 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) r8152_mdio_write(tp, MII_ADVERTISE, anar); r8152_mdio_write(tp, MII_BMCR, bmcr); - if (test_and_clear_bit(PHY_RESET, &tp->flags)) { + if (bmcr & BMCR_RESET) { int i; for (i = 0; i < 50; i++) { @@ -3135,15 +3203,33 @@ static void rtl_work_func_t(struct work_struct *work) netif_carrier_ok(tp->netdev)) napi_schedule(&tp->napi); - if (test_and_clear_bit(PHY_RESET, &tp->flags)) - rtl_phy_reset(tp); - mutex_unlock(&tp->control); out1: usb_autopm_put_interface(tp->intf); } +static void rtl_hw_phy_work_func_t(struct work_struct *work) +{ + struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work); + + if (test_bit(RTL8152_UNPLUG, &tp->flags)) + return; + + if (usb_autopm_get_interface(tp->intf) < 0) + return; + + mutex_lock(&tp->control); + + tp->rtl_ops.hw_phy_cfg(tp); + + rtl8152_set_speed(tp, tp->autoneg, tp->speed, tp->duplex); + + mutex_unlock(&tp->control); + + usb_autopm_put_interface(tp->intf); +} + #ifdef CONFIG_PM_SLEEP static int rtl_notifier(struct notifier_block *nb, unsigned long action, void *data) @@ -3180,8 +3266,6 @@ static int rtl8152_open(struct net_device *netdev) if (res) goto out; - netif_carrier_off(netdev); - res = usb_autopm_get_interface(tp->intf); if (res < 0) { free_all_mem(tp); @@ -3192,9 +3276,6 @@ static int rtl8152_open(struct net_device *netdev) tp->rtl_ops.up(tp); - rtl8152_set_speed(tp, AUTONEG_ENABLE, - tp->mii.supports_gmii ? 
SPEED_1000 : SPEED_100, - DUPLEX_FULL); netif_carrier_off(netdev); netif_start_queue(netdev); set_bit(WORK_ENABLE, &tp->flags); @@ -3255,103 +3336,6 @@ static int rtl8152_close(struct net_device *netdev) return res; } -static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg) -{ - ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev); - ocp_reg_write(tp, OCP_EEE_DATA, reg); - ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev); -} - -static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg) -{ - u16 data; - - r8152_mmd_indirect(tp, dev, reg); - data = ocp_reg_read(tp, OCP_EEE_DATA); - ocp_reg_write(tp, OCP_EEE_AR, 0x0000); - - return data; -} - -static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data) -{ - r8152_mmd_indirect(tp, dev, reg); - ocp_reg_write(tp, OCP_EEE_DATA, data); - ocp_reg_write(tp, OCP_EEE_AR, 0x0000); -} - -static void r8152_eee_en(struct r8152 *tp, bool enable) -{ - u16 config1, config2, config3; - u32 ocp_data; - - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); - config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask; - config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2); - config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask; - - if (enable) { - ocp_data |= EEE_RX_EN | EEE_TX_EN; - config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN; - config1 |= sd_rise_time(1); - config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN; - config3 |= fast_snr(42); - } else { - ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); - config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | - RX_QUIET_EN); - config1 |= sd_rise_time(7); - config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN); - config3 |= fast_snr(511); - } - - ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); - ocp_reg_write(tp, OCP_EEE_CONFIG1, config1); - ocp_reg_write(tp, OCP_EEE_CONFIG2, config2); - ocp_reg_write(tp, OCP_EEE_CONFIG3, config3); -} - -static void r8152b_enable_eee(struct r8152 *tp) -{ - r8152_eee_en(tp, true); - r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX); -} - -static void r8153_eee_en(struct r8152 *tp, bool enable) -{ - u32 ocp_data; - u16 config; - - ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR); - config = ocp_reg_read(tp, OCP_EEE_CFG); - - if (enable) { - ocp_data |= EEE_RX_EN | EEE_TX_EN; - config |= EEE10_EN; - } else { - ocp_data &= ~(EEE_RX_EN | EEE_TX_EN); - config &= ~EEE10_EN; - } - - ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data); - ocp_reg_write(tp, OCP_EEE_CFG, config); -} - -static void r8153_enable_eee(struct r8152 *tp) -{ - r8153_eee_en(tp, true); - ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX); -} - -static void r8152b_enable_fc(struct r8152 *tp) -{ - u16 anar; - - anar = r8152_mdio_read(tp, MII_ADVERTISE); - anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; - r8152_mdio_write(tp, MII_ADVERTISE, anar); -} - static void rtl_tally_reset(struct r8152 *tp) { u32 ocp_data; @@ -3364,10 +3348,17 @@ static void rtl_tally_reset(struct r8152 *tp) static void r8152b_init(struct r8152 *tp) { u32 ocp_data; + u16 data; if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; + data = r8152_mdio_read(tp, MII_BMCR); + if (data & BMCR_PDOWN) { + data &= ~BMCR_PDOWN; + r8152_mdio_write(tp, MII_BMCR, data); + } + r8152_aldps_en(tp, false); if (tp->version == RTL_VER_01) { @@ -3389,9 +3380,6 @@ static void r8152b_init(struct r8152 *tp) SPDWN_RXDV_MSK | SPDWN_LINKCHG_MSK; ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data); - r8152b_enable_eee(tp); - r8152_aldps_en(tp, true); - r8152b_enable_fc(tp); rtl_tally_reset(tp); /* 
enable rx aggregation */ @@ -3403,12 +3391,12 @@ static void r8152b_init(struct r8152 *tp) static void r8153_init(struct r8152 *tp) { u32 ocp_data; + u16 data; int i; if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; - r8153_aldps_en(tp, false); r8153_u1u2en(tp, false); for (i = 0; i < 500; i++) { @@ -3425,6 +3413,23 @@ static void r8153_init(struct r8152 *tp) msleep(20); } + if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 || + tp->version == RTL_VER_05) + ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L); + + data = r8152_mdio_read(tp, MII_BMCR); + if (data & BMCR_PDOWN) { + data &= ~BMCR_PDOWN; + r8152_mdio_write(tp, MII_BMCR, data); + } + + for (i = 0; i < 500; i++) { + ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK; + if (ocp_data == PHY_STAT_LAN_ON) + break; + msleep(20); + } + usb_disable_lpm(tp->udev); r8153_u2p3en(tp, false); @@ -3492,9 +3497,6 @@ static void r8153_init(struct r8152 *tp) ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0); ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0); - r8153_enable_eee(tp); - r8153_aldps_en(tp, true); - r8152b_enable_fc(tp); rtl_tally_reset(tp); r8153_u2p3en(tp, true); } @@ -3618,6 +3620,7 @@ static int rtl8152_resume(struct usb_interface *intf) if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) { tp->rtl_ops.init(tp); + queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); netif_device_attach(tp->netdev); } @@ -3632,10 +3635,6 @@ static int rtl8152_resume(struct usb_interface *intf) napi_enable(&tp->napi); } else { tp->rtl_ops.up(tp); - rtl8152_set_speed(tp, AUTONEG_ENABLE, - tp->mii.supports_gmii ? - SPEED_1000 : SPEED_100, - DUPLEX_FULL); netif_carrier_off(tp->netdev); set_bit(WORK_ENABLE, &tp->flags); } @@ -3765,6 +3764,11 @@ static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) mutex_lock(&tp->control); ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex); + if (!ret) { + tp->autoneg = cmd->autoneg; + tp->speed = cmd->speed; + tp->duplex = cmd->duplex; + } mutex_unlock(&tp->control); @@ -4222,6 +4226,7 @@ static int rtl_ops_init(struct r8152 *tp) ops->eee_get = r8152_get_eee; ops->eee_set = r8152_set_eee; ops->in_nway = rtl8152_in_nway; + ops->hw_phy_cfg = r8152b_hw_phy_cfg; ops->autosuspend_en = rtl_runtime_suspend_enable; break; @@ -4238,6 +4243,7 @@ static int rtl_ops_init(struct r8152 *tp) ops->eee_get = r8153_get_eee; ops->eee_set = r8153_set_eee; ops->in_nway = rtl8153_in_nway; + ops->hw_phy_cfg = r8153_hw_phy_cfg; ops->autosuspend_en = rtl8153_runtime_enable; break; @@ -4285,6 +4291,7 @@ static int rtl8152_probe(struct usb_interface *intf, mutex_init(&tp->control); INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t); + INIT_DELAYED_WORK(&tp->hw_phy_work, rtl_hw_phy_work_func_t); netdev->netdev_ops = &rtl8152_netdev_ops; netdev->watchdog_timeo = RTL8152_TX_TIMEOUT; @@ -4324,9 +4331,14 @@ static int rtl8152_probe(struct usb_interface *intf, break; } + tp->autoneg = AUTONEG_ENABLE; + tp->speed = tp->mii.supports_gmii ? 
SPEED_1000 : SPEED_100; + tp->duplex = DUPLEX_FULL; + intf->needs_remote_wakeup = 1; tp->rtl_ops.init(tp); + queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); set_ethernet_addr(tp); usb_set_intfdata(intf, tp); @@ -4372,6 +4384,7 @@ static void rtl8152_disconnect(struct usb_interface *intf) netif_napi_del(&tp->napi); unregister_netdev(tp->netdev); + cancel_delayed_work_sync(&tp->hw_phy_work); tp->rtl_ops.unload(tp); free_netdev(tp->netdev); } diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 524a47a28..4f4f71b29 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -428,7 +428,11 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) dev_err(&intf->dev, "rndis get ethaddr, %d\n", retval); goto halt_fail_and_release; } - memcpy(net->dev_addr, bp, ETH_ALEN); + + if (bp[0] & 0x02) + eth_hw_addr_random(net); + else + ether_addr_copy(net->dev_addr, bp); /* set a nonzero filter to enable data transfers */ memset(u.set, 0, sizeof *u.set); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 6086a0163..3bfb59209 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -42,7 +42,6 @@ #include <linux/mii.h> #include <linux/usb.h> #include <linux/usb/usbnet.h> -#include <linux/usb/cdc.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/pm_runtime.h> @@ -1972,143 +1971,6 @@ out: return err; } -int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr, - struct usb_interface *intf, - u8 *buffer, - int buflen) -{ - /* duplicates are ignored */ - struct usb_cdc_union_desc *union_header = NULL; - - /* duplicates are not tolerated */ - struct usb_cdc_header_desc *header = NULL; - struct usb_cdc_ether_desc *ether = NULL; - struct usb_cdc_mdlm_detail_desc *detail = NULL; - struct usb_cdc_mdlm_desc *desc = NULL; - - unsigned int elength; - int cnt = 0; - - memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header)); - hdr->phonet_magic_present = false; - while (buflen > 0) { - elength = buffer[0]; - if (!elength) { - dev_err(&intf->dev, "skipping garbage byte\n"); - elength = 1; - goto next_desc; - } - if (buffer[1] != USB_DT_CS_INTERFACE) { - dev_err(&intf->dev, "skipping garbage\n"); - goto next_desc; - } - - switch (buffer[2]) { - case USB_CDC_UNION_TYPE: /* we've found it */ - if (elength < sizeof(struct usb_cdc_union_desc)) - goto next_desc; - if (union_header) { - dev_err(&intf->dev, "More than one union descriptor, skipping ...\n"); - goto next_desc; - } - union_header = (struct usb_cdc_union_desc *)buffer; - break; - case USB_CDC_COUNTRY_TYPE: - if (elength < sizeof(struct usb_cdc_country_functional_desc)) - goto next_desc; - hdr->usb_cdc_country_functional_desc = - (struct usb_cdc_country_functional_desc *)buffer; - break; - case USB_CDC_HEADER_TYPE: - if (elength != sizeof(struct usb_cdc_header_desc)) - goto next_desc; - if (header) - return -EINVAL; - header = (struct usb_cdc_header_desc *)buffer; - break; - case USB_CDC_ACM_TYPE: - if (elength < sizeof(struct usb_cdc_acm_descriptor)) - goto next_desc; - hdr->usb_cdc_acm_descriptor = - (struct usb_cdc_acm_descriptor *)buffer; - break; - case USB_CDC_ETHERNET_TYPE: - if (elength != sizeof(struct usb_cdc_ether_desc)) - goto next_desc; - if (ether) - return -EINVAL; - ether = (struct usb_cdc_ether_desc *)buffer; - break; - case USB_CDC_CALL_MANAGEMENT_TYPE: - if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor)) - goto next_desc; - hdr->usb_cdc_call_mgmt_descriptor = - (struct usb_cdc_call_mgmt_descriptor *)buffer; 
- break; - case USB_CDC_DMM_TYPE: - if (elength < sizeof(struct usb_cdc_dmm_desc)) - goto next_desc; - hdr->usb_cdc_dmm_desc = - (struct usb_cdc_dmm_desc *)buffer; - break; - case USB_CDC_MDLM_TYPE: - if (elength < sizeof(struct usb_cdc_mdlm_desc *)) - goto next_desc; - if (desc) - return -EINVAL; - desc = (struct usb_cdc_mdlm_desc *)buffer; - break; - case USB_CDC_MDLM_DETAIL_TYPE: - if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *)) - goto next_desc; - if (detail) - return -EINVAL; - detail = (struct usb_cdc_mdlm_detail_desc *)buffer; - break; - case USB_CDC_NCM_TYPE: - if (elength < sizeof(struct usb_cdc_ncm_desc)) - goto next_desc; - hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer; - break; - case USB_CDC_MBIM_TYPE: - if (elength < sizeof(struct usb_cdc_mbim_desc)) - goto next_desc; - - hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer; - break; - case USB_CDC_MBIM_EXTENDED_TYPE: - if (elength < sizeof(struct usb_cdc_mbim_extended_desc)) - break; - hdr->usb_cdc_mbim_extended_desc = - (struct usb_cdc_mbim_extended_desc *)buffer; - break; - case CDC_PHONET_MAGIC_NUMBER: - hdr->phonet_magic_present = true; - break; - default: - /* - * there are LOTS more CDC descriptors that - * could legitimately be found here. - */ - dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %ud\n", - buffer[2], elength); - goto next_desc; - } - cnt++; -next_desc: - buflen -= elength; - buffer += elength; - } - hdr->usb_cdc_union_desc = union_header; - hdr->usb_cdc_header_desc = header; - hdr->usb_cdc_mdlm_detail_desc = detail; - hdr->usb_cdc_mdlm_desc = desc; - hdr->usb_cdc_ether_desc = ether; - return cnt; -} - -EXPORT_SYMBOL(cdc_parse_cdc_header); - /* * The function can't be called inside suspend/resume callback, * otherwise deadlock will be caused. 
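[Illustrative sketch, not part of the patch. The cdc_parse_cdc_header() implementation deleted above is consolidated into common USB code elsewhere in this series; callers keep the same contract: pass the interface's class-specific descriptor blob, and on success the return value is the number of descriptors parsed into struct usb_cdc_parsed_header, whose pointers alias into that blob (duplicate header/ether/MDLM descriptors yield -EINVAL). A hypothetical minidriver bind routine might use it as below; example_bind() and its -ENODEV policy are assumptions, only the helper and structure come from the removed code.]

#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>

/* example_bind() is hypothetical; cdc_parse_cdc_header() is the helper
 * whose usbnet.c copy is removed above.
 */
static int example_bind(struct usbnet *dev, struct usb_interface *intf)
{
	struct usb_cdc_parsed_header hdr;
	int ret;

	/* parse the class-specific descriptors trailing this altsetting */
	ret = cdc_parse_cdc_header(&hdr, intf,
				   intf->cur_altsetting->extra,
				   intf->cur_altsetting->extralen);
	if (ret < 0)
		return ret;	/* e.g. duplicate functional descriptors */

	/* the parsed pointers alias into the descriptor buffer; no copies */
	if (!hdr.usb_cdc_union_desc || !hdr.usb_cdc_ether_desc)
		return -ENODEV;

	return 0;
}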
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index e0638e556..1b5f531ee 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -144,8 +144,10 @@ struct virtnet_info { /* Control VQ buffers: protected by the rtnl lock */ struct virtio_net_ctrl_hdr ctrl_hdr; virtio_net_ctrl_ack ctrl_status; + struct virtio_net_ctrl_mq ctrl_mq; u8 ctrl_promisc; u8 ctrl_allmulti; + u16 ctrl_vid; /* Ethtool settings */ u8 duplex; @@ -479,53 +481,21 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, stats->rx_packets++; u64_stats_update_end(&stats->rx_syncp); - if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { - pr_debug("Needs csum!\n"); - if (!skb_partial_csum_set(skb, - virtio16_to_cpu(vi->vdev, hdr->hdr.csum_start), - virtio16_to_cpu(vi->vdev, hdr->hdr.csum_offset))) - goto frame_err; - } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) { + if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (virtio_net_hdr_to_skb(skb, &hdr->hdr, + virtio_is_little_endian(vi->vdev))) { + net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n", + dev->name, hdr->hdr.gso_type, + hdr->hdr.gso_size); + goto frame_err; } skb->protocol = eth_type_trans(skb, dev); pr_debug("Receiving skb proto 0x%04x len %i type %i\n", ntohs(skb->protocol), skb->len, skb->pkt_type); - if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { - pr_debug("GSO!\n"); - switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { - case VIRTIO_NET_HDR_GSO_TCPV4: - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; - break; - case VIRTIO_NET_HDR_GSO_UDP: - skb_shinfo(skb)->gso_type = SKB_GSO_UDP; - break; - case VIRTIO_NET_HDR_GSO_TCPV6: - skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; - break; - default: - net_warn_ratelimited("%s: bad gso type %u.\n", - dev->name, hdr->hdr.gso_type); - goto frame_err; - } - - if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) - skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; - - skb_shinfo(skb)->gso_size = virtio16_to_cpu(vi->vdev, - hdr->hdr.gso_size); - if (skb_shinfo(skb)->gso_size == 0) { - net_warn_ratelimited("%s: zero gso size.\n", dev->name); - goto frame_err; - } - - /* Header must be checked, and gso_segs computed. 
*/ - skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; - skb_shinfo(skb)->gso_segs = 0; - } - napi_gro_receive(&rq->napi, skb); return; @@ -868,35 +838,9 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) else hdr = skb_vnet_hdr(skb); - if (skb->ip_summed == CHECKSUM_PARTIAL) { - hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; - hdr->hdr.csum_start = cpu_to_virtio16(vi->vdev, - skb_checksum_start_offset(skb)); - hdr->hdr.csum_offset = cpu_to_virtio16(vi->vdev, - skb->csum_offset); - } else { - hdr->hdr.flags = 0; - hdr->hdr.csum_offset = hdr->hdr.csum_start = 0; - } - - if (skb_is_gso(skb)) { - hdr->hdr.hdr_len = cpu_to_virtio16(vi->vdev, skb_headlen(skb)); - hdr->hdr.gso_size = cpu_to_virtio16(vi->vdev, - skb_shinfo(skb)->gso_size); - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) - hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; - else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) - hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; - else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) - hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; - else - BUG(); - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) - hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; - } else { - hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; - hdr->hdr.gso_size = hdr->hdr.hdr_len = 0; - } + if (virtio_net_hdr_from_skb(skb, &hdr->hdr, + virtio_is_little_endian(vi->vdev))) + BUG(); if (vi->mergeable_rx_bufs) hdr->num_buffers = 0; @@ -1116,14 +1060,13 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi) static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) { struct scatterlist sg; - struct virtio_net_ctrl_mq s; struct net_device *dev = vi->dev; if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) return 0; - s.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); - sg_init_one(&sg, &s, sizeof(s)); + vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); + sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { @@ -1230,7 +1173,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev, struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; - sg_init_one(&sg, &vid, sizeof(vid)); + vi->ctrl_vid = vid; + sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD, &sg)) @@ -1244,7 +1188,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; - sg_init_one(&sg, &vid, sizeof(vid)); + vi->ctrl_vid = vid; + sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_DEL, &sg)) @@ -1780,6 +1725,7 @@ static int virtnet_probe(struct virtio_device *vdev) struct net_device *dev; struct virtnet_info *vi; u16 max_queue_pairs; + int mtu; if (!vdev->config->get) { dev_err(&vdev->dev, "%s failure: config access disabled\n", @@ -1896,6 +1842,14 @@ static int virtnet_probe(struct virtio_device *vdev) if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) vi->has_cvq = true; + if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) { + mtu = virtio_cread16(vdev, + offsetof(struct virtio_net_config, + mtu)); + if (virtnet_change_mtu(dev, mtu)) + __virtio_clear_bit(vdev, VIRTIO_NET_F_MTU); + } + if (vi->any_header_sg) dev->needed_headroom = vi->hdr_len; @@ -2067,6 +2021,7 @@ static unsigned int features[] = { VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, VIRTIO_NET_F_CTRL_MAC_ADDR, 
VIRTIO_F_ANY_LAYOUT, + VIRTIO_NET_F_MTU, }; static struct virtio_driver virtio_net_driver = { diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile index 880f5098e..8cdbb63d1 100644 --- a/drivers/net/vmxnet3/Makefile +++ b/drivers/net/vmxnet3/Makefile @@ -2,7 +2,7 @@ # # Linux driver for VMware's vmxnet3 ethernet NIC. # -# Copyright (C) 2007-2009, VMware, Inc. All Rights Reserved. +# Copyright (C) 2007-2016, VMware, Inc. All Rights Reserved. # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the @@ -21,7 +21,7 @@ # The full GNU General Public License is included in this distribution in # the file called "COPYING". # -# Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> +# Maintained by: pv-drivers@vmware.com # # ################################################################################ diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h index 969c751ee..db9f1fde3 100644 --- a/drivers/net/vmxnet3/upt1_defs.h +++ b/drivers/net/vmxnet3/upt1_defs.h @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -20,7 +20,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> + * Maintained by: pv-drivers@vmware.com * */ diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h index 72ba8ae7f..c3a316461 100644 --- a/drivers/net/vmxnet3/vmxnet3_defs.h +++ b/drivers/net/vmxnet3/vmxnet3_defs.h @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2015, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -20,7 +20,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". 
* - * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> + * Maintained by: pv-drivers@vmware.com * */ @@ -76,7 +76,12 @@ enum { VMXNET3_CMD_UPDATE_IML, VMXNET3_CMD_UPDATE_PMCFG, VMXNET3_CMD_UPDATE_FEATURE, + VMXNET3_CMD_RESERVED1, VMXNET3_CMD_LOAD_PLUGIN, + VMXNET3_CMD_RESERVED2, + VMXNET3_CMD_RESERVED3, + VMXNET3_CMD_SET_COALESCE, + VMXNET3_CMD_REGISTER_MEMREGS, VMXNET3_CMD_FIRST_GET = 0xF00D0000, VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET, @@ -87,7 +92,10 @@ enum { VMXNET3_CMD_GET_DID_LO, VMXNET3_CMD_GET_DID_HI, VMXNET3_CMD_GET_DEV_EXTRA_INFO, - VMXNET3_CMD_GET_CONF_INTR + VMXNET3_CMD_GET_CONF_INTR, + VMXNET3_CMD_GET_RESERVED1, + VMXNET3_CMD_GET_TXDATA_DESC_SIZE, + VMXNET3_CMD_GET_COALESCE, }; /* @@ -169,6 +177,8 @@ struct Vmxnet3_TxDataDesc { u8 data[VMXNET3_HDR_COPY_SIZE]; }; +typedef u8 Vmxnet3_RxDataDesc; + #define VMXNET3_TCD_GEN_SHIFT 31 #define VMXNET3_TCD_GEN_SIZE 1 #define VMXNET3_TCD_TXIDX_SHIFT 0 @@ -373,6 +383,14 @@ union Vmxnet3_GenericDesc { #define VMXNET3_RING_SIZE_ALIGN 32 #define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1) +/* Tx Data Ring buffer size must be a multiple of 64 */ +#define VMXNET3_TXDATA_DESC_SIZE_ALIGN 64 +#define VMXNET3_TXDATA_DESC_SIZE_MASK (VMXNET3_TXDATA_DESC_SIZE_ALIGN - 1) + +/* Rx Data Ring buffer size must be a multiple of 64 */ +#define VMXNET3_RXDATA_DESC_SIZE_ALIGN 64 +#define VMXNET3_RXDATA_DESC_SIZE_MASK (VMXNET3_RXDATA_DESC_SIZE_ALIGN - 1) + /* Max ring size */ #define VMXNET3_TX_RING_MAX_SIZE 4096 #define VMXNET3_TC_RING_MAX_SIZE 4096 @@ -380,6 +398,11 @@ union Vmxnet3_GenericDesc { #define VMXNET3_RX_RING2_MAX_SIZE 4096 #define VMXNET3_RC_RING_MAX_SIZE 8192 +#define VMXNET3_TXDATA_DESC_MIN_SIZE 128 +#define VMXNET3_TXDATA_DESC_MAX_SIZE 2048 + +#define VMXNET3_RXDATA_DESC_MAX_SIZE 2048 + /* a list of reasons for queue stop */ enum { @@ -466,7 +489,9 @@ struct Vmxnet3_TxQueueConf { __le32 compRingSize; /* # of comp desc */ __le32 ddLen; /* size of driver data */ u8 intrIdx; - u8 _pad[7]; + u8 _pad1[1]; + __le16 txDataRingDescSize; + u8 _pad2[4]; }; @@ -474,12 +499,14 @@ struct Vmxnet3_RxQueueConf { __le64 rxRingBasePA[2]; __le64 compRingBasePA; __le64 ddPA; /* driver data */ - __le64 reserved; + __le64 rxDataRingBasePA; __le32 rxRingSize[2]; /* # of rx desc */ __le32 compRingSize; /* # of rx comp desc */ __le32 ddLen; /* size of driver data */ u8 intrIdx; - u8 _pad[7]; + u8 _pad1[1]; + __le16 rxDataRingDescSize; /* size of rx data ring buffer */ + u8 _pad2[4]; }; @@ -609,6 +636,63 @@ struct Vmxnet3_RxQueueDesc { u8 __pad[88]; /* 128 aligned */ }; +struct Vmxnet3_SetPolling { + u8 enablePolling; +}; + +#define VMXNET3_COAL_STATIC_MAX_DEPTH 128 +#define VMXNET3_COAL_RBC_MIN_RATE 100 +#define VMXNET3_COAL_RBC_MAX_RATE 100000 + +enum Vmxnet3_CoalesceMode { + VMXNET3_COALESCE_DISABLED = 0, + VMXNET3_COALESCE_ADAPT = 1, + VMXNET3_COALESCE_STATIC = 2, + VMXNET3_COALESCE_RBC = 3 +}; + +struct Vmxnet3_CoalesceRbc { + u32 rbc_rate; +}; + +struct Vmxnet3_CoalesceStatic { + u32 tx_depth; + u32 tx_comp_depth; + u32 rx_depth; +}; + +struct Vmxnet3_CoalesceScheme { + enum Vmxnet3_CoalesceMode coalMode; + union { + struct Vmxnet3_CoalesceRbc coalRbc; + struct Vmxnet3_CoalesceStatic coalStatic; + } coalPara; +}; + +struct Vmxnet3_MemoryRegion { + __le64 startPA; + __le32 length; + __le16 txQueueBits; + __le16 rxQueueBits; +}; + +#define MAX_MEMORY_REGION_PER_QUEUE 16 +#define MAX_MEMORY_REGION_PER_DEVICE 256 + +struct Vmxnet3_MemRegs { + __le16 numRegs; + __le16 pad[3]; + struct Vmxnet3_MemoryRegion memRegs[1]; +}; + +/* If the 
command data <= 16 bytes, use the shared memory directly. + * otherwise, use variable length configuration descriptor. + */ +union Vmxnet3_CmdInfo { + struct Vmxnet3_VariableLenConfDesc varConf; + struct Vmxnet3_SetPolling setPolling; + __le64 data[2]; +}; struct Vmxnet3_DSDevRead { /* read-only region for device, read by dev in response to a SET cmd */ @@ -627,7 +711,14 @@ struct Vmxnet3_DriverShared { __le32 pad; struct Vmxnet3_DSDevRead devRead; __le32 ecr; - __le32 reserved[5]; + __le32 reserved; + union { + __le32 reserved1[4]; + union Vmxnet3_CmdInfo cmdInfo; /* only valid in the context of + * executing the relevant + * command + */ + } cu; }; diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 08885bc8d..4244b9d44 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -20,7 +20,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> + * Maintained by: pv-drivers@vmware.com * */ @@ -435,8 +435,8 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, tq->tx_ring.base = NULL; } if (tq->data_ring.base) { - dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size * - sizeof(struct Vmxnet3_TxDataDesc), + dma_free_coherent(&adapter->pdev->dev, + tq->data_ring.size * tq->txdata_desc_size, tq->data_ring.base, tq->data_ring.basePA); tq->data_ring.base = NULL; } @@ -478,8 +478,8 @@ vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; tq->tx_ring.gen = VMXNET3_INIT_GEN; - memset(tq->data_ring.base, 0, tq->data_ring.size * - sizeof(struct Vmxnet3_TxDataDesc)); + memset(tq->data_ring.base, 0, + tq->data_ring.size * tq->txdata_desc_size); /* reset the tx comp ring contents to 0 and reset comp ring states */ memset(tq->comp_ring.base, 0, tq->comp_ring.size * @@ -514,10 +514,10 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, } tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev, - tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc), + tq->data_ring.size * tq->txdata_desc_size, &tq->data_ring.basePA, GFP_KERNEL); if (!tq->data_ring.base) { - netdev_err(adapter->netdev, "failed to allocate data ring\n"); + netdev_err(adapter->netdev, "failed to allocate tx data ring\n"); goto err; } @@ -689,7 +689,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, if (ctx->copy_size) { ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA + tq->tx_ring.next2fill * - sizeof(struct Vmxnet3_TxDataDesc)); + tq->txdata_desc_size); ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size); ctx->sop_txd->dword[3] = 0; @@ -873,8 +873,9 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, ctx->eth_ip_hdr_size = 0; ctx->l4_hdr_size = 0; /* copy as much as allowed */ - ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE - , skb_headlen(skb)); + ctx->copy_size = min_t(unsigned int, + tq->txdata_desc_size, + skb_headlen(skb)); } if (skb->len <= VMXNET3_HDR_COPY_SIZE) @@ -885,7 +886,7 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, goto err; } - if (unlikely(ctx->copy_size > 
VMXNET3_HDR_COPY_SIZE)) { + if (unlikely(ctx->copy_size > tq->txdata_desc_size)) { tq->stats.oversized_hdr++; ctx->copy_size = 0; return 0; @@ -913,7 +914,9 @@ vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, { struct Vmxnet3_TxDataDesc *tdd; - tdd = tq->data_ring.base + tq->tx_ring.next2fill; + tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base + + tq->tx_ring.next2fill * + tq->txdata_desc_size); memcpy(tdd->data, skb->data, ctx->copy_size); netdev_dbg(adapter->netdev, @@ -1283,9 +1286,10 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, */ break; } - BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2); + BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 && + rcd->rqID != rq->dataRingQid); idx = rcd->rxdIdx; - ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1; + ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID); ring = rq->rx_ring + ring_idx; vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd, &rxCmdDesc); @@ -1300,8 +1304,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, } if (rcd->sop) { /* first buf of the pkt */ + bool rxDataRingUsed; + u16 len; + BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD || - rcd->rqID != rq->qid); + (rcd->rqID != rq->qid && + rcd->rqID != rq->dataRingQid)); BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB); BUG_ON(ctx->skb != NULL || rbi->skb == NULL); @@ -1317,8 +1325,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, skip_page_frags = false; ctx->skb = rbi->skb; + + rxDataRingUsed = + VMXNET3_RX_DATA_RING(adapter, rcd->rqID); + len = rxDataRingUsed ? rcd->len : rbi->len; new_skb = netdev_alloc_skb_ip_align(adapter->netdev, - rbi->len); + len); if (new_skb == NULL) { /* Skb allocation failed, do not handover this * skb to stack. Reuse it. Drop the existing pkt @@ -1329,25 +1341,48 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, skip_page_frags = true; goto rcd_done; } - new_dma_addr = dma_map_single(&adapter->pdev->dev, - new_skb->data, rbi->len, - PCI_DMA_FROMDEVICE); - if (dma_mapping_error(&adapter->pdev->dev, - new_dma_addr)) { - dev_kfree_skb(new_skb); - /* Skb allocation failed, do not handover this - * skb to stack. Reuse it. Drop the existing pkt - */ - rq->stats.rx_buf_alloc_failure++; - ctx->skb = NULL; - rq->stats.drop_total++; - skip_page_frags = true; - goto rcd_done; - } - dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr, - rbi->len, - PCI_DMA_FROMDEVICE); + if (rxDataRingUsed) { + size_t sz; + + BUG_ON(rcd->len > rq->data_ring.desc_size); + + ctx->skb = new_skb; + sz = rcd->rxdIdx * rq->data_ring.desc_size; + memcpy(new_skb->data, + &rq->data_ring.base[sz], rcd->len); + } else { + ctx->skb = rbi->skb; + + new_dma_addr = + dma_map_single(&adapter->pdev->dev, + new_skb->data, rbi->len, + PCI_DMA_FROMDEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + new_dma_addr)) { + dev_kfree_skb(new_skb); + /* Skb allocation failed, do not + * handover this skb to stack. Reuse + * it. Drop the existing pkt. 
+ */ + rq->stats.rx_buf_alloc_failure++; + ctx->skb = NULL; + rq->stats.drop_total++; + skip_page_frags = true; + goto rcd_done; + } + + dma_unmap_single(&adapter->pdev->dev, + rbi->dma_addr, + rbi->len, + PCI_DMA_FROMDEVICE); + + /* Immediate refill */ + rbi->skb = new_skb; + rbi->dma_addr = new_dma_addr; + rxd->addr = cpu_to_le64(rbi->dma_addr); + rxd->len = rbi->len; + } #ifdef VMXNET3_RSS if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE && @@ -1358,12 +1393,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, #endif skb_put(ctx->skb, rcd->len); - /* Immediate refill */ - rbi->skb = new_skb; - rbi->dma_addr = new_dma_addr; - rxd->addr = cpu_to_le64(rbi->dma_addr); - rxd->len = rbi->len; - if (adapter->version == 2 && + if (VMXNET3_VERSION_GE_2(adapter) && rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) { struct Vmxnet3_RxCompDescExt *rcdlro; rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd; @@ -1589,6 +1619,13 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, rq->buf_info[i] = NULL; } + if (rq->data_ring.base) { + dma_free_coherent(&adapter->pdev->dev, + rq->rx_ring[0].size * rq->data_ring.desc_size, + rq->data_ring.base, rq->data_ring.basePA); + rq->data_ring.base = NULL; + } + if (rq->comp_ring.base) { dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc), @@ -1604,6 +1641,25 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, } } +void +vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) { + struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; + + if (rq->data_ring.base) { + dma_free_coherent(&adapter->pdev->dev, + (rq->rx_ring[0].size * + rq->data_ring.desc_size), + rq->data_ring.base, + rq->data_ring.basePA); + rq->data_ring.base = NULL; + rq->data_ring.desc_size = 0; + } + } +} static int vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, @@ -1697,6 +1753,22 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) } } + if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) { + sz = rq->rx_ring[0].size * rq->data_ring.desc_size; + rq->data_ring.base = + dma_alloc_coherent(&adapter->pdev->dev, sz, + &rq->data_ring.basePA, + GFP_KERNEL); + if (!rq->data_ring.base) { + netdev_err(adapter->netdev, + "rx data ring will be disabled\n"); + adapter->rxdataring_enabled = false; + } + } else { + rq->data_ring.base = NULL; + rq->data_ring.desc_size = 0; + } + sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc); rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->comp_ring.basePA, @@ -1729,6 +1801,8 @@ vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter) { int i, err = 0; + adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter); + for (i = 0; i < adapter->num_rx_queues; i++) { err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter); if (unlikely(err)) { @@ -1738,6 +1812,10 @@ vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter) goto err_out; } } + + if (!adapter->rxdataring_enabled) + vmxnet3_rq_destroy_all_rxdataring(adapter); + return err; err_out: vmxnet3_rq_destroy_all(adapter); @@ -2045,10 +2123,9 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; rq->qid = i; rq->qid2 = i + adapter->num_rx_queues; + rq->dataRingQid = i + 2 * adapter->num_rx_queues; } - - /* init our intr settings */ for (i = 0; i < intr->num_intrs; i++) intr->mod_levels[i] = UPT1_IML_ADAPTIVE; @@ -2336,6 +2413,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter 
*adapter) tqc->ddPA = cpu_to_le64(tq->buf_info_pa); tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); + tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size); tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); tqc->ddLen = cpu_to_le32( sizeof(struct vmxnet3_tx_buf_info) * @@ -2360,6 +2438,12 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) (rqc->rxRingSize[0] + rqc->rxRingSize[1])); rqc->intrIdx = rq->comp_ring.intr_idx; + if (VMXNET3_VERSION_GE_3(adapter)) { + rqc->rxDataRingBasePA = + cpu_to_le64(rq->data_ring.basePA); + rqc->rxDataRingDescSize = + cpu_to_le16(rq->data_ring.desc_size); + } } #ifdef VMXNET3_RSS @@ -2409,6 +2493,32 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) /* the rest are already zeroed */ } +static void +vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter) +{ + struct Vmxnet3_DriverShared *shared = adapter->shared; + union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo; + unsigned long flags; + + if (!VMXNET3_VERSION_GE_3(adapter)) + return; + + spin_lock_irqsave(&adapter->cmd_lock, flags); + cmdInfo->varConf.confVer = 1; + cmdInfo->varConf.confLen = + cpu_to_le32(sizeof(*adapter->coal_conf)); + cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa); + + if (adapter->default_coal_mode) { + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_GET_COALESCE); + } else { + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_SET_COALESCE); + } + + spin_unlock_irqrestore(&adapter->cmd_lock, flags); +} int vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) @@ -2458,6 +2568,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) goto activate_err; } + vmxnet3_init_coalesce(adapter); + for (i = 0; i < adapter->num_rx_queues; i++) { VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN, @@ -2689,7 +2801,8 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) int vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, - u32 rx_ring_size, u32 rx_ring2_size) + u32 rx_ring_size, u32 rx_ring2_size, + u16 txdata_desc_size, u16 rxdata_desc_size) { int err = 0, i; @@ -2698,6 +2811,7 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, tq->tx_ring.size = tx_ring_size; tq->data_ring.size = tx_ring_size; tq->comp_ring.size = tx_ring_size; + tq->txdata_desc_size = txdata_desc_size; tq->shared = &adapter->tqd_start[i].ctrl; tq->stopped = true; tq->adapter = adapter; @@ -2714,12 +2828,15 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, adapter->rx_queue[0].rx_ring[0].size = rx_ring_size; adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size; vmxnet3_adjust_rx_ring_size(adapter); + + adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter); for (i = 0; i < adapter->num_rx_queues; i++) { struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; /* qid and qid2 for rx queues will be assigned later when num * of rx queues is finalized after allocating intrs */ rq->shared = &adapter->rqd_start[i].ctrl; rq->adapter = adapter; + rq->data_ring.desc_size = rxdata_desc_size; err = vmxnet3_rq_create(rq, adapter); if (err) { if (i == 0) { @@ -2737,6 +2854,10 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, } } } + + if (!adapter->rxdataring_enabled) + vmxnet3_rq_destroy_all_rxdataring(adapter); + return err; queue_err: vmxnet3_tq_destroy_all(adapter); @@ -2754,9 +2875,35 @@ vmxnet3_open(struct net_device *netdev) for (i = 0; i < adapter->num_tx_queues; i++) 
spin_lock_init(&adapter->tx_queue[i].tx_lock); - err = vmxnet3_create_queues(adapter, adapter->tx_ring_size, + if (VMXNET3_VERSION_GE_3(adapter)) { + unsigned long flags; + u16 txdata_desc_size; + + spin_lock_irqsave(&adapter->cmd_lock, flags); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_GET_TXDATA_DESC_SIZE); + txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter, + VMXNET3_REG_CMD); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); + + if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) || + (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) || + (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) { + adapter->txdata_desc_size = + sizeof(struct Vmxnet3_TxDataDesc); + } else { + adapter->txdata_desc_size = txdata_desc_size; + } + } else { + adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc); + } + + err = vmxnet3_create_queues(adapter, + adapter->tx_ring_size, adapter->rx_ring_size, - adapter->rx_ring2_size); + adapter->rx_ring2_size, + adapter->txdata_desc_size, + adapter->rxdata_desc_size); if (err) goto queue_err; @@ -3200,12 +3347,21 @@ vmxnet3_probe_device(struct pci_dev *pdev, goto err_alloc_pci; ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS); - if (ver & 2) { - VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 2); - adapter->version = 2; - } else if (ver & 1) { - VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1); - adapter->version = 1; + if (ver & (1 << VMXNET3_REV_3)) { + VMXNET3_WRITE_BAR1_REG(adapter, + VMXNET3_REG_VRRS, + 1 << VMXNET3_REV_3); + adapter->version = VMXNET3_REV_3 + 1; + } else if (ver & (1 << VMXNET3_REV_2)) { + VMXNET3_WRITE_BAR1_REG(adapter, + VMXNET3_REG_VRRS, + 1 << VMXNET3_REV_2); + adapter->version = VMXNET3_REV_2 + 1; + } else if (ver & (1 << VMXNET3_REV_1)) { + VMXNET3_WRITE_BAR1_REG(adapter, + VMXNET3_REG_VRRS, + 1 << VMXNET3_REV_1); + adapter->version = VMXNET3_REV_1 + 1; } else { dev_err(&pdev->dev, "Incompatible h/w version (0x%x) for adapter\n", ver); @@ -3224,9 +3380,28 @@ vmxnet3_probe_device(struct pci_dev *pdev, goto err_ver; } + if (VMXNET3_VERSION_GE_3(adapter)) { + adapter->coal_conf = + dma_alloc_coherent(&adapter->pdev->dev, + sizeof(struct Vmxnet3_CoalesceScheme), + &adapter->coal_conf_pa, + GFP_KERNEL); + if (!adapter->coal_conf) { + err = -ENOMEM; + goto err_ver; + } + memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf)); + adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED; + adapter->default_coal_mode = true; + } + SET_NETDEV_DEV(netdev, &pdev->dev); vmxnet3_declare_features(adapter, dma64); + adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ? 
+ VMXNET3_DEF_RXDATA_DESC_SIZE : 0; + if (adapter->num_tx_queues == adapter->num_rx_queues) adapter->share_intr = VMXNET3_INTR_BUDDYSHARE; else @@ -3283,6 +3458,11 @@ vmxnet3_probe_device(struct pci_dev *pdev, return 0; err_register: + if (VMXNET3_VERSION_GE_3(adapter)) { + dma_free_coherent(&adapter->pdev->dev, + sizeof(struct Vmxnet3_CoalesceScheme), + adapter->coal_conf, adapter->coal_conf_pa); + } vmxnet3_free_intr_resources(adapter); err_ver: vmxnet3_free_pci_resources(adapter); @@ -3333,6 +3513,11 @@ vmxnet3_remove_device(struct pci_dev *pdev) vmxnet3_free_intr_resources(adapter); vmxnet3_free_pci_resources(adapter); + if (VMXNET3_VERSION_GE_3(adapter)) { + dma_free_coherent(&adapter->pdev->dev, + sizeof(struct Vmxnet3_CoalesceScheme), + adapter->coal_conf, adapter->coal_conf_pa); + } #ifdef VMXNET3_RSS dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf), adapter->rss_conf, adapter->rss_conf_pa); diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 9ba11d737..aabc6ef36 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -20,7 +20,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> + * Maintained by: pv-drivers@vmware.com * */ @@ -396,8 +396,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) buf[j++] = VMXNET3_GET_ADDR_LO(tq->data_ring.basePA); buf[j++] = VMXNET3_GET_ADDR_HI(tq->data_ring.basePA); buf[j++] = tq->data_ring.size; - /* transmit data ring buffer size */ - buf[j++] = VMXNET3_HDR_COPY_SIZE; + buf[j++] = tq->txdata_desc_size; buf[j++] = VMXNET3_GET_ADDR_LO(tq->comp_ring.basePA); buf[j++] = VMXNET3_GET_ADDR_HI(tq->comp_ring.basePA); @@ -431,11 +430,10 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) buf[j++] = rq->rx_ring[1].next2comp; buf[j++] = rq->rx_ring[1].gen; - /* receive data ring */ - buf[j++] = 0; - buf[j++] = 0; - buf[j++] = 0; - buf[j++] = 0; + buf[j++] = VMXNET3_GET_ADDR_LO(rq->data_ring.basePA); + buf[j++] = VMXNET3_GET_ADDR_HI(rq->data_ring.basePA); + buf[j++] = rq->rx_ring[0].size; + buf[j++] = rq->data_ring.desc_size; buf[j++] = VMXNET3_GET_ADDR_LO(rq->comp_ring.basePA); buf[j++] = VMXNET3_GET_ADDR_HI(rq->comp_ring.basePA); @@ -504,12 +502,14 @@ vmxnet3_get_ringparam(struct net_device *netdev, param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE; param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE; - param->rx_mini_max_pending = 0; + param->rx_mini_max_pending = VMXNET3_VERSION_GE_3(adapter) ? + VMXNET3_RXDATA_DESC_MAX_SIZE : 0; param->rx_jumbo_max_pending = VMXNET3_RX_RING2_MAX_SIZE; param->rx_pending = adapter->rx_ring_size; param->tx_pending = adapter->tx_ring_size; - param->rx_mini_pending = 0; + param->rx_mini_pending = VMXNET3_VERSION_GE_3(adapter) ? 
+ adapter->rxdata_desc_size : 0; param->rx_jumbo_pending = adapter->rx_ring2_size; } @@ -520,6 +520,7 @@ vmxnet3_set_ringparam(struct net_device *netdev, { struct vmxnet3_adapter *adapter = netdev_priv(netdev); u32 new_tx_ring_size, new_rx_ring_size, new_rx_ring2_size; + u16 new_rxdata_desc_size; u32 sz; int err = 0; @@ -542,6 +543,15 @@ vmxnet3_set_ringparam(struct net_device *netdev, return -EOPNOTSUPP; } + if (VMXNET3_VERSION_GE_3(adapter)) { + if (param->rx_mini_pending < 0 || + param->rx_mini_pending > VMXNET3_RXDATA_DESC_MAX_SIZE) { + return -EINVAL; + } + } else if (param->rx_mini_pending != 0) { + return -EINVAL; + } + /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */ new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) & ~VMXNET3_RING_SIZE_MASK; @@ -568,9 +578,19 @@ vmxnet3_set_ringparam(struct net_device *netdev, new_rx_ring2_size = min_t(u32, new_rx_ring2_size, VMXNET3_RX_RING2_MAX_SIZE); + /* rx data ring buffer size has to be a multiple of + * VMXNET3_RXDATA_DESC_SIZE_ALIGN + */ + new_rxdata_desc_size = + (param->rx_mini_pending + VMXNET3_RXDATA_DESC_SIZE_MASK) & + ~VMXNET3_RXDATA_DESC_SIZE_MASK; + new_rxdata_desc_size = min_t(u16, new_rxdata_desc_size, + VMXNET3_RXDATA_DESC_MAX_SIZE); + if (new_tx_ring_size == adapter->tx_ring_size && new_rx_ring_size == adapter->rx_ring_size && - new_rx_ring2_size == adapter->rx_ring2_size) { + new_rx_ring2_size == adapter->rx_ring2_size && + new_rxdata_desc_size == adapter->rxdata_desc_size) { return 0; } @@ -591,8 +611,9 @@ vmxnet3_set_ringparam(struct net_device *netdev, vmxnet3_rq_destroy_all(adapter); err = vmxnet3_create_queues(adapter, new_tx_ring_size, - new_rx_ring_size, new_rx_ring2_size); - + new_rx_ring_size, new_rx_ring2_size, + adapter->txdata_desc_size, + new_rxdata_desc_size); if (err) { /* failed, most likely because of OOM, try default * size */ @@ -601,10 +622,15 @@ vmxnet3_set_ringparam(struct net_device *netdev, new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; new_rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE; + new_rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ? + VMXNET3_DEF_RXDATA_DESC_SIZE : 0; + err = vmxnet3_create_queues(adapter, new_tx_ring_size, new_rx_ring_size, - new_rx_ring2_size); + new_rx_ring2_size, + adapter->txdata_desc_size, + new_rxdata_desc_size); if (err) { netdev_err(netdev, "failed to create queues " "with default sizes. 
Closing it\n"); @@ -620,6 +646,7 @@ vmxnet3_set_ringparam(struct net_device *netdev, adapter->tx_ring_size = new_tx_ring_size; adapter->rx_ring_size = new_rx_ring_size; adapter->rx_ring2_size = new_rx_ring2_size; + adapter->rxdata_desc_size = new_rxdata_desc_size; out: clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); @@ -698,6 +725,162 @@ vmxnet3_set_rss(struct net_device *netdev, const u32 *p, const u8 *key, } #endif +static int +vmxnet3_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + if (!VMXNET3_VERSION_GE_3(adapter)) + return -EOPNOTSUPP; + + switch (adapter->coal_conf->coalMode) { + case VMXNET3_COALESCE_DISABLED: + /* struct ethtool_coalesce is already initialized to 0 */ + break; + case VMXNET3_COALESCE_ADAPT: + ec->use_adaptive_rx_coalesce = true; + break; + case VMXNET3_COALESCE_STATIC: + ec->tx_max_coalesced_frames = + adapter->coal_conf->coalPara.coalStatic.tx_comp_depth; + ec->rx_max_coalesced_frames = + adapter->coal_conf->coalPara.coalStatic.rx_depth; + break; + case VMXNET3_COALESCE_RBC: { + u32 rbc_rate; + + rbc_rate = adapter->coal_conf->coalPara.coalRbc.rbc_rate; + ec->rx_coalesce_usecs = VMXNET3_COAL_RBC_USECS(rbc_rate); + } + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int +vmxnet3_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + struct Vmxnet3_DriverShared *shared = adapter->shared; + union Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo; + unsigned long flags; + + if (!VMXNET3_VERSION_GE_3(adapter)) + return -EOPNOTSUPP; + + if (ec->rx_coalesce_usecs_irq || + ec->rx_max_coalesced_frames_irq || + ec->tx_coalesce_usecs || + ec->tx_coalesce_usecs_irq || + ec->tx_max_coalesced_frames_irq || + ec->stats_block_coalesce_usecs || + ec->use_adaptive_tx_coalesce || + ec->pkt_rate_low || + ec->rx_coalesce_usecs_low || + ec->rx_max_coalesced_frames_low || + ec->tx_coalesce_usecs_low || + ec->tx_max_coalesced_frames_low || + ec->pkt_rate_high || + ec->rx_coalesce_usecs_high || + ec->rx_max_coalesced_frames_high || + ec->tx_coalesce_usecs_high || + ec->tx_max_coalesced_frames_high || + ec->rate_sample_interval) { + return -EINVAL; + } + + if ((ec->rx_coalesce_usecs == 0) && + (ec->use_adaptive_rx_coalesce == 0) && + (ec->tx_max_coalesced_frames == 0) && + (ec->rx_max_coalesced_frames == 0)) { + memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf)); + adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED; + goto done; + } + + if (ec->rx_coalesce_usecs != 0) { + u32 rbc_rate; + + if ((ec->use_adaptive_rx_coalesce != 0) || + (ec->tx_max_coalesced_frames != 0) || + (ec->rx_max_coalesced_frames != 0)) { + return -EINVAL; + } + + rbc_rate = VMXNET3_COAL_RBC_RATE(ec->rx_coalesce_usecs); + if (rbc_rate < VMXNET3_COAL_RBC_MIN_RATE || + rbc_rate > VMXNET3_COAL_RBC_MAX_RATE) { + return -EINVAL; + } + + memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf)); + adapter->coal_conf->coalMode = VMXNET3_COALESCE_RBC; + adapter->coal_conf->coalPara.coalRbc.rbc_rate = rbc_rate; + goto done; + } + + if (ec->use_adaptive_rx_coalesce != 0) { + if ((ec->rx_coalesce_usecs != 0) || + (ec->tx_max_coalesced_frames != 0) || + (ec->rx_max_coalesced_frames != 0)) { + return -EINVAL; + } + memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf)); + adapter->coal_conf->coalMode = VMXNET3_COALESCE_ADAPT; + goto done; + } + + if ((ec->tx_max_coalesced_frames != 0) || + (ec->rx_max_coalesced_frames != 0)) 
{ + if ((ec->rx_coalesce_usecs != 0) || + (ec->use_adaptive_rx_coalesce != 0)) { + return -EINVAL; + } + + if ((ec->tx_max_coalesced_frames > + VMXNET3_COAL_STATIC_MAX_DEPTH) || + (ec->rx_max_coalesced_frames > + VMXNET3_COAL_STATIC_MAX_DEPTH)) { + return -EINVAL; + } + + memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf)); + adapter->coal_conf->coalMode = VMXNET3_COALESCE_STATIC; + + adapter->coal_conf->coalPara.coalStatic.tx_comp_depth = + (ec->tx_max_coalesced_frames ? + ec->tx_max_coalesced_frames : + VMXNET3_COAL_STATIC_DEFAULT_DEPTH); + + adapter->coal_conf->coalPara.coalStatic.rx_depth = + (ec->rx_max_coalesced_frames ? + ec->rx_max_coalesced_frames : + VMXNET3_COAL_STATIC_DEFAULT_DEPTH); + + adapter->coal_conf->coalPara.coalStatic.tx_depth = + VMXNET3_COAL_STATIC_DEFAULT_DEPTH; + goto done; + } + +done: + adapter->default_coal_mode = false; + if (netif_running(netdev)) { + spin_lock_irqsave(&adapter->cmd_lock, flags); + cmdInfo->varConf.confVer = 1; + cmdInfo->varConf.confLen = + cpu_to_le32(sizeof(*adapter->coal_conf)); + cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_SET_COALESCE); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); + } + + return 0; +} + static const struct ethtool_ops vmxnet3_ethtool_ops = { .get_settings = vmxnet3_get_settings, .get_drvinfo = vmxnet3_get_drvinfo, @@ -706,6 +889,8 @@ static const struct ethtool_ops vmxnet3_ethtool_ops = { .get_wol = vmxnet3_get_wol, .set_wol = vmxnet3_set_wol, .get_link = ethtool_op_get_link, + .get_coalesce = vmxnet3_get_coalesce, + .set_coalesce = vmxnet3_set_coalesce, .get_strings = vmxnet3_get_strings, .get_sset_count = vmxnet3_get_sset_count, .get_ethtool_stats = vmxnet3_get_ethtool_stats, diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 3d2b64e63..7dc37a090 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2016, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -20,7 +20,7 @@ * The full GNU General Public License is included in this distribution in * the file called "COPYING". * - * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> + * Maintained by: pv-drivers@vmware.com * */ @@ -69,16 +69,20 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.4.8.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.4.a.0-k" /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01040800 +#define VMXNET3_DRIVER_VERSION_NUM 0x01040a00 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ #define VMXNET3_RSS #endif +#define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */ +#define VMXNET3_REV_2 1 /* Vmxnet3 Rev. 2 */ +#define VMXNET3_REV_1 0 /* Vmxnet3 Rev. 
1 */ + /* * Capabilities */ @@ -237,6 +241,7 @@ struct vmxnet3_tx_queue { int num_stop; /* # of times the queue is * stopped */ int qid; + u16 txdata_desc_size; } __attribute__((__aligned__(SMP_CACHE_BYTES))); enum vmxnet3_rx_buf_type { @@ -267,15 +272,23 @@ struct vmxnet3_rq_driver_stats { u64 rx_buf_alloc_failure; }; +struct vmxnet3_rx_data_ring { + Vmxnet3_RxDataDesc *base; + dma_addr_t basePA; + u16 desc_size; +}; + struct vmxnet3_rx_queue { char name[IFNAMSIZ + 8]; /* To identify interrupt */ struct vmxnet3_adapter *adapter; struct napi_struct napi; struct vmxnet3_cmd_ring rx_ring[2]; + struct vmxnet3_rx_data_ring data_ring; struct vmxnet3_comp_ring comp_ring; struct vmxnet3_rx_ctx rx_ctx; u32 qid; /* rqID in RCD for buffer from 1st ring */ u32 qid2; /* rqID in RCD for buffer from 2nd ring */ + u32 dataRingQid; /* rqID in RCD for buffer from data ring */ struct vmxnet3_rx_buf_info *buf_info[2]; dma_addr_t buf_info_pa; struct Vmxnet3_RxQueueCtrl *shared; @@ -345,6 +358,7 @@ struct vmxnet3_adapter { int rx_buf_per_pkt; /* only apply to the 1st ring */ dma_addr_t shared_pa; dma_addr_t queue_desc_pa; + dma_addr_t coal_conf_pa; /* Wake-on-LAN */ u32 wol; @@ -359,12 +373,21 @@ struct vmxnet3_adapter { u32 rx_ring_size; u32 rx_ring2_size; + /* Size of buffer in the data ring */ + u16 txdata_desc_size; + u16 rxdata_desc_size; + + bool rxdataring_enabled; + struct work_struct work; unsigned long state; /* VMXNET3_STATE_BIT_xxx */ int share_intr; + struct Vmxnet3_CoalesceScheme *coal_conf; + bool default_coal_mode; + dma_addr_t adapter_pa; dma_addr_t pm_conf_pa; dma_addr_t rss_conf_pa; @@ -387,14 +410,34 @@ struct vmxnet3_adapter { #define VMXNET3_GET_ADDR_LO(dma) ((u32)(dma)) #define VMXNET3_GET_ADDR_HI(dma) ((u32)(((u64)(dma)) >> 32)) +#define VMXNET3_VERSION_GE_2(adapter) \ + (adapter->version >= VMXNET3_REV_2 + 1) +#define VMXNET3_VERSION_GE_3(adapter) \ + (adapter->version >= VMXNET3_REV_3 + 1) + /* must be a multiple of VMXNET3_RING_SIZE_ALIGN */ #define VMXNET3_DEF_TX_RING_SIZE 512 #define VMXNET3_DEF_RX_RING_SIZE 256 #define VMXNET3_DEF_RX_RING2_SIZE 128 +#define VMXNET3_DEF_RXDATA_DESC_SIZE 128 + #define VMXNET3_MAX_ETH_HDR_SIZE 22 #define VMXNET3_MAX_SKB_BUF_SIZE (3*1024) +#define VMXNET3_GET_RING_IDX(adapter, rqID) \ + ((rqID >= adapter->num_rx_queues && \ + rqID < 2 * adapter->num_rx_queues) ? 
1 : 0) \ + +#define VMXNET3_RX_DATA_RING(adapter, rqID) \ + (rqID >= 2 * adapter->num_rx_queues && \ + rqID < 3 * adapter->num_rx_queues) \ + +#define VMXNET3_COAL_STATIC_DEFAULT_DEPTH 64 + +#define VMXNET3_COAL_RBC_RATE(usecs) (1000000 / usecs) +#define VMXNET3_COAL_RBC_USECS(rbc_rate) (1000000 / rbc_rate) + int vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter); @@ -418,7 +461,8 @@ vmxnet3_set_features(struct net_device *netdev, netdev_features_t features); int vmxnet3_create_queues(struct vmxnet3_adapter *adapter, - u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size); + u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size, + u16 txdata_desc_size, u16 rxdata_desc_size); void vmxnet3_set_ethtool_ops(struct net_device *netdev); diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 8bd8c7e1e..1ce742032 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -35,6 +35,7 @@ #include <net/route.h> #include <net/addrconf.h> #include <net/l3mdev.h> +#include <net/fib_rules.h> #define RT_FL_TOS(oldflp4) \ ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)) @@ -42,9 +43,14 @@ #define DRV_NAME "vrf" #define DRV_VERSION "1.0" +#define FIB_RULE_PREF 1000 /* default preference for FIB rules */ +static bool add_fib_rules = true; + struct net_vrf { struct rtable __rcu *rth; + struct rtable __rcu *rth_local; struct rt6_info __rcu *rt6; + struct rt6_info __rcu *rt6_local; u32 tb_id; }; @@ -54,9 +60,20 @@ struct pcpu_dstats { u64 tx_drps; u64 rx_pkts; u64 rx_bytes; + u64 rx_drps; struct u64_stats_sync syncp; }; +static void vrf_rx_stats(struct net_device *dev, int len) +{ + struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); + + u64_stats_update_begin(&dstats->syncp); + dstats->rx_pkts++; + dstats->rx_bytes += len; + u64_stats_update_end(&dstats->syncp); +} + static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb) { vrf_dev->stats.tx_errors++; @@ -91,6 +108,34 @@ static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev, return stats; } +/* Local traffic destined to local address. Reinsert the packet to rx + * path, similar to loopback handling. + */ +static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev, + struct dst_entry *dst) +{ + int len = skb->len; + + skb_orphan(skb); + + skb_dst_set(skb, dst); + skb_dst_force(skb); + + /* set pkt_type to avoid skb hitting packet taps twice - + * once on Tx and again in Rx processing + */ + skb->pkt_type = PACKET_LOOPBACK; + + skb->protocol = eth_type_trans(skb, dev); + + if (likely(netif_rx(skb) == NET_RX_SUCCESS)) + vrf_rx_stats(dev, len); + else + this_cpu_inc(dev->dstats->rx_drps); + + return NETDEV_TX_OK; +} + #if IS_ENABLED(CONFIG_IPV6) static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, struct net_device *dev) @@ -117,8 +162,51 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, goto err; skb_dst_drop(skb); + + /* if dst.dev is loopback or the VRF device again this is locally + * originated traffic destined to a local address. Short circuit + * to Rx path using our local dst + */ + if (dst->dev == net->loopback_dev || dst->dev == dev) { + struct net_vrf *vrf = netdev_priv(dev); + struct rt6_info *rt6_local; + + /* release looked up dst and use cached local dst */ + dst_release(dst); + + rcu_read_lock(); + + rt6_local = rcu_dereference(vrf->rt6_local); + if (unlikely(!rt6_local)) { + rcu_read_unlock(); + goto err; + } + + /* Ordering issue: cached local dst is created on newlink + * before the IPv6 initialization. 
Using the local dst + * requires rt6i_idev to be set so make sure it is. + */ + if (unlikely(!rt6_local->rt6i_idev)) { + rt6_local->rt6i_idev = in6_dev_get(dev); + if (!rt6_local->rt6i_idev) { + rcu_read_unlock(); + goto err; + } + } + + dst = &rt6_local->dst; + dst_hold(dst); + + rcu_read_unlock(); + + return vrf_local_xmit(skb, dev, &rt6_local->dst); + } + skb_dst_set(skb, dst); + /* strip the ethernet header added for pass through VRF device */ + __skb_pull(skb, skb_network_offset(skb)); + ret = ip6_local_out(net, skb->sk, skb); if (unlikely(net_xmit_eval(ret))) dev->stats.tx_errors++; @@ -139,29 +227,6 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, } #endif -static int vrf_send_v4_prep(struct sk_buff *skb, struct flowi4 *fl4, - struct net_device *vrf_dev) -{ - struct rtable *rt; - int err = 1; - - rt = ip_route_output_flow(dev_net(vrf_dev), fl4, NULL); - if (IS_ERR(rt)) - goto out; - - /* TO-DO: what about broadcast ? */ - if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { - ip_rt_put(rt); - goto out; - } - - skb_dst_drop(skb); - skb_dst_set(skb, &rt->dst); - err = 0; -out: - return err; -} - static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, struct net_device *vrf_dev) { @@ -176,9 +241,51 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, FLOWI_FLAG_SKIP_NH_OIF, .daddr = ip4h->daddr, }; + struct net *net = dev_net(vrf_dev); + struct rtable *rt; - if (vrf_send_v4_prep(skb, &fl4, vrf_dev)) + rt = ip_route_output_flow(net, &fl4, NULL); + if (IS_ERR(rt)) + goto err; + + if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) { + ip_rt_put(rt); goto err; + } + + skb_dst_drop(skb); + + /* if dst.dev is loopback or the VRF device again this is locally + * originated traffic destined to a local address. 
Short circuit + * to Rx path using our local dst + */ + if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) { + struct net_vrf *vrf = netdev_priv(vrf_dev); + struct rtable *rth_local; + struct dst_entry *dst = NULL; + + ip_rt_put(rt); + + rcu_read_lock(); + + rth_local = rcu_dereference(vrf->rth_local); + if (likely(rth_local)) { + dst = &rth_local->dst; + dst_hold(dst); + } + + rcu_read_unlock(); + + if (unlikely(!dst)) + goto err; + + return vrf_local_xmit(skb, vrf_dev, dst); + } + + skb_dst_set(skb, &rt->dst); + + /* strip the ethernet header added for pass through VRF device */ + __skb_pull(skb, skb_network_offset(skb)); if (!ip4h->saddr) { ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0, @@ -200,9 +307,6 @@ err: static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev) { - /* strip the ethernet header added for pass through VRF device */ - __skb_pull(skb, skb_network_offset(skb)); - switch (skb->protocol) { case htons(ETH_P_IP): return vrf_process_v4_outbound(skb, dev); @@ -274,45 +378,92 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb) } /* holding rtnl */ -static void vrf_rt6_release(struct net_vrf *vrf) +static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) { struct rt6_info *rt6 = rtnl_dereference(vrf->rt6); + struct rt6_info *rt6_local = rtnl_dereference(vrf->rt6_local); + struct net *net = dev_net(dev); + struct dst_entry *dst; - rcu_assign_pointer(vrf->rt6, NULL); + RCU_INIT_POINTER(vrf->rt6, NULL); + RCU_INIT_POINTER(vrf->rt6_local, NULL); + synchronize_rcu(); + + /* move dev in dst's to loopback so this VRF device can be deleted + * - based on dst_ifdown + */ + if (rt6) { + dst = &rt6->dst; + dev_put(dst->dev); + dst->dev = net->loopback_dev; + dev_hold(dst->dev); + dst_release(dst); + } - if (rt6) - dst_release(&rt6->dst); + if (rt6_local) { + if (rt6_local->rt6i_idev) + in6_dev_put(rt6_local->rt6i_idev); + + dst = &rt6_local->dst; + dev_put(dst->dev); + dst->dev = net->loopback_dev; + dev_hold(dst->dev); + dst_release(dst); + } } static int vrf_rt6_create(struct net_device *dev) { + int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE; struct net_vrf *vrf = netdev_priv(dev); struct net *net = dev_net(dev); struct fib6_table *rt6i_table; - struct rt6_info *rt6; + struct rt6_info *rt6, *rt6_local; int rc = -ENOMEM; + /* IPv6 can be CONFIG enabled and then disabled runtime */ + if (!ipv6_mod_enabled()) + return 0; + rt6i_table = fib6_new_table(net, vrf->tb_id); if (!rt6i_table) goto out; - rt6 = ip6_dst_alloc(net, dev, - DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE); + /* create a dst for routing packets out a VRF device */ + rt6 = ip6_dst_alloc(net, dev, flags); if (!rt6) goto out; dst_hold(&rt6->dst); rt6->rt6i_table = rt6i_table; - rt6->dst.output = vrf_output6; + rt6->dst.output = vrf_output6; + + /* create a dst for local routing - packets sent locally + * to local address via the VRF device as a loopback + */ + rt6_local = ip6_dst_alloc(net, dev, flags); + if (!rt6_local) { + dst_release(&rt6->dst); + goto out; + } + + dst_hold(&rt6_local->dst); + + rt6_local->rt6i_idev = in6_dev_get(dev); + rt6_local->rt6i_flags = RTF_UP | RTF_NONEXTHOP | RTF_LOCAL; + rt6_local->rt6i_table = rt6i_table; + rt6_local->dst.input = ip6_input; + rcu_assign_pointer(vrf->rt6, rt6); + rcu_assign_pointer(vrf->rt6_local, rt6_local); rc = 0; out: return rc; } #else -static void vrf_rt6_release(struct net_vrf *vrf) +static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) { } @@ 
-381,32 +532,66 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb) } /* holding rtnl */ -static void vrf_rtable_release(struct net_vrf *vrf) +static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf) { struct rtable *rth = rtnl_dereference(vrf->rth); + struct rtable *rth_local = rtnl_dereference(vrf->rth_local); + struct net *net = dev_net(dev); + struct dst_entry *dst; - rcu_assign_pointer(vrf->rth, NULL); + RCU_INIT_POINTER(vrf->rth, NULL); + RCU_INIT_POINTER(vrf->rth_local, NULL); + synchronize_rcu(); + + /* move dev in dst's to loopback so this VRF device can be deleted + * - based on dst_ifdown + */ + if (rth) { + dst = &rth->dst; + dev_put(dst->dev); + dst->dev = net->loopback_dev; + dev_hold(dst->dev); + dst_release(dst); + } - if (rth) - dst_release(&rth->dst); + if (rth_local) { + dst = &rth_local->dst; + dev_put(dst->dev); + dst->dev = net->loopback_dev; + dev_hold(dst->dev); + dst_release(dst); + } } static int vrf_rtable_create(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); - struct rtable *rth; + struct rtable *rth, *rth_local; if (!fib_new_table(dev_net(dev), vrf->tb_id)) return -ENOMEM; + /* create a dst for routing packets out through a VRF device */ rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0); if (!rth) return -ENOMEM; - rth->dst.output = vrf_output; + /* create a dst for local ingress routing - packets sent locally + * to local address via the VRF device as a loopback + */ + rth_local = rt_dst_alloc(dev, RTCF_LOCAL, RTN_LOCAL, 1, 1, 0); + if (!rth_local) { + dst_release(&rth->dst); + return -ENOMEM; + } + + rth->dst.output = vrf_output; rth->rt_table_id = vrf->tb_id; + rth_local->rt_table_id = vrf->tb_id; + rcu_assign_pointer(vrf->rth, rth); + rcu_assign_pointer(vrf->rth_local, rth_local); return 0; } @@ -477,8 +662,8 @@ static void vrf_dev_uninit(struct net_device *dev) struct net_device *port_dev; struct list_head *iter; - vrf_rtable_release(vrf); - vrf_rt6_release(vrf); + vrf_rtable_release(dev, vrf); + vrf_rt6_release(dev, vrf); netdev_for_each_lower_dev(dev, port_dev, iter) vrf_del_slave(dev, port_dev); @@ -504,10 +689,16 @@ static int vrf_dev_init(struct net_device *dev) dev->flags = IFF_MASTER | IFF_NOARP; + /* MTU is irrelevant for VRF device; set to 64k similar to lo */ + dev->mtu = 64 * 1024; + + /* similarly, oper state is irrelevant; set to up to avoid confusion */ + dev->operstate = IF_OPER_UP; + netdev_lockdep_set_classes(dev); return 0; out_rth: - vrf_rtable_release(vrf); + vrf_rtable_release(dev, vrf); out_stats: free_percpu(dev->dstats); dev->dstats = NULL; @@ -588,6 +779,25 @@ static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4) return rc; } +static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) +{ + return 0; +} + +static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook, + struct sk_buff *skb, + struct net_device *dev) +{ + struct net *net = dev_net(dev); + + nf_reset(skb); + + if (NF_HOOK(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) < 0) + skb = NULL; /* kfree_skb(skb) handled by nf code */ + + return skb; +} + #if IS_ENABLED(CONFIG_IPV6) /* neighbor handling is done with actual device; do not want * to flip skb->dev for those ndisc packets. 
This really fails @@ -623,11 +833,78 @@ out: return rc; } +static struct rt6_info *vrf_ip6_route_lookup(struct net *net, + const struct net_device *dev, + struct flowi6 *fl6, + int ifindex, + int flags) +{ + struct net_vrf *vrf = netdev_priv(dev); + struct fib6_table *table = NULL; + struct rt6_info *rt6; + + rcu_read_lock(); + + /* fib6_table does not have a refcnt and can not be freed */ + rt6 = rcu_dereference(vrf->rt6); + if (likely(rt6)) + table = rt6->rt6i_table; + + rcu_read_unlock(); + + if (!table) + return NULL; + + return ip6_pol_route(net, table, ifindex, fl6, flags); +} + +static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev, + int ifindex) +{ + const struct ipv6hdr *iph = ipv6_hdr(skb); + struct flowi6 fl6 = { + .daddr = iph->daddr, + .saddr = iph->saddr, + .flowlabel = ip6_flowinfo(iph), + .flowi6_mark = skb->mark, + .flowi6_proto = iph->nexthdr, + .flowi6_iif = ifindex, + }; + struct net *net = dev_net(vrf_dev); + struct rt6_info *rt6; + + rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, + RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE); + if (unlikely(!rt6)) + return; + + if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst)) + return; + + skb_dst_set(skb, &rt6->dst); +} + static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, struct sk_buff *skb) { - /* if packet is NDISC keep the ingress interface */ - if (!ipv6_ndisc_frame(skb)) { + int orig_iif = skb->skb_iif; + bool need_strict; + + /* loopback traffic; do not push through packet taps again. + * Reset pkt_type for upper layers to process skb + */ + if (skb->pkt_type == PACKET_LOOPBACK) { + skb->dev = vrf_dev; + skb->skb_iif = vrf_dev->ifindex; + skb->pkt_type = PACKET_HOST; + goto out; + } + + /* if packet is NDISC or addressed to multicast or link-local + * then keep the ingress interface + */ + need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); + if (!ipv6_ndisc_frame(skb) && !need_strict) { skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; @@ -638,6 +915,11 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, IP6CB(skb)->flags |= IP6SKB_L3SLAVE; } + if (need_strict) + vrf_ip6_input_dst(skb, vrf_dev, orig_iif); + + skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev); +out: return skb; } @@ -655,10 +937,20 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev, skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; + /* loopback traffic; do not push through packet taps again. 
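rt6_need_strict() gates both this ingress path and the output lookups below: destinations whose scope is tied to one link (multicast, link-local, loopback) must be looked up against a fixed ifindex instead of being re-homed onto the VRF device. A rough equivalent of that test, assuming only the standard address-type flags (the real helper is private to net/ipv6/route.c and may differ in detail):

	/* Sketch: true when the destination must keep its original ifindex. */
	static bool my_need_strict(const struct in6_addr *daddr)
	{
		return !!(ipv6_addr_type(daddr) &
			  (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL |
			   IPV6_ADDR_LOOPBACK));
	}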
+ * Reset pkt_type for upper layers to process skb + */ + if (skb->pkt_type == PACKET_LOOPBACK) { + skb->pkt_type = PACKET_HOST; + goto out; + } + skb_push(skb, skb->mac_len); dev_queue_xmit_nit(skb, vrf_dev); skb_pull(skb, skb->mac_len); + skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev); +out: return skb; } @@ -679,13 +971,37 @@ static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev, #if IS_ENABLED(CONFIG_IPV6) static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev, - const struct flowi6 *fl6) + struct flowi6 *fl6) { + bool need_strict = rt6_need_strict(&fl6->daddr); + struct net_vrf *vrf = netdev_priv(dev); + struct net *net = dev_net(dev); struct dst_entry *dst = NULL; + struct rt6_info *rt; - if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) { - struct net_vrf *vrf = netdev_priv(dev); - struct rt6_info *rt; + /* send to link-local or multicast address */ + if (need_strict) { + int flags = RT6_LOOKUP_F_IFACE; + + /* VRF device does not have a link-local address and + * sending packets to link-local or mcast addresses over + * a VRF device does not make sense + */ + if (fl6->flowi6_oif == dev->ifindex) { + struct dst_entry *dst = &net->ipv6.ip6_null_entry->dst; + + dst_hold(dst); + return dst; + } + + if (!ipv6_addr_any(&fl6->saddr)) + flags |= RT6_LOOKUP_F_HAS_SADDR; + + rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags); + if (rt) + dst = &rt->dst; + + } else if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) { rcu_read_lock(); @@ -698,8 +1014,52 @@ static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev, rcu_read_unlock(); } + /* make sure oif is set to VRF device for lookup */ + if (!need_strict) + fl6->flowi6_oif = dev->ifindex; + return dst; } + +/* called under rcu_read_lock */ +static int vrf_get_saddr6(struct net_device *dev, const struct sock *sk, + struct flowi6 *fl6) +{ + struct net *net = dev_net(dev); + struct dst_entry *dst; + struct rt6_info *rt; + int err; + + if (rt6_need_strict(&fl6->daddr)) { + rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, + RT6_LOOKUP_F_IFACE); + if (unlikely(!rt)) + return 0; + + dst = &rt->dst; + } else { + __u8 flags = fl6->flowi6_flags; + + fl6->flowi6_flags |= FLOWI_FLAG_L3MDEV_SRC; + fl6->flowi6_flags |= FLOWI_FLAG_SKIP_NH_OIF; + + dst = ip6_route_output(net, sk, fl6); + rt = (struct rt6_info *)dst; + + fl6->flowi6_flags = flags; + } + + err = dst->error; + if (!err) { + err = ip6_route_get_saddr(net, rt, &fl6->daddr, + sk ? 
inet6_sk(sk)->srcprefs : 0, + &fl6->saddr); + } + + dst_release(dst); + + return err; +} #endif static const struct l3mdev_ops vrf_l3mdev_ops = { @@ -709,6 +1069,7 @@ static const struct l3mdev_ops vrf_l3mdev_ops = { .l3mdev_l3_rcv = vrf_l3_rcv, #if IS_ENABLED(CONFIG_IPV6) .l3mdev_get_rt6_dst = vrf_get_rt6_dst, + .l3mdev_get_saddr6 = vrf_get_saddr6, #endif }; @@ -723,6 +1084,94 @@ static const struct ethtool_ops vrf_ethtool_ops = { .get_drvinfo = vrf_get_drvinfo, }; +static inline size_t vrf_fib_rule_nl_size(void) +{ + size_t sz; + + sz = NLMSG_ALIGN(sizeof(struct fib_rule_hdr)); + sz += nla_total_size(sizeof(u8)); /* FRA_L3MDEV */ + sz += nla_total_size(sizeof(u32)); /* FRA_PRIORITY */ + + return sz; +} + +static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it) +{ + struct fib_rule_hdr *frh; + struct nlmsghdr *nlh; + struct sk_buff *skb; + int err; + + if (family == AF_INET6 && !ipv6_mod_enabled()) + return 0; + + skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0); + if (!nlh) + goto nla_put_failure; + + /* rule only needs to appear once */ + nlh->nlmsg_flags |= NLM_F_EXCL; + + frh = nlmsg_data(nlh); + memset(frh, 0, sizeof(*frh)); + frh->family = family; + frh->action = FR_ACT_TO_TBL; + + if (nla_put_u8(skb, FRA_L3MDEV, 1)) + goto nla_put_failure; + + if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF)) + goto nla_put_failure; + + nlmsg_end(skb, nlh); + + /* fib_nl_{new,del}rule handling looks for net from skb->sk */ + skb->sk = dev_net(dev)->rtnl; + if (add_it) { + err = fib_nl_newrule(skb, nlh); + if (err == -EEXIST) + err = 0; + } else { + err = fib_nl_delrule(skb, nlh); + if (err == -ENOENT) + err = 0; + } + nlmsg_free(skb); + + return err; + +nla_put_failure: + nlmsg_free(skb); + + return -EMSGSIZE; +} + +static int vrf_add_fib_rules(const struct net_device *dev) +{ + int err; + + err = vrf_fib_rule(dev, AF_INET, true); + if (err < 0) + goto out_err; + + err = vrf_fib_rule(dev, AF_INET6, true); + if (err < 0) + goto ipv6_err; + + return 0; + +ipv6_err: + vrf_fib_rule(dev, AF_INET, false); + +out_err: + netdev_err(dev, "Failed to add FIB rules.\n"); + return err; +} + static void vrf_setup(struct net_device *dev) { ether_setup(dev); @@ -741,6 +1190,20 @@ static void vrf_setup(struct net_device *dev) /* don't allow vrf devices to change network namespaces. 
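vrf_fib_rule_nl_size() above lets nlmsg_new() size the rule message exactly once, up front. Spelling out the arithmetic for the two attributes actually emitted (a sketch, assuming the standard 4-byte netlink rounding of NLMSG_ALIGN()/nla_total_size()):

	/* Sketch: struct fib_rule_hdr is 12 bytes; each small attribute
	 * costs a 4-byte nlattr header plus its payload, padded to 4.
	 */
	static size_t my_rule_nl_size(void)
	{
		size_t sz = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));	/* 12 */

		sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV:   4 + 1 -> 8 */
		sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY: 4 + 4 -> 8 */
		return sz;				/* 28 bytes of payload */
	}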
*/ dev->features |= NETIF_F_NETNS_LOCAL; + + /* does not make sense for a VLAN to be added to a vrf device */ + dev->features |= NETIF_F_VLAN_CHALLENGED; + + /* enable offload features */ + dev->features |= NETIF_F_GSO_SOFTWARE; + dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM; + dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA; + + dev->hw_features = dev->features; + dev->hw_enc_features = dev->features; + + /* default to no qdisc; user can add if desired */ + dev->priv_flags |= IFF_NO_QUEUE; } static int vrf_validate(struct nlattr *tb[], struct nlattr *data[]) @@ -763,6 +1226,7 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct net_vrf *vrf = netdev_priv(dev); + int err; if (!data || !data[IFLA_VRF_TABLE]) return -EINVAL; @@ -771,7 +1235,21 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, dev->priv_flags |= IFF_L3MDEV_MASTER; - return register_netdevice(dev); + err = register_netdevice(dev); + if (err) + goto out; + + if (add_fib_rules) { + err = vrf_add_fib_rules(dev); + if (err) { + unregister_netdevice(dev); + goto out; + } + add_fib_rules = false; + } + +out: + return err; } static size_t vrf_nl_getsize(const struct net_device *dev) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index b3b9db68f..6e6583205 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -11,32 +11,18 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> -#include <linux/types.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> -#include <linux/skbuff.h> -#include <linux/rculist.h> -#include <linux/netdevice.h> -#include <linux/in.h> -#include <linux/ip.h> #include <linux/udp.h> #include <linux/igmp.h> -#include <linux/etherdevice.h> #include <linux/if_ether.h> -#include <linux/if_vlan.h> -#include <linux/hash.h> #include <linux/ethtool.h> #include <net/arp.h> #include <net/ndisc.h> #include <net/ip.h> -#include <net/ip_tunnels.h> #include <net/icmp.h> -#include <net/udp.h> -#include <net/udp_tunnel.h> #include <net/rtnetlink.h> -#include <net/route.h> -#include <net/dsfield.h> #include <net/inet_ecn.h> #include <net/net_namespace.h> #include <net/netns/generic.h> @@ -44,12 +30,9 @@ #include <net/protocol.h> #if IS_ENABLED(CONFIG_IPV6) -#include <net/ipv6.h> -#include <net/addrconf.h> #include <net/ip6_tunnel.h> #include <net/ip6_checksum.h> #endif -#include <net/dst_metadata.h> #define VXLAN_VERSION "0.1" @@ -619,42 +602,6 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); } -/* Notify netdevs that UDP port started listening */ -static void vxlan_notify_add_rx_port(struct vxlan_sock *vs) -{ - struct net_device *dev; - struct sock *sk = vs->sock->sk; - struct net *net = sock_net(sk); - sa_family_t sa_family = vxlan_get_sk_family(vs); - __be16 port = inet_sk(sk)->inet_sport; - - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { - if (dev->netdev_ops->ndo_add_vxlan_port) - dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family, - port); - } - rcu_read_unlock(); -} - -/* Notify netdevs that UDP port is no more listening */ -static void vxlan_notify_del_rx_port(struct vxlan_sock *vs) -{ - struct net_device *dev; - struct sock *sk = vs->sock->sk; - struct net *net = sock_net(sk); - sa_family_t sa_family = vxlan_get_sk_family(vs); - __be16 port = inet_sk(sk)->inet_sport; - - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { - if 
(dev->netdev_ops->ndo_del_vxlan_port) - dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family, - port); - } - rcu_read_unlock(); -} - /* Add new entry to forwarding table -- assumes lock held */ static int vxlan_fdb_create(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, @@ -1050,7 +997,10 @@ static bool __vxlan_sock_release_prep(struct vxlan_sock *vs) vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id); spin_lock(&vn->sock_lock); hlist_del_rcu(&vs->hlist); - vxlan_notify_del_rx_port(vs); + udp_tunnel_notify_del_rx_port(vs->sock, + (vs->flags & VXLAN_F_GPE) ? + UDP_TUNNEL_TYPE_VXLAN_GPE : + UDP_TUNNEL_TYPE_VXLAN); spin_unlock(&vn->sock_lock); return true; @@ -1861,7 +1811,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, fl4.flowi4_mark = skb->mark; fl4.flowi4_proto = IPPROTO_UDP; fl4.daddr = daddr; - fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr; + fl4.saddr = *saddr; rt = ip_route_output_key(vxlan->net, &fl4); if (!IS_ERR(rt)) { @@ -1897,7 +1847,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = oif; fl6.daddr = *daddr; - fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; + fl6.saddr = *saddr; fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label); fl6.flowi6_mark = skb->mark; fl6.flowi6_proto = IPPROTO_UDP; @@ -1970,7 +1920,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct rtable *rt = NULL; const struct iphdr *old_iph; union vxlan_addr *dst; - union vxlan_addr remote_ip; + union vxlan_addr remote_ip, local_ip; + union vxlan_addr *src; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; __be16 src_port = 0, dst_port; @@ -1988,6 +1939,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; vni = rdst->remote_vni; dst = &rdst->remote_ip; + src = &vxlan->cfg.saddr; dst_cache = &rdst->dst_cache; } else { if (!info) { @@ -1998,11 +1950,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; vni = vxlan_tun_id_to_vni(info->key.tun_id); remote_ip.sa.sa_family = ip_tunnel_info_af(info); - if (remote_ip.sa.sa_family == AF_INET) + if (remote_ip.sa.sa_family == AF_INET) { remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; - else + local_ip.sin.sin_addr.s_addr = info->key.u.ipv4.src; + } else { remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; + local_ip.sin6.sin6_addr = info->key.u.ipv6.src; + } dst = &remote_ip; + src = &local_ip; dst_cache = &info->dst_cache; } @@ -2042,15 +1998,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } if (dst->sa.sa_family == AF_INET) { - __be32 saddr; - if (!vxlan->vn4_sock) goto drop; sk = vxlan->vn4_sock->sock->sk; rt = vxlan_get_route(vxlan, skb, rdst ? 
rdst->remote_ifindex : 0, tos, - dst->sin.sin_addr.s_addr, &saddr, + dst->sin.sin_addr.s_addr, + &src->sin.sin_addr.s_addr, dst_cache, info); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to %pI4\n", @@ -2067,7 +2022,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } /* Bypass encapsulation if the destination is local */ - if (rt->rt_flags & RTCF_LOCAL && + if (!info && rt->rt_flags & RTCF_LOCAL && !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { struct vxlan_dev *dst_vxlan; @@ -2093,13 +2048,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (err < 0) goto xmit_tx_error; - udp_tunnel_xmit_skb(rt, sk, skb, saddr, + udp_tunnel_xmit_skb(rt, sk, skb, src->sin.sin_addr.s_addr, dst->sin.sin_addr.s_addr, tos, ttl, df, src_port, dst_port, xnet, !udp_sum); #if IS_ENABLED(CONFIG_IPV6) } else { struct dst_entry *ndst; - struct in6_addr saddr; u32 rt6i_flags; if (!vxlan->vn6_sock) @@ -2108,7 +2062,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ndst = vxlan6_get_route(vxlan, skb, rdst ? rdst->remote_ifindex : 0, tos, - label, &dst->sin6.sin6_addr, &saddr, + label, &dst->sin6.sin6_addr, + &src->sin6.sin6_addr, dst_cache, info); if (IS_ERR(ndst)) { netdev_dbg(dev, "no route to %pI6\n", @@ -2127,7 +2082,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, /* Bypass encapsulation if the destination is local */ rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags; - if (rt6i_flags & RTF_LOCAL && + if (!info && rt6i_flags & RTF_LOCAL && !(rt6i_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) { struct vxlan_dev *dst_vxlan; @@ -2154,7 +2109,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, return; } udp_tunnel6_xmit_skb(ndst, sk, skb, dev, - &saddr, &dst->sin6.sin6_addr, tos, ttl, + &src->sin6.sin6_addr, + &dst->sin6.sin6_addr, tos, ttl, label, src_port, dst_port, !udp_sum); #endif } @@ -2525,30 +2481,24 @@ static struct device_type vxlan_type = { .name = "vxlan", }; -/* Calls the ndo_add_vxlan_port of the caller in order to +/* Calls the ndo_udp_tunnel_add of the caller in order to * supply the listening VXLAN udp ports. Callers are expected - * to implement the ndo_add_vxlan_port. + * to implement the ndo_udp_tunnel_add. */ static void vxlan_push_rx_ports(struct net_device *dev) { struct vxlan_sock *vs; struct net *net = dev_net(dev); struct vxlan_net *vn = net_generic(net, vxlan_net_id); - sa_family_t sa_family; - __be16 port; unsigned int i; - if (!dev->netdev_ops->ndo_add_vxlan_port) - return; - spin_lock(&vn->sock_lock); for (i = 0; i < PORT_HASH_SIZE; ++i) { - hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) { - port = inet_sk(vs->sock->sk)->inet_sport; - sa_family = vxlan_get_sk_family(vs); - dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family, - port); - } + hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) + udp_tunnel_push_rx_port(dev, vs->sock, + (vs->flags & VXLAN_F_GPE) ? + UDP_TUNNEL_TYPE_VXLAN_GPE : + UDP_TUNNEL_TYPE_VXLAN); } spin_unlock(&vn->sock_lock); } @@ -2750,7 +2700,10 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6, spin_lock(&vn->sock_lock); hlist_add_head_rcu(&vs->hlist, vs_head(net, port)); - vxlan_notify_add_rx_port(vs); + udp_tunnel_notify_add_rx_port(sock, + (vs->flags & VXLAN_F_GPE) ? + UDP_TUNNEL_TYPE_VXLAN_GPE : + UDP_TUNNEL_TYPE_VXLAN); spin_unlock(&vn->sock_lock); /* Mark socket as an encapsulation socket. 
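The vxlan_get_route()/vxlan6_get_route() changes above thread a per-flow source address into the lookup instead of always using the device-level cfg.saddr; for COLLECT_METADATA tunnels that address pair comes from the ip_tunnel_info attached to the skb. Condensed from vxlan_xmit_one() (field names are from the hunks above; the helper name is hypothetical):

	/* Sketch: pick the address pair for a metadata-based tunnel. */
	static void my_pick_addrs(const struct ip_tunnel_info *info,
				  union vxlan_addr *remote_ip,
				  union vxlan_addr *local_ip)
	{
		remote_ip->sa.sa_family = ip_tunnel_info_af(info);
		if (remote_ip->sa.sa_family == AF_INET) {
			remote_ip->sin.sin_addr.s_addr = info->key.u.ipv4.dst;
			local_ip->sin.sin_addr.s_addr = info->key.u.ipv4.src;
		} else {
			remote_ip->sin6.sin6_addr = info->key.u.ipv6.dst;
			local_ip->sin6.sin6_addr = info->key.u.ipv6.src;
		}
	}

dst and src then point at these unions, so both route lookups receive the per-flow saddr rather than the configured one.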
*/ @@ -2829,14 +2782,15 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, struct net_device *lowerdev = NULL; if (conf->flags & VXLAN_F_GPE) { - if (conf->flags & ~VXLAN_F_ALLOWED_GPE) - return -EINVAL; /* For now, allow GPE only together with COLLECT_METADATA. * This can be relaxed later; in such case, the other side * of the PtP link will have to be provided. */ - if (!(conf->flags & VXLAN_F_COLLECT_METADATA)) + if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) || + !(conf->flags & VXLAN_F_COLLECT_METADATA)) { + pr_info("unsupported combination of extensions\n"); return -EINVAL; + } vxlan_raw_setup(dev); } else { @@ -2889,6 +2843,9 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM); needed_headroom = lowerdev->hard_header_len; + } else if (vxlan_addr_multicast(&dst->remote_ip)) { + pr_info("multicast destination requires interface to be specified\n"); + return -EINVAL; } if (conf->mtu) { @@ -2921,8 +2878,10 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 && tmp->cfg.dst_port == vxlan->cfg.dst_port && (tmp->flags & VXLAN_F_RCV_FLAGS) == - (vxlan->flags & VXLAN_F_RCV_FLAGS)) - return -EEXIST; + (vxlan->flags & VXLAN_F_RCV_FLAGS)) { + pr_info("duplicate VNI %u\n", be32_to_cpu(conf->vni)); + return -EEXIST; + } } dev->ethtool_ops = &vxlan_ethtool_ops; @@ -2956,7 +2915,6 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct vxlan_config conf; - int err; memset(&conf, 0, sizeof(conf)); @@ -3065,26 +3023,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, if (tb[IFLA_MTU]) conf.mtu = nla_get_u32(tb[IFLA_MTU]); - err = vxlan_dev_configure(src_net, dev, &conf); - switch (err) { - case -ENODEV: - pr_info("ifindex %d does not exist\n", conf.remote_ifindex); - break; - - case -EPERM: - pr_info("IPv6 is disabled via sysctl\n"); - break; - - case -EEXIST: - pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni)); - break; - - case -EINVAL: - pr_info("unsupported combination of extensions\n"); - break; - } - - return err; + return vxlan_dev_configure(src_net, dev, &conf); } static void vxlan_dellink(struct net_device *dev, struct list_head *head) @@ -3308,7 +3247,7 @@ static int vxlan_netdevice_event(struct notifier_block *unused, if (event == NETDEV_UNREGISTER) vxlan_handle_lowerdev_unregister(vn, dev); - else if (event == NETDEV_OFFLOAD_PUSH_VXLAN) + else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO) vxlan_push_rx_ports(dev); return NOTIFY_DONE; diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index a2fdd15f2..33ab3345d 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -280,6 +280,28 @@ config DSCC4 To compile this driver as a module, choose M here: the module will be called dscc4. +config FSL_UCC_HDLC + tristate "Freescale QUICC Engine HDLC support" + depends on HDLC + depends on QUICC_ENGINE + help + Driver for Freescale QUICC Engine HDLC controller. The driver + supports HDLC in NMSI and TDM mode. + + To compile this driver as a module, choose M here: the + module will be called fsl_ucc_hdlc. + +config SLIC_DS26522 + tristate "Slic Maxim ds26522 card support" + depends on SPI + depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE + help + This module initializes and configures the slic maxim card + in T1 or E1 mode. 
+ + To compile this driver as a module, choose M here: the + module will be called slic_ds26522. + config DSCC4_PCISYNC bool "Etinc PCISYNC features" depends on DSCC4 diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index c135ef47c..73c232660 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -32,6 +32,8 @@ obj-$(CONFIG_WANXL) += wanxl.o obj-$(CONFIG_PCI200SYN) += pci200syn.o obj-$(CONFIG_PC300TOO) += pc300too.o obj-$(CONFIG_IXP4XX_HSS) += ixp4xx_hss.o +obj-$(CONFIG_FSL_UCC_HDLC) += fsl_ucc_hdlc.o +obj-$(CONFIG_SLIC_DS26522) += slic_ds26522.o clean-files := wanxlfw.inc $(obj)/wanxl.o: $(obj)/wanxlfw.inc diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c new file mode 100644 index 000000000..6f044450b --- /dev/null +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -0,0 +1,1178 @@ +/* Freescale QUICC Engine HDLC Device Driver + * + * Copyright 2016 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/hdlc.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/stddef.h> +#include <soc/fsl/qe/qe_tdm.h> +#include <uapi/linux/if_arp.h> + +#include "fsl_ucc_hdlc.h" + +#define DRV_DESC "Freescale QE UCC HDLC Driver" +#define DRV_NAME "ucc_hdlc" + +#define TDM_PPPOHT_SLIC_MAXIN +#define BROKEN_FRAME_INFO + +static struct ucc_tdm_info utdm_primary_info = { + .uf_info = { + .tsa = 0, + .cdp = 0, + .cds = 1, + .ctsp = 1, + .ctss = 1, + .revd = 0, + .urfs = 256, + .utfs = 256, + .urfet = 128, + .urfset = 192, + .utfet = 128, + .utftt = 0x40, + .ufpt = 256, + .mode = UCC_FAST_PROTOCOL_MODE_HDLC, + .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL, + .tenc = UCC_FAST_TX_ENCODING_NRZ, + .renc = UCC_FAST_RX_ENCODING_NRZ, + .tcrc = UCC_FAST_16_BIT_CRC, + .synl = UCC_FAST_SYNC_LEN_NOT_USED, + }, + + .si_info = { +#ifdef TDM_PPPOHT_SLIC_MAXIN + .simr_rfsd = 1, + .simr_tfsd = 2, +#else + .simr_rfsd = 0, + .simr_tfsd = 0, +#endif + .simr_crt = 0, + .simr_sl = 0, + .simr_ce = 1, + .simr_fe = 1, + .simr_gm = 0, + }, +}; + +static struct ucc_tdm_info utdm_info[MAX_HDLC_NUM]; + +static int uhdlc_init(struct ucc_hdlc_private *priv) +{ + struct ucc_tdm_info *ut_info; + struct ucc_fast_info *uf_info; + u32 cecr_subblock; + u16 bd_status; + int ret, i; + void *bd_buffer; + dma_addr_t bd_dma_addr; + u32 riptr; + u32 tiptr; + u32 gumr; + + ut_info = priv->ut_info; + uf_info = &ut_info->uf_info; + + if (priv->tsa) { + uf_info->tsa = 1; + uf_info->ctsp = 1; + } + uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF | + UCC_HDLC_UCCE_TXB) << 16); + + ret = ucc_fast_init(uf_info, &priv->uccf); + if (ret) { + dev_err(priv->dev, "Failed to init uccf."); + return ret; + } + + priv->uf_regs = priv->uccf->uf_regs; + ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); + + /* Loopback mode */ + if (priv->loopback) { + dev_info(priv->dev, "Loopback Mode\n"); + gumr = 
ioread32be(&priv->uf_regs->gumr); + gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS | + UCC_FAST_GUMR_TCI); + gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN); + iowrite32be(gumr, &priv->uf_regs->gumr); + } + + /* Initialize SI */ + if (priv->tsa) + ucc_tdm_init(priv->utdm, priv->ut_info); + + /* Write to QE CECR, UCCx channel to Stop Transmission */ + cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); + ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock, + QE_CR_PROTOCOL_UNSPECIFIED, 0); + + /* Set UPSMR normal mode (needs to be fixed) */ + iowrite32be(0, &priv->uf_regs->upsmr); + + priv->rx_ring_size = RX_BD_RING_LEN; + priv->tx_ring_size = TX_BD_RING_LEN; + /* Alloc Rx BD */ + priv->rx_bd_base = dma_alloc_coherent(priv->dev, + RX_BD_RING_LEN * sizeof(struct qe_bd), + &priv->dma_rx_bd, GFP_KERNEL); + + if (!priv->rx_bd_base) { + dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n"); + ret = -ENOMEM; + goto free_uccf; + } + + /* Alloc Tx BD */ + priv->tx_bd_base = dma_alloc_coherent(priv->dev, + TX_BD_RING_LEN * sizeof(struct qe_bd), + &priv->dma_tx_bd, GFP_KERNEL); + + if (!priv->tx_bd_base) { + dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n"); + ret = -ENOMEM; + goto free_rx_bd; + } + + /* Alloc parameter ram for ucc hdlc */ + priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param), + ALIGNMENT_OF_UCC_HDLC_PRAM); + + if (priv->ucc_pram_offset < 0) { + dev_err(priv->dev, "Cannot allocate MURAM for hdlc parameter.\n"); + ret = -ENOMEM; + goto free_tx_bd; + } + + priv->rx_skbuff = kzalloc(priv->rx_ring_size * sizeof(*priv->rx_skbuff), + GFP_KERNEL); + if (!priv->rx_skbuff) { + ret = -ENOMEM; + goto free_ucc_pram; + } + + priv->tx_skbuff = kzalloc(priv->tx_ring_size * sizeof(*priv->tx_skbuff), + GFP_KERNEL); + if (!priv->tx_skbuff) { + ret = -ENOMEM; + goto free_rx_skbuff; + } + + priv->skb_curtx = 0; + priv->skb_dirtytx = 0; + priv->curtx_bd = priv->tx_bd_base; + priv->dirty_tx = priv->tx_bd_base; + priv->currx_bd = priv->rx_bd_base; + priv->currx_bdnum = 0; + + /* init parameter base */ + cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); + ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock, + QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset); + + priv->ucc_pram = (struct ucc_hdlc_param __iomem *) + qe_muram_addr(priv->ucc_pram_offset); + + /* Zero out parameter ram */ + memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param)); + + /* Alloc riptr, tiptr */ + riptr = qe_muram_alloc(32, 32); + if (riptr < 0) { + dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n"); + ret = -ENOMEM; + goto free_tx_skbuff; + } + + tiptr = qe_muram_alloc(32, 32); + if (tiptr < 0) { + dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n"); + ret = -ENOMEM; + goto free_riptr; + } + + /* Set RIPTR, TIPTR */ + iowrite16be(riptr, &priv->ucc_pram->riptr); + iowrite16be(tiptr, &priv->ucc_pram->tiptr); + + /* Set MRBLR */ + iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr); + + /* Set RBASE, TBASE */ + iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase); + iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase); + + /* Set RSTATE, TSTATE */ + iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate); + iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate); + + /* Set C_MASK, C_PRES for 16bit CRC */ + iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask); + iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres); + + iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr); + iowrite16be(DEFAULT_RFTHR, 
&priv->ucc_pram->rfthr); + iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt); + iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask); + iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1); + iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2); + iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3); + iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4); + + /* Get BD buffer */ + bd_buffer = dma_alloc_coherent(priv->dev, + (RX_BD_RING_LEN + TX_BD_RING_LEN) * + MAX_RX_BUF_LENGTH, + &bd_dma_addr, GFP_KERNEL); + + if (!bd_buffer) { + dev_err(priv->dev, "Could not allocate buffer descriptors\n"); + ret = -ENOMEM; + goto free_tiptr; + } + + memset(bd_buffer, 0, (RX_BD_RING_LEN + TX_BD_RING_LEN) + * MAX_RX_BUF_LENGTH); + + priv->rx_buffer = bd_buffer; + priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH; + + priv->dma_rx_addr = bd_dma_addr; + priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH; + + for (i = 0; i < RX_BD_RING_LEN; i++) { + if (i < (RX_BD_RING_LEN - 1)) + bd_status = R_E_S | R_I_S; + else + bd_status = R_E_S | R_I_S | R_W_S; + + iowrite16be(bd_status, &priv->rx_bd_base[i].status); + iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH, + &priv->rx_bd_base[i].buf); + } + + for (i = 0; i < TX_BD_RING_LEN; i++) { + if (i < (TX_BD_RING_LEN - 1)) + bd_status = T_I_S | T_TC_S; + else + bd_status = T_I_S | T_TC_S | T_W_S; + + iowrite16be(bd_status, &priv->tx_bd_base[i].status); + iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH, + &priv->tx_bd_base[i].buf); + } + + return 0; + +free_tiptr: + qe_muram_free(tiptr); +free_riptr: + qe_muram_free(riptr); +free_tx_skbuff: + kfree(priv->tx_skbuff); +free_rx_skbuff: + kfree(priv->rx_skbuff); +free_ucc_pram: + qe_muram_free(priv->ucc_pram_offset); +free_tx_bd: + dma_free_coherent(priv->dev, + TX_BD_RING_LEN * sizeof(struct qe_bd), + priv->tx_bd_base, priv->dma_tx_bd); +free_rx_bd: + dma_free_coherent(priv->dev, + RX_BD_RING_LEN * sizeof(struct qe_bd), + priv->rx_bd_base, priv->dma_rx_bd); +free_uccf: + ucc_fast_free(priv->uccf); + + return ret; +} + +static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev) +{ + hdlc_device *hdlc = dev_to_hdlc(dev); + struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv; + struct qe_bd __iomem *bd; + u16 bd_status; + unsigned long flags; + u8 *send_buf; + int i; + u16 *proto_head; + + switch (dev->type) { + case ARPHRD_RAWHDLC: + if (skb_headroom(skb) < HDLC_HEAD_LEN) { + dev->stats.tx_dropped++; + dev_kfree_skb(skb); + netdev_err(dev, "Not enough space for hdlc head\n"); + return -ENOMEM; + } + + skb_push(skb, HDLC_HEAD_LEN); + + proto_head = (u16 *)skb->data; + *proto_head = htons(DEFAULT_HDLC_HEAD); + + dev->stats.tx_bytes += skb->len; + break; + + case ARPHRD_PPP: + proto_head = (u16 *)skb->data; + if (*proto_head != htons(DEFAULT_PPP_HEAD)) { + dev->stats.tx_dropped++; + dev_kfree_skb(skb); + netdev_err(dev, "Wrong PPP header\n"); + return -ENOMEM; + } + + dev->stats.tx_bytes += skb->len; + break; + + default: + dev->stats.tx_dropped++; + dev_kfree_skb(skb); + return -ENOMEM; + } + + pr_info("Tx data skb->len:%d ", skb->len); + send_buf = (u8 *)skb->data; + pr_info("\nTransmitted data:\n"); + for (i = 0; i < 16; i++) { + if (i == skb->len) + pr_info("++++"); + else + pr_info("%02x\n", send_buf[i]); + } + spin_lock_irqsave(&priv->lock, flags); + + /* Start from the next BD that should be filled */ + bd = priv->curtx_bd; + bd_status = ioread16be(&bd->status); + /* Save the skb pointer so we can free it later */ + 
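+	/* Everything below runs under priv->lock: the skb slot index wraps
+	 * with the power-of-two mask TX_RING_MOD_MASK(), the frame is copied
+	 * into the coherent bounce buffer, and the BD is handed to the
+	 * hardware only once T_R is set in its status word.
+	 */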
priv->tx_skbuff[priv->skb_curtx] = skb; + + /* Update the current skb pointer (wrapping if this was the last) */ + priv->skb_curtx = + (priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN); + + /* copy skb data to tx buffer for sdma processing */ + memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr), + skb->data, skb->len); + + /* set bd status and length */ + bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S; + + iowrite16be(bd_status, &bd->status); + iowrite16be(skb->len, &bd->length); + + /* Move to next BD in the ring */ + if (!(bd_status & T_W_S)) + bd += 1; + else + bd = priv->tx_bd_base; + + if (bd == priv->dirty_tx) { + if (!netif_queue_stopped(dev)) + netif_stop_queue(dev); + } + + priv->curtx_bd = bd; + + spin_unlock_irqrestore(&priv->lock, flags); + + return NETDEV_TX_OK; +} + +static int hdlc_tx_done(struct ucc_hdlc_private *priv) +{ + /* Start from the next BD that should be filled */ + struct net_device *dev = priv->ndev; + struct qe_bd *bd; /* BD pointer */ + u16 bd_status; + + bd = priv->dirty_tx; + bd_status = ioread16be(&bd->status); + + /* Normal processing. */ + while ((bd_status & T_R_S) == 0) { + struct sk_buff *skb; + + /* BD contains already transmitted buffer. */ + /* Handle the transmitted buffer and release */ + /* the BD to be used with the current frame */ + + skb = priv->tx_skbuff[priv->skb_dirtytx]; + if (!skb) + break; + pr_info("TxBD: %x\n", bd_status); + dev->stats.tx_packets++; + memset(priv->tx_buffer + + (be32_to_cpu(bd->buf) - priv->dma_tx_addr), + 0, skb->len); + dev_kfree_skb_irq(skb); + + priv->tx_skbuff[priv->skb_dirtytx] = NULL; + priv->skb_dirtytx = + (priv->skb_dirtytx + + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN); + + /* We freed a buffer, so now we can restart transmission */ + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); + + /* Advance the confirmation BD pointer */ + if (!(bd_status & T_W_S)) + bd += 1; + else + bd = priv->tx_bd_base; + bd_status = ioread16be(&bd->status); + } + priv->dirty_tx = bd; + + return 0; +} + +static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit) +{ + struct net_device *dev = priv->ndev; + struct sk_buff *skb; + hdlc_device *hdlc = dev_to_hdlc(dev); + struct qe_bd *bd; + u32 bd_status; + u16 length, howmany = 0; + u8 *bdbuffer; + int i; + static int entry; + + bd = priv->currx_bd; + bd_status = ioread16be(&bd->status); + + /* while there are received buffers and BD is full (~R_E) */ + while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) { + if (bd_status & R_OV_S) + dev->stats.rx_over_errors++; + if (bd_status & R_CR_S) { +#ifdef BROKEN_FRAME_INFO + pr_info("Broken Frame with RxBD: %x\n", bd_status); +#endif + dev->stats.rx_crc_errors++; + dev->stats.rx_dropped++; + goto recycle; + } + bdbuffer = priv->rx_buffer + + (priv->currx_bdnum * MAX_RX_BUF_LENGTH); + length = ioread16be(&bd->length); + + pr_info("Received data length:%d", length); + pr_info("while entry times:%d", entry++); + + pr_info("\nReceived data:\n"); + for (i = 0; (i < 16); i++) { + if (i == length) + pr_info("++++"); + else + pr_info("%02x\n", bdbuffer[i]); + } + + switch (dev->type) { + case ARPHRD_RAWHDLC: + bdbuffer += HDLC_HEAD_LEN; + length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE); + + skb = dev_alloc_skb(length); + if (!skb) { + dev->stats.rx_dropped++; + return -ENOMEM; + } + + skb_put(skb, length); + skb->len = length; + skb->dev = dev; + memcpy(skb->data, bdbuffer, length); + break; + + case ARPHRD_PPP: + length -= HDLC_CRC_SIZE; + + skb = dev_alloc_skb(length); + if (!skb) { + 
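+				/* Allocation failed: record the drop and stop
+				 * this poll pass; the BD stays software-owned
+				 * (R_E is not re-armed), so the same frame is
+				 * retried on the next NAPI poll.
+				 */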
dev->stats.rx_dropped++; + return -ENOMEM; + } + + skb_put(skb, length); + skb->len = length; + skb->dev = dev; + memcpy(skb->data, bdbuffer, length); + break; + } + + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + howmany++; + if (hdlc->proto) + skb->protocol = hdlc_type_trans(skb, dev); + pr_info("skb->protocol:%x\n", skb->protocol); + netif_receive_skb(skb); + +recycle: + iowrite16be(bd_status | R_E_S | R_I_S, &bd->status); + + /* update to point at the next bd */ + if (bd_status & R_W_S) { + priv->currx_bdnum = 0; + bd = priv->rx_bd_base; + } else { + if (priv->currx_bdnum < (RX_BD_RING_LEN - 1)) + priv->currx_bdnum += 1; + else + priv->currx_bdnum = RX_BD_RING_LEN - 1; + + bd += 1; + } + + bd_status = ioread16be(&bd->status); + } + + priv->currx_bd = bd; + return howmany; +} + +static int ucc_hdlc_poll(struct napi_struct *napi, int budget) +{ + struct ucc_hdlc_private *priv = container_of(napi, + struct ucc_hdlc_private, + napi); + int howmany; + + /* Tx event processing */ + spin_lock(&priv->lock); + hdlc_tx_done(priv); + spin_unlock(&priv->lock); + + howmany = 0; + howmany += hdlc_rx_done(priv, budget - howmany); + + if (howmany < budget) { + napi_complete(napi); + qe_setbits32(priv->uccf->p_uccm, + (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16); + } + + return howmany; +} + +static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id) +{ + struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id; + struct net_device *dev = priv->ndev; + struct ucc_fast_private *uccf; + struct ucc_tdm_info *ut_info; + u32 ucce; + u32 uccm; + + ut_info = priv->ut_info; + uccf = priv->uccf; + + ucce = ioread32be(uccf->p_ucce); + uccm = ioread32be(uccf->p_uccm); + ucce &= uccm; + iowrite32be(ucce, uccf->p_ucce); + pr_info("irq ucce:%x\n", ucce); + if (!ucce) + return IRQ_NONE; + + if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) { + if (napi_schedule_prep(&priv->napi)) { + uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) + << 16); + iowrite32be(uccm, uccf->p_uccm); + __napi_schedule(&priv->napi); + } + } + + /* Errors and other events */ + if (ucce >> 16 & UCC_HDLC_UCCE_BSY) + dev->stats.rx_errors++; + if (ucce >> 16 & UCC_HDLC_UCCE_TXE) + dev->stats.tx_errors++; + + return IRQ_HANDLED; +} + +static int uhdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + const size_t size = sizeof(te1_settings); + te1_settings line; + struct ucc_hdlc_private *priv = netdev_priv(dev); + + if (cmd != SIOCWANDEV) + return hdlc_ioctl(dev, ifr, cmd); + + switch (ifr->ifr_settings.type) { + case IF_GET_IFACE: + ifr->ifr_settings.type = IF_IFACE_E1; + if (ifr->ifr_settings.size < size) { + ifr->ifr_settings.size = size; /* data size wanted */ + return -ENOBUFS; + } + memset(&line, 0, sizeof(line)); + line.clock_type = priv->clocking; + + if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size)) + return -EFAULT; + return 0; + + default: + return hdlc_ioctl(dev, ifr, cmd); + } +} + +static int uhdlc_open(struct net_device *dev) +{ + u32 cecr_subblock; + hdlc_device *hdlc = dev_to_hdlc(dev); + struct ucc_hdlc_private *priv = hdlc->priv; + struct ucc_tdm *utdm = priv->utdm; + + if (priv->hdlc_busy != 1) { + if (request_irq(priv->ut_info->uf_info.irq, + ucc_hdlc_irq_handler, 0, "hdlc", priv)) + return -ENODEV; + + cecr_subblock = ucc_fast_get_qe_cr_subblock( + priv->ut_info->uf_info.ucc_num); + + qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock, + QE_CR_PROTOCOL_UNSPECIFIED, 0); + + ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); + + /* Enable the TDM port 
*/ + if (priv->tsa) + utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port); + + priv->hdlc_busy = 1; + netif_device_attach(priv->ndev); + napi_enable(&priv->napi); + netif_start_queue(dev); + hdlc_open(dev); + } + + return 0; +} + +static void uhdlc_memclean(struct ucc_hdlc_private *priv) +{ + qe_muram_free(priv->ucc_pram->riptr); + qe_muram_free(priv->ucc_pram->tiptr); + + if (priv->rx_bd_base) { + dma_free_coherent(priv->dev, + RX_BD_RING_LEN * sizeof(struct qe_bd), + priv->rx_bd_base, priv->dma_rx_bd); + + priv->rx_bd_base = NULL; + priv->dma_rx_bd = 0; + } + + if (priv->tx_bd_base) { + dma_free_coherent(priv->dev, + TX_BD_RING_LEN * sizeof(struct qe_bd), + priv->tx_bd_base, priv->dma_tx_bd); + + priv->tx_bd_base = NULL; + priv->dma_tx_bd = 0; + } + + if (priv->ucc_pram) { + qe_muram_free(priv->ucc_pram_offset); + priv->ucc_pram = NULL; + priv->ucc_pram_offset = 0; + } + + kfree(priv->rx_skbuff); + priv->rx_skbuff = NULL; + + kfree(priv->tx_skbuff); + priv->tx_skbuff = NULL; + + if (priv->uf_regs) { + iounmap(priv->uf_regs); + priv->uf_regs = NULL; + } + + if (priv->uccf) { + ucc_fast_free(priv->uccf); + priv->uccf = NULL; + } + + if (priv->rx_buffer) { + dma_free_coherent(priv->dev, + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH, + priv->rx_buffer, priv->dma_rx_addr); + priv->rx_buffer = NULL; + priv->dma_rx_addr = 0; + } + + if (priv->tx_buffer) { + dma_free_coherent(priv->dev, + TX_BD_RING_LEN * MAX_RX_BUF_LENGTH, + priv->tx_buffer, priv->dma_tx_addr); + priv->tx_buffer = NULL; + priv->dma_tx_addr = 0; + } +} + +static int uhdlc_close(struct net_device *dev) +{ + struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv; + struct ucc_tdm *utdm = priv->utdm; + u32 cecr_subblock; + + napi_disable(&priv->napi); + cecr_subblock = ucc_fast_get_qe_cr_subblock( + priv->ut_info->uf_info.ucc_num); + + qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, + (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); + qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock, + (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); + + if (priv->tsa) + utdm->si_regs->siglmr1_h &= ~(0x1 << utdm->tdm_port); + + ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); + + free_irq(priv->ut_info->uf_info.irq, priv); + netif_stop_queue(dev); + priv->hdlc_busy = 0; + + return 0; +} + +static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding, + unsigned short parity) +{ + struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv; + + if (encoding != ENCODING_NRZ && + encoding != ENCODING_NRZI) + return -EINVAL; + + if (parity != PARITY_NONE && + parity != PARITY_CRC32_PR1_CCITT && + parity != PARITY_CRC16_PR1_CCITT) + return -EINVAL; + + priv->encoding = encoding; + priv->parity = parity; + + return 0; +} + +#ifdef CONFIG_PM +static void store_clk_config(struct ucc_hdlc_private *priv) +{ + struct qe_mux *qe_mux_reg = &qe_immr->qmx; + + /* store si clk */ + priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h); + priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l); + + /* store si sync */ + priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr); + + /* store ucc clk */ + memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32)); +} + +static void resume_clk_config(struct ucc_hdlc_private *priv) +{ + struct qe_mux *qe_mux_reg = &qe_immr->qmx; + + memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32)); + + iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h); + iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l); + + iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr); +} + +static int uhdlc_suspend(struct device *dev) +{ + struct 
ucc_hdlc_private *priv = dev_get_drvdata(dev); + struct ucc_tdm_info *ut_info; + struct ucc_fast __iomem *uf_regs; + + if (!priv) + return -EINVAL; + + if (!netif_running(priv->ndev)) + return 0; + + netif_device_detach(priv->ndev); + napi_disable(&priv->napi); + + ut_info = priv->ut_info; + uf_regs = priv->uf_regs; + + /* backup gumr guemr*/ + priv->gumr = ioread32be(&uf_regs->gumr); + priv->guemr = ioread8(&uf_regs->guemr); + + priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak), + GFP_KERNEL); + if (!priv->ucc_pram_bak) + return -ENOMEM; + + /* backup HDLC parameter */ + memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram, + sizeof(struct ucc_hdlc_param)); + + /* store the clk configuration */ + store_clk_config(priv); + + /* save power */ + ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); + + dev_dbg(dev, "ucc hdlc suspend\n"); + return 0; +} + +static int uhdlc_resume(struct device *dev) +{ + struct ucc_hdlc_private *priv = dev_get_drvdata(dev); + struct ucc_tdm *utdm; + struct ucc_tdm_info *ut_info; + struct ucc_fast __iomem *uf_regs; + struct ucc_fast_private *uccf; + struct ucc_fast_info *uf_info; + int ret, i; + u32 cecr_subblock; + u16 bd_status; + + if (!priv) + return -EINVAL; + + if (!netif_running(priv->ndev)) + return 0; + + utdm = priv->utdm; + ut_info = priv->ut_info; + uf_info = &ut_info->uf_info; + uf_regs = priv->uf_regs; + uccf = priv->uccf; + + /* restore gumr guemr */ + iowrite8(priv->guemr, &uf_regs->guemr); + iowrite32be(priv->gumr, &uf_regs->gumr); + + /* Set Virtual Fifo registers */ + iowrite16be(uf_info->urfs, &uf_regs->urfs); + iowrite16be(uf_info->urfet, &uf_regs->urfet); + iowrite16be(uf_info->urfset, &uf_regs->urfset); + iowrite16be(uf_info->utfs, &uf_regs->utfs); + iowrite16be(uf_info->utfet, &uf_regs->utfet); + iowrite16be(uf_info->utftt, &uf_regs->utftt); + /* utfb, urfb are offsets from MURAM base */ + iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb); + iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb); + + /* Rx Tx and sync clock routing */ + resume_clk_config(priv); + + iowrite32be(uf_info->uccm_mask, &uf_regs->uccm); + iowrite32be(0xffffffff, &uf_regs->ucce); + + ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); + + /* rebuild SIRAM */ + if (priv->tsa) + ucc_tdm_init(priv->utdm, priv->ut_info); + + /* Write to QE CECR, UCCx channel to Stop Transmission */ + cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); + ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock, + (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); + + /* Set UPSMR normal mode */ + iowrite32be(0, &uf_regs->upsmr); + + /* init parameter base */ + cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num); + ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock, + QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset); + + priv->ucc_pram = (struct ucc_hdlc_param __iomem *) + qe_muram_addr(priv->ucc_pram_offset); + + /* restore ucc parameter */ + memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak, + sizeof(struct ucc_hdlc_param)); + kfree(priv->ucc_pram_bak); + + /* rebuild BD entry */ + for (i = 0; i < RX_BD_RING_LEN; i++) { + if (i < (RX_BD_RING_LEN - 1)) + bd_status = R_E_S | R_I_S; + else + bd_status = R_E_S | R_I_S | R_W_S; + + iowrite16be(bd_status, &priv->rx_bd_base[i].status); + iowrite32be(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH, + &priv->rx_bd_base[i].buf); + } + + for (i = 0; i < TX_BD_RING_LEN; i++) { + if (i < (TX_BD_RING_LEN - 1)) + bd_status = T_I_S | T_TC_S; + else + bd_status = T_I_S | T_TC_S | T_W_S; + + 
iowrite16be(bd_status, &priv->tx_bd_base[i].status); + iowrite32be(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH, + &priv->tx_bd_base[i].buf); + } + + /* if hdlc is busy enable TX and RX */ + if (priv->hdlc_busy == 1) { + cecr_subblock = ucc_fast_get_qe_cr_subblock( + priv->ut_info->uf_info.ucc_num); + + qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock, + (u8)QE_CR_PROTOCOL_UNSPECIFIED, 0); + + ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX); + + /* Enable the TDM port */ + if (priv->tsa) + utdm->si_regs->siglmr1_h |= (0x1 << utdm->tdm_port); + } + + napi_enable(&priv->napi); + netif_device_attach(priv->ndev); + + return 0; +} + +static const struct dev_pm_ops uhdlc_pm_ops = { + .suspend = uhdlc_suspend, + .resume = uhdlc_resume, + .freeze = uhdlc_suspend, + .thaw = uhdlc_resume, +}; + +#define HDLC_PM_OPS (&uhdlc_pm_ops) + +#else + +#define HDLC_PM_OPS NULL + +#endif +static const struct net_device_ops uhdlc_ops = { + .ndo_open = uhdlc_open, + .ndo_stop = uhdlc_close, + .ndo_change_mtu = hdlc_change_mtu, + .ndo_start_xmit = hdlc_start_xmit, + .ndo_do_ioctl = uhdlc_ioctl, +}; + +static int ucc_hdlc_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct ucc_hdlc_private *uhdlc_priv = NULL; + struct ucc_tdm_info *ut_info; + struct ucc_tdm *utdm; + struct resource res; + struct net_device *dev; + hdlc_device *hdlc; + int ucc_num; + const char *sprop; + int ret; + u32 val; + + ret = of_property_read_u32_index(np, "cell-index", 0, &val); + if (ret) { + dev_err(&pdev->dev, "Invalid ucc property\n"); + return -ENODEV; + } + + ucc_num = val - 1; + if ((ucc_num > 3) || (ucc_num < 0)) { + dev_err(&pdev->dev, ": Invalid UCC num\n"); + return -EINVAL; + } + + memcpy(&utdm_info[ucc_num], &utdm_primary_info, + sizeof(utdm_primary_info)); + + ut_info = &utdm_info[ucc_num]; + ut_info->uf_info.ucc_num = ucc_num; + + sprop = of_get_property(np, "rx-clock-name", NULL); + if (sprop) { + ut_info->uf_info.rx_clock = qe_clock_source(sprop); + if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) || + (ut_info->uf_info.rx_clock > QE_CLK24)) { + dev_err(&pdev->dev, "Invalid rx-clock-name property\n"); + return -EINVAL; + } + } else { + dev_err(&pdev->dev, "Invalid rx-clock-name property\n"); + return -EINVAL; + } + + sprop = of_get_property(np, "tx-clock-name", NULL); + if (sprop) { + ut_info->uf_info.tx_clock = qe_clock_source(sprop); + if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) || + (ut_info->uf_info.tx_clock > QE_CLK24)) { + dev_err(&pdev->dev, "Invalid tx-clock-name property\n"); + return -EINVAL; + } + } else { + dev_err(&pdev->dev, "Invalid tx-clock-name property\n"); + return -EINVAL; + } + + /* use the same clock when work in loopback */ + if (ut_info->uf_info.rx_clock == ut_info->uf_info.tx_clock) + qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1); + + ret = of_address_to_resource(np, 0, &res); + if (ret) + return -EINVAL; + + ut_info->uf_info.regs = res.start; + ut_info->uf_info.irq = irq_of_parse_and_map(np, 0); + + uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL); + if (!uhdlc_priv) { + return -ENOMEM; + } + + dev_set_drvdata(&pdev->dev, uhdlc_priv); + uhdlc_priv->dev = &pdev->dev; + uhdlc_priv->ut_info = ut_info; + + if (of_get_property(np, "fsl,tdm-interface", NULL)) + uhdlc_priv->tsa = 1; + + if (of_get_property(np, "fsl,ucc-internal-loopback", NULL)) + uhdlc_priv->loopback = 1; + + if (uhdlc_priv->tsa == 1) { + utdm = kzalloc(sizeof(*utdm), GFP_KERNEL); + if (!utdm) { + ret = -ENOMEM; + dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n"); + goto 
free_uhdlc_priv; + } + uhdlc_priv->utdm = utdm; + ret = ucc_of_parse_tdm(np, utdm, ut_info); + if (ret) + goto free_utdm; + } + + ret = uhdlc_init(uhdlc_priv); + if (ret) { + dev_err(&pdev->dev, "Failed to init uhdlc\n"); + goto free_utdm; + } + + dev = alloc_hdlcdev(uhdlc_priv); + if (!dev) { + ret = -ENOMEM; + pr_err("ucc_hdlc: unable to allocate memory\n"); + goto undo_uhdlc_init; + } + + uhdlc_priv->ndev = dev; + hdlc = dev_to_hdlc(dev); + dev->tx_queue_len = 16; + dev->netdev_ops = &uhdlc_ops; + hdlc->attach = ucc_hdlc_attach; + hdlc->xmit = ucc_hdlc_tx; + netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32); + if (register_hdlc_device(dev)) { + ret = -ENOBUFS; + pr_err("ucc_hdlc: unable to register hdlc device\n"); + goto free_dev; + } + + return 0; + +free_dev: + free_netdev(dev); +undo_uhdlc_init: +free_utdm: + if (uhdlc_priv->tsa) + kfree(utdm); +free_uhdlc_priv: + kfree(uhdlc_priv); + return ret; +} + +static int ucc_hdlc_remove(struct platform_device *pdev) +{ + struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev); + + uhdlc_memclean(priv); + + if (priv->utdm->si_regs) { + iounmap(priv->utdm->si_regs); + priv->utdm->si_regs = NULL; + } + + if (priv->utdm->siram) { + iounmap(priv->utdm->siram); + priv->utdm->siram = NULL; + } + kfree(priv); + + dev_info(&pdev->dev, "UCC based hdlc module removed\n"); + + return 0; +} + +static const struct of_device_id fsl_ucc_hdlc_of_match[] = { + { + .compatible = "fsl,ucc-hdlc", + }, + {}, +}; + +MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match); + +static struct platform_driver ucc_hdlc_driver = { + .probe = ucc_hdlc_probe, + .remove = ucc_hdlc_remove, + .driver = { + .name = DRV_NAME, + .pm = HDLC_PM_OPS, + .of_match_table = fsl_ucc_hdlc_of_match, + }, +}; + +module_platform_driver(ucc_hdlc_driver); diff --git a/drivers/net/wan/fsl_ucc_hdlc.h b/drivers/net/wan/fsl_ucc_hdlc.h new file mode 100644 index 000000000..881ecdeef --- /dev/null +++ b/drivers/net/wan/fsl_ucc_hdlc.h @@ -0,0 +1,147 @@ +/* Freescale QUICC Engine HDLC Device Driver + * + * Copyright 2014 Freescale Semiconductor Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
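The header below sizes both BD rings as powers of two so that index wrap is a single AND: TX_RING_MOD_MASK(size) and RX_RING_MOD_MASK(size) expand to (size - 1). A minimal sketch of the wrap step, with the hypothetical helper name ring_next:

	/* Sketch: advance an index over a power-of-two ring (len == 2^n). */
	static inline u16 ring_next(u16 idx, u16 len)
	{
		return (idx + 1) & (len - 1);
	}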
+ */ + +#ifndef _UCC_HDLC_H_ +#define _UCC_HDLC_H_ + +#include <linux/kernel.h> +#include <linux/list.h> + +#include <soc/fsl/qe/immap_qe.h> +#include <soc/fsl/qe/qe.h> + +#include <soc/fsl/qe/ucc.h> +#include <soc/fsl/qe/ucc_fast.h> + +/* UCC HDLC event register */ +#define UCCE_HDLC_RX_EVENTS \ +(UCC_HDLC_UCCE_RXF | UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_BSY) +#define UCCE_HDLC_TX_EVENTS (UCC_HDLC_UCCE_TXB | UCC_HDLC_UCCE_TXE) + +struct ucc_hdlc_param { + __be16 riptr; + __be16 tiptr; + __be16 res0; + __be16 mrblr; + __be32 rstate; + __be32 rbase; + __be16 rbdstat; + __be16 rbdlen; + __be32 rdptr; + __be32 tstate; + __be32 tbase; + __be16 tbdstat; + __be16 tbdlen; + __be32 tdptr; + __be32 rbptr; + __be32 tbptr; + __be32 rcrc; + __be32 res1; + __be32 tcrc; + __be32 res2; + __be32 res3; + __be32 c_mask; + __be32 c_pres; + __be16 disfc; + __be16 crcec; + __be16 abtsc; + __be16 nmarc; + __be32 max_cnt; + __be16 mflr; + __be16 rfthr; + __be16 rfcnt; + __be16 hmask; + __be16 haddr1; + __be16 haddr2; + __be16 haddr3; + __be16 haddr4; + __be16 ts_tmp; + __be16 tmp_mb; +}; + +struct ucc_hdlc_private { + struct ucc_tdm *utdm; + struct ucc_tdm_info *ut_info; + struct ucc_fast_private *uccf; + struct device *dev; + struct net_device *ndev; + struct napi_struct napi; + struct ucc_fast __iomem *uf_regs; /* UCC Fast registers */ + struct ucc_hdlc_param __iomem *ucc_pram; + u16 tsa; + bool hdlc_busy; + bool loopback; + + u8 *tx_buffer; + u8 *rx_buffer; + dma_addr_t dma_tx_addr; + dma_addr_t dma_rx_addr; + + struct qe_bd *tx_bd_base; + struct qe_bd *rx_bd_base; + dma_addr_t dma_tx_bd; + dma_addr_t dma_rx_bd; + struct qe_bd *curtx_bd; + struct qe_bd *currx_bd; + struct qe_bd *dirty_tx; + u16 currx_bdnum; + + struct sk_buff **tx_skbuff; + struct sk_buff **rx_skbuff; + u16 skb_curtx; + u16 skb_currx; + unsigned short skb_dirtytx; + + unsigned short tx_ring_size; + unsigned short rx_ring_size; + u32 ucc_pram_offset; + + unsigned short encoding; + unsigned short parity; + u32 clocking; + spinlock_t lock; /* lock for Tx BD and Tx buffer */ +#ifdef CONFIG_PM + struct ucc_hdlc_param *ucc_pram_bak; + u32 gumr; + u8 guemr; + u32 cmxsi1cr_l, cmxsi1cr_h; + u32 cmxsi1syr; + u32 cmxucr[4]; +#endif +}; + +#define TX_BD_RING_LEN 0x10 +#define RX_BD_RING_LEN 0x20 +#define RX_CLEAN_MAX 0x10 +#define NUM_OF_BUF 4 +#define MAX_RX_BUF_LENGTH (48 * 0x20) +#define MAX_FRAME_LENGTH (MAX_RX_BUF_LENGTH + 8) +#define ALIGNMENT_OF_UCC_HDLC_PRAM 64 +#define SI_BANK_SIZE 128 +#define MAX_HDLC_NUM 4 +#define HDLC_HEAD_LEN 2 +#define HDLC_CRC_SIZE 2 +#define TX_RING_MOD_MASK(size) (size - 1) +#define RX_RING_MOD_MASK(size) (size - 1) + +#define HDLC_HEAD_MASK 0x0000 +#define DEFAULT_HDLC_HEAD 0xff44 +#define DEFAULT_ADDR_MASK 0x00ff +#define DEFAULT_HDLC_ADDR 0x00ff + +#define BMR_GBL 0x20000000 +#define BMR_BIG_ENDIAN 0x10000000 +#define CRC_16BIT_MASK 0x0000F0B8 +#define CRC_16BIT_PRES 0x0000FFFF +#define DEFAULT_RFTHR 1 + +#define DEFAULT_PPP_HEAD 0xff03 + +#endif diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c new file mode 100644 index 000000000..d06a887a2 --- /dev/null +++ b/drivers/net/wan/slic_ds26522.c @@ -0,0 +1,255 @@ +/* + * drivers/net/wan/slic_ds26522.c + * + * Copyright (C) 2016 Freescale Semiconductor, Inc. 
+ * + * Author: Zhao Qiang <qiang.zhao@nxp.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/bitrev.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/kthread.h> +#include <linux/spi/spi.h> +#include <linux/wait.h> +#include <linux/param.h> +#include <linux/delay.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/io.h> +#include "slic_ds26522.h" + +#define DRV_NAME "ds26522" + +#define SLIC_TRANS_LEN 1 +#define SLIC_TWO_LEN 2 +#define SLIC_THREE_LEN 3 + +static struct spi_device *g_spi; + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Zhao Qiang <B45475@freescale.com>"); + +/* the read/write format of address is + * w/r|A13|A12|A11|A10|A9|A8|A7|A6|A5|A4|A3|A2|A1|A0|x + */ +static void slic_write(struct spi_device *spi, u16 addr, + u8 data) +{ + u8 temp[3]; + + addr = bitrev16(addr) >> 1; + data = bitrev8(data); + temp[0] = (u8)((addr >> 8) & 0x7f); + temp[1] = (u8)(addr & 0xfe); + temp[2] = data; + + /* write spi addr and value */ + spi_write(spi, &temp[0], SLIC_THREE_LEN); +} + +static u8 slic_read(struct spi_device *spi, u16 addr) +{ + u8 temp[2]; + u8 data; + + addr = bitrev16(addr) >> 1; + temp[0] = (u8)(((addr >> 8) & 0x7f) | 0x80); + temp[1] = (u8)(addr & 0xfe); + + spi_write_then_read(spi, &temp[0], SLIC_TWO_LEN, &data, + SLIC_TRANS_LEN); + + data = bitrev8(data); + return data; +} + +static bool get_slic_product_code(struct spi_device *spi) +{ + u8 device_id; + + device_id = slic_read(spi, DS26522_IDR_ADDR); + if ((device_id & 0xf8) == 0x68) + return true; + else + return false; +} + +static void ds26522_e1_spec_config(struct spi_device *spi) +{ + /* Receive E1 Mode, Framer Disabled */ + slic_write(spi, DS26522_RMMR_ADDR, DS26522_RMMR_E1); + + /* Transmit E1 Mode, Framer Disabled */ + slic_write(spi, DS26522_TMMR_ADDR, DS26522_TMMR_E1); + + /* Receive E1 Mode Framer Enable */ + slic_write(spi, DS26522_RMMR_ADDR, + slic_read(spi, DS26522_RMMR_ADDR) | DS26522_RMMR_FRM_EN); + + /* Transmit E1 Mode Framer Enable */ + slic_write(spi, DS26522_TMMR_ADDR, + slic_read(spi, DS26522_TMMR_ADDR) | DS26522_TMMR_FRM_EN); + + /* RCR1, receive E1 B8zs & ESF */ + slic_write(spi, DS26522_RCR1_ADDR, + DS26522_RCR1_E1_HDB3 | DS26522_RCR1_E1_CCS); + + /* RSYSCLK=2.048MHz, RSYNC-Output */ + slic_write(spi, DS26522_RIOCR_ADDR, + DS26522_RIOCR_2048KHZ | DS26522_RIOCR_RSIO_OUT); + + /* TCR1 Transmit E1 b8zs */ + slic_write(spi, DS26522_TCR1_ADDR, DS26522_TCR1_TB8ZS); + + /* TSYSCLK=2.048MHz, TSYNC-Output */ + slic_write(spi, DS26522_TIOCR_ADDR, + DS26522_TIOCR_2048KHZ | DS26522_TIOCR_TSIO_OUT); + + /* Set E1TAF */ + slic_write(spi, DS26522_E1TAF_ADDR, DS26522_E1TAF_DEFAULT); + + /* Set E1TNAF register */ + slic_write(spi, DS26522_E1TNAF_ADDR, DS26522_E1TNAF_DEFAULT); + + /* Receive E1 Mode Framer Enable & init Done */ + slic_write(spi, DS26522_RMMR_ADDR, slic_read(spi, DS26522_RMMR_ADDR) | + DS26522_RMMR_INIT_DONE); + + /* Transmit E1 Mode Framer Enable & init Done */ + slic_write(spi, DS26522_TMMR_ADDR, slic_read(spi, DS26522_TMMR_ADDR) | + DS26522_TMMR_INIT_DONE); + + /* Configure LIU E1 mode */ + slic_write(spi, DS26522_LTRCR_ADDR, DS26522_LTRCR_E1); + + /* E1 Mode default 75 ohm w/Transmit Impedance Matching */ + slic_write(spi, DS26522_LTITSR_ADDR, + DS26522_LTITSR_TLIS_75OHM | 
DS26522_LTITSR_LBOS_75OHM); + + /* E1 Mode default 75 ohm Long Haul w/Receive Impedance Matching */ + slic_write(spi, DS26522_LRISMR_ADDR, + DS26522_LRISMR_75OHM | DS26522_LRISMR_MAX); + + /* Enable Transmit output */ + slic_write(spi, DS26522_LMCR_ADDR, DS26522_LMCR_TE); +} + +static int slic_ds26522_init_configure(struct spi_device *spi) +{ + u16 addr; + + /* set clock */ + slic_write(spi, DS26522_GTCCR_ADDR, DS26522_GTCCR_BPREFSEL_REFCLKIN | + DS26522_GTCCR_BFREQSEL_2048KHZ | + DS26522_GTCCR_FREQSEL_2048KHZ); + slic_write(spi, DS26522_GTCR2_ADDR, DS26522_GTCR2_TSSYNCOUT); + slic_write(spi, DS26522_GFCR_ADDR, DS26522_GFCR_BPCLK_2048KHZ); + + /* set gtcr */ + slic_write(spi, DS26522_GTCR1_ADDR, DS26522_GTCR1); + + /* Global LIU Software Reset Register */ + slic_write(spi, DS26522_GLSRR_ADDR, DS26522_GLSRR_RESET); + + /* Global Framer and BERT Software Reset Register */ + slic_write(spi, DS26522_GFSRR_ADDR, DS26522_GFSRR_RESET); + + usleep_range(100, 120); + + slic_write(spi, DS26522_GLSRR_ADDR, DS26522_GLSRR_NORMAL); + slic_write(spi, DS26522_GFSRR_ADDR, DS26522_GFSRR_NORMAL); + + /* Perform RX/TX SRESET, reset receiver */ + slic_write(spi, DS26522_RMMR_ADDR, DS26522_RMMR_SFTRST); + + /* Reset transceiver */ + slic_write(spi, DS26522_TMMR_ADDR, DS26522_TMMR_SFTRST); + + usleep_range(100, 120); + + /* Zero all Framer Registers */ + for (addr = DS26522_RF_ADDR_START; addr <= DS26522_RF_ADDR_END; + addr++) + slic_write(spi, addr, 0); + + for (addr = DS26522_TF_ADDR_START; addr <= DS26522_TF_ADDR_END; + addr++) + slic_write(spi, addr, 0); + + for (addr = DS26522_LIU_ADDR_START; addr <= DS26522_LIU_ADDR_END; + addr++) + slic_write(spi, addr, 0); + + for (addr = DS26522_BERT_ADDR_START; addr <= DS26522_BERT_ADDR_END; + addr++) + slic_write(spi, addr, 0); + + /* setup ds26522 for E1 specification */ + ds26522_e1_spec_config(spi); + + slic_write(spi, DS26522_GTCR1_ADDR, 0x00); + + return 0; +} + +static int slic_ds26522_remove(struct spi_device *spi) +{ + pr_info("DS26522 module uninstalled\n"); + return 0; +} + +static int slic_ds26522_probe(struct spi_device *spi) +{ + int ret = 0; + + g_spi = spi; + spi->bits_per_word = 8; + + if (!get_slic_product_code(spi)) + return ret; + + ret = slic_ds26522_init_configure(spi); + if (ret == 0) + pr_info("DS26522 cs%d configured\n", spi->chip_select); + + return ret; +} + +static const struct of_device_id slic_ds26522_match[] = { + { + .compatible = "maxim,ds26522", + }, + {}, +}; + +static struct spi_driver slic_ds26522_driver = { + .driver = { + .name = "ds26522", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + .of_match_table = slic_ds26522_match, + }, + .probe = slic_ds26522_probe, + .remove = slic_ds26522_remove, +}; + +static int __init slic_ds26522_init(void) +{ + return spi_register_driver(&slic_ds26522_driver); +} + +static void __exit slic_ds26522_exit(void) +{ + spi_unregister_driver(&slic_ds26522_driver); +} + +module_init(slic_ds26522_init); +module_exit(slic_ds26522_exit); diff --git a/drivers/net/wan/slic_ds26522.h b/drivers/net/wan/slic_ds26522.h new file mode 100644 index 000000000..22aa0ecbd --- /dev/null +++ b/drivers/net/wan/slic_ds26522.h @@ -0,0 +1,134 @@ +/* + * drivers/net/wan/slic_ds26522.h + * + * Copyright 2016 Freescale Semiconductor, Inc.
+ * + * Author: Zhao Qiang <B45475@freescale.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#define DS26522_RF_ADDR_START 0x00 +#define DS26522_RF_ADDR_END 0xef +#define DS26522_GLB_ADDR_START 0xf0 +#define DS26522_GLB_ADDR_END 0xff +#define DS26522_TF_ADDR_START 0x100 +#define DS26522_TF_ADDR_END 0x1ef +#define DS26522_LIU_ADDR_START 0x1000 +#define DS26522_LIU_ADDR_END 0x101f +#define DS26522_TEST_ADDR_START 0x1008 +#define DS26522_TEST_ADDR_END 0x101f +#define DS26522_BERT_ADDR_START 0x1100 +#define DS26522_BERT_ADDR_END 0x110f + +#define DS26522_RMMR_ADDR 0x80 +#define DS26522_RCR1_ADDR 0x81 +#define DS26522_RCR3_ADDR 0x83 +#define DS26522_RIOCR_ADDR 0x84 + +#define DS26522_GTCR1_ADDR 0xf0 +#define DS26522_GFCR_ADDR 0xf1 +#define DS26522_GTCR2_ADDR 0xf2 +#define DS26522_GTCCR_ADDR 0xf3 +#define DS26522_GLSRR_ADDR 0xf5 +#define DS26522_GFSRR_ADDR 0xf6 +#define DS26522_IDR_ADDR 0xf8 + +#define DS26522_E1TAF_ADDR 0x164 +#define DS26522_E1TNAF_ADDR 0x165 +#define DS26522_TMMR_ADDR 0x180 +#define DS26522_TCR1_ADDR 0x181 +#define DS26522_TIOCR_ADDR 0x184 + +#define DS26522_LTRCR_ADDR 0x1000 +#define DS26522_LTITSR_ADDR 0x1001 +#define DS26522_LMCR_ADDR 0x1002 +#define DS26522_LRISMR_ADDR 0x1007 + +#define MAX_NUM_OF_CHANNELS 8 +#define PQ_MDS_8E1T1_BRD_REV 0x00 +#define PQ_MDS_8E1T1_PLD_REV 0x00 + +#define DS26522_GTCCR_BPREFSEL_REFCLKIN 0xa0 +#define DS26522_GTCCR_BFREQSEL_1544KHZ 0x08 +#define DS26522_GTCCR_FREQSEL_1544KHZ 0x04 +#define DS26522_GTCCR_BFREQSEL_2048KHZ 0x00 +#define DS26522_GTCCR_FREQSEL_2048KHZ 0x00 + +#define DS26522_GFCR_BPCLK_2048KHZ 0x00 + +#define DS26522_GTCR2_TSSYNCOUT 0x02 +#define DS26522_GTCR1 0x00 + +#define DS26522_GFSRR_RESET 0x01 +#define DS26522_GFSRR_NORMAL 0x00 + +#define DS26522_GLSRR_RESET 0x01 +#define DS26522_GLSRR_NORMAL 0x00 + +#define DS26522_RMMR_SFTRST 0x02 +#define DS26522_RMMR_FRM_EN 0x80 +#define DS26522_RMMR_INIT_DONE 0x40 +#define DS26522_RMMR_T1 0x00 +#define DS26522_RMMR_E1 0x01 + +#define DS26522_E1TAF_DEFAULT 0x1b +#define DS26522_E1TNAF_DEFAULT 0x40 + +#define DS26522_TMMR_SFTRST 0x02 +#define DS26522_TMMR_FRM_EN 0x80 +#define DS26522_TMMR_INIT_DONE 0x40 +#define DS26522_TMMR_T1 0x00 +#define DS26522_TMMR_E1 0x01 + +#define DS26522_RCR1_T1_SYNCT 0x80 +#define DS26522_RCR1_T1_RB8ZS 0x40 +#define DS26522_RCR1_T1_SYNCC 0x08 + +#define DS26522_RCR1_E1_HDB3 0x40 +#define DS26522_RCR1_E1_CCS 0x20 + +#define DS26522_RIOCR_1544KHZ 0x00 +#define DS26522_RIOCR_2048KHZ 0x10 +#define DS26522_RIOCR_RSIO_OUT 0x00 + +#define DS26522_RCR3_FLB 0x01 + +#define DS26522_TIOCR_1544KHZ 0x00 +#define DS26522_TIOCR_2048KHZ 0x10 +#define DS26522_TIOCR_TSIO_OUT 0x04 + +#define DS26522_TCR1_TB8ZS 0x04 + +#define DS26522_LTRCR_T1 0x02 +#define DS26522_LTRCR_E1 0x00 + +#define DS26522_LTITSR_TLIS_75OHM 0x00 +#define DS26522_LTITSR_LBOS_75OHM 0x00 +#define DS26522_LTITSR_TLIS_100OHM 0x10 +#define DS26522_LTITSR_TLIS_0DB_CSU 0x00 + +#define DS26522_LRISMR_75OHM 0x00 +#define DS26522_LRISMR_100OHM 0x10 +#define DS26522_LRISMR_MAX 0x03 + +#define DS26522_LMCR_TE 0x01 + +enum line_rate { + LINE_RATE_T1, /* T1 line rate (1.544 Mbps) */ + LINE_RATE_E1 /* E1 line rate (2.048 Mbps) */ +}; + +enum tdm_trans_mode { + NORMAL = 0, + FRAMER_LB +}; + +enum card_support_type { + LM_CARD = 0, + DS26522_CARD, + NO_CARD +}; diff --git 
a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index bd62bc19e..acec16b9c 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -25,10 +25,9 @@ #include "ahb.h" static const struct of_device_id ath10k_ahb_of_match[] = { - /* TODO: enable this entry once everything in place. - * { .compatible = "qcom,ipq4019-wifi", - * .data = (void *)ATH10K_HW_QCA4019 }, - */ + { .compatible = "qcom,ipq4019-wifi", + .data = (void *)ATH10K_HW_QCA4019 + }, { } }; @@ -476,6 +475,7 @@ static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg) static int ath10k_ahb_request_irq_legacy(struct ath10k *ar) { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); int ret; @@ -487,6 +487,7 @@ static int ath10k_ahb_request_irq_legacy(struct ath10k *ar) ar_ahb->irq, ret); return ret; } + ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY; return 0; } @@ -918,8 +919,6 @@ int ath10k_ahb_init(void) { int ret; - printk(KERN_ERR "AHB support is still work in progress\n"); - ret = platform_driver_register(&ath10k_ahb_driver); if (ret) printk(KERN_ERR "failed to register ath10k ahb driver: %d\n", diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 697f8d054..d037ecef2 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -18,6 +18,7 @@ #include <linux/module.h> #include <linux/firmware.h> #include <linux/of.h> +#include <asm/byteorder.h> #include "core.h" #include "mac.h" @@ -55,7 +56,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca988x hw2.0", .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, - .has_shifted_cc_wraparound = true, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, @@ -69,6 +70,25 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, }, { + .id = QCA9887_HW_1_0_VERSION, + .dev_id = QCA9887_1_0_DEVICE_ID, + .name = "qca9887 hw1.0", + .patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, + .otp_exe_param = 0, + .channel_counters_freq_hz = 88000, + .max_probe_resp_desc_thres = 0, + .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER, + .cal_data_len = 2116, + .fw = { + .dir = QCA9887_HW_1_0_FW_DIR, + .board = QCA9887_HW_1_0_BOARD_DATA_FILE, + .board_size = QCA9887_BOARD_DATA_SZ, + .board_ext_size = QCA9887_BOARD_EXT_DATA_SZ, + }, + }, + { .id = QCA6174_HW_2_1_VERSION, .dev_id = QCA6164_2_1_DEVICE_ID, .name = "qca6164 hw2.1", @@ -148,6 +168,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .uart_pin = 7, .otp_exe_param = 0x00000700, .continuous_frag_desc = true, + .cck_rate_map_rev2 = true, .channel_counters_freq_hz = 150000, .max_probe_resp_desc_thres = 24, .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, @@ -163,6 +184,51 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { }, }, { + .id = QCA9984_HW_1_0_DEV_VERSION, + .dev_id = QCA9984_1_0_DEVICE_ID, + .name = "qca9984/qca9994 hw1.0", + .patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .otp_exe_param = 0x00000700, + .continuous_frag_desc = true, + .cck_rate_map_rev2 = true, + .channel_counters_freq_hz = 150000, + .max_probe_resp_desc_thres = 24, + .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, + .tx_chain_mask = 0xf, + .rx_chain_mask = 0xf, + .max_spatial_stream = 4, + .cal_data_len = 
12064, + .fw = { + .dir = QCA9984_HW_1_0_FW_DIR, + .board = QCA9984_HW_1_0_BOARD_DATA_FILE, + .board_size = QCA99X0_BOARD_DATA_SZ, + .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, + }, + }, + { + .id = QCA9888_HW_2_0_DEV_VERSION, + .dev_id = QCA9888_2_0_DEVICE_ID, + .name = "qca9888 hw2.0", + .patch_load_addr = QCA9888_HW_2_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .otp_exe_param = 0x00000700, + .continuous_frag_desc = true, + .channel_counters_freq_hz = 150000, + .max_probe_resp_desc_thres = 24, + .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, + .tx_chain_mask = 3, + .rx_chain_mask = 3, + .max_spatial_stream = 2, + .cal_data_len = 12064, + .fw = { + .dir = QCA9888_HW_2_0_FW_DIR, + .board = QCA9888_HW_2_0_BOARD_DATA_FILE, + .board_size = QCA99X0_BOARD_DATA_SZ, + .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ, + }, + }, + { .id = QCA9377_HW_1_0_DEV_VERSION, .dev_id = QCA9377_1_0_DEVICE_ID, .name = "qca9377 hw1.0", @@ -202,9 +268,10 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca4019 hw1.0", .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 7, - .has_shifted_cc_wraparound = true, + .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH, .otp_exe_param = 0x0010000, .continuous_frag_desc = true, + .cck_rate_map_rev2 = true, .channel_counters_freq_hz = 125000, .max_probe_resp_desc_thres = 24, .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, @@ -236,6 +303,7 @@ static const char *const ath10k_core_fw_feature_str[] = { [ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca", [ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp", [ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl", + [ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param", }; static unsigned int ath10k_core_get_fw_feature_str(char *buf, @@ -531,6 +599,35 @@ out: return ret; } +static int ath10k_download_cal_eeprom(struct ath10k *ar) +{ + size_t data_len; + void *data = NULL; + int ret; + + ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len); + if (ret) { + if (ret != -EOPNOTSUPP) + ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n", + ret); + goto out_free; + } + + ret = ath10k_download_board_data(ar, data, data_len); + if (ret) { + ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n", + ret); + goto out_free; + } + + ret = 0; + +out_free: + kfree(data); + + return ret; +} + static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) { u32 result, address; @@ -1293,7 +1390,17 @@ static int ath10k_download_cal_data(struct ath10k *ar) } ath10k_dbg(ar, ATH10K_DBG_BOOT, - "boot did not find DT entry, try OTP next: %d\n", + "boot did not find DT entry, try target EEPROM next: %d\n", + ret); + + ret = ath10k_download_cal_eeprom(ar); + if (ret == 0) { + ar->cal_mode = ATH10K_CAL_MODE_EEPROM; + goto done; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "boot did not find target EEPROM entry, try OTP next: %d\n", ret); ret = ath10k_download_and_run_otp(ar); @@ -1590,7 +1697,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) case ATH10K_FW_WMI_OP_VERSION_10_4: case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: - WARN_ON(1); + ath10k_err(ar, "htt op version not found from fw meta data"); return -EINVAL; } } @@ -1733,6 +1840,16 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map)) val |= WMI_10_4_BSS_CHANNEL_INFO_64; + /* 10.4 firmware supports BT-Coex without reloading firmware + * via pdev param. 
To support the Bluetooth coexistence pdev param, + * WMI_COEX_GPIO_SUPPORT of the extended resource config must + * always be enabled. + */ + if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && + test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, + ar->running_fw->fw_file.fw_features)) + val |= WMI_10_4_COEX_GPIO_SUPPORT; + status = ath10k_mac_ext_resource_config(ar, val); if (status) { ath10k_err(ar, @@ -2062,6 +2179,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, switch (hw_rev) { case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA9887: ar->regs = &qca988x_regs; ar->hw_values = &qca988x_values; break; @@ -2071,9 +2189,14 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, ar->hw_values = &qca6174_values; break; case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA9984: ar->regs = &qca99x0_regs; ar->hw_values = &qca99x0_values; break; + case ATH10K_HW_QCA9888: + ar->regs = &qca99x0_regs; + ar->hw_values = &qca9888_values; + break; case ATH10K_HW_QCA4019: ar->regs = &qca4019_regs; ar->hw_values = &qca4019_values; @@ -2159,5 +2282,5 @@ void ath10k_core_destroy(struct ath10k *ar) EXPORT_SYMBOL(ath10k_core_destroy); MODULE_AUTHOR("Qualcomm Atheros"); -MODULE_DESCRIPTION("Core module for QCA988X PCIe devices."); +MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ac wireless LAN cards."); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 1852e0ee3..30ae5bf81 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -165,6 +165,13 @@ struct ath10k_fw_stats_peer { u32 rx_duration; }; +struct ath10k_fw_extd_stats_peer { + struct list_head list; + + u8 peer_macaddr[ETH_ALEN]; + u32 rx_duration; +}; + struct ath10k_fw_stats_vdev { struct list_head list; @@ -256,9 +263,11 @@ struct ath10k_fw_stats_pdev { }; struct ath10k_fw_stats { + bool extended; struct list_head pdevs; struct list_head vdevs; struct list_head peers; + struct list_head peers_extd; }; #define ATH10K_TPC_TABLE_TYPE_FLAG 1 @@ -535,6 +544,13 @@ enum ath10k_fw_features { */ ATH10K_FW_FEATURE_PEER_FLOW_CONTROL = 13, + /* Firmware supports BT-Coex without reloading firmware via pdev param. + * To support the Bluetooth coexistence pdev param, WMI_COEX_GPIO_SUPPORT + * of the extended resource config must always be enabled. This firmware + * IE is used to configure WMI_COEX_GPIO_SUPPORT. + */ + ATH10K_FW_FEATURE_BTCOEX_PARAM = 14, + /* keep last */ ATH10K_FW_FEATURE_COUNT, }; @@ -571,6 +587,7 @@ enum ath10k_cal_mode { ATH10K_CAL_MODE_DT, ATH10K_PRE_CAL_MODE_FILE, ATH10K_PRE_CAL_MODE_DT, + ATH10K_CAL_MODE_EEPROM, }; enum ath10k_crypt_mode { @@ -593,6 +610,8 @@ static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode) return "pre-cal-file"; case ATH10K_PRE_CAL_MODE_DT: return "pre-cal-dt"; + case ATH10K_CAL_MODE_EEPROM: + return "eeprom"; } return "unknown"; @@ -657,6 +676,7 @@ struct ath10k_fw_components { struct ath10k { struct ath_common ath_common; struct ieee80211_hw *hw; + struct ieee80211_ops *ops; struct device *dev; u8 mac_addr[ETH_ALEN]; @@ -703,12 +723,10 @@ struct ath10k { int uart_pin; u32 otp_exe_param; - /* This is true if given HW chip has a quirky Cycle Counter - * wraparound which resets to 0x7fffffff instead of 0. All - * other CC related counters (e.g. Rx Clear Count) are divided - * by 2 so they never wraparound themselves. + /* Type of hw cycle counter wraparound logic, for more info + * refer to enum ath10k_hw_cc_wraparound_type.
*/ - bool has_shifted_cc_wraparound; + enum ath10k_hw_cc_wraparound_type cc_wraparound_type; /* Some chips expect the fragment descriptor to be in contiguous * memory for any TX operation. Set continuous_frag_desc flag */ bool continuous_frag_desc; + /* The CCK hardware rate table mapping for the newer chipsets + * like QCA99X0 and QCA4019 has been revised. The CCK h/w rate values + * are now in proper order with respect to the rate/preamble + */ + bool cck_rate_map_rev2; + u32 channel_counters_freq_hz; /* Mgmt tx descriptors threshold for limiting probe response diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index e2511550f..8f0fd41df 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -139,11 +139,11 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar) ar->id.subsystem_vendor, ar->id.subsystem_device); ath10k_info(ar, "kconfig debug %d debugfs %d tracing %d dfs %d testmode %d\n", - config_enabled(CONFIG_ATH10K_DEBUG), - config_enabled(CONFIG_ATH10K_DEBUGFS), - config_enabled(CONFIG_ATH10K_TRACING), - config_enabled(CONFIG_ATH10K_DFS_CERTIFIED), - config_enabled(CONFIG_NL80211_TESTMODE)); + IS_ENABLED(CONFIG_ATH10K_DEBUG), + IS_ENABLED(CONFIG_ATH10K_DEBUGFS), + IS_ENABLED(CONFIG_ATH10K_TRACING), + IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED), + IS_ENABLED(CONFIG_NL80211_TESTMODE)); firmware = ar->normal_mode_fw.fw_file.firmware; if (firmware) @@ -313,13 +313,25 @@ static void ath10k_fw_stats_peers_free(struct list_head *head) } } +static void ath10k_fw_extd_stats_peers_free(struct list_head *head) +{ + struct ath10k_fw_extd_stats_peer *i, *tmp; + + list_for_each_entry_safe(i, tmp, head, list) { + list_del(&i->list); + kfree(i); + } +} + static void ath10k_debug_fw_stats_reset(struct ath10k *ar) { spin_lock_bh(&ar->data_lock); ar->debug.fw_stats_done = false; + ar->debug.fw_stats.extended = false; ath10k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs); ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers); + ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd); spin_unlock_bh(&ar->data_lock); } @@ -334,6 +346,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) INIT_LIST_HEAD(&stats.pdevs); INIT_LIST_HEAD(&stats.vdevs); INIT_LIST_HEAD(&stats.peers); + INIT_LIST_HEAD(&stats.peers_extd); spin_lock_bh(&ar->data_lock); ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats); @@ -354,7 +367,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) * delivered which is treated as end-of-data and is itself discarded */ if (ath10k_peer_stats_enabled(ar)) - ath10k_sta_update_rx_duration(ar, &stats.peers); + ath10k_sta_update_rx_duration(ar, &stats); if (ar->debug.fw_stats_done) { if (!ath10k_peer_stats_enabled(ar)) @@ -396,6 +409,8 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers); list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs); + list_splice_tail_init(&stats.peers_extd, + &ar->debug.fw_stats.peers_extd); } complete(&ar->debug.fw_stats_complete); @@ -407,6 +422,7 @@ free: ath10k_fw_stats_pdevs_free(&stats.pdevs); ath10k_fw_stats_vdevs_free(&stats.vdevs); ath10k_fw_stats_peers_free(&stats.peers); + ath10k_fw_extd_stats_peers_free(&stats.peers_extd); spin_unlock_bh(&ar->data_lock); } @@ -609,25 +625,23 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file, char
buf[32]; int ret; - mutex_lock(&ar->conf_mutex); - simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count); /* make sure that buf is null terminated */ buf[sizeof(buf) - 1] = 0; + /* drop the possible '\n' from the end */ + if (buf[count - 1] == '\n') + buf[count - 1] = 0; + + mutex_lock(&ar->conf_mutex); + if (ar->state != ATH10K_STATE_ON && ar->state != ATH10K_STATE_RESTARTED) { ret = -ENETDOWN; goto exit; } - /* drop the possible '\n' from the end */ - if (buf[count - 1] == '\n') { - buf[count - 1] = 0; - count--; - } - if (!strcmp(buf, "soft")) { ath10k_info(ar, "simulating soft firmware crash\n"); ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0); @@ -2127,6 +2141,7 @@ static ssize_t ath10k_write_btcoex(struct file *file, size_t buf_size; int ret; bool val; + u32 pdev_param; buf_size = min(count, (sizeof(buf) - 1)); if (copy_from_user(buf, ubuf, buf_size)) @@ -2150,14 +2165,25 @@ static ssize_t ath10k_write_btcoex(struct file *file, goto exit; } + pdev_param = ar->wmi.pdev_param->enable_btcoex; + if (test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, + ar->running_fw->fw_file.fw_features)) { + ret = ath10k_wmi_pdev_set_param(ar, pdev_param, val); + if (ret) { + ath10k_warn(ar, "failed to enable btcoex: %d\n", ret); + ret = count; + goto exit; + } + } else { + ath10k_info(ar, "restarting firmware due to btcoex change"); + queue_work(ar->workqueue, &ar->restart_work); + } + if (val) set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); else clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); - ath10k_info(ar, "restarting firmware due to btcoex change"); - - queue_work(ar->workqueue, &ar->restart_work); ret = count; exit: @@ -2320,6 +2346,7 @@ int ath10k_debug_create(struct ath10k *ar) INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs); INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs); INIT_LIST_HEAD(&ar->debug.fw_stats.peers); + INIT_LIST_HEAD(&ar->debug.fw_stats.peers_extd); return 0; } @@ -2397,7 +2424,7 @@ int ath10k_debug_register(struct ath10k *ar) debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR, ar->debug.debugfs_phy, ar, &fops_nf_cal_period); - if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) { + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) { debugfs_create_file("dfs_simulate_radar", S_IWUSR, ar->debug.debugfs_phy, ar, &fops_simulate_radar); diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index 75c89e362..c458fa96a 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -154,10 +154,15 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) #ifdef CONFIG_MAC80211_DEBUGFS void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir); -void ath10k_sta_update_rx_duration(struct ath10k *ar, struct list_head *peer); +void ath10k_sta_update_rx_duration(struct ath10k *ar, + struct ath10k_fw_stats *stats); +void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo); #else -static inline void ath10k_sta_update_rx_duration(struct ath10k *ar, - struct list_head *peer) +static inline +void ath10k_sta_update_rx_duration(struct ath10k *ar, + struct ath10k_fw_stats *stats) { } #endif /* CONFIG_MAC80211_DEBUGFS */ diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c index 67ef75b60..9955fea08 100644 --- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c @@ -18,13 
+18,34 @@ #include "wmi-ops.h" #include "debug.h" -void ath10k_sta_update_rx_duration(struct ath10k *ar, struct list_head *head) -{ struct ieee80211_sta *sta; +static void ath10k_sta_update_extd_stats_rx_duration(struct ath10k *ar, + struct ath10k_fw_stats *stats) +{ + struct ath10k_fw_extd_stats_peer *peer; + struct ieee80211_sta *sta; + struct ath10k_sta *arsta; + + rcu_read_lock(); + list_for_each_entry(peer, &stats->peers_extd, list) { + sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr, + NULL); + if (!sta) + continue; + arsta = (struct ath10k_sta *)sta->drv_priv; + arsta->rx_duration += (u64)peer->rx_duration; + } + rcu_read_unlock(); +} + +static void ath10k_sta_update_stats_rx_duration(struct ath10k *ar, + struct ath10k_fw_stats *stats) +{ struct ath10k_fw_stats_peer *peer; + struct ieee80211_sta *sta; struct ath10k_sta *arsta; rcu_read_lock(); - list_for_each_entry(peer, head, list) { + list_for_each_entry(peer, &stats->peers, list) { sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr, NULL); if (!sta) @@ -35,6 +56,29 @@ void ath10k_sta_update_rx_duration(struct ath10k *ar, struct list_head *head) rcu_read_unlock(); } +void ath10k_sta_update_rx_duration(struct ath10k *ar, + struct ath10k_fw_stats *stats) +{ + if (stats->extended) + ath10k_sta_update_extd_stats_rx_duration(ar, stats); + else + ath10k_sta_update_stats_rx_duration(ar, stats); +} + +void ath10k_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct station_info *sinfo) +{ + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ath10k *ar = arsta->arvif->ar; + + if (!ath10k_peer_stats_enabled(ar)) + return; + + sinfo->rx_duration = arsta->rx_duration; + sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION; +} + static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -249,28 +293,6 @@ static const struct file_operations fops_delba = { .llseek = default_llseek, }; -static ssize_t ath10k_dbg_sta_read_rx_duration(struct file *file, - char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct ieee80211_sta *sta = file->private_data; - struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; - char buf[100]; - int len = 0; - - len = scnprintf(buf, sizeof(buf), - "%llu usecs\n", arsta->rx_duration); - - return simple_read_from_buffer(user_buf, count, ppos, buf, len); -} - -static const struct file_operations fops_rx_duration = { - .read = ath10k_dbg_sta_read_rx_duration, - .open = simple_open, - .owner = THIS_MODULE, - .llseek = default_llseek, -}; - void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir) { @@ -279,6 +301,4 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba); debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp); debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba); - debugfs_create_file("rx_duration", S_IRUGO, dir, sta, - &fops_rx_duration); } diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h index 89e7076c9..b2566b06e 100644 --- a/drivers/net/wireless/ath/ath10k/hif.h +++ b/drivers/net/wireless/ath/ath10k/hif.h @@ -87,6 +87,10 @@ struct ath10k_hif_ops { int (*suspend)(struct ath10k *ar); int (*resume)(struct ath10k *ar); + + /* fetch calibration data from target eeprom */ + int (*fetch_cal_eeprom)(struct ath10k *ar, 
void **data, + size_t *data_len); }; static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id, @@ -202,4 +206,14 @@ static inline void ath10k_hif_write32(struct ath10k *ar, ar->hif.ops->write32(ar, address, data); } +static inline int ath10k_hif_fetch_cal_eeprom(struct ath10k *ar, + void **data, + size_t *data_len) +{ + if (!ar->hif.ops->fetch_cal_eeprom) + return -EOPNOTSUPP; + + return ar->hif.ops->fetch_cal_eeprom(ar, data, data_len); +} + #endif /* _HIF_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h index cc827185d..0c55cd92a 100644 --- a/drivers/net/wireless/ath/ath10k/htc.h +++ b/drivers/net/wireless/ath/ath10k/htc.h @@ -22,7 +22,6 @@ #include <linux/list.h> #include <linux/bug.h> #include <linux/skbuff.h> -#include <linux/semaphore.h> #include <linux/timer.h> struct ath10k; diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 911c535d0..430a83e14 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -485,10 +485,10 @@ struct htt_mgmt_tx_completion { __le32 status; } __packed; -#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x3F) +#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x1F) #define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0) -#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 6) -#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 7) +#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 5) +#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6) #define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F #define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0 diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 813cdd262..24c8d65bc 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -748,7 +748,7 @@ ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd) if (WARN_ON_ONCE(!arvif)) return NULL; - if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) + if (ath10k_mac_vif_chan(arvif->vif, &def)) return NULL; return def.chan; @@ -939,7 +939,8 @@ static void ath10k_process_rx(struct ath10k *ar, is_multicast_ether_addr(ieee80211_get_DA(hdr)) ? "mcast" : "ucast", (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4, - status->flag == 0 ? "legacy" : "", + (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) == 0 ? + "legacy" : "", status->flag & RX_FLAG_HT ? "ht" : "", status->flag & RX_FLAG_VHT ? "vht" : "", status->flag & RX_FLAG_40MHZ ? 
"40" : "", @@ -1524,7 +1525,7 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar, static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; - static struct ieee80211_rx_status rx_status; + struct ieee80211_rx_status *rx_status = &htt->rx_status; struct sk_buff_head amsdu; int ret; @@ -1548,11 +1549,11 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt) return ret; } - ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff); + ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff); ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0); - ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status); - ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status); - ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status); + ath10k_htt_rx_h_filter(ar, &amsdu, rx_status); + ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status); + ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status); return 0; } @@ -2181,34 +2182,6 @@ static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar, ath10k_mac_tx_push_pending(ar); } -static inline enum nl80211_band phy_mode_to_band(u32 phy_mode) -{ - enum nl80211_band band; - - switch (phy_mode) { - case MODE_11A: - case MODE_11NA_HT20: - case MODE_11NA_HT40: - case MODE_11AC_VHT20: - case MODE_11AC_VHT40: - case MODE_11AC_VHT80: - band = NL80211_BAND_5GHZ; - break; - case MODE_11G: - case MODE_11B: - case MODE_11GONLY: - case MODE_11NG_HT20: - case MODE_11NG_HT40: - case MODE_11AC_VHT20_2G: - case MODE_11AC_VHT40_2G: - case MODE_11AC_VHT80_2G: - default: - band = NL80211_BAND_2GHZ; - } - - return band; -} - void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) { bool release; @@ -2290,7 +2263,6 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) ath10k_htt_tx_mgmt_dec_pending(htt); spin_unlock_bh(&htt->tx_lock); } - ath10k_mac_tx_push_pending(ar); break; } case HTT_T2H_MSG_TYPE_TX_COMPL_IND: @@ -2335,12 +2307,10 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) ath10k_htt_rx_delba(ar, resp); break; case HTT_T2H_MSG_TYPE_PKTLOG: { - struct ath10k_pktlog_hdr *hdr = - (struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload; - trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload, - sizeof(*hdr) + - __le16_to_cpu(hdr->size)); + skb->len - + offsetof(struct htt_resp, + pktlog_msg.payload)); break; } case HTT_T2H_MSG_TYPE_RX_FLUSH: { @@ -2441,8 +2411,6 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) dev_kfree_skb_any(skb); } - ath10k_mac_tx_push_pending(ar); - num_mpdus = atomic_read(&htt->num_mpdus_ready); while (num_mpdus) { diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 6269c610b..7c072b605 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -49,7 +49,7 @@ static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw, struct ieee80211_txq *txq) { struct ath10k *ar = hw->priv; - struct ath10k_sta *arsta = (void *)txq->sta->drv_priv; + struct ath10k_sta *arsta; struct ath10k_vif *arvif = (void *)txq->vif->drv_priv; unsigned long frame_cnt; unsigned long byte_cnt; @@ -67,10 +67,12 @@ static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw, if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL) return; - if (txq->sta) + if (txq->sta) { + arsta = (void *)txq->sta->drv_priv; peer_id = arsta->peer_id; - else + } else { peer_id = arvif->peer_id; + } tid = txq->tid; bit = BIT(peer_id % 32); @@ -388,6 +390,8 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt) { int size; + 
tasklet_kill(&htt->txrx_compl_task); + idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar); idr_destroy(&htt->pending_tx); @@ -733,16 +737,18 @@ static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); - struct ath10k_vif *arvif = (void *)cb->vif->drv_priv; + struct ath10k_vif *arvif; - if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) + if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { return ar->scan.vdev_id; - else if (cb->vif) + } else if (cb->vif) { + arvif = (void *)cb->vif->drv_priv; return arvif->vdev_id; - else if (ar->monitor_started) + } else if (ar->monitor_started) { return ar->monitor_vdev_id; - else + } else { return 0; + } } static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth) diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index f544d4851..f903d468d 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -19,7 +19,6 @@ #include "hw.h" const struct ath10k_hw_regs qca988x_regs = { - .rtc_state_cold_reset_mask = 0x00000400, .rtc_soc_base_address = 0x00004000, .rtc_wmac_base_address = 0x00005000, .soc_core_base_address = 0x00009000, @@ -46,7 +45,6 @@ const struct ath10k_hw_regs qca988x_regs = { }; const struct ath10k_hw_regs qca6174_regs = { - .rtc_state_cold_reset_mask = 0x00002000, .rtc_soc_base_address = 0x00000800, .rtc_wmac_base_address = 0x00001000, .soc_core_base_address = 0x0003a000, @@ -73,7 +71,6 @@ const struct ath10k_hw_regs qca6174_regs = { }; const struct ath10k_hw_regs qca99x0_regs = { - .rtc_state_cold_reset_mask = 0x00000400, .rtc_soc_base_address = 0x00080000, .rtc_wmac_base_address = 0x00000000, .soc_core_base_address = 0x00082000, @@ -168,6 +165,15 @@ const struct ath10k_hw_values qca99x0_values = { .ce_desc_meta_data_lsb = 4, }; +const struct ath10k_hw_values qca9888_values = { + .rtc_state_val_on = 3, + .ce_count = 12, + .msi_assign_ce_max = 12, + .num_target_ce_config_wlan = 10, + .ce_desc_meta_data_mask = 0xFFF0, + .ce_desc_meta_data_lsb = 4, +}; + const struct ath10k_hw_values qca4019_values = { .ce_count = 12, .num_target_ce_config_wlan = 10, @@ -179,17 +185,36 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev) { u32 cc_fix = 0; + u32 rcc_fix = 0; + enum ath10k_hw_cc_wraparound_type wraparound_type; survey->filled |= SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY; - if (ar->hw_params.has_shifted_cc_wraparound && cc < cc_prev) { - cc_fix = 0x7fffffff; - survey->filled &= ~SURVEY_INFO_TIME_BUSY; + wraparound_type = ar->hw_params.cc_wraparound_type; + + if (cc < cc_prev || rcc < rcc_prev) { + switch (wraparound_type) { + case ATH10K_HW_CC_WRAP_SHIFTED_ALL: + if (cc < cc_prev) { + cc_fix = 0x7fffffff; + survey->filled &= ~SURVEY_INFO_TIME_BUSY; + } + break; + case ATH10K_HW_CC_WRAP_SHIFTED_EACH: + if (cc < cc_prev) + cc_fix = 0x7fffffff; + + if (rcc < rcc_prev) + rcc_fix = 0x7fffffff; + break; + case ATH10K_HW_CC_WRAP_DISABLED: + break; + } } cc -= cc_prev - cc_fix; - rcc -= rcc_prev; + rcc -= rcc_prev - rcc_fix; survey->time = CCNT_TO_MSEC(ar, cc); survey->time_busy = CCNT_TO_MSEC(ar, rcc); diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 1f9774f7a..bf0015476 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -26,7 +26,10 @@ #define QCA6164_2_1_DEVICE_ID (0x0041) 
#define QCA6174_2_1_DEVICE_ID (0x003e) #define QCA99X0_2_0_DEVICE_ID (0x0040) +#define QCA9888_2_0_DEVICE_ID (0x0056) +#define QCA9984_1_0_DEVICE_ID (0x0046) #define QCA9377_1_0_DEVICE_ID (0x0042) +#define QCA9887_1_0_DEVICE_ID (0x0050) /* QCA988X 1.0 definitions (unsupported) */ #define QCA988X_HW_1_0_CHIP_ID_REV 0x0 @@ -38,6 +41,13 @@ #define QCA988X_HW_2_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/" #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 +/* QCA9887 1.0 definitions */ +#define QCA9887_HW_1_0_VERSION 0x4100016d +#define QCA9887_HW_1_0_CHIP_ID_REV 0 +#define QCA9887_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9887/hw1.0" +#define QCA9887_HW_1_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/" +#define QCA9887_HW_1_0_PATCH_LOAD_ADDR 0x1234 + /* QCA6174 target BMI version signatures */ #define QCA6174_HW_1_0_VERSION 0x05000000 #define QCA6174_HW_1_1_VERSION 0x05000001 @@ -91,6 +101,22 @@ enum qca9377_chip_id_rev { #define QCA99X0_HW_2_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/" #define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234 +/* QCA9984 1.0 defines */ +#define QCA9984_HW_1_0_DEV_VERSION 0x1000000 +#define QCA9984_HW_DEV_TYPE 0xa +#define QCA9984_HW_1_0_CHIP_ID_REV 0x0 +#define QCA9984_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9984/hw1.0" +#define QCA9984_HW_1_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/" +#define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234 + +/* QCA9888 2.0 defines */ +#define QCA9888_HW_2_0_DEV_VERSION 0x1000000 +#define QCA9888_HW_DEV_TYPE 0xc +#define QCA9888_HW_2_0_CHIP_ID_REV 0x0 +#define QCA9888_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA9888/hw2.0" +#define QCA9888_HW_2_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/" +#define QCA9888_HW_2_0_PATCH_LOAD_ADDR 0x1234 + /* QCA9377 1.0 definitions */ #define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0" #define QCA9377_HW_1_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/" @@ -193,12 +219,14 @@ enum ath10k_hw_rev { ATH10K_HW_QCA988X, ATH10K_HW_QCA6174, ATH10K_HW_QCA99X0, + ATH10K_HW_QCA9888, + ATH10K_HW_QCA9984, ATH10K_HW_QCA9377, ATH10K_HW_QCA4019, + ATH10K_HW_QCA9887, }; struct ath10k_hw_regs { - u32 rtc_state_cold_reset_mask; u32 rtc_soc_base_address; u32 rtc_wmac_base_address; u32 soc_core_base_address; @@ -241,14 +269,18 @@ struct ath10k_hw_values { extern const struct ath10k_hw_values qca988x_values; extern const struct ath10k_hw_values qca6174_values; extern const struct ath10k_hw_values qca99x0_values; +extern const struct ath10k_hw_values qca9888_values; extern const struct ath10k_hw_values qca4019_values; void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev); #define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X) +#define QCA_REV_9887(ar) ((ar)->hw_rev == ATH10K_HW_QCA9887) #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174) #define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0) +#define QCA_REV_9888(ar) ((ar)->hw_rev == ATH10K_HW_QCA9888) +#define QCA_REV_9984(ar) ((ar)->hw_rev == ATH10K_HW_QCA9984) #define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377) #define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019) @@ -275,25 +307,6 @@ enum ath10k_mcast2ucast_mode { ATH10K_MCAST2UCAST_ENABLED = 1, }; -struct ath10k_pktlog_hdr { - __le16 flags; - __le16 missed_cnt; - __le16 log_type; - __le16 size; - __le32 timestamp; - u8 payload[0]; -} __packed; - -struct ath10k_pktlog_10_4_hdr { - __le16 flags; - __le16 missed_cnt; - __le16 log_type; - __le16 size; - __le32 timestamp; - __le32 type_specific_data; - u8 payload[0]; -} __packed; - enum ath10k_hw_rate_ofdm { ATH10K_HW_RATE_OFDM_48M = 0, ATH10K_HW_RATE_OFDM_24M, 
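A standalone restatement of the cycle-counter correction may help here: ath10k_hw_fill_survey_time() in hw.c above compensates for counters that restart at 0x7fffffff instead of 0 when they wrap, as documented by enum ath10k_hw_cc_wraparound_type in the next hunk. The sketch below mirrors only that arithmetic; the helper name and plain C types are illustrative assumptions, not driver API.

#include <stdint.h>

/* Minimal sketch of the wraparound fixup in ath10k_hw_fill_survey_time():
 * if the counter wrapped (cur < prev), it restarted at 0x7fffffff rather
 * than 0, so the elapsed count is cur - (prev - 0x7fffffff), computed in
 * modular uint32_t arithmetic.
 */
static uint32_t cc_delta(uint32_t cur, uint32_t prev)
{
	uint32_t fix = 0;

	if (cur < prev)
		fix = 0x7fffffff;	/* counter restarted here, not at 0 */

	return cur - (prev - fix);
}

With ATH10K_HW_CC_WRAP_SHIFTED_ALL only the cycle count gets this fixup and SURVEY_INFO_TIME_BUSY is dropped for that sample; with ATH10K_HW_CC_WRAP_SHIFTED_EACH the same correction is applied independently to the Rx clear count as well, which is why that type can still report accurate busy time across a wrap.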
@@ -315,11 +328,41 @@ enum ath10k_hw_rate_cck { ATH10K_HW_RATE_CCK_SP_2M, }; +enum ath10k_hw_rate_rev2_cck { + ATH10K_HW_RATE_REV2_CCK_LP_1M = 1, + ATH10K_HW_RATE_REV2_CCK_LP_2M, + ATH10K_HW_RATE_REV2_CCK_LP_5_5M, + ATH10K_HW_RATE_REV2_CCK_LP_11M, + ATH10K_HW_RATE_REV2_CCK_SP_2M, + ATH10K_HW_RATE_REV2_CCK_SP_5_5M, + ATH10K_HW_RATE_REV2_CCK_SP_11M, +}; + enum ath10k_hw_4addr_pad { ATH10K_HW_4ADDR_PAD_AFTER, ATH10K_HW_4ADDR_PAD_BEFORE, }; +enum ath10k_hw_cc_wraparound_type { + ATH10K_HW_CC_WRAP_DISABLED = 0, + + /* This type is when the HW chip has a quirky Cycle Counter + * wraparound which resets to 0x7fffffff instead of 0. All + * other CC related counters (e.g. Rx Clear Count) are divided + * by 2 so they never wrap around themselves. + */ + ATH10K_HW_CC_WRAP_SHIFTED_ALL = 1, + + /* Each hw counter wraps around independently. When the + * counter overflows, the respective counter is right shifted + * by 1, i.e. reset to 0x7fffffff, and other counters will be + * running unaffected. In this type of wraparound, it should + * be possible to report accurate Rx busy time unlike the + * first type. + */ + ATH10K_HW_CC_WRAP_SHIFTED_EACH = 2, +}; + /* Target specific defines for MAIN firmware */ #define TARGET_NUM_VDEVS 8 #define TARGET_NUM_PEER_AST 2 @@ -484,7 +527,6 @@ enum ath10k_hw_4addr_pad { /* as of IP3.7.1 */ #define RTC_STATE_V_ON ar->hw_values->rtc_state_val_on -#define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask #define RTC_STATE_V_LSB 0 #define RTC_STATE_V_MASK 0x00000007 #define RTC_STATE_ADDRESS 0x0000 @@ -547,7 +589,10 @@ enum ath10k_hw_4addr_pad { #define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001 #define WLAN_GPIO_PIN0_ADDRESS 0x00000028 +#define WLAN_GPIO_PIN0_CONFIG_LSB 11 #define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800 +#define WLAN_GPIO_PIN0_PAD_PULL_LSB 5 +#define WLAN_GPIO_PIN0_PAD_PULL_MASK 0x00000060 #define WLAN_GPIO_PIN1_ADDRESS 0x0000002c #define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800 #define WLAN_GPIO_PIN10_ADDRESS 0x00000050 @@ -560,6 +605,8 @@ enum ath10k_hw_4addr_pad { #define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0 #define SI_CONFIG_OFFSET 0x00000000 +#define SI_CONFIG_ERR_INT_LSB 19 +#define SI_CONFIG_ERR_INT_MASK 0x00080000 #define SI_CONFIG_BIDIR_OD_DATA_LSB 18 #define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000 #define SI_CONFIG_I2C_LSB 16 @@ -573,7 +620,9 @@ enum ath10k_hw_4addr_pad { #define SI_CONFIG_DIVIDER_LSB 0 #define SI_CONFIG_DIVIDER_MASK 0x0000000f #define SI_CS_OFFSET 0x00000004 +#define SI_CS_DONE_ERR_LSB 10 #define SI_CS_DONE_ERR_MASK 0x00000400 +#define SI_CS_DONE_INT_LSB 9 #define SI_CS_DONE_INT_MASK 0x00000200 #define SI_CS_START_LSB 8 #define SI_CS_START_MASK 0x00000100 @@ -624,7 +673,10 @@ enum ath10k_hw_4addr_pad { #define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS #define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS #define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS +#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB #define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK +#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB +#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK #define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK #define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS #define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS @@ -679,6 +731,18 @@ enum ath10k_hw_4addr_pad { #define WINDOW_READ_ADDR_ADDRESS MISSING #define WINDOW_WRITE_ADDR_ADDRESS MISSING +#define QCA9887_1_0_I2C_SDA_GPIO_PIN 5 +#define QCA9887_1_0_I2C_SDA_PIN_CONFIG 3 +#define QCA9887_1_0_SI_CLK_GPIO_PIN 17 +#define QCA9887_1_0_SI_CLK_PIN_CONFIG 3 +#define
QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS 0x00000010 + +#define QCA9887_EEPROM_SELECT_READ 0xa10000a0 +#define QCA9887_EEPROM_ADDR_HI_MASK 0x0000ff00 +#define QCA9887_EEPROM_ADDR_HI_LSB 8 +#define QCA9887_EEPROM_ADDR_LO_MASK 0x00ff0000 +#define QCA9887_EEPROM_ADDR_LO_LSB 16 + #define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB) #endif /* _HW_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 4040f9413..0bbd0a00e 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -62,6 +62,32 @@ static struct ieee80211_rate ath10k_rates[] = { { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, }; +static struct ieee80211_rate ath10k_rates_rev2[] = { + { .bitrate = 10, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M }, + { .bitrate = 20, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 55, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + { .bitrate = 110, + .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M, + .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M, + .flags = IEEE80211_RATE_SHORT_PREAMBLE }, + + { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M }, + { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M }, + { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M }, + { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M }, + { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M }, + { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M }, + { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M }, + { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M }, +}; + #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4 #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX) @@ -70,6 +96,9 @@ static struct ieee80211_rate ath10k_rates[] = { #define ath10k_g_rates (ath10k_rates + 0) #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates)) +#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0) +#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2)) + static bool ath10k_mac_bitrate_is_cck(int bitrate) { switch (bitrate) { @@ -773,6 +802,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) { struct ath10k_peer *peer, *tmp; int peer_id; + int i; lockdep_assert_held(&ar->conf_mutex); @@ -789,6 +819,17 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) ar->peer_map[peer_id] = NULL; } + /* Double check that peer is properly un-referenced from + * the peer_map + */ + for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { + if (ar->peer_map[i] == peer) { + ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %p idx %d)\n", + peer->addr, peer, i); + ar->peer_map[i] = NULL; + } + } + list_del(&peer->list); kfree(peer); ar->num_peers--; @@ -799,6 +840,7 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) static void ath10k_peer_cleanup_all(struct ath10k *ar) { struct ath10k_peer *peer, *tmp; + int i; lockdep_assert_held(&ar->conf_mutex); @@ -807,6 +849,10 @@ static void ath10k_peer_cleanup_all(struct ath10k *ar) list_del(&peer->list); kfree(peer); } + + for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) + ar->peer_map[i] = NULL; + spin_unlock_bh(&ar->data_lock); ar->num_peers = 0; @@ -2910,7 +2956,7 @@ static int ath10k_update_channel_list(struct ath10k *ar) if (channel->flags & IEEE80211_CHAN_DISABLED) continue; - ch->allow_ht = true; + ch->allow_ht = true; /* 
FIXME: when should we really allow VHT? */ ch->allow_vht = true; @@ -2993,7 +3039,7 @@ static void ath10k_regd_update(struct ath10k *ar) regpair = ar->ath_common.regulatory.regpair; - if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { nl_dfs_reg = ar->dfs_detector->region; wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg); } else { @@ -3022,7 +3068,7 @@ static void ath10k_reg_notifier(struct wiphy *wiphy, ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); - if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", request->dfs_region); result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, @@ -3646,17 +3692,18 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) static void ath10k_mac_txq_init(struct ieee80211_txq *txq) { - struct ath10k_txq *artxq = (void *)txq->drv_priv; + struct ath10k_txq *artxq; if (!txq) return; + artxq = (void *)txq->drv_priv; INIT_LIST_HEAD(&artxq->list); } static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) { - struct ath10k_txq *artxq = (void *)txq->drv_priv; + struct ath10k_txq *artxq; struct ath10k_skb_cb *cb; struct sk_buff *msdu; int msdu_id; @@ -3664,6 +3711,7 @@ static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) if (!txq) return; + artxq = (void *)txq->drv_priv; spin_lock_bh(&ar->txqs_lock); if (!list_empty(&artxq->list)) list_del_init(&artxq->list); @@ -3781,6 +3829,9 @@ void ath10k_mac_tx_push_pending(struct ath10k *ar) int ret; int max; + if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2)) + return; + spin_lock_bh(&ar->txqs_lock); rcu_read_lock(); @@ -3826,12 +3877,16 @@ void __ath10k_scan_finish(struct ath10k *ar) break; case ATH10K_SCAN_RUNNING: case ATH10K_SCAN_ABORTING: - if (!ar->scan.is_roc) - ieee80211_scan_completed(ar->hw, - (ar->scan.state == - ATH10K_SCAN_ABORTING)); - else if (ar->scan.roc_notify) + if (!ar->scan.is_roc) { + struct cfg80211_scan_info info = { + .aborted = (ar->scan.state == + ATH10K_SCAN_ABORTING), + }; + + ieee80211_scan_completed(ar->hw, &info); + } else if (ar->scan.roc_notify) { ieee80211_remain_on_channel_expired(ar->hw); + } /* fall through */ case ATH10K_SCAN_STARTING: ar->scan.state = ATH10K_SCAN_IDLE; @@ -4051,9 +4106,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, list_add_tail(&artxq->list, &ar->txqs); spin_unlock_bh(&ar->txqs_lock); - if (ath10k_mac_tx_can_push(hw, txq)) - tasklet_schedule(&ar->htt.txrx_compl_task); - + ath10k_mac_tx_push_pending(ar); ath10k_htt_tx_txq_update(hw, txq); } @@ -4194,6 +4247,9 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); } + if (ar->cfg_tx_chainmask <= 1) + vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC; + vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); @@ -4231,7 +4287,7 @@ static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) ht_cap.cap |= smps; } - if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC) + if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1)) ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) { @@ -4467,6 +4523,19 @@ static int ath10k_start(struct ieee80211_hw *hw) } } + param = ar->wmi.pdev_param->enable_btcoex; + if 
(test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && + test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, + ar->running_fw->fw_file.fw_features)) { + ret = ath10k_wmi_pdev_set_param(ar, param, 0); + if (ret) { + ath10k_warn(ar, + "failed to set btcoex param: %d\n", ret); + goto err_core_stop; + } + clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); + } + ar->num_started_vdevs = 0; ath10k_regd_update(ar); @@ -5932,9 +6001,17 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, continue; if (peer->sta == sta) { - ath10k_warn(ar, "found sta peer %pM entry on vdev %i after it was supposedly removed\n", - sta->addr, arvif->vdev_id); + ath10k_warn(ar, "found sta peer %pM (ptr %p id %d) entry on vdev %i after it was supposedly removed\n", + sta->addr, peer, i, arvif->vdev_id); peer->sta = NULL; + + /* Clean up the peer object as well since we + * must have failed to do this above. + */ + list_del(&peer->list); + ar->peer_map[i] = NULL; + kfree(peer); + ar->num_peers--; } } spin_unlock_bh(&ar->data_lock); @@ -7359,6 +7436,7 @@ static const struct ieee80211_ops ath10k_ops = { #endif #ifdef CONFIG_MAC80211_DEBUGFS .sta_add_debugfs = ath10k_sta_add_debugfs, + .sta_statistics = ath10k_sta_statistics, #endif }; @@ -7428,21 +7506,32 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = { struct ath10k *ath10k_mac_create(size_t priv_size) { struct ieee80211_hw *hw; + struct ieee80211_ops *ops; struct ath10k *ar; - hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, &ath10k_ops); - if (!hw) + ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL); + if (!ops) return NULL; + hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops); + if (!hw) { + kfree(ops); + return NULL; + } + ar = hw->priv; ar->hw = hw; + ar->ops = ops; return ar; } void ath10k_mac_destroy(struct ath10k *ar) { + struct ieee80211_ops *ops = ar->ops; + ieee80211_free_hw(ar->hw); + kfree(ops); } static const struct ieee80211_iface_limit ath10k_if_limits[] = { @@ -7695,8 +7784,14 @@ int ath10k_mac_register(struct ath10k *ar) band = &ar->mac.sbands[NL80211_BAND_2GHZ]; band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); band->channels = channels; - band->n_bitrates = ath10k_g_rates_size; - band->bitrates = ath10k_g_rates; + + if (ar->hw_params.cck_rate_map_rev2) { + band->n_bitrates = ath10k_g_rates_rev2_size; + band->bitrates = ath10k_g_rates_rev2; + } else { + band->n_bitrates = ath10k_g_rates_size; + band->bitrates = ath10k_g_rates; + } ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band; } @@ -7860,7 +7955,7 @@ int ath10k_mac_register(struct ath10k *ar) if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) ar->hw->netdev_features = NETIF_F_HW_CSUM; - if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) { + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) { /* Init ath dfs pattern detector */ ar->ath_common.debug_mask = ATH_DBG_DFS; ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common, @@ -7870,6 +7965,15 @@ int ath10k_mac_register(struct ath10k *ar) ath10k_warn(ar, "failed to initialise DFS pattern detector\n"); } + /* Current wake_tx_queue implementation imposes a significant + * performance penalty in some setups. The tx scheduling code needs + * more work anyway so disable the wake_tx_queue unless firmware + * supports the pull-push mechanism. 
+ */ + if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, + ar->running_fw->fw_file.fw_features)) + ar->ops->wake_tx_queue = NULL; + ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, ath10k_reg_notifier); if (ret) { @@ -7899,7 +8003,7 @@ err_unregister: ieee80211_unregister_hw(ar->hw); err_dfs_detector_exit: - if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) ar->dfs_detector->exit(ar->dfs_detector); err_free: @@ -7914,7 +8018,7 @@ void ath10k_mac_unregister(struct ath10k *ar) { ieee80211_unregister_hw(ar->hw); - if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) + if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) ar->dfs_detector->exit(ar->dfs_detector); kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index d2674ade4..b4eb1cd60 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -56,7 +56,10 @@ static const struct pci_device_id ath10k_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */ { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */ { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */ + { PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */ + { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */ { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */ + { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */ {0} }; @@ -81,8 +84,14 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = { { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV }, + { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV }, + + { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV }, + { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV }, { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV }, + + { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV }, }; static void ath10k_pci_buffer_cleanup(struct ath10k *ar); @@ -837,13 +846,16 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) switch (ar->hw_rev) { case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA9887: case ATH10K_HW_QCA6174: case ATH10K_HW_QCA9377: val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS) & 0x7ff) << 21; break; + case ATH10K_HW_QCA9888: case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA9984: case ATH10K_HW_QCA4019: val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS); break; @@ -864,7 +876,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret = 0; u32 *buf; - unsigned int completed_nbytes, orig_nbytes, remaining_bytes; + unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; struct ath10k_ce_pipe *ce_diag; /* Host buffer address in CE space */ u32 ce_data; @@ -882,9 +894,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, * 1) 4-byte alignment * 2) Buffer in DMA-able space */ - orig_nbytes = nbytes; + alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT); + data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, - orig_nbytes, + alloc_nbytes, &ce_data_base, GFP_ATOMIC); @@ -892,9 +905,9 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, ret = -ENOMEM; goto done; } - memset(data_buf, 0, orig_nbytes); + memset(data_buf, 0, 
alloc_nbytes); - remaining_bytes = orig_nbytes; + remaining_bytes = nbytes; ce_data = ce_data_base; while (remaining_bytes) { nbytes = min_t(unsigned int, remaining_bytes, @@ -954,19 +967,22 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, } remaining_bytes -= nbytes; + + if (ret) { + ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n", + address, ret); + break; + } + memcpy(data, data_buf, nbytes); + address += nbytes; - ce_data += nbytes; + data += nbytes; } done: - if (ret == 0) - memcpy(data, data_buf, orig_nbytes); - else - ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n", - address, ret); if (data_buf) - dma_free_coherent(ar->dev, orig_nbytes, data_buf, + dma_free_coherent(ar->dev, alloc_nbytes, data_buf, ce_data_base); spin_unlock_bh(&ar_pci->ce_lock); @@ -1560,6 +1576,7 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) switch (ar->hw_rev) { case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA9887: case ATH10K_HW_QCA6174: case ATH10K_HW_QCA9377: val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + @@ -1569,6 +1586,8 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) CORE_CTRL_ADDRESS, val); break; case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA9984: + case ATH10K_HW_QCA9888: case ATH10K_HW_QCA4019: /* TODO: Find appropriate register configuration for QCA99X0 * to mask irq/MSI. @@ -1583,6 +1602,7 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) switch (ar->hw_rev) { case ATH10K_HW_QCA988X: + case ATH10K_HW_QCA9887: case ATH10K_HW_QCA6174: case ATH10K_HW_QCA9377: val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + @@ -1592,6 +1612,8 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) CORE_CTRL_ADDRESS, val); break; case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA9984: + case ATH10K_HW_QCA9888: case ATH10K_HW_QCA4019: /* TODO: Find appropriate register configuration for QCA99X0 * to unmask irq/MSI. 
@@ -1932,6 +1954,9 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar) switch (ar_pci->pdev->device) { case QCA988X_2_0_DEVICE_ID: case QCA99X0_2_0_DEVICE_ID: + case QCA9888_2_0_DEVICE_ID: + case QCA9984_1_0_DEVICE_ID: + case QCA9887_1_0_DEVICE_ID: return 1; case QCA6164_2_1_DEVICE_ID: case QCA6174_2_1_DEVICE_ID: @@ -2198,6 +2223,14 @@ static void ath10k_pci_fw_crashed_clear(struct ath10k *ar) ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val); } +static bool ath10k_pci_has_device_gone(struct ath10k *ar) +{ + u32 val; + + val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS); + return (val == 0xffffffff); +} + /* this function effectively clears target memory controller assert line */ static void ath10k_pci_warm_reset_si0(struct ath10k *ar) { @@ -2293,16 +2326,20 @@ static int ath10k_pci_warm_reset(struct ath10k *ar) return 0; } +static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar) +{ + ath10k_pci_irq_disable(ar); + return ath10k_pci_qca99x0_chip_reset(ar); +} + static int ath10k_pci_safe_chip_reset(struct ath10k *ar) { - if (QCA_REV_988X(ar) || QCA_REV_6174(ar)) { - return ath10k_pci_warm_reset(ar); - } else if (QCA_REV_99X0(ar)) { - ath10k_pci_irq_disable(ar); - return ath10k_pci_qca99x0_chip_reset(ar); - } else { + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + if (!ar_pci->pci_soft_reset) return -ENOTSUPP; - } + + return ar_pci->pci_soft_reset(ar); } static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar) @@ -2437,16 +2474,12 @@ static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar) static int ath10k_pci_chip_reset(struct ath10k *ar) { - if (QCA_REV_988X(ar)) - return ath10k_pci_qca988x_chip_reset(ar); - else if (QCA_REV_6174(ar)) - return ath10k_pci_qca6174_chip_reset(ar); - else if (QCA_REV_9377(ar)) - return ath10k_pci_qca6174_chip_reset(ar); - else if (QCA_REV_99X0(ar)) - return ath10k_pci_qca99x0_chip_reset(ar); - else + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + if (WARN_ON(!ar_pci->pci_hard_reset)) return -ENOTSUPP; + + return ar_pci->pci_hard_reset(ar); } static int ath10k_pci_hif_power_up(struct ath10k *ar) @@ -2559,6 +2592,144 @@ static int ath10k_pci_hif_resume(struct ath10k *ar) } #endif +static bool ath10k_pci_validate_cal(void *data, size_t size) +{ + __le16 *cal_words = data; + u16 checksum = 0; + size_t i; + + if (size % 2 != 0) + return false; + + for (i = 0; i < size / 2; i++) + checksum ^= le16_to_cpu(cal_words[i]); + + return checksum == 0xffff; +} + +static void ath10k_pci_enable_eeprom(struct ath10k *ar) +{ + /* Enable SI clock */ + ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0); + + /* Configure GPIOs for I2C operation */ + ath10k_pci_write32(ar, + GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET + + 4 * QCA9887_1_0_I2C_SDA_GPIO_PIN, + SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG, + GPIO_PIN0_CONFIG) | + SM(1, GPIO_PIN0_PAD_PULL)); + + ath10k_pci_write32(ar, + GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET + + 4 * QCA9887_1_0_SI_CLK_GPIO_PIN, + SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) | + SM(1, GPIO_PIN0_PAD_PULL)); + + ath10k_pci_write32(ar, + GPIO_BASE_ADDRESS + + QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS, + 1u << QCA9887_1_0_SI_CLK_GPIO_PIN); + + /* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */ + ath10k_pci_write32(ar, + SI_BASE_ADDRESS + SI_CONFIG_OFFSET, + SM(1, SI_CONFIG_ERR_INT) | + SM(1, SI_CONFIG_BIDIR_OD_DATA) | + SM(1, SI_CONFIG_I2C) | + SM(1, SI_CONFIG_POS_SAMPLE) | + SM(1, SI_CONFIG_INACTIVE_DATA) | + SM(1, SI_CONFIG_INACTIVE_CLK) | + SM(8, SI_CONFIG_DIVIDER)); +} + +static int ath10k_pci_read_eeprom(struct 
ath10k *ar, u16 addr, u8 *out) +{ + u32 reg; + int wait_limit; + + /* set device select byte and the read operation */ + reg = QCA9887_EEPROM_SELECT_READ | + SM(addr, QCA9887_EEPROM_ADDR_LO) | + SM(addr >> 8, QCA9887_EEPROM_ADDR_HI); + ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg); + + /* write transmit data, transfer length, and START bit */ + ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, + SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) | + SM(4, SI_CS_TX_CNT)); + + /* wait max 1 sec */ + wait_limit = 100000; + + /* wait for SI_CS_DONE_INT */ + do { + reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET); + if (MS(reg, SI_CS_DONE_INT)) + break; + + wait_limit--; + udelay(10); + } while (wait_limit > 0); + + if (!MS(reg, SI_CS_DONE_INT)) { + ath10k_err(ar, "timeout while reading device EEPROM at %04x\n", + addr); + return -ETIMEDOUT; + } + + /* clear SI_CS_DONE_INT */ + ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg); + + if (MS(reg, SI_CS_DONE_ERR)) { + ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr); + return -EIO; + } + + /* extract receive data */ + reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET); + *out = reg; + + return 0; +} + +static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data, + size_t *data_len) +{ + u8 *caldata = NULL; + size_t calsize, i; + int ret; + + if (!QCA_REV_9887(ar)) + return -EOPNOTSUPP; + + calsize = ar->hw_params.cal_data_len; + caldata = kmalloc(calsize, GFP_KERNEL); + if (!caldata) + return -ENOMEM; + + ath10k_pci_enable_eeprom(ar); + + for (i = 0; i < calsize; i++) { + ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]); + if (ret) + goto err_free; + } + + if (!ath10k_pci_validate_cal(caldata, calsize)) + goto err_free; + + *data = caldata; + *data_len = calsize; + + return 0; + +err_free: + kfree(caldata); + + return -EINVAL; +} + static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .tx_sg = ath10k_pci_hif_tx_sg, .diag_read = ath10k_pci_hif_diag_read, @@ -2578,6 +2749,7 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = { .suspend = ath10k_pci_hif_suspend, .resume = ath10k_pci_hif_resume, #endif + .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom, }; /* @@ -2591,6 +2763,9 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg) struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; + if (ath10k_pci_has_device_gone(ar)) + return IRQ_NONE; + ret = ath10k_pci_force_wake(ar); if (ret) { ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret); @@ -2976,24 +3151,52 @@ static int ath10k_pci_probe(struct pci_dev *pdev, enum ath10k_hw_rev hw_rev; u32 chip_id; bool pci_ps; + int (*pci_soft_reset)(struct ath10k *ar); + int (*pci_hard_reset)(struct ath10k *ar); switch (pci_dev->device) { case QCA988X_2_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA988X; pci_ps = false; + pci_soft_reset = ath10k_pci_warm_reset; + pci_hard_reset = ath10k_pci_qca988x_chip_reset; + break; + case QCA9887_1_0_DEVICE_ID: + hw_rev = ATH10K_HW_QCA9887; + pci_ps = false; + pci_soft_reset = ath10k_pci_warm_reset; + pci_hard_reset = ath10k_pci_qca988x_chip_reset; break; case QCA6164_2_1_DEVICE_ID: case QCA6174_2_1_DEVICE_ID: hw_rev = ATH10K_HW_QCA6174; pci_ps = true; + pci_soft_reset = ath10k_pci_warm_reset; + pci_hard_reset = ath10k_pci_qca6174_chip_reset; break; case QCA99X0_2_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA99X0; pci_ps = false; + pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; + pci_hard_reset = ath10k_pci_qca99x0_chip_reset; + break; + case QCA9984_1_0_DEVICE_ID: +
hw_rev = ATH10K_HW_QCA9984; + pci_ps = false; + pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; + pci_hard_reset = ath10k_pci_qca99x0_chip_reset; + break; + case QCA9888_2_0_DEVICE_ID: + hw_rev = ATH10K_HW_QCA9888; + pci_ps = false; + pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset; + pci_hard_reset = ath10k_pci_qca99x0_chip_reset; break; case QCA9377_1_0_DEVICE_ID: hw_rev = ATH10K_HW_QCA9377; pci_ps = true; + pci_soft_reset = NULL; + pci_hard_reset = ath10k_pci_qca6174_chip_reset; break; default: WARN_ON(1); @@ -3018,6 +3221,8 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ar->dev_id = pci_dev->device; ar_pci->pci_ps = pci_ps; ar_pci->bus_ops = &ath10k_pci_bus_ops; + ar_pci->pci_soft_reset = pci_soft_reset; + ar_pci->pci_hard_reset = pci_hard_reset; ar->id.vendor = pdev->vendor; ar->id.device = pdev->device; @@ -3169,12 +3374,15 @@ static void __exit ath10k_pci_exit(void) module_exit(ath10k_pci_exit); MODULE_AUTHOR("Qualcomm Atheros"); -MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices"); +MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices"); MODULE_LICENSE("Dual BSD/GPL"); /* QCA988x 2.0 firmware files */ /*(DEBLOBBED)*/ +/* QCA9887 1.0 firmware files */ +/*(DEBLOBBED)*/ + /* QCA6174 2.1 firmware files */ /*(DEBLOBBED)*/ diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index 959dc321b..6eca1df2c 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -234,6 +234,12 @@ struct ath10k_pci { const struct ath10k_bus_ops *bus_ops; + /* Chip specific pci reset routine used to do a safe reset */ + int (*pci_soft_reset)(struct ath10k *ar); + + /* Chip specific pci full reset function */ + int (*pci_hard_reset)(struct ath10k *ar); + /* Keep this entry last, memory for struct ath10k_ahb is * allocated (ahb support enabled case) in the continuation of * this struct. diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h index ca8d16884..034e7a54c 100644 --- a/drivers/net/wireless/ath/ath10k/rx_desc.h +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h @@ -656,26 +656,6 @@ struct rx_msdu_end { * Reserved: HW should fill with zero. FW should ignore. */ -#define RX_PPDU_START_SIG_RATE_SELECT_OFDM 0 -#define RX_PPDU_START_SIG_RATE_SELECT_CCK 1 - -#define RX_PPDU_START_SIG_RATE_OFDM_48 0 -#define RX_PPDU_START_SIG_RATE_OFDM_24 1 -#define RX_PPDU_START_SIG_RATE_OFDM_12 2 -#define RX_PPDU_START_SIG_RATE_OFDM_6 3 -#define RX_PPDU_START_SIG_RATE_OFDM_54 4 -#define RX_PPDU_START_SIG_RATE_OFDM_36 5 -#define RX_PPDU_START_SIG_RATE_OFDM_18 6 -#define RX_PPDU_START_SIG_RATE_OFDM_9 7 - -#define RX_PPDU_START_SIG_RATE_CCK_LP_11 0 -#define RX_PPDU_START_SIG_RATE_CCK_LP_5_5 1 -#define RX_PPDU_START_SIG_RATE_CCK_LP_2 2 -#define RX_PPDU_START_SIG_RATE_CCK_LP_1 3 -#define RX_PPDU_START_SIG_RATE_CCK_SP_11 4 -#define RX_PPDU_START_SIG_RATE_CCK_SP_5_5 5 -#define RX_PPDU_START_SIG_RATE_CCK_SP_2 6 - #define HTT_RX_PPDU_START_PREAMBLE_LEGACY 0x04 #define HTT_RX_PPDU_START_PREAMBLE_HT 0x08 #define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF 0x09 @@ -711,25 +691,6 @@ struct rx_msdu_end { /* No idea what this flag means. It seems to be always set in rate. 
*/ #define RX_PPDU_START_RATE_FLAG BIT(3) -enum rx_ppdu_start_rate { - RX_PPDU_START_RATE_OFDM_48M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_48M, - RX_PPDU_START_RATE_OFDM_24M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_24M, - RX_PPDU_START_RATE_OFDM_12M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_12M, - RX_PPDU_START_RATE_OFDM_6M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_6M, - RX_PPDU_START_RATE_OFDM_54M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_54M, - RX_PPDU_START_RATE_OFDM_36M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_36M, - RX_PPDU_START_RATE_OFDM_18M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_18M, - RX_PPDU_START_RATE_OFDM_9M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_OFDM_9M, - - RX_PPDU_START_RATE_CCK_LP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_11M, - RX_PPDU_START_RATE_CCK_LP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_5_5M, - RX_PPDU_START_RATE_CCK_LP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_2M, - RX_PPDU_START_RATE_CCK_LP_1M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_LP_1M, - RX_PPDU_START_RATE_CCK_SP_11M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_11M, - RX_PPDU_START_RATE_CCK_SP_5_5M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_5_5M, - RX_PPDU_START_RATE_CCK_SP_2M = RX_PPDU_START_RATE_FLAG | ATH10K_HW_RATE_CCK_SP_2M, -}; - struct rx_ppdu_start { struct { u8 pri20_mhz; @@ -994,7 +955,41 @@ struct rx_pkt_end { __le32 info0; /* %RX_PKT_END_INFO0_ */ __le32 phy_timestamp_1; __le32 phy_timestamp_2; - __le32 rx_location_info; /* %RX_LOCATION_INFO_ */ +} __packed; + +#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK 0x00003fff +#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB 0 +#define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK 0x1fff8000 +#define RX_LOCATION_INFO0_RTT_FAC_VHT_LSB 15 +#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_MASK 0xc0000000 +#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_LSB 30 +#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_STATUS BIT(14) +#define RX_LOCATION_INFO0_RTT_FAC_VHT_STATUS BIT(29) + +#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_MASK 0x0000000c +#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_LSB 2 +#define RX_LOCATION_INFO1_PKT_BW_MASK 0x00000030 +#define RX_LOCATION_INFO1_PKT_BW_LSB 4 +#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_MASK 0x0000ff00 +#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_LSB 8 +#define RX_LOCATION_INFO1_RTT_MSC_RATE_MASK 0x000f0000 +#define RX_LOCATION_INFO1_RTT_MSC_RATE_LSB 16 +#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_MASK 0x00300000 +#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_LSB 20 +#define RX_LOCATION_INFO1_TIMING_BACKOFF_MASK 0x07c00000 +#define RX_LOCATION_INFO1_TIMING_BACKOFF_LSB 22 +#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_MASK 0x18000000 +#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_LSB 27 +#define RX_LOCATION_INFO1_RTT_CFR_STATUS BIT(0) +#define RX_LOCATION_INFO1_RTT_CIR_STATUS BIT(1) +#define RX_LOCATION_INFO1_RTT_GI_TYPE BIT(7) +#define RX_LOCATION_INFO1_RTT_MAC_PHY_PHASE BIT(29) +#define RX_LOCATION_INFO1_RTT_TX_DATA_START_X_PHASE BIT(30) +#define RX_LOCATION_INFO1_RX_LOCATION_VALID BIT(31) + +struct rx_location_info { + __le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */ + __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */ } __packed; enum rx_phy_ppdu_end_info0 { @@ -1067,6 +1062,17 @@ struct rx_phy_ppdu_end { struct rx_ppdu_end_qca99x0 { struct rx_pkt_end rx_pkt_end; + __le32 rx_location_info; /* %RX_LOCATION_INFO_ */ + struct rx_phy_ppdu_end rx_phy_ppdu_end; + __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */ + __le32 rx_info; /* 
%RX_PPDU_END_RX_INFO_ */ + __le16 bb_length; + __le16 info1; /* %RX_PPDU_END_INFO1_ */ +} __packed; + +struct rx_ppdu_end_qca9984 { + struct rx_pkt_end rx_pkt_end; + struct rx_location_info rx_location_info; struct rx_phy_ppdu_end rx_phy_ppdu_end; __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */ __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */ @@ -1080,6 +1086,7 @@ struct rx_ppdu_end { struct rx_ppdu_end_qca988x qca988x; struct rx_ppdu_end_qca6174 qca6174; struct rx_ppdu_end_qca99x0 qca99x0; + struct rx_ppdu_end_qca9984 qca9984; } __packed; } __packed; diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c index 4671cfbcd..7d9b0da1b 100644 --- a/drivers/net/wireless/ath/ath10k/spectral.c +++ b/drivers/net/wireless/ath/ath10k/spectral.c @@ -101,9 +101,9 @@ int ath10k_spectral_process_fft(struct ath10k *ar, break; case 80: /* TODO: As experiments with an analogue sender and various - * configuaritions (fft-sizes of 64/128/256 and 20/40/80 Mhz) + * configurations (fft-sizes of 64/128/256 and 20/40/80 Mhz) * show, the particular configuration of 80 MHz/64 bins does - * not match with the other smaples at all. Until the reason + * not match with the other samples at all. Until the reason * for that is found, don't report these samples. */ if (bin_len == 64) diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h index 8e24099fa..aaf53a81e 100644 --- a/drivers/net/wireless/ath/ath10k/targaddrs.h +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h @@ -447,6 +447,9 @@ Fw Mode/SubMode Mask #define QCA988X_BOARD_DATA_SZ 7168 #define QCA988X_BOARD_EXT_DATA_SZ 0 +#define QCA9887_BOARD_DATA_SZ 7168 +#define QCA9887_BOARD_EXT_DATA_SZ 0 + #define QCA6174_BOARD_DATA_SZ 8192 #define QCA6174_BOARD_EXT_DATA_SZ 0 diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c index 576e7c42e..b29a86a26 100644 --- a/drivers/net/wireless/ath/ath10k/txrx.c +++ b/drivers/net/wireless/ath/ath10k/txrx.c @@ -81,10 +81,11 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, skb_cb = ATH10K_SKB_CB(msdu); txq = skb_cb->txq; - artxq = (void *)txq->drv_priv; - if (txq) + if (txq) { + artxq = (void *)txq->drv_priv; artxq->num_fw_queued--; + } ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); ath10k_htt_tx_dec_pending(htt); @@ -117,6 +118,9 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt, ieee80211_tx_status(htt->ar->hw, msdu); /* we do not own the msdu anymore */ + + ath10k_mac_tx_push_pending(ar); + return 0; } @@ -213,6 +217,7 @@ void ath10k_peer_map_event(struct ath10k_htt *htt, ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n", ev->vdev_id, ev->addr, ev->peer_id); + WARN_ON(ar->peer_map[ev->peer_id] && (ar->peer_map[ev->peer_id] != peer)); ar->peer_map[ev->peer_id] = peer; set_bit(ev->peer_id, peer->peer_ids); exit: diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 2c300329e..d2462886b 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1104,6 +1104,7 @@ static struct wmi_pdev_param_map wmi_pdev_param_map = { .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED, }; static struct wmi_pdev_param_map wmi_10x_pdev_param_map = { @@ -1199,6 +1200,7 @@ static struct wmi_pdev_param_map wmi_10x_pdev_param_map = { .wapi_mbssid_offset = 
WMI_PDEV_PARAM_UNSUPPORTED, .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED, }; static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = { @@ -1294,6 +1296,7 @@ static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = { .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED, .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED, .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED, + .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED, }; /* firmware 10.2 specific mappings */ @@ -1550,6 +1553,7 @@ static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = { .wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET, .arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR, .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR, + .enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX, }; static const struct wmi_peer_flags_map wmi_peer_flags_map = { @@ -1822,7 +1826,7 @@ static struct sk_buff * ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) { struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu); - struct ath10k_vif *arvif = (void *)cb->vif->drv_priv; + struct ath10k_vif *arvif; struct wmi_mgmt_tx_cmd *cmd; struct ieee80211_hdr *hdr; struct sk_buff *skb; @@ -1834,10 +1838,12 @@ ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu) hdr = (struct ieee80211_hdr *)msdu->data; fc = le16_to_cpu(hdr->frame_control); - if (cb->vif) + if (cb->vif) { + arvif = (void *)cb->vif->drv_priv; vdev_id = arvif->vdev_id; - else + } else { vdev_id = 0; + } if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control))) return ERR_PTR(-EINVAL); @@ -2920,6 +2926,7 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar, u32 num_pdev_ext_stats; u32 num_vdev_stats; u32 num_peer_stats; + u32 num_bcnflt_stats; u32 stats_id; int i; @@ -2930,6 +2937,7 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar, num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats); num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); num_peer_stats = __le32_to_cpu(ev->num_peer_stats); + num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats); stats_id = __le32_to_cpu(ev->stats_id); for (i = 0; i < num_pdev_stats; i++) { @@ -2970,32 +2978,57 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar, /* fw doesn't implement vdev stats */ for (i = 0; i < num_peer_stats; i++) { - const struct wmi_10_4_peer_extd_stats *src; + const struct wmi_10_4_peer_stats *src; struct ath10k_fw_stats_peer *dst; - int stats_len; - bool extd_peer_stats = !!(stats_id & WMI_10_4_STAT_PEER_EXTD); - - if (extd_peer_stats) - stats_len = sizeof(struct wmi_10_4_peer_extd_stats); - else - stats_len = sizeof(struct wmi_10_4_peer_stats); src = (void *)skb->data; - if (!skb_pull(skb, stats_len)) + if (!skb_pull(skb, sizeof(*src))) return -EPROTO; dst = kzalloc(sizeof(*dst), GFP_ATOMIC); if (!dst) continue; - ath10k_wmi_10_4_pull_peer_stats(&src->common, dst); - /* FIXME: expose 10.4 specific values */ - if (extd_peer_stats) - dst->rx_duration = __le32_to_cpu(src->rx_duration); - + ath10k_wmi_10_4_pull_peer_stats(src, dst); list_add_tail(&dst->list, &stats->peers); } + for (i = 0; i < num_bcnflt_stats; i++) { + const struct wmi_10_4_bss_bcn_filter_stats *src; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + /* FIXME: expose values to userspace + * + * Note: Even though this loop seems to do nothing, it is + * required to parse the following sub-structures properly. 
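+	 * Each skb_pull() above advances the parse offset past one record, which is what the extended peer-stats loop below relies on.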
+ */ + } + + if ((stats_id & WMI_10_4_STAT_PEER_EXTD) == 0) + return 0; + + stats->extended = true; + + for (i = 0; i < num_peer_stats; i++) { + const struct wmi_10_4_peer_extd_stats *src; + struct ath10k_fw_extd_stats_peer *dst; + + src = (void *)skb->data; + if (!skb_pull(skb, sizeof(*src))) + return -EPROTO; + + dst = kzalloc(sizeof(*dst), GFP_ATOMIC); + if (!dst) + continue; + + ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr); + dst->rx_duration = __le32_to_cpu(src->rx_duration); + list_add_tail(&dst->list, &stats->peers_extd); + } + return 0; } @@ -3671,7 +3704,7 @@ void ath10k_wmi_event_dfs(struct ath10k *ar, phyerr->tsf_timestamp, tsf, buf_len); /* Skip event if DFS disabled */ - if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) + if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) return; ATH10K_DFS_STAT_INC(ar, pulses_total); @@ -5253,6 +5286,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb) case WMI_10_4_PEER_STA_KICKOUT_EVENTID: ath10k_wmi_event_peer_sta_kickout(ar, skb); break; + case WMI_10_4_ROAM_EVENTID: + ath10k_wmi_event_roam(ar, skb); + break; case WMI_10_4_HOST_SWBA_EVENTID: ath10k_wmi_event_host_swba(ar, skb); break; @@ -7899,6 +7935,7 @@ static const struct wmi_ops wmi_10_4_ops = { .pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev, .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev, .pull_rdy = ath10k_wmi_op_pull_rdy_ev, + .pull_roam_ev = ath10k_wmi_op_pull_roam_ev, .get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme, .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend, diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 9fdf47ea2..3ef468893 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -3447,6 +3447,7 @@ struct wmi_pdev_param_map { u32 wapi_mbssid_offset; u32 arp_srcaddr; u32 arp_dstaddr; + u32 enable_btcoex; }; #define WMI_PDEV_PARAM_UNSUPPORTED 0 @@ -3760,6 +3761,9 @@ enum wmi_10_4_pdev_param { WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCH, WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR, WMI_10_4_PDEV_PARAM_CUST_TXPOWER_SCALE, + WMI_10_4_PDEV_PARAM_ATF_DYNAMIC_ENABLE, + WMI_10_4_PDEV_PARAM_ATF_SSID_GROUP_POLICY, + WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX, }; struct wmi_pdev_set_param_cmd { @@ -4336,7 +4340,6 @@ struct wmi_10_4_peer_stats { } __packed; struct wmi_10_4_peer_extd_stats { - struct wmi_10_4_peer_stats common; struct wmi_mac_addr peer_macaddr; __le32 inactive_time; __le32 peer_chain_rssi; @@ -4344,6 +4347,19 @@ struct wmi_10_4_peer_extd_stats { __le32 reserved[10]; } __packed; +struct wmi_10_4_bss_bcn_stats { + __le32 vdev_id; + __le32 bss_bcns_dropped; + __le32 bss_bcn_delivered; +} __packed; + +struct wmi_10_4_bss_bcn_filter_stats { + __le32 bcns_dropped; + __le32 bcns_delivered; + __le32 active_filters; + struct wmi_10_4_bss_bcn_stats bss_stats; +} __packed; + struct wmi_10_2_pdev_ext_stats { __le32 rx_rssi_comb; __le32 rx_rssi[4]; diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c index fc47b7098..f23c85176 100644 --- a/drivers/net/wireless/ath/ath5k/pcu.c +++ b/drivers/net/wireless/ath/ath5k/pcu.c @@ -219,8 +219,8 @@ ath5k_hw_get_default_sifs(struct ath5k_hw *ah) sifs = AR5K_INIT_SIFS_QUARTER_RATE; break; case AR5K_BWMODE_DEFAULT: - sifs = AR5K_INIT_SIFS_DEFAULT_BG; default: + sifs = AR5K_INIT_SIFS_DEFAULT_BG; if (channel->band == NL80211_BAND_5GHZ) sifs = AR5K_INIT_SIFS_DEFAULT_A; break; diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c 
index 4e11ba06f..72e2ec677 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -847,8 +847,6 @@ static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy, up(&ar->sem); - vif->sme_state = SME_DISCONNECTED; - return 0; } @@ -859,7 +857,11 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason, struct ath6kl *ar = vif->ar; if (vif->scan_req) { - cfg80211_scan_done(vif->scan_req, true); + struct cfg80211_scan_info info = { + .aborted = true, + }; + + cfg80211_scan_done(vif->scan_req, &info); vif->scan_req = NULL; } @@ -1069,6 +1071,9 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted) { struct ath6kl *ar = vif->ar; + struct cfg80211_scan_info info = { + .aborted = aborted, + }; int i; ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status%s\n", __func__, @@ -1089,7 +1094,7 @@ void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted) } out: - cfg80211_scan_done(vif->scan_req, aborted); + cfg80211_scan_done(vif->scan_req, &info); vif->scan_req = NULL; } @@ -1104,7 +1109,8 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq, cfg80211_chandef_create(&chandef, ieee80211_get_channel(vif->ar->wiphy, freq), - (mode == WMI_11G_HT20) ? + (mode == WMI_11G_HT20 && + ath6kl_band_2ghz.ht_cap.ht_supported) ? NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT); mutex_lock(&vif->wdev.mtx); @@ -2971,6 +2977,7 @@ static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev) ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx); clear_bit(CONNECTED, &vif->flags); + netif_carrier_off(vif->ndev); /* Restore ht setting in firmware */ return ath6kl_restore_htcap(vif); @@ -3614,7 +3621,11 @@ void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready) } if (vif->scan_req) { - cfg80211_scan_done(vif->scan_req, true); + struct cfg80211_scan_info info = { + .aborted = true, + }; + + cfg80211_scan_done(vif->scan_req, &info); vif->scan_req = NULL; } @@ -3870,7 +3881,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar) BIT(NL80211_IFTYPE_P2P_CLIENT); } - if (config_enabled(CONFIG_ATH6KL_REGDOMAIN) && + if (IS_ENABLED(CONFIG_ATH6KL_REGDOMAIN) && test_bit(ATH6KL_FW_CAPABILITY_REGDOMAIN, ar->fw_capabilities)) { wiphy->reg_notifier = ath6kl_cfg80211_reg_notify; ar->wiphy->features |= NL80211_FEATURE_CELL_BASE_REG_HINTS; diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h index 7067b87d0..0bf4592a3 100644 --- a/drivers/net/wireless/ath/ath6kl/core.h +++ b/drivers/net/wireless/ath/ath6kl/core.h @@ -148,7 +148,7 @@ enum ath6kl_fw_capability { /* ratetable is the 2 stream version (max MCS15) */ ATH6KL_FW_CAPABILITY_RATETABLE_MCS15, - /* firmare doesn't support IP checksumming */ + /* firmware doesn't support IP checksumming */ ATH6KL_FW_CAPABILITY_NO_IP_CHECKSUM, /* this needs to be last */ diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c index 40432fe7a..9df41d5e3 100644 --- a/drivers/net/wireless/ath/ath6kl/txrx.c +++ b/drivers/net/wireless/ath/ath6kl/txrx.c @@ -1401,6 +1401,10 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) return; } + pad_before_data_start = + (le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT) + & WMI_DATA_HDR_PAD_BEFORE_DATA_MASK; + /* Get the Power save state of the STA */ if (vif->nw_type == AP_NETWORK) { meta_type = wmi_data_hdr_get_meta(dhdr); @@ -1408,7 +1412,7 @@ void ath6kl_rx(struct htc_target 
*target, struct htc_packet *packet) ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) & WMI_DATA_HDR_PS_MASK); - offset = sizeof(struct wmi_data_hdr); + offset = sizeof(struct wmi_data_hdr) + pad_before_data_start; trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG); switch (meta_type) { @@ -1523,9 +1527,6 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet) seq_no = wmi_data_hdr_get_seqno(dhdr); meta_type = wmi_data_hdr_get_meta(dhdr); dot11_hdr = wmi_data_hdr_get_dot11(dhdr); - pad_before_data_start = - (le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT) - & WMI_DATA_HDR_PAD_BEFORE_DATA_MASK; skb_pull(skb, sizeof(struct wmi_data_hdr)); diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c index 631c3a0c5..b8cf04d11 100644 --- a/drivers/net/wireless/ath/ath6kl/wmi.c +++ b/drivers/net/wireless/ath/ath6kl/wmi.c @@ -2544,8 +2544,7 @@ int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx, s32 nominal_phy = 0; int ret; - if (!((params->user_pri < 8) && - (params->user_pri <= 0x7) && + if (!((params->user_pri <= 0x7) && (up_to_ac[params->user_pri & 0x7] == params->traffic_class) && (params->traffic_direc == UPLINK_TRAFFIC || params->traffic_direc == DNLINK_TRAFFIC || diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c index bd4a1a655..bea6186f7 100644 --- a/drivers/net/wireless/ath/ath9k/ahb.c +++ b/drivers/net/wireless/ath/ath9k/ahb.c @@ -18,7 +18,6 @@ #include <linux/nl80211.h> #include <linux/platform_device.h> -#include <linux/ath9k_platform.h> #include <linux/module.h> #include "ath9k.h" @@ -58,20 +57,9 @@ static void ath_ahb_read_cachesize(struct ath_common *common, int *csz) static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data) { - struct ath_softc *sc = (struct ath_softc *)common->priv; - struct platform_device *pdev = to_platform_device(sc->dev); - struct ath9k_platform_data *pdata; - - pdata = dev_get_platdata(&pdev->dev); - if (off >= (ARRAY_SIZE(pdata->eeprom_data))) { - ath_err(common, - "%s: flash read failed, offset %08x is out of range\n", - __func__, off); - return false; - } - - *data = pdata->eeprom_data[off]; - return true; + ath_err(common, "%s: eeprom data has to be provided externally\n", + __func__); + return false; } static struct ath_bus_ops ath_ahb_bus_ops = { diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c index 53d7445a5..61a9b8504 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c @@ -476,6 +476,7 @@ static void ar9002_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable) static void ar9002_hw_spectral_scan_config(struct ath_hw *ah, struct ath_spec_scan *param) { + u32 repeat_bit; u8 count; if (!param->enabled) { @@ -486,12 +487,15 @@ static void ar9002_hw_spectral_scan_config(struct ath_hw *ah, REG_SET_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_FFT_ENA); REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_ENABLE); + if (AR_SREV_9280(ah)) + repeat_bit = AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT; + else + repeat_bit = AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_KIWI; + if (param->short_repeat) - REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, - AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT); + REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, repeat_bit); else - REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN, - AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT); + REG_CLR_BIT(ah, AR_PHY_SPECTRAL_SCAN, repeat_bit); /* on AR92xx, the highest bit of count will make the chip send * 
spectral samples endlessly. Check if this really was intended, @@ -499,15 +503,25 @@ static void ar9002_hw_spectral_scan_config(struct ath_hw *ah, */ count = param->count; if (param->endless) { - if (AR_SREV_9271(ah)) - count = 0; - else + if (AR_SREV_9280(ah)) count = 0x80; + else + count = 0; } else if (count & 0x80) count = 0x7f; + else if (!count) + count = 1; + + if (AR_SREV_9280(ah)) { + REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_COUNT, count); + } else { + REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_COUNT_KIWI, count); + REG_SET_BIT(ah, AR_PHY_SPECTRAL_SCAN, + AR_PHY_SPECTRAL_SCAN_PHYERR_MASK_SELECT); + } - REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN, - AR_PHY_SPECTRAL_SCAN_COUNT, count); REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN, AR_PHY_SPECTRAL_SCAN_PERIOD, param->period); REG_RMW_FIELD(ah, AR_PHY_SPECTRAL_SCAN, diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h index 9d17a5375..2b58245f7 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h @@ -177,8 +177,11 @@ #define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8 #define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 /* Number of reports, reg 68, bits 16-23*/ #define AR_PHY_SPECTRAL_SCAN_COUNT_S 16 +#define AR_PHY_SPECTRAL_SCAN_COUNT_KIWI 0x0FFF0000 /* Number of reports, reg 68, bits 16-27*/ +#define AR_PHY_SPECTRAL_SCAN_COUNT_KIWI_S 16 #define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 /* Short repeat, reg 68, bit 24*/ -#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 /* Short repeat, reg 68, bit 24*/ +#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_KIWI 0x10000000 /* Short repeat, reg 68, bit 28*/ +#define AR_PHY_SPECTRAL_SCAN_PHYERR_MASK_SELECT 0x40000000 #define AR_PHY_RX_DELAY 0x9914 #define AR_PHY_SEARCH_START_DELAY 0x9918 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 518e649ec..b6f064a8d 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -33,6 +33,7 @@ struct coeff { enum ar9003_cal_types { IQ_MISMATCH_CAL = BIT(0), + TEMP_COMP_CAL = BIT(1), }; static void ar9003_hw_setup_calibration(struct ath_hw *ah, @@ -58,6 +59,12 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah, /* Kick-off cal */ REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL); break; + case TEMP_COMP_CAL: + ath_dbg(common, CALIBRATE, + "starting Temperature Compensation Calibration\n"); + REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_LOCAL); + REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_START); + break; default: ath_err(common, "Invalid calibration type\n"); break; @@ -75,50 +82,51 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah, struct ath9k_cal_list *currCal) { struct ath9k_hw_cal_data *caldata = ah->caldata; - /* Cal is assumed not done until explicitly set below */ - bool iscaldone = false; + const struct ath9k_percal_data *cur_caldata = currCal->calData; /* Calibration in progress. */ if (currCal->calState == CAL_RUNNING) { /* Check to see if it has finished. 
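 * (AR_PHY_TIMING4_DO_CAL is cleared by the hardware once the measurement completes; as long as it reads back as set, the calibration is still running.)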
*/ - if (!(REG_READ(ah, AR_PHY_TIMING4) & AR_PHY_TIMING4_DO_CAL)) { + if (REG_READ(ah, AR_PHY_TIMING4) & AR_PHY_TIMING4_DO_CAL) + return false; + + /* + * Accumulate cal measures for active chains + */ + if (cur_caldata->calCollect) + cur_caldata->calCollect(ah); + ah->cal_samples++; + + if (ah->cal_samples >= cur_caldata->calNumSamples) { + unsigned int i, numChains = 0; + for (i = 0; i < AR9300_MAX_CHAINS; i++) { + if (rxchainmask & (1 << i)) + numChains++; + } + /* - * Accumulate cal measures for active chains + * Process accumulated data */ - currCal->calData->calCollect(ah); - ah->cal_samples++; - - if (ah->cal_samples >= - currCal->calData->calNumSamples) { - unsigned int i, numChains = 0; - for (i = 0; i < AR9300_MAX_CHAINS; i++) { - if (rxchainmask & (1 << i)) - numChains++; - } - - /* - * Process accumulated data - */ - currCal->calData->calPostProc(ah, numChains); + if (cur_caldata->calPostProc) + cur_caldata->calPostProc(ah, numChains); - /* Calibration has finished. */ - caldata->CalValid |= currCal->calData->calType; - currCal->calState = CAL_DONE; - iscaldone = true; - } else { + /* Calibration has finished. */ + caldata->CalValid |= cur_caldata->calType; + currCal->calState = CAL_DONE; + return true; + } else { /* * Set-up collection of another sub-sample until we * get desired number */ ar9003_hw_setup_calibration(ah, currCal); - } } - } else if (!(caldata->CalValid & currCal->calData->calType)) { + } else if (!(caldata->CalValid & cur_caldata->calType)) { /* If current cal is marked invalid in channel, kick it off */ ath9k_hw_reset_calibration(ah, currCal); } - return iscaldone; + return false; } static int ar9003_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan, @@ -315,9 +323,16 @@ static const struct ath9k_percal_data iq_cal_single_sample = { ar9003_hw_iqcalibrate }; +static const struct ath9k_percal_data temp_cal_single_sample = { + TEMP_COMP_CAL, + MIN_CAL_SAMPLES, + PER_MAX_LOG_COUNT, +}; + static void ar9003_hw_init_cal_settings(struct ath_hw *ah) { ah->iq_caldata.calData = &iq_cal_single_sample; + ah->temp_caldata.calData = &temp_cal_single_sample; if (AR_SREV_9300_20_OR_LATER(ah)) { ah->enabled_cals |= TX_IQ_CAL; @@ -325,7 +340,7 @@ static void ar9003_hw_init_cal_settings(struct ath_hw *ah) ah->enabled_cals |= TX_IQ_ON_AGC_CAL; } - ah->supp_cals = IQ_MISMATCH_CAL; + ah->supp_cals = IQ_MISMATCH_CAL | TEMP_COMP_CAL; } #define OFF_UPPER_LT 24 @@ -1374,6 +1389,29 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable) } } +static void ar9003_hw_init_cal_common(struct ath_hw *ah) +{ + struct ath9k_hw_cal_data *caldata = ah->caldata; + + /* Initialize list pointers */ + ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; + + INIT_CAL(&ah->iq_caldata); + INSERT_CAL(ah, &ah->iq_caldata); + + INIT_CAL(&ah->temp_caldata); + INSERT_CAL(ah, &ah->temp_caldata); + + /* Initialize current pointer to first element in list */ + ah->cal_list_curr = ah->cal_list; + + if (ah->cal_list_curr) + ath9k_hw_reset_calibration(ah, ah->cal_list_curr); + + if (caldata) + caldata->CalValid = 0; +} + static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah, struct ath9k_channel *chan) { @@ -1533,21 +1571,7 @@ skip_tx_iqcal: /* Revert chainmask to runtime parameters */ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); - /* Initialize list pointers */ - ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; - - INIT_CAL(&ah->iq_caldata); - INSERT_CAL(ah, &ah->iq_caldata); - ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n"); - - /* 
Initialize current pointer to first element in list */ - ah->cal_list_curr = ah->cal_list; - - if (ah->cal_list_curr) - ath9k_hw_reset_calibration(ah, ah->cal_list_curr); - - if (caldata) - caldata->CalValid = 0; + ar9003_hw_init_cal_common(ah); return true; } @@ -1578,8 +1602,6 @@ static bool do_ar9003_agc_cal(struct ath_hw *ah) static bool ar9003_hw_init_cal_soc(struct ath_hw *ah, struct ath9k_channel *chan) { - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_cal_data *caldata = ah->caldata; bool txiqcal_done = false; bool status = true; bool run_agc_cal = false, sep_iq_cal = false; @@ -1677,21 +1699,7 @@ skip_tx_iqcal: /* Revert chainmask to runtime parameters */ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); - /* Initialize list pointers */ - ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL; - - INIT_CAL(&ah->iq_caldata); - INSERT_CAL(ah, &ah->iq_caldata); - ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n"); - - /* Initialize current pointer to first element in list */ - ah->cal_list_curr = ah->cal_list; - - if (ah->cal_list_curr) - ath9k_hw_reset_calibration(ah, ah->cal_list_curr); - - if (caldata) - caldata->CalValid = 0; + ar9003_hw_init_cal_common(ah); return true; } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index dec1a317a..5bd2cbaf5 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -3202,8 +3202,7 @@ static int ar9300_compress_decision(struct ath_hw *ah, it, length); break; case _CompressBlock: - if (reference == 0) { - } else { + if (reference != 0) { eep = ar9003_eeprom_struct_find_by_id(reference); if (eep == NULL) { ath_dbg(common, EEPROM, @@ -4176,7 +4175,7 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah, if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) && !AR_SREV_9531(ah)) ar9003_hw_internal_regulator_apply(ah); ar9003_hw_apply_tuning_caps(ah); - ar9003_hw_apply_minccapwr_thresh(ah, chan); + ar9003_hw_apply_minccapwr_thresh(ah, is2ghz); ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz); ar9003_hw_thermometer_apply(ah); ar9003_hw_thermo_cal_apply(ah); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index 566da789f..a171dbb29 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -689,13 +689,6 @@ #define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300) #define AR_CH0_TOP_XPABIASLVL_S (AR_SREV_9550(ah) ? 6 : 8) -#define AR_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 : \ - ((AR_SREV_9485(ah) ? 0x1628c : 0x16294))) -#define AR_CH0_THERM_XPABIASLVL_MSB 0x3 -#define AR_CH0_THERM_XPABIASLVL_MSB_S 0 -#define AR_CH0_THERM_XPASHORT2GND 0x4 -#define AR_CH0_THERM_XPASHORT2GND_S 2 - #define AR_SWITCH_TABLE_COM_ALL (0xffff) #define AR_SWITCH_TABLE_COM_ALL_S (0) #define AR_SWITCH_TABLE_COM_AR9462_ALL (0xffffff) @@ -712,15 +705,17 @@ #define AR_SWITCH_TABLE_ALL (0xfff) #define AR_SWITCH_TABLE_ALL_S (0) -#define AR_PHY_65NM_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\ - ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16294 : 0x1628c)) +#define AR_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\ + ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 
0x16294 : 0x1628c)) +#define AR_CH0_THERM_XPABIASLVL_MSB 0x3 +#define AR_CH0_THERM_XPABIASLVL_MSB_S 0 +#define AR_CH0_THERM_XPASHORT2GND 0x4 +#define AR_CH0_THERM_XPASHORT2GND_S 2 -#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000 -#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31 -#define AR_PHY_65NM_CH0_THERM_START 0x20000000 -#define AR_PHY_65NM_CH0_THERM_START_S 29 -#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT 0x0000ff00 -#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT_S 8 +#define AR_CH0_THERM_LOCAL 0x80000000 +#define AR_CH0_THERM_START 0x20000000 +#define AR_CH0_THERM_SAR_ADC_OUT 0x0000ff00 +#define AR_CH0_THERM_SAR_ADC_OUT_S 8 #define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \ (AR_SREV_9462(ah) ? 0x16290 : 0x16284)) diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 93b3793cc..26fc8ecfe 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -637,6 +637,8 @@ struct ath9k_vif_iter_data { int nwds; /* number of WDS vifs */ int nadhocs; /* number of adhoc vifs */ int nocbs; /* number of OCB vifs */ + int nbcnvifs; /* number of beaconing vifs */ + struct ieee80211_vif *primary_beacon_vif; struct ieee80211_vif *primary_sta; }; @@ -685,10 +687,11 @@ struct ath_beacon { }; void ath9k_beacon_tasklet(unsigned long data); -void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif, - u32 changed); +void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *main_vif, + bool beacons); void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif); void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif); +void ath9k_beacon_ensure_primary_slot(struct ath_softc *sc); void ath9k_set_beacon(struct ath_softc *sc); bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif); void ath9k_csa_update(struct ath_softc *sc); diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index 5cf0cd7cb..e36f947e1 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -50,7 +50,7 @@ static void ath9k_beaconq_config(struct ath_softc *sc) txq = sc->tx.txq_map[IEEE80211_AC_BE]; ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be); qi.tqi_aifs = qi_be.tqi_aifs; - if (ah->slottime == ATH9K_SLOT_TIME_20) + if (ah->slottime == 20) qi.tqi_cwmin = 2*qi_be.tqi_cwmin; else qi.tqi_cwmin = 4*qi_be.tqi_cwmin; @@ -209,7 +209,6 @@ void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif) } sc->beacon.bslot[avp->av_bslot] = vif; - sc->nbcnvifs++; ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n", avp->av_bslot); @@ -220,15 +219,12 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif) struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_vif *avp = (void *)vif->drv_priv; struct ath_buf *bf = avp->av_bcbuf; - struct ath_beacon_config *cur_conf = &sc->cur_chan->beacon; ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n", avp->av_bslot); tasklet_disable(&sc->bcon_tasklet); - cur_conf->enable_beacon &= ~BIT(avp->av_bslot); - if (bf && bf->bf_mpdu) { struct sk_buff *skb = bf->bf_mpdu; dma_unmap_single(sc->dev, bf->bf_buf_addr, @@ -240,12 +236,73 @@ void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif) avp->av_bcbuf = NULL; sc->beacon.bslot[avp->av_bslot] = NULL; - sc->nbcnvifs--; list_add_tail(&bf->list, &sc->beacon.bbuf); tasklet_enable(&sc->bcon_tasklet); } +void 
ath9k_beacon_ensure_primary_slot(struct ath_softc *sc) +{ + struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct ieee80211_vif *vif; + struct ath_vif *avp; + s64 tsfadjust; + u32 offset; + int first_slot = ATH_BCBUF; + int slot; + + tasklet_disable(&sc->bcon_tasklet); + + /* Find first taken slot. */ + for (slot = 0; slot < ATH_BCBUF; slot++) { + if (sc->beacon.bslot[slot]) { + first_slot = slot; + break; + } + } + if (first_slot == 0) + goto out; + + /* Re-enumerate all slots, moving them forward. */ + for (slot = 0; slot < ATH_BCBUF; slot++) { + if (slot + first_slot < ATH_BCBUF) { + vif = sc->beacon.bslot[slot + first_slot]; + sc->beacon.bslot[slot] = vif; + + if (vif) { + avp = (void *)vif->drv_priv; + avp->av_bslot = slot; + } + } else { + sc->beacon.bslot[slot] = NULL; + } + } + + vif = sc->beacon.bslot[0]; + if (WARN_ON(!vif)) + goto out; + + /* Get the tsf_adjust value for the new first slot. */ + avp = (void *)vif->drv_priv; + tsfadjust = le64_to_cpu(avp->tsf_adjust); + + ath_dbg(common, CONFIG, + "Adjusting global TSF after beacon slot reassignment: %lld\n", + (signed long long)tsfadjust); + + /* Modify TSF as required and update the HW. */ + avp->chanctx->tsf_val += tsfadjust; + if (sc->cur_chan == avp->chanctx) { + offset = ath9k_hw_get_tsf_offset(&avp->chanctx->tsf_ts, NULL); + ath9k_hw_settsf64(sc->sc_ah, avp->chanctx->tsf_val + offset); + } + + /* The slots' tsf_adjust will be updated by ath9k_beacon_config later. */ + +out: + tasklet_enable(&sc->bcon_tasklet); +} + static int ath9k_beacon_choose_slot(struct ath_softc *sc) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); @@ -274,22 +331,33 @@ static int ath9k_beacon_choose_slot(struct ath_softc *sc) return slot; } -static void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif) +static void ath9k_set_tsfadjust(struct ath_softc *sc, + struct ath_beacon_config *cur_conf) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath_vif *avp = (void *)vif->drv_priv; - struct ath_beacon_config *cur_conf = &avp->chanctx->beacon; - u32 tsfadjust; + s64 tsfadjust; + int slot; - if (avp->av_bslot == 0) - return; + for (slot = 0; slot < ATH_BCBUF; slot++) { + struct ath_vif *avp; - tsfadjust = cur_conf->beacon_interval * avp->av_bslot; - tsfadjust = TU_TO_USEC(tsfadjust) / ATH_BCBUF; - avp->tsf_adjust = cpu_to_le64(tsfadjust); + if (!sc->beacon.bslot[slot]) + continue; - ath_dbg(common, CONFIG, "tsfadjust is: %llu for bslot: %d\n", - (unsigned long long)tsfadjust, avp->av_bslot); + avp = (void *)sc->beacon.bslot[slot]->drv_priv; + + /* tsf_adjust is added to the TSF value. We send out the + * beacon late, so we need to adjust the TSF starting point to be + * later in time (i.e. the theoretical first beacon has a TSF + * of 0 after correction). + */ + tsfadjust = cur_conf->beacon_interval * avp->av_bslot; + tsfadjust = -TU_TO_USEC(tsfadjust) / ATH_BCBUF; + avp->tsf_adjust = cpu_to_le64(tsfadjust); + + ath_dbg(common, CONFIG, "tsfadjust is: %lld for bslot: %d\n", + (signed long long)tsfadjust, avp->av_bslot); + } } bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif) @@ -443,20 +511,28 @@ void ath9k_beacon_tasklet(unsigned long data) * Both nexttbtt and intval have to be in usecs. 
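 * (One TU is 1024 usec; TU_TO_USEC() performs this conversion.)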
*/ static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt, - u32 intval, bool reset_tsf) + u32 intval) { struct ath_hw *ah = sc->sc_ah; ath9k_hw_disable_interrupts(ah); - if (reset_tsf) - ath9k_hw_reset_tsf(ah); ath9k_beaconq_config(sc); ath9k_hw_beaconinit(ah, nexttbtt, intval); + ah->imask |= ATH9K_INT_SWBA; sc->beacon.bmisscnt = 0; ath9k_hw_set_interrupts(ah); ath9k_hw_enable_interrupts(ah); } +static void ath9k_beacon_stop(struct ath_softc *sc) +{ + ath9k_hw_disable_interrupts(sc->sc_ah); + sc->sc_ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); + sc->beacon.bmisscnt = 0; + ath9k_hw_set_interrupts(sc->sc_ah); + ath9k_hw_enable_interrupts(sc->sc_ah); +} + /* * For multi-bss ap support beacons are either staggered evenly over N slots or * burst together. For the former arrange for the SWBA to be delivered for each @@ -468,7 +544,7 @@ static void ath9k_beacon_config_ap(struct ath_softc *sc, struct ath_hw *ah = sc->sc_ah; ath9k_cmn_beacon_config_ap(ah, conf, ATH_BCBUF); - ath9k_beacon_init(sc, conf->nexttbtt, conf->intval, false); + ath9k_beacon_init(sc, conf->nexttbtt, conf->intval); } static void ath9k_beacon_config_sta(struct ath_hw *ah, @@ -497,7 +573,7 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc, ath9k_cmn_beacon_config_adhoc(ah, conf); - ath9k_beacon_init(sc, conf->nexttbtt, conf->intval, conf->ibss_creator); + ath9k_beacon_init(sc, conf->nexttbtt, conf->intval); /* * Set the global 'beacon has been configured' flag for the @@ -507,44 +583,6 @@ static void ath9k_beacon_config_adhoc(struct ath_softc *sc, set_bit(ATH_OP_BEACONS, &common->op_flags); } -static bool ath9k_allow_beacon_config(struct ath_softc *sc, - struct ieee80211_vif *vif) -{ - struct ath_common *common = ath9k_hw_common(sc->sc_ah); - struct ath_vif *avp = (void *)vif->drv_priv; - - if (ath9k_is_chanctx_enabled()) { - /* - * If the VIF is not present in the current channel context, - * then we can't do the usual opmode checks. Allow the - * beacon config for the VIF to be updated in this case and - * return immediately. 
- */ - if (sc->cur_chan != avp->chanctx) - return true; - } - - if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) { - if (vif->type != NL80211_IFTYPE_AP) { - ath_dbg(common, CONFIG, - "An AP interface is already present !\n"); - return false; - } - } - - if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) { - if ((vif->type == NL80211_IFTYPE_STATION) && - test_bit(ATH_OP_BEACONS, &common->op_flags) && - vif != sc->cur_chan->primary_sta) { - ath_dbg(common, CONFIG, - "Beacon already configured for a station interface\n"); - return false; - } - } - - return true; -} - static void ath9k_cache_beacon_config(struct ath_softc *sc, struct ath_chanctx *ctx, struct ieee80211_bss_conf *bss_conf) @@ -580,87 +618,79 @@ static void ath9k_cache_beacon_config(struct ath_softc *sc, if (cur_conf->dtim_period == 0) cur_conf->dtim_period = 1; + ath9k_set_tsfadjust(sc, cur_conf); } -void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif, - u32 changed) +void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *main_vif, + bool beacons) { - struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; - struct ath_hw *ah = sc->sc_ah; - struct ath_common *common = ath9k_hw_common(ah); - struct ath_vif *avp = (void *)vif->drv_priv; - struct ath_chanctx *ctx = avp->chanctx; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = ath9k_hw_common(ah); + struct ath_vif *avp; + struct ath_chanctx *ctx; struct ath_beacon_config *cur_conf; unsigned long flags; + bool enabled; bool skip_beacon = false; - if (!ctx) + if (!beacons) { + clear_bit(ATH_OP_BEACONS, &common->op_flags); + ath9k_beacon_stop(sc); return; + } - cur_conf = &avp->chanctx->beacon; - if (vif->type == NL80211_IFTYPE_AP) - ath9k_set_tsfadjust(sc, vif); - - if (!ath9k_allow_beacon_config(sc, vif)) + if (WARN_ON(!main_vif)) return; - if (vif->type == NL80211_IFTYPE_STATION) { - ath9k_cache_beacon_config(sc, ctx, bss_conf); - if (ctx != sc->cur_chan) - return; + avp = (void *)main_vif->drv_priv; + ctx = avp->chanctx; + cur_conf = &ctx->beacon; + enabled = cur_conf->enable_beacon; + cur_conf->enable_beacon = beacons; + + if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) { + ath9k_cache_beacon_config(sc, ctx, &main_vif->bss_conf); ath9k_set_beacon(sc); set_bit(ATH_OP_BEACONS, &common->op_flags); return; } - /* - * Take care of multiple interfaces when - * enabling/disabling SWBA. - */ - if (changed & BSS_CHANGED_BEACON_ENABLED) { - bool enabled = cur_conf->enable_beacon; - - if (!bss_conf->enable_beacon) { - cur_conf->enable_beacon &= ~BIT(avp->av_bslot); - } else { - cur_conf->enable_beacon |= BIT(avp->av_bslot); - if (!enabled) - ath9k_cache_beacon_config(sc, ctx, bss_conf); - } - } - - if (ctx != sc->cur_chan) - return; + /* Update the beacon configuration. */ + ath9k_cache_beacon_config(sc, ctx, &main_vif->bss_conf); /* * Configure the HW beacon registers only when we have a valid * beacon interval. */ if (cur_conf->beacon_interval) { - /* - * If we are joining an existing IBSS network, start beaconing - * only after a TSF-sync has taken place. Ensure that this - * happens by setting the appropriate flags. + /* Special case to sync the TSF when joining an existing IBSS. + * This is only done if no AP interface is active. + * Note that mac80211 always resets the TSF when creating a new + * IBSS interface. 
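+	 * The PS_BEACON_SYNC and PS_WAIT_FOR_BEACON flags set below hold off local beaconing until a beacon of the joined IBSS has been received and the TSF synced.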
*/ - if ((changed & BSS_CHANGED_IBSS) && !bss_conf->ibss_creator && - bss_conf->enable_beacon) { + if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC && + !enabled && beacons && !main_vif->bss_conf.ibss_creator) { spin_lock_irqsave(&sc->sc_pm_lock, flags); sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; spin_unlock_irqrestore(&sc->sc_pm_lock, flags); skip_beacon = true; - } else { - ath9k_set_beacon(sc); } /* * Do not set the ATH_OP_BEACONS flag for IBSS joiner mode * here, it is done in ath9k_beacon_config_adhoc(). */ - if (cur_conf->enable_beacon && !skip_beacon) + if (beacons && !skip_beacon) { set_bit(ATH_OP_BEACONS, &common->op_flags); - else + ath9k_set_beacon(sc); + } else { clear_bit(ATH_OP_BEACONS, &common->op_flags); + ath9k_beacon_stop(sc); + } + } else { + clear_bit(ATH_OP_BEACONS, &common->op_flags); + ath9k_beacon_stop(sc); } } diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index e56bafcf5..57e26a640 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -960,6 +960,9 @@ void ath_roc_complete(struct ath_softc *sc, enum ath_roc_complete_reason reason) void ath_scan_complete(struct ath_softc *sc, bool abort) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); + struct cfg80211_scan_info info = { + .aborted = abort, + }; if (abort) ath_dbg(common, CHAN_CTX, "HW scan aborted\n"); @@ -969,7 +972,7 @@ void ath_scan_complete(struct ath_softc *sc, bool abort) sc->offchannel.scan_req = NULL; sc->offchannel.scan_vif = NULL; sc->offchannel.state = ATH_OFFCHANNEL_IDLE; - ieee80211_scan_completed(sc->hw, abort); + ieee80211_scan_completed(sc->hw, &info); clear_bit(ATH_OP_SCANNING, &common->op_flags); spin_lock_bh(&sc->chan_lock); if (test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags)) diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c index a8762711a..e2512d5bc 100644 --- a/drivers/net/wireless/ath/ath9k/common-spectral.c +++ b/drivers/net/wireless/ath/ath9k/common-spectral.c @@ -731,7 +731,7 @@ void ath9k_cmn_spectral_scan_trigger(struct ath_common *common, struct ath_hw *ah = spec_priv->ah; u32 rxfilter; - if (config_enabled(CONFIG_ATH9K_TX99)) + if (IS_ENABLED(CONFIG_ATH9K_TX99)) return; if (!ath9k_hw_ops(ah)->spectral_scan_trigger) { @@ -806,7 +806,7 @@ static ssize_t write_file_spec_scan_ctl(struct file *file, char buf[32]; ssize_t len; - if (config_enabled(CONFIG_ATH9K_TX99)) + if (IS_ENABLED(CONFIG_ATH9K_TX99)) return -EOPNOTSUPP; len = min(count, sizeof(buf) - 1); @@ -1072,7 +1072,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = { void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv) { - if (config_enabled(CONFIG_ATH9K_DEBUGFS)) { + if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS)) { relay_close(spec_priv->rfs_chan_spec_scan); spec_priv->rfs_chan_spec_scan = NULL; } diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h index d23737342..f0ab6f995 100644 --- a/drivers/net/wireless/ath/ath9k/common.h +++ b/drivers/net/wireless/ath/ath9k/common.h @@ -50,6 +50,7 @@ #define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024) struct ath_beacon_config { + struct ieee80211_vif *main_vif; int beacon_interval; u16 dtim_period; u16 bmiss_timeout; diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c index d2ff0fc04..7334c9b09 100644 --- a/drivers/net/wireless/ath/ath9k/dynack.c +++ b/drivers/net/wireless/ath/ath9k/dynack.c @@ -280,7 +280,7 @@ 
EXPORT_SYMBOL(ath_dynack_sample_ack_ts); void ath_dynack_node_init(struct ath_hw *ah, struct ath_node *an) { /* ackto = slottime + sifs + air delay */ - u32 ackto = ATH9K_SLOT_TIME_9 + 16 + 64; + u32 ackto = 9 + 16 + 64; struct ath_dynack *da = &ah->dynack; an->ackto = ackto; @@ -315,7 +315,7 @@ EXPORT_SYMBOL(ath_dynack_node_deinit); void ath_dynack_reset(struct ath_hw *ah) { /* ackto = slottime + sifs + air delay */ - u32 ackto = ATH9K_SLOT_TIME_9 + 16 + 64; + u32 ackto = 9 + 16 + 64; struct ath_dynack *da = &ah->dynack; da->lto = jiffies; diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index a794157a1..a449588a8 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -15,6 +15,7 @@ */ #include "hw.h" +#include <linux/ath9k_platform.h> void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val) { @@ -108,26 +109,42 @@ void ath9k_hw_usb_gen_fill_eeprom(struct ath_hw *ah, u16 *eep_data, } } -static bool ath9k_hw_nvram_read_blob(struct ath_hw *ah, u32 off, - u16 *data) +static bool ath9k_hw_nvram_read_array(u16 *blob, size_t blob_size, + off_t offset, u16 *data) { - u16 *blob_data; - - if (off * sizeof(u16) > ah->eeprom_blob->size) + if (offset > blob_size) return false; - blob_data = (u16 *)ah->eeprom_blob->data; - *data = blob_data[off]; + *data = blob[offset]; return true; } +static bool ath9k_hw_nvram_read_pdata(struct ath9k_platform_data *pdata, + off_t offset, u16 *data) +{ + return ath9k_hw_nvram_read_array(pdata->eeprom_data, + ARRAY_SIZE(pdata->eeprom_data), + offset, data); +} + +static bool ath9k_hw_nvram_read_firmware(const struct firmware *eeprom_blob, + off_t offset, u16 *data) +{ + return ath9k_hw_nvram_read_array((u16 *) eeprom_blob->data, + eeprom_blob->size / sizeof(u16), + offset, data); +} + bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data) { struct ath_common *common = ath9k_hw_common(ah); + struct ath9k_platform_data *pdata = ah->dev->platform_data; bool ret; if (ah->eeprom_blob) - ret = ath9k_hw_nvram_read_blob(ah, off, data); + ret = ath9k_hw_nvram_read_firmware(ah->eeprom_blob, off, data); + else if (pdata && !pdata->use_eeprom && pdata->eeprom_data) + ret = ath9k_hw_nvram_read_pdata(pdata, off, data); else ret = common->bus_ops->eeprom_read(common, off, data); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c index e6bcb4c90..2c0e4d26e 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c @@ -45,7 +45,7 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv) * Long slot time : 2x cwmin * Short slot time : 4x cwmin */ - if (ah->slottime == ATH9K_SLOT_TIME_20) + if (ah->slottime == 20) qi.tqi_cwmin = 2*qi_be.tqi_cwmin; else qi.tqi_cwmin = 4*qi_be.tqi_cwmin; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index c148c6c50..b65c1b661 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -678,7 +678,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv, for (i = 0; i < ATH9K_HTC_MAX_BCN_VIF; i++) priv->beacon.bslot[i] = NULL; - priv->beacon.slottime = ATH9K_SLOT_TIME_9; + priv->beacon.slottime = 9; ath9k_cmn_init_channels_rates(common); ath9k_cmn_init_crypto(ah); diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 8b2895f9a..14b13f07c 100644 --- 
a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -454,7 +454,7 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah) if (AR_SREV_9100(ah)) ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX; - ah->slottime = ATH9K_SLOT_TIME_9; + ah->slottime = 9; ah->globaltxtimeout = (u32) -1; ah->power_mode = ATH9K_PM_UNDEFINED; ah->htc_reset_init = true; @@ -471,33 +471,34 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah) ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S); } -static int ath9k_hw_init_macaddr(struct ath_hw *ah) +static void ath9k_hw_init_macaddr(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); - u32 sum; int i; u16 eeval; static const u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW }; - sum = 0; + /* MAC address may already be loaded via ath9k_platform_data */ + if (is_valid_ether_addr(common->macaddr)) + return; + for (i = 0; i < 3; i++) { eeval = ah->eep_ops->get_eeprom(ah, EEP_MAC[i]); - sum += eeval; common->macaddr[2 * i] = eeval >> 8; common->macaddr[2 * i + 1] = eeval & 0xff; } - if (!is_valid_ether_addr(common->macaddr)) { - ath_err(common, - "eeprom contains invalid mac address: %pM\n", - common->macaddr); - random_ether_addr(common->macaddr); - ath_err(common, - "random mac address will be used: %pM\n", - common->macaddr); - } + if (is_valid_ether_addr(common->macaddr)) + return; - return 0; + ath_err(common, "eeprom contains invalid mac address: %pM\n", + common->macaddr); + + random_ether_addr(common->macaddr); + ath_err(common, "random mac address will be used: %pM\n", + common->macaddr); + + return; } static int ath9k_hw_post_init(struct ath_hw *ah) @@ -636,12 +637,7 @@ static int __ath9k_hw_init(struct ath_hw *ah) if (r) return r; - r = ath9k_hw_init_macaddr(ah); - if (r) { - ath_err(common, "Failed to initialize MAC address\n"); - return r; - } - + ath9k_hw_init_macaddr(ah); ath9k_hw_init_hang_checks(ah); common->state = ATH_HW_INITIALIZED; @@ -1832,8 +1828,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, u32 saveLedState; u32 saveDefAntenna; u32 macStaId1; + struct timespec tsf_ts; + u32 tsf_offset; u64 tsf = 0; - s64 usec = 0; int r; bool start_mci_reset = false; bool save_fullsleep = ah->chip_fullsleep; @@ -1877,8 +1874,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B; /* Save TSF before chip reset, a cold reset clears it */ + getrawmonotonic(&tsf_ts); tsf = ath9k_hw_gettsf64(ah); - usec = ktime_to_us(ktime_get_raw()); saveLedState = REG_READ(ah, AR_CFG_LED) & (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL | @@ -1911,8 +1908,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, } /* Restore TSF */ - usec = ktime_to_us(ktime_get_raw()) - usec; - ath9k_hw_settsf64(ah, tsf + usec); + tsf_offset = ath9k_hw_get_tsf_offset(&tsf_ts, NULL); + ath9k_hw_settsf64(ah, tsf + tsf_offset); if (AR_SREV_9280_20_OR_LATER(ah)) REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE); @@ -1932,12 +1929,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, /* * Some AR91xx SoC devices frequently fail to accept TSF writes * right after the chip reset. When that happens, write a new - * value after the initvals have been applied, with an offset - * based on measured time difference + * value after the initvals have been applied. 
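+ * The offset is re-derived from the same tsf_ts snapshot taken before
+ * the reset, so the retried write also covers the time spent so far.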
*/ if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) { - tsf += 1500; - ath9k_hw_settsf64(ah, tsf); + tsf_offset = ath9k_hw_get_tsf_offset(&tsf_ts, NULL); + ath9k_hw_settsf64(ah, tsf + tsf_offset); } ath9k_hw_init_mfp(ah); @@ -2486,6 +2482,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) return -EINVAL; } + ath9k_gpio_cap_init(ah); + if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah) || @@ -2535,8 +2533,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) else pCap->hw_caps &= ~ATH9K_HW_CAP_HT; - ath9k_gpio_cap_init(ah); - if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX; else diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 9cbca1229..2a5d3ad11 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -830,6 +830,7 @@ struct ath_hw { /* Calibration */ u32 supp_cals; struct ath9k_cal_list iq_caldata; + struct ath9k_cal_list temp_caldata; struct ath9k_cal_list adcgain_caldata; struct ath9k_cal_list adcdc_caldata; struct ath9k_cal_list *cal_list; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 2ee862475..cfa3fe82a 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -372,7 +372,7 @@ static void ath9k_init_misc(struct ath_softc *sc) common->last_rssi = ATH_RSSI_DUMMY_MARKER; memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN); - sc->beacon.slottime = ATH9K_SLOT_TIME_9; + sc->beacon.slottime = 9; for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) sc->beacon.bslot[i] = NULL; @@ -512,31 +512,52 @@ static void ath9k_eeprom_release(struct ath_softc *sc) release_firmware(sc->sc_ah->eeprom_blob); } -static int ath9k_init_soc_platform(struct ath_softc *sc) +static int ath9k_init_platform(struct ath_softc *sc) { struct ath9k_platform_data *pdata = sc->dev->platform_data; struct ath_hw *ah = sc->sc_ah; - int ret = 0; + struct ath_common *common = ath9k_hw_common(ah); + int ret; if (!pdata) return 0; + if (!pdata->use_eeprom) { + ah->ah_flags &= ~AH_USE_EEPROM; + ah->gpio_mask = pdata->gpio_mask; + ah->gpio_val = pdata->gpio_val; + ah->led_pin = pdata->led_pin; + ah->is_clk_25mhz = pdata->is_clk_25mhz; + ah->get_mac_revision = pdata->get_mac_revision; + ah->external_reset = pdata->external_reset; + ah->disable_2ghz = pdata->disable_2ghz; + ah->disable_5ghz = pdata->disable_5ghz; + + if (!pdata->endian_check) + ah->ah_flags |= AH_NO_EEP_SWAP; + } + if (pdata->eeprom_name) { ret = ath9k_eeprom_request(sc, pdata->eeprom_name); if (ret) return ret; } + if (pdata->led_active_high) + ah->config.led_active_high = true; + if (pdata->tx_gain_buffalo) ah->config.tx_gain_buffalo = true; - return ret; + if (pdata->macaddr) + ether_addr_copy(common->macaddr, pdata->macaddr); + + return 0; } static int ath9k_init_softc(u16 devid, struct ath_softc *sc, const struct ath_bus_ops *bus_ops) { - struct ath9k_platform_data *pdata = sc->dev->platform_data; struct ath_hw *ah = NULL; struct ath9k_hw_capabilities *pCap; struct ath_common *common; @@ -550,6 +571,8 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, ah->dev = sc->dev; ah->hw = sc->hw; ah->hw_version.devid = devid; + ah->ah_flags |= AH_USE_EEPROM; + ah->led_pin = -1; ah->reg_ops.read = ath9k_ioread32; ah->reg_ops.multi_read = ath9k_multi_ioread32; ah->reg_ops.write = ath9k_iowrite32; @@ -569,22 +592,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, if (!ath9k_is_chanctx_enabled()) 
sc->cur_chan->hw_queue_base = 0; - if (!pdata || pdata->use_eeprom) { - ah->ah_flags |= AH_USE_EEPROM; - sc->sc_ah->led_pin = -1; - } else { - sc->sc_ah->gpio_mask = pdata->gpio_mask; - sc->sc_ah->gpio_val = pdata->gpio_val; - sc->sc_ah->led_pin = pdata->led_pin; - ah->is_clk_25mhz = pdata->is_clk_25mhz; - ah->get_mac_revision = pdata->get_mac_revision; - ah->external_reset = pdata->external_reset; - ah->disable_2ghz = pdata->disable_2ghz; - ah->disable_5ghz = pdata->disable_5ghz; - if (!pdata->endian_check) - ah->ah_flags |= AH_NO_EEP_SWAP; - } - common->ops = &ah->reg_ops; common->bus_ops = bus_ops; common->ps_ops = &ath9k_ps_ops; @@ -600,7 +607,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, */ ath9k_init_pcoem_platform(sc); - ret = ath9k_init_soc_platform(sc); + ret = ath9k_init_platform(sc); if (ret) return ret; @@ -646,9 +653,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, if (ret) goto err_hw; - if (pdata && pdata->macaddr) - memcpy(common->macaddr, pdata->macaddr, ETH_ALEN); - ret = ath9k_init_queues(sc); if (ret) goto err_queues; @@ -839,7 +843,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | NL80211_FEATURE_P2P_GO_CTWIN; - if (!config_enabled(CONFIG_ATH9K_TX99)) { + if (!IS_ENABLED(CONFIG_ATH9K_TX99)) { hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_CLIENT) | diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index 7fbf7f965..3bab01435 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -65,10 +65,6 @@ #define INIT_SSH_RETRY 32 #define INIT_SLG_RETRY 32 -#define ATH9K_SLOT_TIME_6 6 -#define ATH9K_SLOT_TIME_9 9 -#define ATH9K_SLOT_TIME_20 20 - #define ATH9K_TXERR_XRETRY 0x01 #define ATH9K_TXERR_FILT 0x02 #define ATH9K_TXERR_FIFO 0x04 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 4b59a4c15..7cb65c303 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -915,6 +915,22 @@ static bool ath9k_uses_beacons(int type) } } +static void ath9k_vif_iter_set_beacon(struct ath9k_vif_iter_data *iter_data, + struct ieee80211_vif *vif) +{ + /* Use the first (configured) interface, but preferring AP interfaces.
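+ * An AP vif replaces an earlier non-AP choice; once an AP vif has
+ * been chosen it is never replaced.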
*/ + if (!iter_data->primary_beacon_vif) { + iter_data->primary_beacon_vif = vif; + } else { + if (iter_data->primary_beacon_vif->type != NL80211_IFTYPE_AP && + vif->type == NL80211_IFTYPE_AP) + iter_data->primary_beacon_vif = vif; + } + + iter_data->beacons = true; + iter_data->nbcnvifs += 1; +} + static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data, u8 *mac, struct ieee80211_vif *vif) { @@ -931,11 +947,13 @@ static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data, } if (!vif->bss_conf.use_short_slot) - iter_data->slottime = ATH9K_SLOT_TIME_20; + iter_data->slottime = 20; switch (vif->type) { case NL80211_IFTYPE_AP: iter_data->naps++; + if (vif->bss_conf.enable_beacon) + ath9k_vif_iter_set_beacon(iter_data, vif); break; case NL80211_IFTYPE_STATION: iter_data->nstations++; @@ -948,12 +966,12 @@ static void ath9k_vif_iter(struct ath9k_vif_iter_data *iter_data, case NL80211_IFTYPE_ADHOC: iter_data->nadhocs++; if (vif->bss_conf.enable_beacon) - iter_data->beacons = true; + ath9k_vif_iter_set_beacon(iter_data, vif); break; case NL80211_IFTYPE_MESH_POINT: iter_data->nmeshes++; if (vif->bss_conf.enable_beacon) - iter_data->beacons = true; + ath9k_vif_iter_set_beacon(iter_data, vif); break; case NL80211_IFTYPE_WDS: iter_data->nwds++; @@ -1004,7 +1022,7 @@ void ath9k_calculate_iter_data(struct ath_softc *sc, */ memset(iter_data, 0, sizeof(*iter_data)); eth_broadcast_addr(iter_data->mask); - iter_data->slottime = ATH9K_SLOT_TIME_9; + iter_data->slottime = 9; list_for_each_entry(avp, &ctx->vifs, list) ath9k_vif_iter(iter_data, avp->vif->addr, avp->vif); @@ -1066,7 +1084,7 @@ static void ath9k_set_offchannel_state(struct ath_softc *sc) ah->opmode = vif->type; ah->imask &= ~ATH9K_INT_SWBA; ah->imask &= ~ATH9K_INT_TSFOOR; - ah->slottime = ATH9K_SLOT_TIME_9; + ah->slottime = 9; ath_hw_setbssidmask(common); ath9k_hw_setopmode(ah); @@ -1086,7 +1104,6 @@ void ath9k_calculate_summary_state(struct ath_softc *sc, struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ath9k_vif_iter_data iter_data; - struct ath_beacon_config *cur_conf; ath_chanctx_check_active(sc, ctx); @@ -1108,13 +1125,12 @@ void ath9k_calculate_summary_state(struct ath_softc *sc, ath_hw_setbssidmask(common); if (iter_data.naps > 0) { - cur_conf = &ctx->beacon; ath9k_hw_set_tsfadjust(ah, true); ah->opmode = NL80211_IFTYPE_AP; - if (cur_conf->enable_beacon) - iter_data.beacons = true; } else { ath9k_hw_set_tsfadjust(ah, false); + if (iter_data.beacons) + ath9k_beacon_ensure_primary_slot(sc); if (iter_data.nmeshes) ah->opmode = NL80211_IFTYPE_MESH_POINT; @@ -1139,11 +1155,11 @@ void ath9k_calculate_summary_state(struct ath_softc *sc, ctx->switch_after_beacon = true; } - ah->imask &= ~ATH9K_INT_SWBA; if (ah->opmode == NL80211_IFTYPE_STATION) { bool changed = (iter_data.primary_sta != ctx->primary_sta); if (iter_data.primary_sta) { + iter_data.primary_beacon_vif = iter_data.primary_sta; iter_data.beacons = true; ath9k_set_assoc_state(sc, iter_data.primary_sta, changed); @@ -1156,16 +1172,12 @@ void ath9k_calculate_summary_state(struct ath_softc *sc, if (ath9k_hw_mci_is_enabled(sc->sc_ah)) ath9k_mci_update_wlan_channels(sc, true); } - } else if (iter_data.beacons) { - ah->imask |= ATH9K_INT_SWBA; } + sc->nbcnvifs = iter_data.nbcnvifs; + ath9k_beacon_config(sc, iter_data.primary_beacon_vif, + iter_data.beacons); ath9k_hw_set_interrupts(ah); - if (iter_data.beacons) - set_bit(ATH_OP_BEACONS, &common->op_flags); - else - clear_bit(ATH_OP_BEACONS, &common->op_flags); - if (ah->slottime != 
iter_data.slottime) { ah->slottime = iter_data.slottime; ath9k_hw_init_global_settings(ah); @@ -1244,7 +1256,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, mutex_lock(&sc->mutex); - if (config_enabled(CONFIG_ATH9K_TX99)) { + if (IS_ENABLED(CONFIG_ATH9K_TX99)) { if (sc->cur_chan->nvifs >= 1) { mutex_unlock(&sc->mutex); return -EOPNOTSUPP; @@ -1294,7 +1306,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw, mutex_lock(&sc->mutex); - if (config_enabled(CONFIG_ATH9K_TX99)) { + if (IS_ENABLED(CONFIG_ATH9K_TX99)) { mutex_unlock(&sc->mutex); return -EOPNOTSUPP; } @@ -1354,7 +1366,7 @@ static void ath9k_enable_ps(struct ath_softc *sc) struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); - if (config_enabled(CONFIG_ATH9K_TX99)) + if (IS_ENABLED(CONFIG_ATH9K_TX99)) return; sc->ps_enabled = true; @@ -1373,7 +1385,7 @@ static void ath9k_disable_ps(struct ath_softc *sc) struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); - if (config_enabled(CONFIG_ATH9K_TX99)) + if (IS_ENABLED(CONFIG_ATH9K_TX99)) return; sc->ps_enabled = false; @@ -1782,9 +1794,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, if ((changed & BSS_CHANGED_BEACON_ENABLED) || (changed & BSS_CHANGED_BEACON_INT) || (changed & BSS_CHANGED_BEACON_INFO)) { - ath9k_beacon_config(sc, vif, changed); - if (changed & BSS_CHANGED_BEACON_ENABLED) - ath9k_calculate_summary_state(sc, avp->chanctx); + ath9k_calculate_summary_state(sc, avp->chanctx); } if ((avp->chanctx == sc->cur_chan) && @@ -1793,6 +1803,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, slottime = 9; else slottime = 20; + if (vif->type == NL80211_IFTYPE_AP) { /* * Defer update, so that connected stations can adjust @@ -1828,11 +1839,19 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw, static u64 ath9k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath_softc *sc = hw->priv; + struct ath_vif *avp = (void *)vif->drv_priv; u64 tsf; mutex_lock(&sc->mutex); ath9k_ps_wakeup(sc); - tsf = ath9k_hw_gettsf64(sc->sc_ah); + /* Get current TSF either from HW or kernel time. 
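+ * When the vif's context is not the one currently active in hardware,
+ * the TSF is extrapolated in software: the stored tsf_val plus the
+ * raw-monotonic time elapsed since the tsf_ts snapshot.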
*/ + if (sc->cur_chan == avp->chanctx) { + tsf = ath9k_hw_gettsf64(sc->sc_ah); + } else { + tsf = sc->cur_chan->tsf_val + + ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL); + } + tsf += le64_to_cpu(avp->tsf_adjust); ath9k_ps_restore(sc); mutex_unlock(&sc->mutex); @@ -1844,10 +1863,15 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf) { struct ath_softc *sc = hw->priv; + struct ath_vif *avp = (void *)vif->drv_priv; mutex_lock(&sc->mutex); ath9k_ps_wakeup(sc); - ath9k_hw_settsf64(sc->sc_ah, tsf); + tsf -= le64_to_cpu(avp->tsf_adjust); + getrawmonotonic(&avp->chanctx->tsf_ts); + if (sc->cur_chan == avp->chanctx) + ath9k_hw_settsf64(sc->sc_ah, tsf); + avp->chanctx->tsf_val = tsf; ath9k_ps_restore(sc); mutex_unlock(&sc->mutex); } @@ -1855,11 +1879,15 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw, static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct ath_softc *sc = hw->priv; + struct ath_vif *avp = (void *)vif->drv_priv; mutex_lock(&sc->mutex); ath9k_ps_wakeup(sc); - ath9k_hw_reset_tsf(sc->sc_ah); + getrawmonotonic(&avp->chanctx->tsf_ts); + if (sc->cur_chan == avp->chanctx) + ath9k_hw_reset_tsf(sc->sc_ah); + avp->chanctx->tsf_val = 0; ath9k_ps_restore(sc); mutex_unlock(&sc->mutex); @@ -1931,7 +1959,7 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx, struct ieee80211_channel *chan; int pos; - if (config_enabled(CONFIG_ATH9K_TX99)) + if (IS_ENABLED(CONFIG_ATH9K_TX99)) return -EOPNOTSUPP; spin_lock_bh(&common->cc_lock); @@ -1981,7 +2009,7 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, struct ath_softc *sc = hw->priv; struct ath_hw *ah = sc->sc_ah; - if (config_enabled(CONFIG_ATH9K_TX99)) + if (IS_ENABLED(CONFIG_ATH9K_TX99)) return; mutex_lock(&sc->mutex); diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 7cdaf40c3..0dd454acf 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -19,7 +19,6 @@ #include <linux/nl80211.h> #include <linux/pci.h> #include <linux/pci-aspm.h> -#include <linux/ath9k_platform.h> #include <linux/module.h> #include "ath9k.h" @@ -786,35 +785,21 @@ static void ath_pci_read_cachesize(struct ath_common *common, int *csz) static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data) { - struct ath_softc *sc = (struct ath_softc *) common->priv; - struct ath9k_platform_data *pdata = sc->dev->platform_data; - - if (pdata && !pdata->use_eeprom) { - if (off >= (ARRAY_SIZE(pdata->eeprom_data))) { - ath_err(common, - "%s: eeprom read failed, offset %08x is out of range\n", - __func__, off); - } - - *data = pdata->eeprom_data[off]; - } else { - struct ath_hw *ah = (struct ath_hw *) common->ah; - - common->ops->read(ah, AR5416_EEPROM_OFFSET + - (off << AR5416_EEPROM_S)); - - if (!ath9k_hw_wait(ah, - AR_EEPROM_STATUS_DATA, - AR_EEPROM_STATUS_DATA_BUSY | - AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0, - AH_WAIT_TIMEOUT)) { - return false; - } - - *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA), - AR_EEPROM_STATUS_DATA_VAL); + struct ath_hw *ah = (struct ath_hw *) common->ah; + + common->ops->read(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S)); + + if (!ath9k_hw_wait(ah, + AR_EEPROM_STATUS_DATA, + AR_EEPROM_STATUS_DATA_BUSY | + AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0, + AH_WAIT_TIMEOUT)) { + return false; } + *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA), + AR_EEPROM_STATUS_DATA_VAL); + return true; } diff --git a/drivers/net/wireless/ath/ath9k/recv.c 
b/drivers/net/wireless/ath/ath9k/recv.c index 32160fca8..669734252 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -377,7 +377,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc) struct ath_common *common = ath9k_hw_common(sc->sc_ah); u32 rfilt; - if (config_enabled(CONFIG_ATH9K_TX99)) + if (IS_ENABLED(CONFIG_ATH9K_TX99)) return 0; rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c index ac4781f37..16aca9e28 100644 --- a/drivers/net/wireless/ath/ath9k/tx99.c +++ b/drivers/net/wireless/ath/ath9k/tx99.c @@ -132,7 +132,6 @@ static int ath9k_tx99_init(struct ath_softc *sc) ath9k_ps_wakeup(sc); ath9k_hw_disable_interrupts(ah); - atomic_set(&ah->intr_ref_cnt, -1); ath_drain_all_txq(sc); ath_stoprecv(sc); @@ -266,7 +265,7 @@ static const struct file_operations fops_tx99_power = { void ath9k_tx99_init_debug(struct ath_softc *sc) { - if (!AR_SREV_9300_20_OR_LATER(sc->sc_ah)) + if (!AR_SREV_9280_20_OR_LATER(sc->sc_ah)) return; debugfs_create_file("tx99", S_IRUSR | S_IWUSR, diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig index 1a796e5f6..2e34baeaf 100644 --- a/drivers/net/wireless/ath/carl9170/Kconfig +++ b/drivers/net/wireless/ath/carl9170/Kconfig @@ -5,12 +5,10 @@ config CARL9170 select FW_LOADER select CRC32 help - This is another driver for the Atheros "otus" 802.11n USB devices. + This is the mainline driver for the Atheros "otus" 802.11n USB devices. - This driver provides more features than the original, - but it needs a special firmware (carl9170-1.fw) to do that. - - The firmware can be downloaded from our wiki here: + It needs a special firmware (carl9170-1.fw), which can be downloaded + from our wiki here: <http://wireless.kernel.org/en/users/Drivers/carl9170> If you choose to build a module, it'll be called carl9170. 
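The recurring config_enabled() -> IS_ENABLED() conversions in this series are mechanical, but the macro they converge on deserves a note: IS_ENABLED(CONFIG_FOO) is a compile-time 0/1 constant that is true for both built-in (=y) and modular (=m) options, so it can sit in an ordinary if () while the compiler still type-checks the branch it later discards. Below is a self-contained sketch of the preprocessor trick -- an approximation for illustration, not a verbatim copy of the kernel's include/linux/kconfig.h:

/*
 * Kconfig defines CONFIG_FOO as 1 for built-in code, CONFIG_FOO_MODULE
 * as 1 for modules, and leaves both undefined otherwise.
 */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val

/*
 * If "val" expands to 1, token pasting yields __ARG_PLACEHOLDER_1,
 * which expands to "0," and shifts the literal 1 into the second
 * argument slot; any other token is junk glued to the 1, so the
 * trailing 0 is selected instead.
 */
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

#define IS_BUILTIN(option) __is_defined(option)
#define IS_MODULE(option) __is_defined(option##_MODULE)
#define IS_ENABLED(option) (IS_BUILTIN(option) || IS_MODULE(option))

/* Usage as in the hunks above; the condition folds to a constant: */
/* if (!IS_ENABLED(CONFIG_ATH9K_TX99)) { ... } */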
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c index 2303ef962..2f8136d50 100644 --- a/drivers/net/wireless/ath/dfs_pattern_detector.c +++ b/drivers/net/wireless/ath/dfs_pattern_detector.c @@ -352,7 +352,7 @@ dfs_pattern_detector_init(struct ath_common *common, { struct dfs_pattern_detector *dpd; - if (!config_enabled(CONFIG_CFG80211_CERTIFICATION_ONUS)) + if (!IS_ENABLED(CONFIG_CFG80211_CERTIFICATION_ONUS)) return NULL; dpd = kmalloc(sizeof(*dpd), GFP_KERNEL); diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index 7e15ed9ed..f85060377 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -116,7 +116,7 @@ static const struct ieee80211_regdomain ath_world_regdom_67_68_6A_6C = { static bool dynamic_country_user_possible(struct ath_regulatory *reg) { - if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING)) + if (IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING)) return true; switch (reg->country_code) { @@ -188,7 +188,7 @@ static bool dynamic_country_user_possible(struct ath_regulatory *reg) static bool ath_reg_dyn_country_user_allow(struct ath_regulatory *reg) { - if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS)) + if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS)) return false; if (!dynamic_country_user_possible(reg)) return false; diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 8643801f3..231fd022f 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -35,26 +35,27 @@ void *wcn36xx_dxe_get_next_bd(struct wcn36xx *wcn, bool is_low) return ch->head_blk_ctl->bd_cpu_addr; } +static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data) +{ + wcn36xx_dbg(WCN36XX_DBG_DXE, + "wcn36xx_ccu_write_register: addr=%x, data=%x\n", + addr, data); + + writel(data, wcn->ccu_base + addr); +} + static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data) { wcn36xx_dbg(WCN36XX_DBG_DXE, "wcn36xx_dxe_write_register: addr=%x, data=%x\n", addr, data); - writel(data, wcn->mmio + addr); + writel(data, wcn->dxe_base + addr); } -#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data) \ -do { \ - if (wcn->chip_version == WCN36XX_CHIP_3680) \ - wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \ - else \ - wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \ -} while (0) \ - static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data) { - *data = readl(wcn->mmio + addr); + *data = readl(wcn->dxe_base + addr); wcn36xx_dbg(WCN36XX_DBG_DXE, "wcn36xx_dxe_read_register: addr=%x, data=%x\n", @@ -701,9 +702,13 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn) reg_data = WCN36XX_DXE_REG_RESET; wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data); - /* Setting interrupt path */ - reg_data = WCN36XX_DXE_CCU_INT; - wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data); + /* Select channels for rx avail and xfer done interrupts... 
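+ * The two 16-bit halves of the select register carry the DXE channel
+ * masks: channels 1 and 3 land in the upper halfword, channels 0 and 4
+ * in the lower one.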
*/ + reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 | + WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK; + if (wcn->is_pronto) + wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data); + else + wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data); /***************************************/ /* Init descriptors for TX LOW channel */ diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h index 3eca4f959..c012e8077 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.h +++ b/drivers/net/wireless/ath/wcn36xx/dxe.h @@ -28,11 +28,10 @@ H2H_TEST_RX_TX = DMA2 */ /* DXE registers */ -#define WCN36XX_DXE_MEM_REG 0x202000 +#define WCN36XX_DXE_MEM_REG 0 -#define WCN36XX_DXE_CCU_INT 0xA0011 -#define WCN36XX_DXE_REG_CCU_INT_3660 0x200b10 -#define WCN36XX_DXE_REG_CCU_INT_3680 0x2050dc +#define WCN36XX_CCU_DXE_INT_SELECT_RIVA 0x310 +#define WCN36XX_CCU_DXE_INT_SELECT_PRONTO 0x10dc /* TODO This must calculated properly but not hardcoded */ #define WCN36XX_DXE_CTRL_TX_L 0x328a44 diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h index 658bfb8ba..4f87ef1e1 100644 --- a/drivers/net/wireless/ath/wcn36xx/hal.h +++ b/drivers/net/wireless/ath/wcn36xx/hal.h @@ -4123,7 +4123,7 @@ struct wcn36xx_hal_update_scan_params_req { /* Update scan params - sent from host to PNO to be used during PNO * scanningx */ -struct update_scan_params_req_ex { +struct wcn36xx_hal_update_scan_params_req_ex { struct wcn36xx_hal_msg_header header; @@ -4151,7 +4151,7 @@ struct update_scan_params_req_ex { /* Cb State */ enum phy_chan_bond_state state; -}; +} __packed; /* Update scan params - sent from host to PNO to be used during PNO * scanningx */ diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 8e2c5ff7a..22e584246 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -19,6 +19,8 @@ #include <linux/module.h> #include <linux/firmware.h> #include <linux/platform_device.h> +#include <linux/of_address.h> +#include <linux/of_device.h> #include "wcn36xx.h" unsigned int wcn36xx_dbg_mask; @@ -259,17 +261,6 @@ static void wcn36xx_feat_caps_info(struct wcn36xx *wcn) } } -static void wcn36xx_detect_chip_version(struct wcn36xx *wcn) -{ - if (get_feat_caps(wcn->fw_feat_caps, DOT11AC)) { - wcn36xx_info("Chip is 3680\n"); - wcn->chip_version = WCN36XX_CHIP_3680; - } else { - wcn36xx_info("Chip is 3660\n"); - wcn->chip_version = WCN36XX_CHIP_3660; - } -} - static int wcn36xx_start(struct ieee80211_hw *hw) { struct wcn36xx *wcn = hw->priv; @@ -324,9 +315,6 @@ static int wcn36xx_start(struct ieee80211_hw *hw) wcn36xx_feat_caps_info(wcn); } - wcn36xx_detect_chip_version(wcn); - wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_ENABLE_MC_ADDR_LIST, 1); - /* DMA channel initialization */ ret = wcn36xx_dxe_init(wcn); if (ret) { @@ -1064,7 +1052,11 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn) static int wcn36xx_platform_get_resources(struct wcn36xx *wcn, struct platform_device *pdev) { + struct device_node *mmio_node; struct resource *res; + int index; + int ret; + /* Set TX IRQ */ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "wcnss_wlantx_irq"); @@ -1083,19 +1075,40 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn, } wcn->rx_irq = res->start; - /* Map the memory */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "wcnss_mmio"); - if (!res) { - wcn36xx_err("failed to get 
mmio\n"); - return -ENOENT; + mmio_node = of_parse_phandle(pdev->dev.parent->of_node, "qcom,mmio", 0); + if (!mmio_node) { + wcn36xx_err("failed to acquire qcom,mmio reference\n"); + return -EINVAL; } - wcn->mmio = ioremap(res->start, resource_size(res)); - if (!wcn->mmio) { - wcn36xx_err("failed to map io memory\n"); - return -ENOMEM; + + wcn->is_pronto = !!of_device_is_compatible(mmio_node, "qcom,pronto"); + + /* Map the CCU memory */ + index = of_property_match_string(mmio_node, "reg-names", "ccu"); + wcn->ccu_base = of_iomap(mmio_node, index); + if (!wcn->ccu_base) { + wcn36xx_err("failed to map ccu memory\n"); + ret = -ENOMEM; + goto put_mmio_node; } + + /* Map the DXE memory */ + index = of_property_match_string(mmio_node, "reg-names", "dxe"); + wcn->dxe_base = of_iomap(mmio_node, index); + if (!wcn->dxe_base) { + wcn36xx_err("failed to map dxe memory\n"); + ret = -ENOMEM; + goto unmap_ccu; + } + + of_node_put(mmio_node); return 0; + +unmap_ccu: + iounmap(wcn->ccu_base); +put_mmio_node: + of_node_put(mmio_node); + return ret; } static int wcn36xx_probe(struct platform_device *pdev) @@ -1138,7 +1151,8 @@ static int wcn36xx_probe(struct platform_device *pdev) return 0; out_unmap: - iounmap(wcn->mmio); + iounmap(wcn->ccu_base); + iounmap(wcn->dxe_base); out_wq: ieee80211_free_hw(hw); out_err: @@ -1154,7 +1168,8 @@ static int wcn36xx_remove(struct platform_device *pdev) mutex_destroy(&wcn->hal_mutex); ieee80211_unregister_hw(hw); - iounmap(wcn->mmio); + iounmap(wcn->dxe_base); + iounmap(wcn->ccu_base); ieee80211_free_hw(hw); return 0; diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c index 06c5ddcd3..f93299107 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.c +++ b/drivers/net/wireless/ath/wcn36xx/smd.c @@ -674,22 +674,25 @@ static int wcn36xx_smd_update_scan_params_rsp(void *buf, size_t len) return 0; } -int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn) +int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, + u8 *channels, size_t channel_count) { - struct wcn36xx_hal_update_scan_params_req msg_body; + struct wcn36xx_hal_update_scan_params_req_ex msg_body; int ret = 0; mutex_lock(&wcn->hal_mutex); INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ); - msg_body.dot11d_enabled = 0; - msg_body.dot11d_resolved = 0; - msg_body.channel_count = 26; + msg_body.dot11d_enabled = false; + msg_body.dot11d_resolved = true; + + msg_body.channel_count = channel_count; + memcpy(msg_body.channels, channels, channel_count); msg_body.active_min_ch_time = 60; msg_body.active_max_ch_time = 120; msg_body.passive_min_ch_time = 60; msg_body.passive_max_ch_time = 110; - msg_body.state = 0; + msg_body.state = PHY_SINGLE_CHANNEL_CENTERED; PREPARE_HAL_BUF(wcn->hal_buf, msg_body); @@ -2226,17 +2229,12 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len) case WCN36XX_HAL_COEX_IND: case WCN36XX_HAL_AVOID_FREQ_RANGE_IND: + case WCN36XX_HAL_DEL_BA_IND: case WCN36XX_HAL_OTA_TX_COMPL_IND: case WCN36XX_HAL_MISSED_BEACON_IND: case WCN36XX_HAL_DELETE_STA_CONTEXT_IND: - msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL); - if (!msg_ind) - goto nomem; - msg_ind->msg_len = len; - msg_ind->msg = kmemdup(buf, len, GFP_KERNEL); - if (!msg_ind->msg) { - kfree(msg_ind); -nomem: + msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_KERNEL); + if (!msg_ind) { /* * FIXME: Do something smarter then just * printing an error. 
@@ -2245,10 +2243,14 @@ nomem: msg_header->msg_type); break; } - mutex_lock(&wcn->hal_ind_mutex); + + msg_ind->msg_len = len; + memcpy(msg_ind->msg, buf, len); + + spin_lock(&wcn->hal_ind_lock); list_add_tail(&msg_ind->list, &wcn->hal_ind_queue); queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work); - mutex_unlock(&wcn->hal_ind_mutex); + spin_unlock(&wcn->hal_ind_lock); wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n"); break; default: @@ -2262,8 +2264,9 @@ static void wcn36xx_ind_smd_work(struct work_struct *work) container_of(work, struct wcn36xx, hal_ind_work); struct wcn36xx_hal_msg_header *msg_header; struct wcn36xx_hal_ind_msg *hal_ind_msg; + unsigned long flags; - mutex_lock(&wcn->hal_ind_mutex); + spin_lock_irqsave(&wcn->hal_ind_lock, flags); hal_ind_msg = list_first_entry(&wcn->hal_ind_queue, struct wcn36xx_hal_ind_msg, @@ -2273,6 +2276,7 @@ static void wcn36xx_ind_smd_work(struct work_struct *work) switch (msg_header->msg_type) { case WCN36XX_HAL_COEX_IND: + case WCN36XX_HAL_DEL_BA_IND: case WCN36XX_HAL_AVOID_FREQ_RANGE_IND: break; case WCN36XX_HAL_OTA_TX_COMPL_IND: @@ -2295,9 +2299,8 @@ static void wcn36xx_ind_smd_work(struct work_struct *work) msg_header->msg_type); } list_del(wcn->hal_ind_queue.next); - kfree(hal_ind_msg->msg); + spin_unlock_irqrestore(&wcn->hal_ind_lock, flags); kfree(hal_ind_msg); - mutex_unlock(&wcn->hal_ind_mutex); } int wcn36xx_smd_open(struct wcn36xx *wcn) { @@ -2310,7 +2313,7 @@ int wcn36xx_smd_open(struct wcn36xx *wcn) } INIT_WORK(&wcn->hal_ind_work, wcn36xx_ind_smd_work); INIT_LIST_HEAD(&wcn->hal_ind_queue); - mutex_init(&wcn->hal_ind_mutex); + spin_lock_init(&wcn->hal_ind_lock); ret = wcn->ctrl_ops->open(wcn, wcn36xx_smd_rsp_process); if (ret) { @@ -2330,5 +2333,4 @@ void wcn36xx_smd_close(struct wcn36xx *wcn) { wcn->ctrl_ops->close(); destroy_workqueue(wcn->hal_ind_wq); - mutex_destroy(&wcn->hal_ind_mutex); } diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h index d93e3fd73..df80cbbd9 100644 --- a/drivers/net/wireless/ath/wcn36xx/smd.h +++ b/drivers/net/wireless/ath/wcn36xx/smd.h @@ -46,8 +46,8 @@ struct wcn36xx_fw_msg_status_rsp { struct wcn36xx_hal_ind_msg { struct list_head list; - u8 *msg; size_t msg_len; + u8 msg[]; }; struct wcn36xx; @@ -63,7 +63,7 @@ int wcn36xx_smd_start_scan(struct wcn36xx *wcn); int wcn36xx_smd_end_scan(struct wcn36xx *wcn); int wcn36xx_smd_finish_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode); -int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn); +int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t channel_count); int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif); int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr); int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index); diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index 051fb3338..a9a721b6b 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -193,7 +193,7 @@ struct wcn36xx { u8 fw_minor; u8 fw_major; u32 fw_feat_caps[WCN36XX_HAL_CAPS_SIZE]; - u32 chip_version; + bool is_pronto; /* extra byte for the NULL termination */ u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1]; @@ -202,7 +202,8 @@ struct wcn36xx { /* IRQs */ int tx_irq; int rx_irq; - void __iomem *mmio; + void __iomem *ccu_base; + void __iomem *dxe_base; struct wcn36xx_platform_ctrl_ops *ctrl_ops; /* @@ -215,7 +216,7 @@ struct wcn36xx { struct completion hal_rsp_compl; struct 
workqueue_struct *hal_ind_wq; struct work_struct hal_ind_work; - struct mutex hal_ind_mutex; + spinlock_t hal_ind_lock; struct list_head hal_ind_queue; /* DXE channels */ @@ -241,9 +242,6 @@ struct wcn36xx { }; -#define WCN36XX_CHIP_3660 0 -#define WCN36XX_CHIP_3680 1 - static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn, u8 major, u8 minor, diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 576981129..f0e1175fb 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -378,6 +378,10 @@ static int wil_cfg80211_scan(struct wiphy *wiphy, /* social scan on P2P_DEVICE is handled as p2p search */ if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE && wil_p2p_is_social_scan(request)) { + if (!wil->p2p.p2p_dev_started) { + wil_err(wil, "P2P search requested on stopped P2P device\n"); + return -EIO; + } wil->scan_request = request; wil->radio_wdev = wdev; rc = wil_p2p_search(wil, request); @@ -1351,6 +1355,7 @@ static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy, struct wil6210_priv *wil = wiphy_to_wil(wiphy); wil_dbg_misc(wil, "%s: entered\n", __func__); + wil->p2p.p2p_dev_started = 1; return 0; } @@ -1358,8 +1363,23 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy, struct wireless_dev *wdev) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + u8 started; wil_dbg_misc(wil, "%s: entered\n", __func__); + mutex_lock(&wil->mutex); + started = wil_p2p_stop_discovery(wil); + if (started && wil->scan_request) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + + cfg80211_scan_done(wil->scan_request, &info); + wil->scan_request = NULL; + wil->radio_wdev = wil->wdev; + } + mutex_unlock(&wil->mutex); + + wil->p2p.p2p_dev_started = 0; } static struct cfg80211_ops wil_cfg80211_ops = { diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c index c312a667c..217a4591b 100644 --- a/drivers/net/wireless/ath/wil6210/debug.c +++ b/drivers/net/wireless/ath/wil6210/debug.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Qualcomm Atheros, Inc. + * Copyright (c) 2013,2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -19,34 +19,31 @@ void __wil_err(struct wil6210_priv *wil, const char *fmt, ...) { - struct net_device *ndev = wil_to_ndev(wil); - struct va_format vaf = { - .fmt = fmt, - }; + struct va_format vaf; va_list args; va_start(args, fmt); + vaf.fmt = fmt; vaf.va = &args; - netdev_err(ndev, "%pV", &vaf); + netdev_err(wil_to_ndev(wil), "%pV", &vaf); trace_wil6210_log_err(&vaf); va_end(args); } void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...) { - if (net_ratelimit()) { - struct net_device *ndev = wil_to_ndev(wil); - struct va_format vaf = { - .fmt = fmt, - }; - va_list args; + struct va_format vaf; + va_list args; - va_start(args, fmt); - vaf.va = &args; - netdev_err(ndev, "%pV", &vaf); - trace_wil6210_log_err(&vaf); - va_end(args); - } + if (!net_ratelimit()) + return; + + va_start(args, fmt); + vaf.fmt = fmt; + vaf.va = &args; + netdev_err(wil_to_ndev(wil), "%pV", &vaf); + trace_wil6210_log_err(&vaf); + va_end(args); } void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...) @@ -67,27 +64,24 @@ void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...) void __wil_info(struct wil6210_priv *wil, const char *fmt, ...) 
{ - struct net_device *ndev = wil_to_ndev(wil); - struct va_format vaf = { - .fmt = fmt, - }; + struct va_format vaf; va_list args; va_start(args, fmt); + vaf.fmt = fmt; vaf.va = &args; - netdev_info(ndev, "%pV", &vaf); + netdev_info(wil_to_ndev(wil), "%pV", &vaf); trace_wil6210_log_info(&vaf); va_end(args); } void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...) { - struct va_format vaf = { - .fmt = fmt, - }; + struct va_format vaf; va_list args; va_start(args, fmt); + vaf.fmt = fmt; vaf.va = &args; trace_wil6210_log_dbg(&vaf); va_end(args); diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 8e31d755b..4bc92e549 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -850,10 +850,14 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) mutex_unlock(&wil->wmi_mutex); if (wil->scan_request) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + wil_dbg_misc(wil, "Abort scan_request 0x%p\n", wil->scan_request); del_timer_sync(&wil->scan_timer); - cfg80211_scan_done(wil->scan_request, true); + cfg80211_scan_done(wil->scan_request, &info); wil->scan_request = NULL; } @@ -1049,10 +1053,14 @@ int __wil_down(struct wil6210_priv *wil) (void)wil_p2p_stop_discovery(wil); if (wil->scan_request) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + wil_dbg_misc(wil, "Abort scan_request 0x%p\n", wil->scan_request); del_timer_sync(&wil->scan_timer); - cfg80211_scan_done(wil->scan_request, true); + cfg80211_scan_done(wil->scan_request, &info); wil->scan_request = NULL; } diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c index 1c9153894..e0f8aa0eb 100644 --- a/drivers/net/wireless/ath/wil6210/p2p.c +++ b/drivers/net/wireless/ath/wil6210/p2p.c @@ -114,8 +114,10 @@ int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration, u8 channel = P2P_DMG_SOCIAL_CHANNEL; int rc; - if (chan) - channel = chan->hw_value; + if (!chan) + return -EINVAL; + + channel = chan->hw_value; wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration); @@ -250,8 +252,12 @@ void wil_p2p_search_expired(struct work_struct *work) mutex_unlock(&wil->mutex); if (started) { + struct cfg80211_scan_info info = { + .aborted = false, + }; + mutex_lock(&wil->p2p_wdev_mutex); - cfg80211_scan_done(wil->scan_request, 0); + cfg80211_scan_done(wil->scan_request, &info); wil->scan_request = NULL; wil->radio_wdev = wil->wdev; mutex_unlock(&wil->p2p_wdev_mutex); diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index aeb72c438..7b5c4222b 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -18,13 +18,20 @@ #include <linux/pci.h> #include <linux/moduleparam.h> #include <linux/interrupt.h> - +#include <linux/suspend.h> #include "wil6210.h" static bool use_msi = true; module_param(use_msi, bool, S_IRUGO); MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true"); +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP +static int wil6210_pm_notify(struct notifier_block *notify_block, + unsigned long mode, void *unused); +#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM */ + static void wil_set_capabilities(struct wil6210_priv *wil) { @@ -238,6 +245,18 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto bus_disable; } +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP + wil->pm_notify.notifier_call = wil6210_pm_notify; + rc = register_pm_notifier(&wil->pm_notify); + if (rc) + /* Do not fail the driver initialization, as suspend can + * be prevented in a later phase if needed + */ + wil_err(wil, "register_pm_notifier failed: %d\n", rc); +#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM */ + wil6210_debugfs_init(wil); @@ -267,6 +286,12 @@ static void wil_pcie_remove(struct pci_dev *pdev) wil_dbg_misc(wil, "%s()\n", __func__); +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP + unregister_pm_notifier(&wil->pm_notify); +#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM */ + wil6210_debugfs_remove(wil); wil_if_remove(wil); wil_if_pcie_disable(wil); @@ -335,6 +360,45 @@ static int wil6210_resume(struct device *dev, bool is_runtime) return rc; } +static int wil6210_pm_notify(struct notifier_block *notify_block, + unsigned long mode, void *unused) +{ + struct wil6210_priv *wil = container_of( + notify_block, struct wil6210_priv, pm_notify); + int rc = 0; + enum wil_platform_event evt; + + wil_dbg_pm(wil, "%s: mode (%ld)\n", __func__, mode); + + switch (mode) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + case PM_RESTORE_PREPARE: + rc = wil_can_suspend(wil, false); + if (rc) + break; + evt = WIL_PLATFORM_EVT_PRE_SUSPEND; + if (wil->platform_ops.notify) + rc = wil->platform_ops.notify(wil->platform_handle, + evt); + break; + case PM_POST_SUSPEND: + case PM_POST_HIBERNATION: + case PM_POST_RESTORE: + evt = WIL_PLATFORM_EVT_POST_SUSPEND; + if (wil->platform_ops.notify) + rc = wil->platform_ops.notify(wil->platform_handle, + evt); + break; + default: + wil_dbg_pm(wil, "unhandled notify mode %ld\n", mode); + break; + } + + wil_dbg_pm(wil, "notification mode %ld: rc (%d)\n", mode, rc); + return rc; +} + static int wil6210_pm_suspend(struct device *dev) { return wil6210_suspend(dev, false); diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index 0b7ecbcac..11ee24d50 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014 Qualcomm Atheros, Inc. + * Copyright (c) 2014,2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -24,10 +24,32 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) wil_dbg_pm(wil, "%s(%s)\n", __func__, is_runtime ? 
"runtime" : "system"); + if (!netif_running(wil_to_ndev(wil))) { + /* can always sleep when down */ + wil_dbg_pm(wil, "Interface is down\n"); + goto out; + } + if (test_bit(wil_status_resetting, wil->status)) { + wil_dbg_pm(wil, "Delay suspend when resetting\n"); + rc = -EBUSY; + goto out; + } + if (wil->recovery_state != fw_recovery_idle) { + wil_dbg_pm(wil, "Delay suspend during recovery\n"); + rc = -EBUSY; + goto out; + } + + /* interface is running */ switch (wdev->iftype) { case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: + if (test_bit(wil_status_fwconnecting, wil->status)) { + wil_dbg_pm(wil, "Delay suspend when connecting\n"); + rc = -EBUSY; + goto out; + } break; /* AP-like interface - can't suspend */ default: @@ -36,6 +58,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) break; } +out: wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__, is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc); diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index a4e43796a..f2f6a404d 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -184,6 +184,13 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring, &vring->va[vring->swtail].tx; ctx = &vring->ctx[vring->swtail]; + if (!ctx) { + wil_dbg_txrx(wil, + "ctx(%d) was already completed\n", + vring->swtail); + vring->swtail = wil_vring_next_tail(vring); + continue; + } *d = *_d; wil_txdesc_unmap(dev, d, ctx); if (ctx->skb) @@ -544,6 +551,12 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count) break; } } + + /* make sure all writes to descriptors (shared memory) are done before + * committing them to HW + */ + wmb(); + wil_w(wil, v->hwtail, v->swtail); return rc; @@ -969,6 +982,13 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id) txdata->dot1x_open = false; txdata->enabled = 0; /* no Tx can be in progress or start anew */ spin_unlock_bh(&txdata->lock); + /* napi_synchronize waits for completion of the current NAPI but will + * not prevent the next NAPI run. + * Add a memory barrier to guarantee that txdata->enabled is zeroed + * before napi_synchronize so that the next scheduled NAPI will not + * handle this vring + */ + wmb(); /* make sure NAPI won't touch this vring */ if (test_bit(wil_status_napi_en, wil->status)) napi_synchronize(&wil->napi_tx); @@ -1551,6 +1571,13 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring, vring_index, used, used + descs_used); } + /* Make sure to advance the head only after descriptor update is done. + * This will prevent a race condition where the completion thread + * will see the DU bit set from previous run and will handle the + * skb before it was completed. + */ + wmb(); + /* advance swhead */ wil_vring_advance_head(vring, descs_used); wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead); @@ -1567,7 +1594,7 @@ mem_error: while (descs_used > 0) { struct wil_ctx *ctx; - i = (swhead + descs_used) % vring->size; + i = (swhead + descs_used - 1) % vring->size; d = (struct vring_tx_desc *)&vring->va[i].tx; _desc = &vring->va[i].tx; *d = *_desc; @@ -1691,6 +1718,13 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, vring_index, used, used + nr_frags + 1); } + /* Make sure to advance the head only after descriptor update is done. 
+ * This will prevent a race condition where the completion thread + * will see the DU bit set from previous run and will handle the + * skb before it was completed. + */ + wmb(); + /* advance swhead */ wil_vring_advance_head(vring, nr_frags + 1); wil_dbg_txrx(wil, "Tx[%2d] swhead %d -> %d\n", vring_index, swhead, @@ -1914,6 +1948,12 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) wil_consume_skb(skb, d->dma.error == 0); } memset(ctx, 0, sizeof(*ctx)); + /* Make sure the ctx is zeroed before updating the tail + * to prevent a case where wil_tx_vring will see + * this descriptor as used and handle it before ctx zero + * is completed. + */ + wmb(); /* There is no need to touch HW descriptor: * - ststus bit TX_DMA_STATUS_DU is set by design, * so hardware will not try to process this desc., diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 1abdd45c3..98d302935 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -458,6 +458,7 @@ struct wil_tid_crypto_rx { struct wil_p2p_info { struct ieee80211_channel listen_chan; u8 discovery_started; + u8 p2p_dev_started; u64 cookie; struct timer_list discovery_timer; /* listen/search duration */ struct work_struct discovery_expired_work; /* listen/search expire */ @@ -662,6 +663,11 @@ struct wil6210_priv { /* High Access Latency Policy voting */ struct wil_halp halp; +#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP + struct notifier_block pm_notify; +#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM */ }; #define wil_to_wiphy(i) (i->wdev->wiphy) diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h index 33d4a34b3..f8c41172a 100644 --- a/drivers/net/wireless/ath/wil6210/wil_platform.h +++ b/drivers/net/wireless/ath/wil6210/wil_platform.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2014-2016 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -23,6 +23,8 @@ enum wil_platform_event { WIL_PLATFORM_EVT_FW_CRASH = 0, WIL_PLATFORM_EVT_PRE_RESET = 1, WIL_PLATFORM_EVT_FW_RDY = 2, + WIL_PLATFORM_EVT_PRE_SUSPEND = 3, + WIL_PLATFORM_EVT_POST_SUSPEND = 4, }; /** diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index b80c5d850..4d9254191 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -426,15 +426,17 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id, { if (wil->scan_request) { struct wmi_scan_complete_event *data = d; - bool aborted = (data->status != WMI_SCAN_SUCCESS); + struct cfg80211_scan_info info = { + .aborted = (data->status != WMI_SCAN_SUCCESS), + }; wil_dbg_wmi(wil, "SCAN_COMPLETE(0x%08x)\n", data->status); wil_dbg_misc(wil, "Complete scan_request 0x%p aborted %d\n", - wil->scan_request, aborted); + wil->scan_request, info.aborted); del_timer_sync(&wil->scan_timer); mutex_lock(&wil->p2p_wdev_mutex); - cfg80211_scan_done(wil->scan_request, aborted); + cfg80211_scan_done(wil->scan_request, &info); wil->radio_wdev = wil->wdev; mutex_unlock(&wil->p2p_wdev_mutex); wil->scan_request = NULL; diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index 488c08c30..2e3ece345 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -1915,6 +1915,9 @@ static void at76_dwork_hw_scan(struct work_struct *work) { struct at76_priv *priv = container_of(work, struct at76_priv, dwork_hw_scan.work); + struct cfg80211_scan_info info = { + .aborted = false, + }; int ret; if (priv->device_unplugged) @@ -1941,7 +1944,7 @@ static void at76_dwork_hw_scan(struct work_struct *work) mutex_unlock(&priv->mtx); - ieee80211_scan_completed(priv->hw, false); + ieee80211_scan_completed(priv->hw, &info); ieee80211_wake_queues(priv->hw); } diff --git a/drivers/net/wireless/broadcom/b43/Makefile b/drivers/net/wireless/broadcom/b43/Makefile index ddc4df466..27fab958e 100644 --- a/drivers/net/wireless/broadcom/b43/Makefile +++ b/drivers/net/wireless/broadcom/b43/Makefile @@ -1,6 +1,6 @@ b43-y += main.o b43-y += bus.o -b43-$(CONFIG_B43_PHY_G) += phy_a.o phy_g.o tables.o lo.o wa.o +b43-$(CONFIG_B43_PHY_G) += phy_g.o tables.o lo.o wa.o b43-$(CONFIG_B43_PHY_N) += tables_nphy.o b43-$(CONFIG_B43_PHY_N) += radio_2055.o b43-$(CONFIG_B43_PHY_N) += radio_2056.o diff --git a/drivers/net/wireless/broadcom/b43/leds.c b/drivers/net/wireless/broadcom/b43/leds.c index d79ab2a22..cb987c2ec 100644 --- a/drivers/net/wireless/broadcom/b43/leds.c +++ b/drivers/net/wireless/broadcom/b43/leds.c @@ -222,7 +222,7 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev, sprom[2] = dev->dev->bus_sprom->gpio2; sprom[3] = dev->dev->bus_sprom->gpio3; - if (sprom[led_index] == 0xFF) { + if ((sprom[0] & sprom[1] & sprom[2] & sprom[3]) == 0xff) { /* There is no LED information in the SPROM * for this LED. Hardcode it here. 
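* (An unprogrammed SPROM reads back as all 0xff, so AND-ing the four
* GPIO bytes catches the fully-empty case with a single compare.)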
*/ *activelow = false; @@ -250,7 +250,11 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev, return; } } else { - *behaviour = sprom[led_index] & B43_LED_BEHAVIOUR; + /* keep LED disabled if no mapping is defined */ + if (sprom[led_index] == 0xff) + *behaviour = B43_LED_OFF; + else + *behaviour = sprom[led_index] & B43_LED_BEHAVIOUR; *activelow = !!(sprom[led_index] & B43_LED_ACTIVELOW); } } diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index f8c7aa9db..c522719fc 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -3176,7 +3176,6 @@ static void b43_rate_memory_write(struct b43_wldev *dev, u16 rate, int is_ofdm) static void b43_rate_memory_init(struct b43_wldev *dev) { switch (dev->phy.type) { - case B43_PHYTYPE_A: case B43_PHYTYPE_G: case B43_PHYTYPE_N: case B43_PHYTYPE_LP: @@ -3190,8 +3189,6 @@ static void b43_rate_memory_init(struct b43_wldev *dev) b43_rate_memory_write(dev, B43_OFDM_RATE_36MB, 1); b43_rate_memory_write(dev, B43_OFDM_RATE_48MB, 1); b43_rate_memory_write(dev, B43_OFDM_RATE_54MB, 1); - if (dev->phy.type == B43_PHYTYPE_A) - break; /* fallthrough */ case B43_PHYTYPE_B: b43_rate_memory_write(dev, B43_CCK_RATE_1MB, 0); @@ -4600,14 +4597,6 @@ static int b43_phy_versioning(struct b43_wldev *dev) if (radio_manuf != 0x17F /* Broadcom */) unsupported = 1; switch (phy_type) { - case B43_PHYTYPE_A: - if (radio_id != 0x2060) - unsupported = 1; - if (radio_rev != 1) - unsupported = 1; - if (radio_manuf != 0x17F) - unsupported = 1; - break; case B43_PHYTYPE_B: if ((radio_id & 0xFFF0) != 0x2050) unsupported = 1; @@ -4762,10 +4751,7 @@ static void b43_set_synth_pu_delay(struct b43_wldev *dev, bool idle) u16 pu_delay; /* The time value is in microseconds. */ - if (dev->phy.type == B43_PHYTYPE_A) - pu_delay = 3700; - else - pu_delay = 1050; + pu_delay = 1050; if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC) || idle) pu_delay = 500; if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8)) @@ -4780,14 +4766,10 @@ static void b43_set_pretbtt(struct b43_wldev *dev) u16 pretbtt; /* The time value is in microseconds. 
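* (pretbtt is the lead time by which the hardware wakes before the
* target beacon transmission time.)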
*/ - if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) { + if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) pretbtt = 2; - } else { - if (dev->phy.type == B43_PHYTYPE_A) - pretbtt = 120; - else - pretbtt = 250; - } + else + pretbtt = 250; b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRETBTT, pretbtt); b43_write16(dev, B43_MMIO_TSF_CFP_PRETBTT, pretbtt); } @@ -5376,10 +5358,6 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy, /* As a fallback, try to guess using PHY type */ switch (dev->phy.type) { - case B43_PHYTYPE_A: - *have_2ghz_phy = false; - *have_5ghz_phy = true; - return; case B43_PHYTYPE_G: case B43_PHYTYPE_N: case B43_PHYTYPE_LP: @@ -5451,7 +5429,6 @@ static int b43_wireless_core_attach(struct b43_wldev *dev) /* We don't support 5 GHz on some PHYs yet */ if (have_5ghz_phy) { switch (dev->phy.type) { - case B43_PHYTYPE_A: case B43_PHYTYPE_G: case B43_PHYTYPE_LP: case B43_PHYTYPE_HT: diff --git a/drivers/net/wireless/broadcom/b43/phy_a.h b/drivers/net/wireless/broadcom/b43/phy_a.h index f7d0d929a..0a92d01c2 100644 --- a/drivers/net/wireless/broadcom/b43/phy_a.h +++ b/drivers/net/wireless/broadcom/b43/phy_a.h @@ -101,26 +101,4 @@ u32 b43_ofdmtab_read32(struct b43_wldev *dev, u16 table, u16 offset); void b43_ofdmtab_write32(struct b43_wldev *dev, u16 table, u16 offset, u32 value); - -struct b43_phy_a { - /* Pointer to the table used to convert a - * TSSI value to dBm-Q5.2 */ - const s8 *tssi2dbm; - /* Target idle TSSI */ - int tgt_idle_tssi; - /* Current idle TSSI */ - int cur_idle_tssi;//FIXME value currently not set - - /* A-PHY TX Power control value. */ - u16 txpwr_offset; - - //TODO lots of missing stuff -}; - -/** - * b43_phy_inita - Lowlevel A-PHY init routine. - * This is _only_ used by the G-PHY code. - */ -void b43_phy_inita(struct b43_wldev *dev); - #endif /* LINUX_B43_PHY_A_H_ */ diff --git a/drivers/net/wireless/broadcom/b43/phy_common.h b/drivers/net/wireless/broadcom/b43/phy_common.h index 78d865267..ced054a98 100644 --- a/drivers/net/wireless/broadcom/b43/phy_common.h +++ b/drivers/net/wireless/broadcom/b43/phy_common.h @@ -190,7 +190,6 @@ struct b43_phy_operations { void (*pwork_60sec)(struct b43_wldev *dev); }; -struct b43_phy_a; struct b43_phy_g; struct b43_phy_n; struct b43_phy_lp; @@ -210,8 +209,6 @@ struct b43_phy { #else union { #endif - /* A-PHY specific information */ - struct b43_phy_a *a; /* G-PHY specific information */ struct b43_phy_g *g; /* N-PHY specific information */ diff --git a/drivers/net/wireless/broadcom/b43/phy_g.c b/drivers/net/wireless/broadcom/b43/phy_g.c index 462310e6e..822dcaa8a 100644 --- a/drivers/net/wireless/broadcom/b43/phy_g.c +++ b/drivers/net/wireless/broadcom/b43/phy_g.c @@ -31,6 +31,7 @@ #include "phy_common.h" #include "lo.h" #include "main.h" +#include "wa.h" #include <linux/bitrev.h> #include <linux/slab.h> @@ -1987,6 +1988,25 @@ static void b43_phy_init_pctl(struct b43_wldev *dev) b43_shm_clear_tssi(dev); } +static void b43_phy_inita(struct b43_wldev *dev) +{ + struct b43_phy *phy = &dev->phy; + + might_sleep(); + + if (phy->rev >= 6) { + if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN) + b43_phy_set(dev, B43_PHY_ENCORE, 0x0010); + else + b43_phy_mask(dev, B43_PHY_ENCORE, ~0x1010); + } + + b43_wa_all(dev); + + if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) + b43_phy_maskset(dev, B43_PHY_OFDM(0x6E), 0xE000, 0x3CF); +} + static void b43_phy_initg(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; @@ -2150,11 +2170,6 @@ static void default_radio_attenuation(struct b43_wldev 
*dev, } } - if (phy->type == B43_PHYTYPE_A) { - rf->att = 0x60; - return; - } - switch (phy->radio_ver) { case 0x2053: switch (phy->radio_rev) { diff --git a/drivers/net/wireless/broadcom/b43/wa.c b/drivers/net/wireless/broadcom/b43/wa.c index c218c08fb..0e96c08d1 100644 --- a/drivers/net/wireless/broadcom/b43/wa.c +++ b/drivers/net/wireless/broadcom/b43/wa.c @@ -30,33 +30,6 @@ #include "phy_common.h" #include "wa.h" -static void b43_wa_papd(struct b43_wldev *dev) -{ - u16 backup; - - backup = b43_ofdmtab_read16(dev, B43_OFDMTAB_PWRDYN2, 0); - b43_ofdmtab_write16(dev, B43_OFDMTAB_PWRDYN2, 0, 7); - b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_APHY, 0, 0); - b43_dummy_transmission(dev, true, true); - b43_ofdmtab_write16(dev, B43_OFDMTAB_PWRDYN2, 0, backup); -} - -static void b43_wa_auxclipthr(struct b43_wldev *dev) -{ - b43_phy_write(dev, B43_PHY_OFDM(0x8E), 0x3800); -} - -static void b43_wa_afcdac(struct b43_wldev *dev) -{ - b43_phy_write(dev, 0x0035, 0x03FF); - b43_phy_write(dev, 0x0036, 0x0400); -} - -static void b43_wa_txdc_offset(struct b43_wldev *dev) -{ - b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 0, 0x0051); -} - void b43_wa_initgains(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; @@ -81,41 +54,6 @@ void b43_wa_initgains(struct b43_wldev *dev) b43_phy_write(dev, 0x00BA, 0x3ED5); } -static void b43_wa_divider(struct b43_wldev *dev) -{ - b43_phy_mask(dev, 0x002B, ~0x0100); - b43_phy_write(dev, 0x008E, 0x58C1); -} - -static void b43_wa_gt(struct b43_wldev *dev) /* Gain table. */ -{ - if (dev->phy.rev <= 2) { - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 0, 15); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 1, 31); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 2, 42); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 3, 48); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN2, 4, 58); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 0, 19); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 1, 19); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 2, 19); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 3, 19); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 4, 21); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 5, 21); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 6, 25); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 0, 3); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 1, 3); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN1, 2, 7); - } else { - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 0, 19); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 1, 19); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 2, 19); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 3, 19); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 4, 21); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 5, 21); - b43_ofdmtab_write16(dev, B43_OFDMTAB_GAIN0, 6, 25); - } -} - static void b43_wa_rssi_lt(struct b43_wldev *dev) /* RSSI lookup table */ { int i; @@ -133,15 +71,11 @@ static void b43_wa_rssi_lt(struct b43_wldev *dev) /* RSSI lookup table */ static void b43_wa_analog(struct b43_wldev *dev) { - struct b43_phy *phy = &dev->phy; u16 ofdmrev; ofdmrev = b43_phy_read(dev, B43_PHY_VERSION_OFDM) & B43_PHYVER_VERSION; if (ofdmrev > 2) { - if (phy->type == B43_PHYTYPE_A) - b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1808); - else - b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1000); + b43_phy_write(dev, B43_PHY_PWRDOWN, 0x1000); } else { b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 3, 0x1044); b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 4, 0x7201); @@ -149,26 +83,13 @@ static void b43_wa_analog(struct b43_wldev *dev) } } -static void b43_wa_dac(struct b43_wldev *dev) -{ - if 
(dev->phy.analog == 1) - b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, - (b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 1) & ~0x0034) | 0x0008); - else - b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, - (b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 1) & ~0x0078) | 0x0010); -} - static void b43_wa_fft(struct b43_wldev *dev) /* Fine frequency table */ { int i; - if (dev->phy.type == B43_PHYTYPE_A) - for (i = 0; i < B43_TAB_FINEFREQA_SIZE; i++) - b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, b43_tab_finefreqa[i]); - else - for (i = 0; i < B43_TAB_FINEFREQG_SIZE; i++) - b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, b43_tab_finefreqg[i]); + for (i = 0; i < B43_TAB_FINEFREQG_SIZE; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_DACRFPABB, i, + b43_tab_finefreqg[i]); } static void b43_wa_nft(struct b43_wldev *dev) /* Noise figure table */ @@ -176,21 +97,14 @@ static void b43_wa_nft(struct b43_wldev *dev) /* Noise figure table */ struct b43_phy *phy = &dev->phy; int i; - if (phy->type == B43_PHYTYPE_A) { - if (phy->rev == 2) - for (i = 0; i < B43_TAB_NOISEA2_SIZE; i++) - b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noisea2[i]); - else - for (i = 0; i < B43_TAB_NOISEA3_SIZE; i++) - b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noisea3[i]); - } else { - if (phy->rev == 1) - for (i = 0; i < B43_TAB_NOISEG1_SIZE; i++) - b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noiseg1[i]); - else - for (i = 0; i < B43_TAB_NOISEG2_SIZE; i++) - b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, b43_tab_noiseg2[i]); - } + if (phy->rev == 1) + for (i = 0; i < B43_TAB_NOISEG1_SIZE; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, + b43_tab_noiseg1[i]); + else + for (i = 0; i < B43_TAB_NOISEG2_SIZE; i++) + b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC2, i, + b43_tab_noiseg2[i]); } static void b43_wa_rt(struct b43_wldev *dev) /* Rotor table */ @@ -201,14 +115,6 @@ static void b43_wa_rt(struct b43_wldev *dev) /* Rotor table */ b43_ofdmtab_write32(dev, B43_OFDMTAB_ROTOR, i, b43_tab_rotor[i]); } -static void b43_write_null_nst(struct b43_wldev *dev) -{ - int i; - - for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++) - b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE, i, 0); -} - static void b43_write_nst(struct b43_wldev *dev, const u16 *nst) { int i; @@ -221,24 +127,13 @@ static void b43_wa_nst(struct b43_wldev *dev) /* Noise scale table */ { struct b43_phy *phy = &dev->phy; - if (phy->type == B43_PHYTYPE_A) { - if (phy->rev <= 1) - b43_write_null_nst(dev); - else if (phy->rev == 2) - b43_write_nst(dev, b43_tab_noisescalea2); - else if (phy->rev == 3) - b43_write_nst(dev, b43_tab_noisescalea3); - else + if (phy->rev >= 6) { + if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN) b43_write_nst(dev, b43_tab_noisescaleg3); + else + b43_write_nst(dev, b43_tab_noisescaleg2); } else { - if (phy->rev >= 6) { - if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN) - b43_write_nst(dev, b43_tab_noisescaleg3); - else - b43_write_nst(dev, b43_tab_noisescaleg2); - } else { - b43_write_nst(dev, b43_tab_noisescaleg1); - } + b43_write_nst(dev, b43_tab_noisescaleg1); } } @@ -251,41 +146,13 @@ static void b43_wa_art(struct b43_wldev *dev) /* ADV retard table */ i, b43_tab_retard[i]); } -static void b43_wa_txlna_gain(struct b43_wldev *dev) -{ - b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 13, 0x0000); -} - -static void b43_wa_crs_reset(struct b43_wldev *dev) -{ - b43_phy_write(dev, 0x002C, 0x0064); -} - -static void b43_wa_2060txlna_gain(struct b43_wldev *dev) -{ - b43_hf_write(dev, b43_hf_read(dev) | - B43_HF_2060W); -} - 
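Once the A-PHY branches are gone, the surviving workarounds above (b43_wa_fft(), b43_wa_nft()) all reduce to the same shape: walk a constant table and push each entry through the 16-bit OFDM-table accessor. A minimal sketch of that shape as a helper; the helper is hypothetical and not part of this patch, b43 keeps the loops open-coded:

/* Hypothetical helper (not in this patch) illustrating the table-load
 * shape shared by b43_wa_fft() and b43_wa_nft() above.
 */
static void b43_load_ofdmtab16(struct b43_wldev *dev, u16 table,
			       const u16 *vals, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		b43_ofdmtab_write16(dev, table, i, vals[i]);
}

With such a helper, the simplified b43_wa_fft() would collapse to a single call: b43_load_ofdmtab16(dev, B43_OFDMTAB_DACRFPABB, b43_tab_finefreqg, B43_TAB_FINEFREQG_SIZE).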
-static void b43_wa_lms(struct b43_wldev *dev) -{ - b43_phy_maskset(dev, 0x0055, 0xFFC0, 0x0004); -} - -static void b43_wa_mixedsignal(struct b43_wldev *dev) -{ - b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 1, 3); -} - static void b43_wa_msst(struct b43_wldev *dev) /* Min sigma square table */ { struct b43_phy *phy = &dev->phy; int i; const u16 *tab; - if (phy->type == B43_PHYTYPE_A) { - tab = b43_tab_sigmasqr1; - } else if (phy->type == B43_PHYTYPE_G) { + if (phy->type == B43_PHYTYPE_G) { tab = b43_tab_sigmasqr2; } else { B43_WARN_ON(1); @@ -298,13 +165,6 @@ static void b43_wa_msst(struct b43_wldev *dev) /* Min sigma square table */ } } -static void b43_wa_iqadc(struct b43_wldev *dev) -{ - if (dev->phy.analog == 4) - b43_ofdmtab_write16(dev, B43_OFDMTAB_DAC, 0, - b43_ofdmtab_read16(dev, B43_OFDMTAB_DAC, 0) & ~0xF000); -} - static void b43_wa_crs_ed(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; @@ -450,38 +310,6 @@ static void b43_wa_cpll_nonpilot(struct b43_wldev *dev) b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_11, 1, 0); } -static void b43_wa_rssi_adc(struct b43_wldev *dev) -{ - if (dev->phy.analog == 4) - b43_phy_write(dev, 0x00DC, 0x7454); -} - -static void b43_wa_boards_a(struct b43_wldev *dev) -{ - if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM && - dev->dev->board_type == SSB_BOARD_BU4306 && - dev->dev->board_rev < 0x30) { - b43_phy_write(dev, 0x0010, 0xE000); - b43_phy_write(dev, 0x0013, 0x0140); - b43_phy_write(dev, 0x0014, 0x0280); - } else { - if (dev->dev->board_type == SSB_BOARD_MP4318 && - dev->dev->board_rev < 0x20) { - b43_phy_write(dev, 0x0013, 0x0210); - b43_phy_write(dev, 0x0014, 0x0840); - } else { - b43_phy_write(dev, 0x0013, 0x0140); - b43_phy_write(dev, 0x0014, 0x0280); - } - if (dev->phy.rev <= 4) - b43_phy_write(dev, 0x0010, 0xE000); - else - b43_phy_write(dev, 0x0010, 0x2000); - b43_ofdmtab_write16(dev, B43_OFDMTAB_DC, 1, 0x0039); - b43_ofdmtab_write16(dev, B43_OFDMTAB_UNKNOWN_APHY, 7, 0x0040); - } -} - static void b43_wa_boards_g(struct b43_wldev *dev) { struct ssb_sprom *sprom = dev->dev->bus_sprom; @@ -518,80 +346,7 @@ void b43_wa_all(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; - if (phy->type == B43_PHYTYPE_A) { - switch (phy->rev) { - case 2: - b43_wa_papd(dev); - b43_wa_auxclipthr(dev); - b43_wa_afcdac(dev); - b43_wa_txdc_offset(dev); - b43_wa_initgains(dev); - b43_wa_divider(dev); - b43_wa_gt(dev); - b43_wa_rssi_lt(dev); - b43_wa_analog(dev); - b43_wa_dac(dev); - b43_wa_fft(dev); - b43_wa_nft(dev); - b43_wa_rt(dev); - b43_wa_nst(dev); - b43_wa_art(dev); - b43_wa_txlna_gain(dev); - b43_wa_crs_reset(dev); - b43_wa_2060txlna_gain(dev); - b43_wa_lms(dev); - break; - case 3: - b43_wa_papd(dev); - b43_wa_mixedsignal(dev); - b43_wa_rssi_lt(dev); - b43_wa_txdc_offset(dev); - b43_wa_initgains(dev); - b43_wa_dac(dev); - b43_wa_nft(dev); - b43_wa_nst(dev); - b43_wa_msst(dev); - b43_wa_analog(dev); - b43_wa_gt(dev); - b43_wa_txpuoff_rxpuon(dev); - b43_wa_txlna_gain(dev); - break; - case 5: - b43_wa_iqadc(dev); - case 6: - b43_wa_papd(dev); - b43_wa_rssi_lt(dev); - b43_wa_txdc_offset(dev); - b43_wa_initgains(dev); - b43_wa_dac(dev); - b43_wa_nft(dev); - b43_wa_nst(dev); - b43_wa_msst(dev); - b43_wa_analog(dev); - b43_wa_gt(dev); - b43_wa_txpuoff_rxpuon(dev); - b43_wa_txlna_gain(dev); - break; - case 7: - b43_wa_iqadc(dev); - b43_wa_papd(dev); - b43_wa_rssi_lt(dev); - b43_wa_txdc_offset(dev); - b43_wa_initgains(dev); - b43_wa_dac(dev); - b43_wa_nft(dev); - b43_wa_nst(dev); - b43_wa_msst(dev); - b43_wa_analog(dev); - b43_wa_gt(dev); - 
b43_wa_txpuoff_rxpuon(dev); - b43_wa_txlna_gain(dev); - b43_wa_rssi_adc(dev); - default: - B43_WARN_ON(1); - } - b43_wa_boards_a(dev); - } else if (phy->type == B43_PHYTYPE_G) { + if (phy->type == B43_PHYTYPE_G) { switch (phy->rev) { case 1://XXX review rev1 b43_wa_crs_ed(dev); diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c index f6201264d..b068d5aee 100644 --- a/drivers/net/wireless/broadcom/b43/xmit.c +++ b/drivers/net/wireless/broadcom/b43/xmit.c @@ -205,7 +205,7 @@ static u16 b43_generate_tx_phy_ctl1(struct b43_wldev *dev, u8 bitrate) return control; } -static u8 b43_calc_fallback_rate(u8 bitrate) +static u8 b43_calc_fallback_rate(u8 bitrate, int gmode) { switch (bitrate) { case B43_CCK_RATE_1MB: @@ -216,8 +216,15 @@ static u8 b43_calc_fallback_rate(u8 bitrate) return B43_CCK_RATE_2MB; case B43_CCK_RATE_11MB: return B43_CCK_RATE_5MB; + /* + * Don't just fallback to CCK; it may be in 5GHz operation + * and falling back to CCK won't work out very well. + */ case B43_OFDM_RATE_6MB: - return B43_CCK_RATE_5MB; + if (gmode) + return B43_CCK_RATE_5MB; + else + return B43_OFDM_RATE_6MB; case B43_OFDM_RATE_9MB: return B43_OFDM_RATE_6MB; case B43_OFDM_RATE_12MB: @@ -438,7 +445,7 @@ int b43_generate_txhdr(struct b43_wldev *dev, rts_rate = rts_cts_rate ? rts_cts_rate->hw_value : B43_CCK_RATE_1MB; rts_rate_ofdm = b43_is_ofdm_rate(rts_rate); - rts_rate_fb = b43_calc_fallback_rate(rts_rate); + rts_rate_fb = b43_calc_fallback_rate(rts_rate, phy->gmode); rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb); if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { @@ -642,11 +649,7 @@ static s8 b43_rssinoise_postprocess(struct b43_wldev *dev, u8 in_rssi) struct b43_phy *phy = &dev->phy; s8 ret; - if (phy->type == B43_PHYTYPE_A) { - //TODO: Incomplete specs. - ret = 0; - } else - ret = b43_rssi_postprocess(dev, in_rssi, 0, 1, 1); + ret = b43_rssi_postprocess(dev, in_rssi, 0, 1, 1); return ret; } @@ -663,7 +666,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) u16 uninitialized_var(chanstat), uninitialized_var(mactime); u32 uninitialized_var(macstat); u16 chanid; - u16 phytype; int padding, rate_idx; memset(&status, 0, sizeof(status)); @@ -684,7 +686,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) chanstat = le16_to_cpu(rxhdr->format_351.channel); break; } - phytype = chanstat & B43_RX_CHAN_PHYTYPE; if (unlikely(macstat & B43_RX_MAC_FCSERR)) { dev->wl->ieee_stats.dot11FCSErrorCount++; @@ -755,7 +756,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) else status.signal = max(rxhdr->power0, rxhdr->power1); break; - case B43_PHYTYPE_A: case B43_PHYTYPE_B: case B43_PHYTYPE_G: case B43_PHYTYPE_LP: @@ -802,14 +802,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT; switch (chanstat & B43_RX_CHAN_PHYTYPE) { - case B43_PHYTYPE_A: - status.band = NL80211_BAND_5GHZ; - B43_WARN_ON(1); - /* FIXME: We don't really know which value the "chanid" contains. - * So the following assignment might be wrong. 
*/ - status.freq = - ieee80211_channel_to_frequency(chanid, status.band); - break; case B43_PHYTYPE_G: status.band = NL80211_BAND_2GHZ; /* Somewhere between 478.104 and 508.1084 firmware for G-PHY diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index c7550dab6..f549c2560 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -166,41 +166,45 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev) sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler); sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler); sdio_release_host(sdiodev->func[1]); + sdiodev->sd_irq_requested = true; } return 0; } -int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev) +void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev) { - struct brcmfmac_sdio_pd *pdata; - brcmf_dbg(SDIO, "Entering\n"); + brcmf_dbg(SDIO, "Entering oob=%d sd=%d\n", + sdiodev->oob_irq_requested, + sdiodev->sd_irq_requested); - pdata = &sdiodev->settings->bus.sdio; - if (pdata->oob_irq_supported) { + if (sdiodev->oob_irq_requested) { + struct brcmfmac_sdio_pd *pdata; + + pdata = &sdiodev->settings->bus.sdio; sdio_claim_host(sdiodev->func[1]); brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL); brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL); sdio_release_host(sdiodev->func[1]); - if (sdiodev->oob_irq_requested) { - sdiodev->oob_irq_requested = false; - if (sdiodev->irq_wake) { - disable_irq_wake(pdata->oob_irq_nr); - sdiodev->irq_wake = false; - } - free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev); - sdiodev->irq_en = false; + sdiodev->oob_irq_requested = false; + if (sdiodev->irq_wake) { + disable_irq_wake(pdata->oob_irq_nr); + sdiodev->irq_wake = false; } - } else { + free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev); + sdiodev->irq_en = false; + sdiodev->oob_irq_requested = false; + } + + if (sdiodev->sd_irq_requested) { sdio_claim_host(sdiodev->func[1]); sdio_release_irq(sdiodev->func[2]); sdio_release_irq(sdiodev->func[1]); sdio_release_host(sdiodev->func[1]); + sdiodev->sd_irq_requested = false; } - - return 0; } void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev, @@ -722,8 +726,10 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev, return -ENOMEM; err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr, glom_skb); - if (err) + if (err) { + brcmu_pkt_buf_free_skb(glom_skb); goto done; + } skb_queue_walk(pktq, skb) { memcpy(skb->data, glom_skb->data, skb->len); @@ -1197,12 +1203,17 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func) brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device); brcmf_dbg(SDIO, "Function: %d\n", func->num); - if (func->num != 1) - return; - bus_if = dev_get_drvdata(&func->dev); if (bus_if) { sdiodev = bus_if->bus_priv.sdio; + + /* start by unregistering irqs */ + brcmf_sdiod_intr_unregister(sdiodev); + + if (func->num != 1) + return; + + /* only proceed with rest of cleanup if func 1 */ brcmf_sdiod_remove(sdiodev); dev_set_drvdata(&sdiodev->func[1]->dev, NULL); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 121baba7a..b8aec5e5e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -541,6 +541,21 @@ brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev) ADDR_INDIRECT); } 
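In the bcmsdh.c hunks above, interrupt teardown is now keyed off what was actually requested: brcmf_sdiod_intr_register() records sd_irq_requested or oob_irq_requested, and brcmf_sdiod_intr_unregister() only undoes the path whose flag is set, clearing it afterwards so a second call is a no-op. A minimal sketch of that flag-guarded idiom; request_foo()/release_foo() are hypothetical placeholders, not driver APIs:

#include <linux/types.h>

static int request_foo(void);	/* hypothetical resource request */
static void release_foo(void);	/* hypothetical resource release */

struct foo_state {
	bool requested;
};

static int foo_setup(struct foo_state *st)
{
	int err = request_foo();

	if (err)
		return err;
	st->requested = true;	/* remember that teardown is owed */
	return 0;
}

static void foo_teardown(struct foo_state *st)
{
	if (!st->requested)
		return;		/* nothing was set up, nothing to undo */
	release_foo();
	st->requested = false;	/* idempotent: a second call is a no-op */
}

This idempotence is what lets brcmf_ops_sdio_remove() above call brcmf_sdiod_intr_unregister() for every SDIO function before bailing out on func->num != 1.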
+static int brcmf_get_first_free_bsscfgidx(struct brcmf_pub *drvr) +{ + int bsscfgidx; + + for (bsscfgidx = 0; bsscfgidx < BRCMF_MAX_IFS; bsscfgidx++) { + /* bsscfgidx 1 is reserved for legacy P2P */ + if (bsscfgidx == 1) + continue; + if (!drvr->iflist[bsscfgidx]) + return bsscfgidx; + } + + return -ENOMEM; +} + static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp) { struct brcmf_mbss_ssid_le mbss_ssid_le; @@ -548,7 +563,7 @@ static int brcmf_cfg80211_request_ap_if(struct brcmf_if *ifp) int err; memset(&mbss_ssid_le, 0, sizeof(mbss_ssid_le)); - bsscfgidx = brcmf_get_next_free_bsscfgidx(ifp->drvr); + bsscfgidx = brcmf_get_first_free_bsscfgidx(ifp->drvr); if (bsscfgidx < 0) return bsscfgidx; @@ -586,7 +601,7 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name, brcmf_dbg(INFO, "Adding vif \"%s\"\n", name); - vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_AP, false); + vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_AP); if (IS_ERR(vif)) return (struct wireless_dev *)vif; @@ -669,20 +684,24 @@ static struct wireless_dev *brcmf_cfg80211_add_iface(struct wiphy *wiphy, return ERR_PTR(-EOPNOTSUPP); case NL80211_IFTYPE_AP: wdev = brcmf_ap_add_vif(wiphy, name, flags, params); - if (!IS_ERR(wdev)) - brcmf_cfg80211_update_proto_addr_mode(wdev); - return wdev; + break; case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_P2P_DEVICE: wdev = brcmf_p2p_add_vif(wiphy, name, name_assign_type, type, flags, params); - if (!IS_ERR(wdev)) - brcmf_cfg80211_update_proto_addr_mode(wdev); - return wdev; + break; case NL80211_IFTYPE_UNSPECIFIED: default: return ERR_PTR(-EINVAL); } + + if (IS_ERR(wdev)) + brcmf_err("add iface %s type %d failed: err=%d\n", + name, type, (int)PTR_ERR(wdev)); + else + brcmf_cfg80211_update_proto_addr_mode(wdev); + + return wdev; } static void brcmf_scan_config_mpc(struct brcmf_if *ifp, int mpc) @@ -756,9 +775,13 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, if (!aborted) cfg80211_sched_scan_results(cfg_to_wiphy(cfg)); } else if (scan_request) { + struct cfg80211_scan_info info = { + .aborted = aborted, + }; + brcmf_dbg(SCAN, "ESCAN Completed scan: %s\n", aborted ? 
"Aborted" : "Done"); - cfg80211_scan_done(scan_request, aborted); + cfg80211_scan_done(scan_request, &info); } if (!test_and_clear_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) brcmf_dbg(SCAN, "Scan complete, probably P2P scan\n"); @@ -766,12 +789,48 @@ s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, return err; } +static int brcmf_cfg80211_del_ap_iface(struct wiphy *wiphy, + struct wireless_dev *wdev) +{ + struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); + struct net_device *ndev = wdev->netdev; + struct brcmf_if *ifp = netdev_priv(ndev); + int ret; + int err; + + brcmf_cfg80211_arm_vif_event(cfg, ifp->vif); + + err = brcmf_fil_bsscfg_data_set(ifp, "interface_remove", NULL, 0); + if (err) { + brcmf_err("interface_remove failed %d\n", err); + goto err_unarm; + } + + /* wait for firmware event */ + ret = brcmf_cfg80211_wait_vif_event(cfg, BRCMF_E_IF_DEL, + BRCMF_VIF_EVENT_TIMEOUT); + if (!ret) { + brcmf_err("timeout occurred\n"); + err = -EIO; + goto err_unarm; + } + + brcmf_remove_interface(ifp, true); + +err_unarm: + brcmf_cfg80211_arm_vif_event(cfg, NULL); + return err; +} + static int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev) { struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); struct net_device *ndev = wdev->netdev; + if (ndev && ndev == cfg_to_ndev(cfg)) + return -ENOTSUPP; + /* vif event pending in firmware */ if (brcmf_cfg80211_vif_event_armed(cfg)) return -EBUSY; @@ -788,12 +847,13 @@ int brcmf_cfg80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev) switch (wdev->iftype) { case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_STATION: - case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_MESH_POINT: return -EOPNOTSUPP; + case NL80211_IFTYPE_AP: + return brcmf_cfg80211_del_ap_iface(wiphy, wdev); case NL80211_IFTYPE_P2P_CLIENT: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_P2P_DEVICE: @@ -2750,7 +2810,7 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg, if (!bi->ctl_ch) { ch.chspec = le16_to_cpu(bi->chanspec); cfg->d11inf.decchspec(&ch); - bi->ctl_ch = ch.chnum; + bi->ctl_ch = ch.control_ch_num; } channel = bi->ctl_ch; @@ -2868,7 +2928,7 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg, else band = wiphy->bands[NL80211_BAND_5GHZ]; - freq = ieee80211_channel_to_frequency(ch.chnum, band->band); + freq = ieee80211_channel_to_frequency(ch.control_ch_num, band->band); cfg->channel = freq; notify_channel = ieee80211_get_channel(wiphy, freq); @@ -2878,7 +2938,7 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg, notify_ielen = le32_to_cpu(bi->ie_length); notify_signal = (s16)le16_to_cpu(bi->RSSI) * 100; - brcmf_dbg(CONN, "channel: %d(%d)\n", ch.chnum, freq); + brcmf_dbg(CONN, "channel: %d(%d)\n", ch.control_ch_num, freq); brcmf_dbg(CONN, "capability: %X\n", notify_capability); brcmf_dbg(CONN, "beacon interval: %d\n", notify_interval); brcmf_dbg(CONN, "signal: %d\n", notify_signal); @@ -4439,7 +4499,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, struct brcmf_join_params join_params; enum nl80211_iftype dev_role; struct brcmf_fil_bss_enable_le bss_enable; - u16 chanspec; + u16 chanspec = chandef_to_chanspec(&cfg->d11inf, &settings->chandef); bool mbss; int is_11d; @@ -4515,16 +4575,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, brcmf_config_ap_mgmt_ie(ifp->vif, &settings->beacon); + /* Parameters shared by all radio interfaces */ if (!mbss) { - chanspec = 
chandef_to_chanspec(&cfg->d11inf, - &settings->chandef); - err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec); - if (err < 0) { - brcmf_err("Set Channel failed: chspec=%d, %d\n", - chanspec, err); - goto exit; - } - if (is_11d != ifp->vif->is_11d) { err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY, is_11d); @@ -4572,6 +4624,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, err = -EINVAL; goto exit; } + + /* Interface specific setup */ if (dev_role == NL80211_IFTYPE_AP) { if ((brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS)) && (!mbss)) brcmf_fil_iovar_int_set(ifp, "mbss", 1); @@ -4581,6 +4635,17 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, brcmf_err("setting AP mode failed %d\n", err); goto exit; } + if (!mbss) { + /* Firmware 10.x requires setting channel after enabling + * AP and before bringing interface up. + */ + err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec); + if (err < 0) { + brcmf_err("Set Channel failed: chspec=%d, %d\n", + chanspec, err); + goto exit; + } + } err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 1); if (err < 0) { brcmf_err("BRCMF_C_UP error (%d)\n", err); @@ -4601,8 +4666,23 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, brcmf_err("SET SSID error (%d)\n", err); goto exit; } + + if (settings->hidden_ssid) { + err = brcmf_fil_iovar_int_set(ifp, "closednet", 1); + if (err) { + brcmf_err("closednet error (%d)\n", err); + goto exit; + } + } + brcmf_dbg(TRACE, "AP mode configuration complete\n"); - } else { + } else if (dev_role == NL80211_IFTYPE_P2P_GO) { + err = brcmf_fil_iovar_int_set(ifp, "chanspec", chanspec); + if (err < 0) { + brcmf_err("Set Channel failed: chspec=%d, %d\n", + chanspec, err); + goto exit; + } err = brcmf_fil_bsscfg_data_set(ifp, "ssid", &ssid_le, sizeof(ssid_le)); if (err < 0) { @@ -4619,7 +4699,10 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, } brcmf_dbg(TRACE, "GO mode configuration complete\n"); + } else { + WARN_ON(1); } + set_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); brcmf_net_setcarrier(ifp, true); @@ -4650,6 +4733,10 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) return err; } + /* First BSS doesn't get a full reset */ + if (ifp->bsscfgidx == 0) + brcmf_fil_iovar_int_set(ifp, "closednet", 0); + memset(&join_params, 0, sizeof(join_params)); err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID, &join_params, sizeof(join_params)); @@ -4908,6 +4995,68 @@ exit: return err; } +static int brcmf_cfg80211_get_channel(struct wiphy *wiphy, + struct wireless_dev *wdev, + struct cfg80211_chan_def *chandef) +{ + struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); + struct net_device *ndev = wdev->netdev; + struct brcmf_if *ifp; + struct brcmu_chan ch; + enum nl80211_band band = 0; + enum nl80211_chan_width width = 0; + u32 chanspec; + int freq, err; + + if (!ndev) + return -ENODEV; + ifp = netdev_priv(ndev); + + err = brcmf_fil_iovar_int_get(ifp, "chanspec", &chanspec); + if (err) { + brcmf_err("chanspec failed (%d)\n", err); + return err; + } + + ch.chspec = chanspec; + cfg->d11inf.decchspec(&ch); + + switch (ch.band) { + case BRCMU_CHAN_BAND_2G: + band = NL80211_BAND_2GHZ; + break; + case BRCMU_CHAN_BAND_5G: + band = NL80211_BAND_5GHZ; + break; + } + + switch (ch.bw) { + case BRCMU_CHAN_BW_80: + width = NL80211_CHAN_WIDTH_80; + break; + case BRCMU_CHAN_BW_40: + width = NL80211_CHAN_WIDTH_40; + break; + case BRCMU_CHAN_BW_20: + width = NL80211_CHAN_WIDTH_20; + break; + case 
BRCMU_CHAN_BW_80P80: + width = NL80211_CHAN_WIDTH_80P80; + break; + case BRCMU_CHAN_BW_160: + width = NL80211_CHAN_WIDTH_160; + break; + } + + freq = ieee80211_channel_to_frequency(ch.control_ch_num, band); + chandef->chan = ieee80211_get_channel(wiphy, freq); + chandef->width = width; + chandef->center_freq1 = ieee80211_channel_to_frequency(ch.chnum, band); + chandef->center_freq2 = 0; + + return 0; +} + static int brcmf_cfg80211_crit_proto_start(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_crit_proto_id proto, @@ -5070,6 +5219,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = { .mgmt_tx = brcmf_cfg80211_mgmt_tx, .remain_on_channel = brcmf_p2p_remain_on_channel, .cancel_remain_on_channel = brcmf_cfg80211_cancel_remain_on_channel, + .get_channel = brcmf_cfg80211_get_channel, .start_p2p_device = brcmf_p2p_start_device, .stop_p2p_device = brcmf_p2p_stop_device, .crit_proto_start = brcmf_cfg80211_crit_proto_start, @@ -5078,8 +5228,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = { }; struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, - enum nl80211_iftype type, - bool pm_block) + enum nl80211_iftype type) { struct brcmf_cfg80211_vif *vif_walk; struct brcmf_cfg80211_vif *vif; @@ -5094,8 +5243,6 @@ struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, vif->wdev.wiphy = cfg->wiphy; vif->wdev.iftype = type; - vif->pm_block = pm_block; - brcmf_init_prof(&vif->profile); if (type == NL80211_IFTYPE_AP) { @@ -5296,7 +5443,7 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg, else band = wiphy->bands[NL80211_BAND_5GHZ]; - freq = ieee80211_channel_to_frequency(ch.chnum, band->band); + freq = ieee80211_channel_to_frequency(ch.control_ch_num, band->band); notify_channel = ieee80211_get_channel(wiphy, freq); done: @@ -5352,7 +5499,6 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg, struct net_device *ndev, const struct brcmf_event_msg *e, void *data) { - struct brcmf_if *ifp = netdev_priv(ndev); static int generation; u32 event = e->event_code; u32 reason = e->reason; @@ -5363,8 +5509,6 @@ brcmf_notify_connect_status_ap(struct brcmf_cfg80211_info *cfg, ndev != cfg_to_ndev(cfg)) { brcmf_dbg(CONN, "AP mode link down\n"); complete(&cfg->vif_disabled); - if (ifp->vif->mbss) - brcmf_remove_interface(ifp); return 0; } @@ -5491,7 +5635,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp, ifevent->action, ifevent->flags, ifevent->ifidx, ifevent->bsscfgidx); - mutex_lock(&event->vif_event_lock); + spin_lock(&event->vif_event_lock); event->action = ifevent->action; vif = event->vif; @@ -5499,7 +5643,7 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp, case BRCMF_E_IF_ADD: /* waiting process may have timed out */ if (!cfg->vif_event.vif) { - mutex_unlock(&event->vif_event_lock); + spin_unlock(&event->vif_event_lock); return -EBADF; } @@ -5510,24 +5654,24 @@ static s32 brcmf_notify_vif_event(struct brcmf_if *ifp, ifp->ndev->ieee80211_ptr = &vif->wdev; SET_NETDEV_DEV(ifp->ndev, wiphy_dev(cfg->wiphy)); } - mutex_unlock(&event->vif_event_lock); + spin_unlock(&event->vif_event_lock); wake_up(&event->vif_wq); return 0; case BRCMF_E_IF_DEL: - mutex_unlock(&event->vif_event_lock); + spin_unlock(&event->vif_event_lock); /* event may not be upon user request */ if (brcmf_cfg80211_vif_event_armed(cfg)) wake_up(&event->vif_wq); return 0; case BRCMF_E_IF_CHANGE: - mutex_unlock(&event->vif_event_lock); + spin_unlock(&event->vif_event_lock); wake_up(&event->vif_wq); return 0; default: - mutex_unlock(&event->vif_event_lock); 
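The switch from a mutex to a spinlock for vif_event_lock (this hunk and the ones below) is consistent with what the code does: every protected section only copies or tests a few fields and then wakes vif_wq, so nothing sleeps under the lock. For orientation, the waiter side these handlers pair with, condensed from the brcmf_cfg80211_del_ap_iface() added earlier in this patch (error handling trimmed):

/* Waiter side of the vif-event handshake, condensed from
 * brcmf_cfg80211_del_ap_iface() above; not new code.
 */
brcmf_cfg80211_arm_vif_event(cfg, ifp->vif);	/* takes vif_event_lock briefly */
err = brcmf_fil_bsscfg_data_set(ifp, "interface_remove", NULL, 0);
if (!err && !brcmf_cfg80211_wait_vif_event(cfg, BRCMF_E_IF_DEL,
					   BRCMF_VIF_EVENT_TIMEOUT))
	err = -EIO;	/* handler never recorded BRCMF_E_IF_DEL */
brcmf_cfg80211_arm_vif_event(cfg, NULL);	/* disarm */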
+ spin_unlock(&event->vif_event_lock); break; } return -EINVAL; @@ -5648,7 +5792,7 @@ static void wl_deinit_priv(struct brcmf_cfg80211_info *cfg) static void init_vif_event(struct brcmf_cfg80211_vif_event *event) { init_waitqueue_head(&event->vif_wq); - mutex_init(&event->vif_event_lock); + spin_lock_init(&event->vif_event_lock); } static s32 brcmf_dongle_roam(struct brcmf_if *ifp) @@ -5818,14 +5962,15 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, channel = band->channels; index = band->n_channels; for (j = 0; j < band->n_channels; j++) { - if (channel[j].hw_value == ch.chnum) { + if (channel[j].hw_value == ch.control_ch_num) { index = j; break; } } channel[index].center_freq = - ieee80211_channel_to_frequency(ch.chnum, band->band); - channel[index].hw_value = ch.chnum; + ieee80211_channel_to_frequency(ch.control_ch_num, + band->band); + channel[index].hw_value = ch.control_ch_num; /* assuming the chanspecs order is HT20, * HT40 upper, HT40 lower, and VHT80. @@ -5927,7 +6072,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg) if (WARN_ON(ch.bw != BRCMU_CHAN_BW_40)) continue; for (j = 0; j < band->n_channels; j++) { - if (band->channels[j].hw_value == ch.chnum) + if (band->channels[j].hw_value == ch.control_ch_num) break; } if (WARN_ON(j == band->n_channels)) @@ -6193,29 +6338,15 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp) if (!combo) goto err; - c0_limits = kcalloc(p2p ? 3 : 2, sizeof(*c0_limits), GFP_KERNEL); - if (!c0_limits) - goto err; - - if (p2p) { - p2p_limits = kcalloc(4, sizeof(*p2p_limits), GFP_KERNEL); - if (!p2p_limits) - goto err; - } - - if (mbss) { - mbss_limits = kcalloc(1, sizeof(*mbss_limits), GFP_KERNEL); - if (!mbss_limits) - goto err; - } - wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP); c = 0; i = 0; - combo[c].num_different_channels = 1; + c0_limits = kcalloc(p2p ? 
3 : 2, sizeof(*c0_limits), GFP_KERNEL); + if (!c0_limits) + goto err; c0_limits[i].max = 1; c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION); if (p2p) { @@ -6233,6 +6364,7 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp) c0_limits[i].max = 1; c0_limits[i++].types = BIT(NL80211_IFTYPE_AP); } + combo[c].num_different_channels = 1; combo[c].max_interfaces = i; combo[c].n_limits = i; combo[c].limits = c0_limits; @@ -6240,7 +6372,9 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp) if (p2p) { c++; i = 0; - combo[c].num_different_channels = 1; + p2p_limits = kcalloc(4, sizeof(*p2p_limits), GFP_KERNEL); + if (!p2p_limits) + goto err; p2p_limits[i].max = 1; p2p_limits[i++].types = BIT(NL80211_IFTYPE_STATION); p2p_limits[i].max = 1; @@ -6249,6 +6383,7 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp) p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT); p2p_limits[i].max = 1; p2p_limits[i++].types = BIT(NL80211_IFTYPE_P2P_DEVICE); + combo[c].num_different_channels = 1; combo[c].max_interfaces = i; combo[c].n_limits = i; combo[c].limits = p2p_limits; @@ -6256,14 +6391,19 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp) if (mbss) { c++; + i = 0; + mbss_limits = kcalloc(1, sizeof(*mbss_limits), GFP_KERNEL); + if (!mbss_limits) + goto err; + mbss_limits[i].max = 4; + mbss_limits[i++].types = BIT(NL80211_IFTYPE_AP); combo[c].beacon_int_infra_match = true; combo[c].num_different_channels = 1; - mbss_limits[0].max = 4; - mbss_limits[0].types = BIT(NL80211_IFTYPE_AP); combo[c].max_interfaces = 4; - combo[c].n_limits = 1; + combo[c].n_limits = i; combo[c].limits = mbss_limits; } + wiphy->n_iface_combinations = n_combos; wiphy->iface_combinations = combo; return 0; @@ -6551,9 +6691,9 @@ static inline bool vif_event_equals(struct brcmf_cfg80211_vif_event *event, { u8 evt_action; - mutex_lock(&event->vif_event_lock); + spin_lock(&event->vif_event_lock); evt_action = event->action; - mutex_unlock(&event->vif_event_lock); + spin_unlock(&event->vif_event_lock); return evt_action == action; } @@ -6562,10 +6702,10 @@ void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg, { struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; - mutex_lock(&event->vif_event_lock); + spin_lock(&event->vif_event_lock); event->vif = vif; event->action = 0; - mutex_unlock(&event->vif_event_lock); + spin_unlock(&event->vif_event_lock); } bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg) @@ -6573,9 +6713,9 @@ bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg) struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; bool armed; - mutex_lock(&event->vif_event_lock); + spin_lock(&event->vif_event_lock); armed = event->vif != NULL; - mutex_unlock(&event->vif_event_lock); + spin_unlock(&event->vif_event_lock); return armed; } @@ -6715,11 +6855,10 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, return NULL; } - ops = kzalloc(sizeof(*ops), GFP_KERNEL); + ops = kmemdup(&brcmf_cfg80211_ops, sizeof(*ops), GFP_KERNEL); if (!ops) return NULL; - memcpy(ops, &brcmf_cfg80211_ops, sizeof(*ops)); ifp = netdev_priv(ndev); #ifdef CONFIG_PM if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK)) @@ -6740,7 +6879,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, init_vif_event(&cfg->vif_event); INIT_LIST_HEAD(&cfg->vif_list); - vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION, false); + vif = brcmf_alloc_vif(cfg, NL80211_IFTYPE_STATION); if 
(IS_ERR(vif)) goto wiphy_out; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h index 95e35bcc1..8889832c1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h @@ -20,6 +20,10 @@ /* for brcmu_d11inf */ #include <brcmu_d11.h> +#include "core.h" +#include "fwil_types.h" +#include "p2p.h" + #define WL_NUM_SCAN_MAX 10 #define WL_TLV_INFO_MAX 1024 #define WL_BSS_INFO_MAX 2048 @@ -167,7 +171,6 @@ struct vif_saved_ie { * @wdev: wireless device. * @profile: profile information. * @sme_state: SME state using enum brcmf_vif_status bits. - * @pm_block: power-management blocked. * @list: linked list. * @mgmt_rx_reg: registered rx mgmt frame types. * @mbss: Multiple BSS type, set if not first AP (not relevant for P2P). @@ -177,7 +180,6 @@ struct brcmf_cfg80211_vif { struct wireless_dev wdev; struct brcmf_cfg80211_profile profile; unsigned long sme_state; - bool pm_block; struct vif_saved_ie saved_ie; struct list_head list; u16 mgmt_rx_reg; @@ -225,7 +227,7 @@ struct escan_info { */ struct brcmf_cfg80211_vif_event { wait_queue_head_t vif_wq; - struct mutex vif_event_lock; + spinlock_t vif_event_lock; u8 action; struct brcmf_cfg80211_vif *vif; }; @@ -388,8 +390,7 @@ s32 brcmf_cfg80211_down(struct net_device *ndev); enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp); struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, - enum nl80211_iftype type, - bool pm_block); + enum nl80211_iftype type); void brcmf_free_vif(struct brcmf_cfg80211_vif *vif); s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index d3fd6b1db..05f22ff81 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -685,6 +685,8 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci) case BRCM_CC_43602_CHIP_ID: case BRCM_CC_4371_CHIP_ID: return 0x180000; + case BRCM_CC_43465_CHIP_ID: + case BRCM_CC_43525_CHIP_ID: case BRCM_CC_4365_CHIP_ID: case BRCM_CC_4366_CHIP_ID: return 0x200000; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index b590499f6..65e8c8766 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -516,7 +516,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked) /* set appropriate operations */ ndev->netdev_ops = &brcmf_netdev_ops_pri; - ndev->hard_header_len += drvr->hdrlen; + ndev->needed_headroom += drvr->hdrlen; ndev->ethtool_ops = &brcmf_ethtool_ops; drvr->rxsz = ndev->mtu + ndev->hard_header_len + @@ -548,12 +548,16 @@ fail: return -EBADE; } -static void brcmf_net_detach(struct net_device *ndev) +static void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked) { - if (ndev->reg_state == NETREG_REGISTERED) - unregister_netdev(ndev); - else + if (ndev->reg_state == NETREG_REGISTERED) { + if (rtnl_locked) + unregister_netdevice(ndev); + else + unregister_netdev(ndev); + } else { brcmf_cfg80211_free_netdev(ndev); + } } void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on) @@ -634,7 +638,7 @@ fail: } struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx, - bool is_p2pdev, char *name, u8 
*mac_addr) + bool is_p2pdev, const char *name, u8 *mac_addr) { struct brcmf_if *ifp; struct net_device *ndev; @@ -651,7 +655,7 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx, brcmf_err("ERROR: netdev:%s already exists\n", ifp->ndev->name); netif_stop_queue(ifp->ndev); - brcmf_net_detach(ifp->ndev); + brcmf_net_detach(ifp->ndev, false); drvr->iflist[bsscfgidx] = NULL; } else { brcmf_dbg(INFO, "netdev:%s ignore IF event\n", @@ -699,7 +703,8 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx, return ifp; } -static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx) +static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx, + bool rtnl_locked) { struct brcmf_if *ifp; @@ -729,7 +734,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx) cancel_work_sync(&ifp->multicast_work); cancel_work_sync(&ifp->ndoffload_work); } - brcmf_net_detach(ifp->ndev); + brcmf_net_detach(ifp->ndev, rtnl_locked); } else { /* Only p2p device interfaces which get dynamically created * end up here. In this case the p2p module should be informed @@ -738,43 +743,19 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx) * serious troublesome side effects. The p2p module will clean * up the ifp if needed. */ - brcmf_p2p_ifp_removed(ifp); + brcmf_p2p_ifp_removed(ifp, rtnl_locked); kfree(ifp); } } -void brcmf_remove_interface(struct brcmf_if *ifp) +void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked) { if (!ifp || WARN_ON(ifp->drvr->iflist[ifp->bsscfgidx] != ifp)) return; brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", ifp->bsscfgidx, ifp->ifidx); brcmf_fws_del_interface(ifp); - brcmf_del_if(ifp->drvr, ifp->bsscfgidx); -} - -int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr) -{ - int ifidx; - int bsscfgidx; - bool available; - int highest; - - available = false; - bsscfgidx = 2; - highest = 2; - for (ifidx = 0; ifidx < BRCMF_MAX_IFS; ifidx++) { - if (drvr->iflist[ifidx]) { - if (drvr->iflist[ifidx]->bsscfgidx == bsscfgidx) - bsscfgidx = highest + 1; - else if (drvr->iflist[ifidx]->bsscfgidx > highest) - highest = drvr->iflist[ifidx]->bsscfgidx; - } else { - available = true; - } - } - - return available ? 
bsscfgidx : -ENOMEM; + brcmf_del_if(ifp->drvr, ifp->bsscfgidx, rtnl_locked); } #ifdef CONFIG_INET @@ -1081,9 +1062,9 @@ fail: brcmf_fws_deinit(drvr); } if (ifp) - brcmf_net_detach(ifp->ndev); + brcmf_net_detach(ifp->ndev, false); if (p2p_ifp) - brcmf_net_detach(p2p_ifp->ndev); + brcmf_net_detach(p2p_ifp->ndev, false); drvr->iflist[0] = NULL; drvr->iflist[1] = NULL; if (drvr->settings->ignore_probe_fail) @@ -1152,7 +1133,7 @@ void brcmf_detach(struct device *dev) /* make sure primary interface removed last */ for (i = BRCMF_MAX_IFS-1; i > -1; i--) - brcmf_remove_interface(drvr->iflist[i]); + brcmf_remove_interface(drvr->iflist[i], false); brcmf_cfg80211_detach(drvr->config); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index 647d3cc2a..8fa34cad5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -215,9 +215,8 @@ char *brcmf_ifname(struct brcmf_if *ifp); struct brcmf_if *brcmf_get_ifp(struct brcmf_pub *drvr, int ifidx); int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked); struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bsscfgidx, s32 ifidx, - bool is_p2pdev, char *name, u8 *mac_addr); -void brcmf_remove_interface(struct brcmf_if *ifp); -int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr); + bool is_p2pdev, const char *name, u8 *mac_addr); +void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked); void brcmf_txflowblock_if(struct brcmf_if *ifp, enum brcmf_netif_stop_reason reason, bool state); void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index b39056125..79c081fd5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c @@ -18,6 +18,7 @@ #include "brcmu_wifi.h" #include "brcmu_utils.h" +#include "cfg80211.h" #include "core.h" #include "debug.h" #include "tracepoint.h" @@ -182,8 +183,13 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr, err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data); - if (ifp && ifevent->action == BRCMF_E_IF_DEL) - brcmf_remove_interface(ifp); + if (ifp && ifevent->action == BRCMF_E_IF_DEL) { + bool armed = brcmf_cfg80211_vif_event_armed(drvr->config); + + /* Default handling in case no-one waits for this event */ + if (!armed) + brcmf_remove_interface(ifp, false); + } } /** diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 2ce319903..9f9024a7b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -2101,7 +2101,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb) brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto)); /* determine the priority */ - if (!skb->priority) + if ((skb->priority == 0) || (skb->priority > 7)) skb->priority = cfg80211_classify8021d(skb, NULL); drvr->tx_multicast += !!multicast; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index a70cda6c0..de19c7c92 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -1246,7 +1246,7 @@ bool 
brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg, if (!bi->ctl_ch) { ch.chspec = le16_to_cpu(bi->chanspec); cfg->d11inf.decchspec(&ch); - bi->ctl_ch = ch.chnum; + bi->ctl_ch = ch.control_ch_num; } afx_hdl->peer_chan = bi->ctl_ch; brcmf_dbg(TRACE, "ACTION FRAME SCAN : Peer %pM found, channel : %d\n", @@ -1385,7 +1385,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp, if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) && (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) { - afx_hdl->peer_chan = ch.chnum; + afx_hdl->peer_chan = ch.control_ch_num; brcmf_dbg(INFO, "GON request: Peer found, channel=%d\n", afx_hdl->peer_chan); complete(&afx_hdl->act_frm_scan); @@ -1428,7 +1428,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp, memcpy(&mgmt_frame->u, frame, mgmt_frame_len); mgmt_frame_len += offsetof(struct ieee80211_mgmt, u); - freq = ieee80211_channel_to_frequency(ch.chnum, + freq = ieee80211_channel_to_frequency(ch.control_ch_num, ch.band == BRCMU_CHAN_BAND_2G ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ); @@ -1873,7 +1873,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp, if (test_bit(BRCMF_P2P_STATUS_FINDING_COMMON_CHANNEL, &p2p->status) && (ether_addr_equal(afx_hdl->tx_dst_addr, e->addr))) { - afx_hdl->peer_chan = ch.chnum; + afx_hdl->peer_chan = ch.control_ch_num; brcmf_dbg(INFO, "PROBE REQUEST: Peer found, channel=%d\n", afx_hdl->peer_chan); complete(&afx_hdl->act_frm_scan); @@ -1898,7 +1898,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp, mgmt_frame = (u8 *)(rxframe + 1); mgmt_frame_len = e->datalen - sizeof(*rxframe); - freq = ieee80211_channel_to_frequency(ch.chnum, + freq = ieee80211_channel_to_frequency(ch.control_ch_num, ch.band == BRCMU_CHAN_BAND_2G ? 
NL80211_BAND_2GHZ : NL80211_BAND_5GHZ); @@ -2030,8 +2030,6 @@ static int brcmf_p2p_request_p2p_if(struct brcmf_p2p_info *p2p, err = brcmf_fil_iovar_data_set(ifp, "p2p_ifadd", &if_request, sizeof(if_request)); - if (err) - return err; return err; } @@ -2076,8 +2074,7 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, if (p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif) return ERR_PTR(-ENOSPC); - p2p_vif = brcmf_alloc_vif(p2p->cfg, NL80211_IFTYPE_P2P_DEVICE, - false); + p2p_vif = brcmf_alloc_vif(p2p->cfg, NL80211_IFTYPE_P2P_DEVICE); if (IS_ERR(p2p_vif)) { brcmf_err("could not create discovery vif\n"); return (struct wireless_dev *)p2p_vif; @@ -2177,7 +2174,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, return ERR_PTR(-EOPNOTSUPP); } - vif = brcmf_alloc_vif(cfg, type, false); + vif = brcmf_alloc_vif(cfg, type); if (IS_ERR(vif)) return (struct wireless_dev *)vif; brcmf_cfg80211_arm_vif_event(cfg, vif); @@ -2264,6 +2261,8 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) return 0; brcmf_p2p_cancel_remain_on_channel(vif->ifp); brcmf_p2p_deinit_discovery(p2p); + break; + default: return -ENOTSUPP; } @@ -2289,8 +2288,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) else err = 0; } - if (err) - brcmf_remove_interface(vif->ifp); + brcmf_remove_interface(vif->ifp, true); brcmf_cfg80211_arm_vif_event(cfg, NULL); if (vif->wdev.iftype != NL80211_IFTYPE_P2P_DEVICE) @@ -2299,7 +2297,7 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) return err; } -void brcmf_p2p_ifp_removed(struct brcmf_if *ifp) +void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked) { struct brcmf_cfg80211_info *cfg; struct brcmf_cfg80211_vif *vif; @@ -2308,9 +2306,11 @@ void brcmf_p2p_ifp_removed(struct brcmf_if *ifp) vif = ifp->vif; cfg = wdev_to_cfg(&vif->wdev); cfg->p2p.bss_idx[P2PAPI_BSSCFG_DEVICE].vif = NULL; - rtnl_lock(); + if (!rtnl_locked) + rtnl_lock(); cfg80211_unregister_wdev(&vif->wdev); - rtnl_unlock(); + if (!rtnl_locked) + rtnl_unlock(); brcmf_free_vif(vif); } @@ -2396,7 +2396,7 @@ void brcmf_p2p_detach(struct brcmf_p2p_info *p2p) if (vif != NULL) { brcmf_p2p_cancel_remain_on_channel(vif->ifp); brcmf_p2p_deinit_discovery(p2p); - brcmf_remove_interface(vif->ifp); + brcmf_remove_interface(vif->ifp, false); } /* just set it all to zero */ memset(p2p, 0, sizeof(*p2p)); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h index a3bd18c23..8ce944753 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h @@ -155,7 +155,7 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev); int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg, enum brcmf_fil_p2p_if_types if_type); -void brcmf_p2p_ifp_removed(struct brcmf_if *ifp); +void brcmf_p2p_ifp_removed(struct brcmf_if *ifp, bool rtnl_locked); int brcmf_p2p_start_device(struct wiphy *wiphy, struct wireless_dev *wdev); void brcmf_p2p_stop_device(struct wiphy *wiphy, struct wireless_dev *wdev); int brcmf_p2p_scan_prep(struct wiphy *wiphy, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 099879ce8..b3b2c27e7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ 
b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -54,21 +54,25 @@ BRCMF_FW_NVRAM_DEF(43570, "/*(DEBLOBBED)*/", "brcmfmac43570-pcie.txt"); BRCMF_FW_NVRAM_DEF(4358, "/*(DEBLOBBED)*/", "brcmfmac4358-pcie.txt"); BRCMF_FW_NVRAM_DEF(4359, "/*(DEBLOBBED)*/", "brcmfmac4359-pcie.txt"); BRCMF_FW_NVRAM_DEF(4365B, "/*(DEBLOBBED)*/", "brcmfmac4365b-pcie.txt"); +BRCMF_FW_NVRAM_DEF(4365C, "/*(DEBLOBBED)*/", "brcmfmac4365c-pcie.txt"); BRCMF_FW_NVRAM_DEF(4366B, "/*(DEBLOBBED)*/", "brcmfmac4366b-pcie.txt"); BRCMF_FW_NVRAM_DEF(4366C, "/*(DEBLOBBED)*/", "brcmfmac4366c-pcie.txt"); BRCMF_FW_NVRAM_DEF(4371, "/*(DEBLOBBED)*/", "brcmfmac4371-pcie.txt"); static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFFF, 4365B), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371), diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index af4652047..88159a833 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -1384,8 +1384,7 @@ static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header, return -ENXIO; } if (rd->seq_num != rx_seq) { - brcmf_err("seq %d: sequence number error, expect %d\n", - rx_seq, rd->seq_num); + brcmf_dbg(SDIO, "seq %d, expected %d\n", rx_seq, rd->seq_num); bus->sdcnt.rx_badseq++; rd->seq_num = rx_seq; } @@ -3306,10 +3305,6 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus, goto err; } - /* Allow full data communication using DPC from now on. */ - brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA); - bcmerror = 0; - err: brcmf_sdio_clkctl(bus, CLK_SDONLY, false); sdio_release_host(bus->sdiodev->func[1]); @@ -3666,7 +3661,7 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev, str_shift = 11; break; default: - brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n", + brcmf_dbg(INFO, "No SDIO driver strength init needed for chip %s rev %d pmurev %d\n", ci->name, ci->chiprev, ci->pmurev); break; } @@ -4047,6 +4042,9 @@ static void brcmf_sdio_firmware_callback(struct device *dev, } if (err == 0) { + /* Allow full data communication using DPC from now on. 
*/ + brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DATA); + err = brcmf_sdiod_intr_register(sdiodev); if (err != 0) brcmf_err("intr register failed:%d\n", err); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h index dcf0ce8cd..f3da32fc6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h @@ -186,6 +186,7 @@ struct brcmf_sdio_dev { struct brcmf_bus *bus_if; struct brcmf_mp_device *settings; bool oob_irq_requested; + bool sd_irq_requested; bool irq_en; /* irq enable flags */ spinlock_t irq_en_lock; bool irq_wake; /* irq wake enable flags */ @@ -293,7 +294,7 @@ struct sdpcmd_regs { /* Register/deregister interrupt handler. */ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev); -int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev); +void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev); /* sdio device register access interface */ u8 brcmf_sdiod_regrb(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c index 796f5f9d5..b7df576bb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c @@ -1079,8 +1079,10 @@ bool dma_rxfill(struct dma_pub *pub) pa = dma_map_single(di->dmadev, p->data, di->rxbufsize, DMA_FROM_DEVICE); - if (dma_mapping_error(di->dmadev, pa)) + if (dma_mapping_error(di->dmadev, pa)) { + brcmu_pkt_buf_free_skb(p); return false; + } /* save the free packet pointer */ di->rxp[rxout] = p; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c index e16ee6063..c2a938b59 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c @@ -3349,8 +3349,8 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc) dma_rxfill(wlc_hw->di[RX_FIFO]); } -void -static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec) { +static void brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec) +{ u32 macintmask; bool fastclk; struct brcms_c_info *wlc = wlc_hw->wlc; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c index 99dac9b8a..b3aab2fe9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c @@ -27017,7 +27017,7 @@ wlc_phy_rxcal_gainctrl_nphy_rev5(struct brcms_phy *pi, u8 rx_core, tx_core = 1 - rx_core; num_samps = 1024; - desired_log2_pwr = (cal_type == 0) ? 
13 : 13; + desired_log2_pwr = 13; wlc_phy_rx_iq_coeffs_nphy(pi, 0, &save_comp); zero_comp.a0 = zero_comp.b0 = zero_comp.a1 = zero_comp.b1 = 0x0; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c index dd9162722..0ab865de1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.c @@ -87,7 +87,7 @@ void brcms_c_stf_ss_algo_channel_get(struct brcms_c_info *wlc, u16 *ss_algo_channel, u16 chanspec) { - struct tx_power power; + struct tx_power power = { }; u8 siso_mcs_id, cdd_mcs_id, stbc_mcs_id; /* Clear previous settings */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c index 2b2522bdd..d8b79cb72 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c @@ -107,6 +107,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch) u16 val; ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK); + ch->control_ch_num = ch->chnum; switch (ch->chspec & BRCMU_CHSPEC_D11N_BW_MASK) { case BRCMU_CHSPEC_D11N_BW_20: @@ -118,10 +119,10 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch) val = ch->chspec & BRCMU_CHSPEC_D11N_SB_MASK; if (val == BRCMU_CHSPEC_D11N_SB_L) { ch->sb = BRCMU_CHAN_SB_L; - ch->chnum -= CH_10MHZ_APART; + ch->control_ch_num -= CH_10MHZ_APART; } else { ch->sb = BRCMU_CHAN_SB_U; - ch->chnum += CH_10MHZ_APART; + ch->control_ch_num += CH_10MHZ_APART; } break; default: @@ -147,6 +148,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch) u16 val; ch->chnum = (u8)(ch->chspec & BRCMU_CHSPEC_CH_MASK); + ch->control_ch_num = ch->chnum; switch (ch->chspec & BRCMU_CHSPEC_D11AC_BW_MASK) { case BRCMU_CHSPEC_D11AC_BW_20: @@ -158,10 +160,10 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch) val = ch->chspec & BRCMU_CHSPEC_D11AC_SB_MASK; if (val == BRCMU_CHSPEC_D11AC_SB_L) { ch->sb = BRCMU_CHAN_SB_L; - ch->chnum -= CH_10MHZ_APART; + ch->control_ch_num -= CH_10MHZ_APART; } else if (val == BRCMU_CHSPEC_D11AC_SB_U) { ch->sb = BRCMU_CHAN_SB_U; - ch->chnum += CH_10MHZ_APART; + ch->control_ch_num += CH_10MHZ_APART; } else { WARN_ON_ONCE(1); } @@ -172,16 +174,16 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch) BRCMU_CHSPEC_D11AC_SB_SHIFT); switch (ch->sb) { case BRCMU_CHAN_SB_LL: - ch->chnum -= CH_30MHZ_APART; + ch->control_ch_num -= CH_30MHZ_APART; break; case BRCMU_CHAN_SB_LU: - ch->chnum -= CH_10MHZ_APART; + ch->control_ch_num -= CH_10MHZ_APART; break; case BRCMU_CHAN_SB_UL: - ch->chnum += CH_10MHZ_APART; + ch->control_ch_num += CH_10MHZ_APART; break; case BRCMU_CHAN_SB_UU: - ch->chnum += CH_30MHZ_APART; + ch->control_ch_num += CH_30MHZ_APART; break; default: WARN_ON_ONCE(1); diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h index 699f2c278..3cc42bef6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h @@ -40,7 +40,9 @@ #define BRCM_CC_4339_CHIP_ID 0x4339 #define BRCM_CC_43430_CHIP_ID 43430 #define BRCM_CC_4345_CHIP_ID 0x4345 +#define BRCM_CC_43465_CHIP_ID 43465 #define BRCM_CC_4350_CHIP_ID 0x4350 +#define BRCM_CC_43525_CHIP_ID 43525 #define BRCM_CC_4354_CHIP_ID 0x4354 #define BRCM_CC_4356_CHIP_ID 0x4356 #define BRCM_CC_43566_CHIP_ID 43566 diff --git 
a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h index f9745ea8b..8b8b2ecb3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_d11.h @@ -125,14 +125,36 @@ enum brcmu_chan_sb { BRCMU_CHAN_SB_UU = BRCMU_CHAN_SB_LUU, }; +/** + * struct brcmu_chan - stores channel formats + * + * This structure can be used with functions translating chanspec into generic + * channel info and the other way. + * + * @chspec: firmware specific format + * @chnum: center channel number + * @control_ch_num: control channel number + * @band: frequency band + * @bw: channel width + * @sb: control sideband (location of control channel against the center one) + */ struct brcmu_chan { u16 chspec; u8 chnum; + u8 control_ch_num; u8 band; enum brcmu_chan_bw bw; enum brcmu_chan_sb sb; }; +/** + * struct brcmu_d11inf - provides functions translating channel format + * + * @io_type: determines version of channel format used by firmware + * @encchspec: encodes channel info into a chanspec, requires center channel + * number, ignores control one + * @decchspec: decodes chanspec into generic info + */ struct brcmu_d11inf { u8 io_type; diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index ca3cd2102..69b826d22 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -1102,8 +1102,8 @@ static const char version[] = "airo.c 0.6 (Ben Reed & Javier Achirica)"; struct airo_info; static int get_dec_u16( char *buffer, int *start, int limit ); -static void OUT4500( struct airo_info *, u16 register, u16 value ); -static unsigned short IN4500( struct airo_info *, u16 register ); +static void OUT4500( struct airo_info *, u16 reg, u16 value ); +static unsigned short IN4500( struct airo_info *, u16 reg ); static u16 setup_card(struct airo_info*, u8 *mac, int lock); static int enable_MAC(struct airo_info *ai, int lock); static void disable_MAC(struct airo_info *ai, int lock); diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 9f02e7e78..85d91de1e 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -4093,7 +4093,7 @@ static const char *ipw_get_status_code(u16 status) return "Unknown status value."; } -static void inline average_init(struct average *avg) +static inline void average_init(struct average *avg) { memset(avg, 0, sizeof(*avg)); } diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index eb24b9241..140b6ea8f 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -1305,10 +1305,14 @@ il_send_scan_abort(struct il_priv *il) static void il_complete_scan(struct il_priv *il, bool aborted) { + struct cfg80211_scan_info info = { + .aborted = aborted, + }; + /* check if scan was requested from mac80211 */ if (il->scan_request) { D_SCAN("Complete scan in mac80211\n"); - ieee80211_scan_completed(il->hw, aborted); + ieee80211_scan_completed(il->hw, &info); } il->scan_vif = NULL; diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index 05828c61d..6e7ed908d 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -8,7 +8,7 @@ iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o iwlwifi-objs += 
iwl-phy-db.o iwl-nvm-parse.o iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o -iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o iwl-9000.o +iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o iwl-9000.o iwl-a000.o iwlwifi-objs += iwl-trans.o iwlwifi-objs += $(iwlwifi-m) diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c index 8dda52ae3..6c2d6da7e 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c @@ -205,23 +205,6 @@ static const __le32 iwlagn_def_3w_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = { cpu_to_le32(0xf0005000), }; - -/* Loose Coex */ -static const __le32 iwlagn_loose_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = { - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xaeaaaaaa), - cpu_to_le32(0xaaaaaaaa), - cpu_to_le32(0xcc00ff28), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0xcc00aaaa), - cpu_to_le32(0x0000aaaa), - cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), - cpu_to_le32(0xf0005000), - cpu_to_le32(0xf0005000), -}; - /* Full concurrency */ static const __le32 iwlagn_concurrent_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = { cpu_to_le32(0xaaaaaaaa), diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index 37b32a6f6..b49848683 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1317,6 +1317,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); switch (iwlwifi_mod_params.amsdu_size) { + case IWL_AMSDU_DEF: case IWL_AMSDU_4K: trans_cfg.rx_buf_size = IWL_AMSDU_4K; break; @@ -1336,6 +1337,8 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, trans_cfg.command_groups_size = ARRAY_SIZE(iwl_dvm_groups); trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM; + trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info, + driver_data[2]); WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE < priv->cfg->base_params->num_of_queues); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c index b22855218..087e57985 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c @@ -523,11 +523,6 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv, return ret; } - if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION && - priv->cfg->ht_params && priv->cfg->ht_params->smps_mode) - ieee80211_request_smps(ctx->vif, - priv->cfg->ht_params->smps_mode); - return 0; } diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c index d01766f16..17e6a3238 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c @@ -94,10 +94,14 @@ static int iwl_send_scan_abort(struct iwl_priv *priv) static void iwl_complete_scan(struct iwl_priv *priv, bool aborted) { + struct cfg80211_scan_info info = { + .aborted = aborted, + }; + /* check if scan was requested from mac80211 */ if (priv->scan_request) { IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n"); - ieee80211_scan_completed(priv->hw, aborted); + ieee80211_scan_completed(priv->hw, &info); } priv->scan_type = IWL_SCAN_NORMAL; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c index 
8d164d896..bea15510c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c @@ -73,8 +73,8 @@ /* Highest firmware API version supported */ #define IWL7260_UCODE_API_MAX 17 #define IWL7265_UCODE_API_MAX 17 -#define IWL7265D_UCODE_API_MAX 21 -#define IWL3168_UCODE_API_MAX 21 +#define IWL7265D_UCODE_API_MAX 24 +#define IWL3168_UCODE_API_MAX 24 /* Lowest firmware API version supported */ #define IWL7260_UCODE_API_MIN 16 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c index 0b4684234..b50aee86b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c @@ -70,8 +70,8 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 21 -#define IWL8265_UCODE_API_MAX 21 +#define IWL8000_UCODE_API_MAX 24 +#define IWL8265_UCODE_API_MAX 24 /* Lowest firmware API version supported */ #define IWL8000_UCODE_API_MIN 16 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c index 19569fff2..03b26168c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c @@ -55,7 +55,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL9000_UCODE_API_MAX 21 +#define IWL9000_UCODE_API_MAX 24 /* Lowest firmware API version supported */ #define IWL9000_UCODE_API_MIN 16 @@ -178,6 +178,7 @@ const struct iwl_cfg iwl5165_2ac_cfg = { .nvm_ver = IWL9000_NVM_VERSION, .nvm_calib_ver = IWL9000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, }; /*(DEBLOBBED)*/ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-a000.c b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c new file mode 100644 index 000000000..ac29c8301 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-a000.c @@ -0,0 +1,131 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015-2016 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2015-2016 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include <linux/module.h> +#include <linux/stringify.h> +#include "iwl-config.h" +#include "iwl-agn-hw.h" + +/* Highest firmware API version supported */ +#define IWL_A000_UCODE_API_MAX 24 + +/* Lowest firmware API version supported */ +#define IWL_A000_UCODE_API_MIN 24 + +/* NVM versions */ +#define IWL_A000_NVM_VERSION 0x0a1d +#define IWL_A000_TX_POWER_VERSION 0xffff /* meaningless */ + +/* Memory offsets and lengths */ +#define IWL_A000_DCCM_OFFSET 0x800000 +#define IWL_A000_DCCM_LEN 0x18000 +#define IWL_A000_DCCM2_OFFSET 0x880000 +#define IWL_A000_DCCM2_LEN 0x8000 +#define IWL_A000_SMEM_OFFSET 0x400000 +#define IWL_A000_SMEM_LEN 0x68000 + +#define IWL_A000_FW_PRE "/*(DEBLOBBED)*/" +#define IWL_A000_MODULE_FIRMWARE(api) \ + IWL_A000_FW_PRE /*(DEBLOBBED)*/ + +#define NVM_HW_SECTION_NUM_FAMILY_A000 10 + +static const struct iwl_base_params iwl_a000_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_A000, + .num_of_queues = 31, + .shadow_ram_support = true, + .led_compensation = 57, + .wd_timeout = IWL_LONG_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = true, + .pcie_l1_allowed = true, +}; + +static const struct iwl_ht_params iwl_a000_ht_params = { + .stbc = true, + .ldpc = true, + .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), +}; + +#define IWL_DEVICE_A000 \ + .ucode_api_max = IWL_A000_UCODE_API_MAX, \ + .ucode_api_min = IWL_A000_UCODE_API_MIN, \ + .device_family = IWL_DEVICE_FAMILY_8000, \ + .max_inst_size = IWL60_RTC_INST_SIZE, \ + .max_data_size = IWL60_RTC_DATA_SIZE, \ + .base_params = &iwl_a000_base_params, \ + .led_mode = IWL_LED_RF_STATE, \ + .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_A000, \ + .non_shared_ant = ANT_A, \ + .dccm_offset = IWL_A000_DCCM_OFFSET, \ + .dccm_len = IWL_A000_DCCM_LEN, \ + .dccm2_offset = IWL_A000_DCCM2_OFFSET, \ + .dccm2_len = IWL_A000_DCCM2_LEN, \ + .smem_offset = IWL_A000_SMEM_OFFSET, \ + .smem_len = IWL_A000_SMEM_LEN, \ + .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \ + .apmg_not_supported = true, \ + .mq_rx_supported = true, \ + .vht_mu_mimo_supported = true, \ + .mac_addr_from_csr = true, \ + .use_tfh = true + +const struct iwl_cfg iwla000_2ac_cfg = { + .name = "Intel(R) Dual Band Wireless AC a000", + .fw_name_pre = IWL_A000_FW_PRE, + IWL_DEVICE_A000, + .ht_params = &iwl_a000_ht_params, + .nvm_ver = IWL_A000_NVM_VERSION, + .nvm_calib_ver = IWL_A000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +/*(DEBLOBBED)*/ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 4a0af7de8..423b23320 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -66,8 +66,9 @@ #define __IWL_CONFIG_H__ #include <linux/types.h> -#include <net/mac80211.h> - +#include <linux/netdevice.h> +#include <linux/ieee80211.h> +#include <linux/nl80211.h> enum iwl_device_family { IWL_DEVICE_FAMILY_UNDEFINED, @@ -192,7 +193,6 @@ struct iwl_base_params { * @ht40_bands: bitmap of bands (using %NL80211_BAND_*) that support HT40 */ struct iwl_ht_params { - enum ieee80211_smps_mode smps_mode; u8 ht_greenfield_support:1, stbc:1, ldpc:1, @@ -261,6 +261,7 @@ struct iwl_tt_params { #define OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(u16)) /* 16 KB */ #define OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(u16)) /* 32 KB */ #define OTP_LOW_IMAGE_SIZE_FAMILY_9000 OTP_LOW_IMAGE_SIZE_FAMILY_8000 +#define OTP_LOW_IMAGE_SIZE_FAMILY_A000 OTP_LOW_IMAGE_SIZE_FAMILY_9000 struct iwl_eeprom_params { const u8 regulatory_bands[7]; @@ -319,6 +320,7 @@ struct iwl_pwr_tx_backoff { * @mq_rx_supported: multi-queue rx support * @vht_mu_mimo_supported: VHT MU-MIMO support * @rf_id: need to read rf_id to determine the firmware image + * @integrated: discrete or integrated * * We enable the driver to be backward compatible wrt. hardware features. * API differences in uCode shouldn't be handled here but through TLVs @@ -362,7 +364,9 @@ struct iwl_cfg { apmg_not_supported:1, mq_rx_supported:1, vht_mu_mimo_supported:1, - rf_id:1; + rf_id:1, + integrated:1, + use_tfh:1; u8 valid_tx_ant; u8 valid_rx_ant; u8 non_shared_ant; @@ -448,6 +452,7 @@ extern const struct iwl_cfg iwl4165_2ac_sdio_cfg; extern const struct iwl_cfg iwl9260_2ac_cfg; extern const struct iwl_cfg iwl9260lc_2ac_cfg; extern const struct iwl_cfg iwl5165_2ac_cfg; +extern const struct iwl_cfg iwla000_2ac_cfg; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index b52913448..871ad02fd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -145,8 +145,10 @@ #define CSR_LED_REG (CSR_BASE+0x094) #define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0) -#define CSR_MAC_SHADOW_REG_CTRL (CSR_BASE+0x0A8) /* 6000 and up */ - +#define CSR_MAC_SHADOW_REG_CTRL (CSR_BASE + 0x0A8) /* 6000 and up */ +#define CSR_MAC_SHADOW_REG_CTRL_RX_WAKE BIT(20) +#define CSR_MAC_SHADOW_REG_CTL2 (CSR_BASE + 0x0AC) +#define CSR_MAC_SHADOW_REG_CTL2_RX_WAKE 0xFFFF /* GIO Chicken Bits (PCI Express bus link power management) */ #define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h index 110333208..cd77c6971 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h @@ -41,6 +41,7 @@ static inline bool iwl_have_debug_level(u32 level) #endif } +struct device; void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace, const char *fmt, ...) __printf(4, 5); void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h index 27914eedc..1dccae653 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2009 - 2014 Intel Corporation. 
All rights reserved. + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -83,6 +84,23 @@ TRACE_EVENT(iwlwifi_dev_iowrite32, __get_str(dev), __entry->offs, __entry->val) ); +TRACE_EVENT(iwlwifi_dev_iowrite64, + TP_PROTO(const struct device *dev, u64 offs, u64 val), + TP_ARGS(dev, offs, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(u64, offs) + __field(u64, val) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%s] write io[%llu] = %llu)", + __get_str(dev), __entry->offs, __entry->val) +); + TRACE_EVENT(iwlwifi_dev_iowrite_prph32, TP_PROTO(const struct device *dev, u32 offs, u32 val), TP_ARGS(dev, offs, val), @@ -100,6 +118,23 @@ TRACE_EVENT(iwlwifi_dev_iowrite_prph32, __get_str(dev), __entry->offs, __entry->val) ); +TRACE_EVENT(iwlwifi_dev_iowrite_prph64, + TP_PROTO(const struct device *dev, u64 offs, u64 val), + TP_ARGS(dev, offs, val), + TP_STRUCT__entry( + DEV_ENTRY + __field(u64, offs) + __field(u64, val) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->offs = offs; + __entry->val = val; + ), + TP_printk("[%s] write PRPH[%llu] = %llu)", + __get_str(dev), __entry->offs, __entry->val) +); + TRACE_EVENT(iwlwifi_dev_ioread_prph32, TP_PROTO(const struct device *dev, u32 offs, u32 val), TP_ARGS(dev, offs, val), diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h index f4d3cd010..545d14b0b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h @@ -1,6 +1,7 @@ /****************************************************************************** * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. + * Copyright(C) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -33,11 +34,29 @@ static inline bool iwl_trace_data(struct sk_buff *skb) { struct ieee80211_hdr *hdr = (void *)skb->data; - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + __le16 fc = hdr->frame_control; + int offs = 24; /* start with normal header length */ - if (!ieee80211_is_data(hdr->frame_control)) + if (!ieee80211_is_data(fc)) return false; - return !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO); + + /* Try to determine if the frame is EAPOL. This might have false + * positives (if there's no RFC 1042 header and we compare to some + * payload instead) but since we're only doing tracing that's not + * a problem. + */ + + if (ieee80211_has_a4(fc)) + offs += 6; + if (ieee80211_is_data_qos(fc)) + offs += 2; + /* don't account for crypto - these are unencrypted */ + + /* also account for the RFC 1042 header, of course */ + offs += 6; + + return skb->len > offs + 2 && + *(__be16 *)(skb->data + offs) == cpu_to_be16(ETH_P_PAE); } static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index c695125d0..f47ff2607 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -129,8 +129,8 @@ struct iwl_drv { }; enum { - DVM_OP_MODE = 0, - MVM_OP_MODE = 1, + DVM_OP_MODE, + MVM_OP_MODE, }; /* Protects the table contents, i.e. 
the ops pointer & drv list */ @@ -326,8 +326,6 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len) int i, j; struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data; struct iwl_fw_cipher_scheme *fwcs; - struct ieee80211_cipher_scheme *cs; - u32 cipher; if (len < sizeof(*l) || len < sizeof(l->size) + l->size * sizeof(l->cs[0])) @@ -335,22 +333,12 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len) for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) { fwcs = &l->cs[j]; - cipher = le32_to_cpu(fwcs->cipher); /* we skip schemes with zero cipher suite selector */ - if (!cipher) + if (!fwcs->cipher) continue; - cs = &fw->cs[j++]; - cs->cipher = cipher; - cs->iftype = BIT(NL80211_IFTYPE_STATION); - cs->hdr_len = fwcs->hdr_len; - cs->pn_len = fwcs->pn_len; - cs->pn_off = fwcs->pn_off; - cs->key_idx_off = fwcs->key_idx_off; - cs->key_idx_mask = fwcs->key_idx_mask; - cs->key_idx_shift = fwcs->key_idx_shift; - cs->mic_len = fwcs->mic_len; + fw->cs[j++] = *fwcs; } return 0; @@ -795,17 +783,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, case IWL_UCODE_TLV_SEC_RT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, tlv_len); - drv->fw.mvm_fw = true; + drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SEC_INIT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT, tlv_len); - drv->fw.mvm_fw = true; + drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SEC_WOWLAN: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN, tlv_len); - drv->fw.mvm_fw = true; + drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_DEF_CALIB: if (tlv_len != sizeof(struct iwl_tlv_calib_data)) @@ -827,17 +815,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, case IWL_UCODE_TLV_SECURE_SEC_RT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR, tlv_len); - drv->fw.mvm_fw = true; + drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SECURE_SEC_INIT: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT, tlv_len); - drv->fw.mvm_fw = true; + drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_SECURE_SEC_WOWLAN: iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN, tlv_len); - drv->fw.mvm_fw = true; + drv->fw.type = IWL_FW_MVM; break; case IWL_UCODE_TLV_NUM_OF_CPU: if (tlv_len != sizeof(u32)) @@ -1275,7 +1263,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) * In mvm uCode there is no difference between data and instructions * sections. */ - if (!fw->mvm_fw && validate_sec_sizes(drv, pieces, drv->cfg)) + if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces, drv->cfg)) goto try_again; /* Allocate ucode buffers for card's bus-master loading ... 
*/ @@ -1403,10 +1391,16 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) release_firmware(ucode_raw); mutex_lock(&iwlwifi_opmode_table_mtx); - if (fw->mvm_fw) - op = &iwlwifi_opmode_table[MVM_OP_MODE]; - else + switch (fw->type) { + case IWL_FW_DVM: op = &iwlwifi_opmode_table[DVM_OP_MODE]; + break; + default: + WARN(1, "Invalid fw type %d\n", fw->type); + case IWL_FW_MVM: + op = &iwlwifi_opmode_table[MVM_OP_MODE]; + break; + } IWL_INFO(drv, "loaded firmware version %s op_mode %s\n", drv->fw.fw_version, op->name); @@ -1658,7 +1652,8 @@ MODULE_PARM_DESC(11n_disable, "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX"); module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, S_IRUGO); -MODULE_PARM_DESC(amsdu_size, "amsdu size 0:4K 1:8K 2:12K (default 0)"); +MODULE_PARM_DESC(amsdu_size, + "amsdu size 0: 12K for multi Rx queue devices, 4K for other devices 1:4K 2:8K 3:12K (default 0)"); module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, bool, S_IRUGO); MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c index bf1b69aec..3199d345b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c @@ -766,7 +766,9 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, if (cfg->ht_params->ldpc) ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING; - if (iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K) + if ((cfg->mq_rx_supported && + iwlwifi_mod_params.amsdu_size != IWL_AMSDU_4K) || + iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K) ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h index 1f4e50289..e04a91d70 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h @@ -66,6 +66,7 @@ #include <linux/types.h> #include <linux/if_ether.h> +#include <net/cfg80211.h> #include "iwl-trans.h" struct iwl_nvm_data { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index 270f39ecd..1d6f5d21a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -77,6 +77,7 @@ */ #define FH_MEM_LOWER_BOUND (0x1000) #define FH_MEM_UPPER_BOUND (0x2000) +#define TFH_MEM_LOWER_BOUND (0xA06000) /** * Keep-Warm (KW) buffer base address. 
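The iwl_init_ht_hw_capab() hunk above ties advertising the HT maximum A-MSDU size to both the amsdu_size module parameter and multi-queue RX support. A minimal standalone sketch of that predicate, assuming the enum iwl_amsdu_size values introduced in the iwl-modparams.h hunk further down; the helper name is illustrative and not part of the patch:

#include <linux/types.h>

/* Illustrative only: mirrors the new condition in iwl_init_ht_hw_capab().
 * Multi-queue RX devices advertise the maximum A-MSDU size for every
 * amsdu_size setting except an explicit 4K request; other devices keep
 * the old rule of 8K and above. */
static bool iwl_wants_max_amsdu(bool mq_rx_supported,
				enum iwl_amsdu_size amsdu_size)
{
	return (mq_rx_supported && amsdu_size != IWL_AMSDU_4K) ||
	       amsdu_size >= IWL_AMSDU_8K;
}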
@@ -118,10 +119,17 @@ #define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) #define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20) #define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80) +/* a000 TFD table address, 64 bit */ +#define TFH_TFDQ_CBB_TABLE (TFH_MEM_LOWER_BOUND + 0x1C00) /* Find TFD CB base pointer for given queue */ -static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) +static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, + unsigned int chnl) { + if (trans->cfg->use_tfh) { + WARN_ON_ONCE(chnl >= 64); + return TFH_TFDQ_CBB_TABLE + 8 * chnl; + } if (chnl < 16) return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl; if (chnl < 20) @@ -130,6 +138,65 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20); } +/* a000 configuration registers */ + +/* + * TFH Configuration register. + * + * BIT fields: + * + * Bits 3:0: + * Define the maximum number of pending read requests. + * Maximum configuration value allowed is 0xC + * Bits 9:8: + * Define the maximum transfer size. (64 / 128 / 256) + * Bit 10: + * When bit is set and transfer size is set to 128B, the TFH will enable + * reading chunks of more than 64B only if the read address is aligned to 128B. + * In case of DRAM read address which is not aligned to 128B, the TFH will + * enable transfer size which doesn't cross 64B DRAM address boundary. +*/ +#define TFH_TRANSFER_MODE (TFH_MEM_LOWER_BOUND + 0x1F40) +#define TFH_TRANSFER_MAX_PENDING_REQ 0xc +#define TFH_CHUNK_SIZE_128 BIT(8) +#define TFH_CHUNK_SPLIT_MODE BIT(10) +/* + * Defines the offset address in dwords referring from the beginning of the + * Tx CMD which will be updated in DRAM. + * Note that the TFH offset address for Tx CMD update is always referring to + * the start of the TFD first TB. + * In case of a DRAM Tx CMD update the TFH will update PN and Key ID + */ +#define TFH_TXCMD_UPDATE_CFG (TFH_MEM_LOWER_BOUND + 0x1F48) +/* + * Controls TX DMA operation + * + * BIT fields: + * + * Bits 31:30: Enable the SRAM DMA channel. + * Turning on bit 31 will kick the SRAM2DRAM DMA. + * Note that the sram2dram may be enabled only after configuring the DRAM and + * SRAM addresses registers and the byte count register. + * Bits 25:24: Defines the interrupt target upon dram2sram transfer done. When + * set to 1 - interrupt is sent to the driver + * Bit 0: Indicates the snoop configuration +*/ +#define TFH_SRV_DMA_CHNL0_CTRL (TFH_MEM_LOWER_BOUND + 0x1F60) +#define TFH_SRV_DMA_SNOOP BIT(0) +#define TFH_SRV_DMA_TO_DRIVER BIT(24) +#define TFH_SRV_DMA_START BIT(31) + +/* Defines the DMA SRAM write start address to transfer a data block */ +#define TFH_SRV_DMA_CHNL0_SRAM_ADDR (TFH_MEM_LOWER_BOUND + 0x1F64) + +/* Defines the 64bits DRAM start address to read the DMA data block from */ +#define TFH_SRV_DMA_CHNL0_DRAM_ADDR (TFH_MEM_LOWER_BOUND + 0x1F68) + +/* + * Defines the number of bytes to transfer from DRAM to SRAM. + * Note that this register may be configured with non-dword aligned size. + */ +#define TFH_SRV_DMA_CHNL0_BC (TFH_MEM_LOWER_BOUND + 0x1F70) /** * Rx SRAM Control and Status Registers (RSCSR) @@ -344,6 +411,32 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) #define RFH_RBDBUF_RBD0_LSB 0xA08300 #define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8) +/** + * RFH Status Register + * + * Bit fields: + * + * Bit 29: RBD_FETCH_IDLE + * This status flag is set by the RFH when there is no active RBD fetch from + * DRAM.
+ * Once the RFH RBD controller starts fetching (or when there is a pending + * RBD read response from DRAM), this flag is immediately turned off. + * + * Bit 30: SRAM_DMA_IDLE + * This status flag is set by the RFH when there is no active transaction from + * SRAM to DRAM. + * Once the SRAM to DRAM DMA is active, this flag is immediately turned off. + * + * Bit 31: RXF_DMA_IDLE + * This status flag is set by the RFH when there is no active transaction from + * RXF to DRAM. + * Once the RXF-to-DRAM DMA is active, this flag is immediately turned off. + */ +#define RFH_GEN_STATUS 0xA09808 +#define RBD_FETCH_IDLE BIT(29) +#define SRAM_DMA_IDLE BIT(30) +#define RXF_DMA_IDLE BIT(31) + /* DMA configuration */ #define RFH_RXF_DMA_CFG 0xA09820 /* RB size */ @@ -384,7 +477,9 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) #define RFH_GEN_CFG 0xA09800 #define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0) #define RFH_GEN_CFG_RFH_DMA_SNOOP BIT(1) -#define RFH_GEN_CFG_RB_CHUNK_SIZE BIT(4) /* 0 - 64B, 1- 128B */ +#define RFH_GEN_CFG_RB_CHUNK_SIZE_POS 4 +#define RFH_GEN_CFG_RB_CHUNK_SIZE_128 1 +#define RFH_GEN_CFG_RB_CHUNK_SIZE_64 0 #define RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK 0xF00 #define RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS 8 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h index 09b7ea28f..420c31dab 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h @@ -89,6 +89,9 @@ * @IWL_FW_ERROR_PAGING: UMAC's image memory segments which were * paged to the DRAM. * @IWL_FW_ERROR_DUMP_RADIO_REG: Dump the radio registers. + * @IWL_FW_ERROR_DUMP_EXTERNAL: used only by external code utilities, and + * for that reason is not in use in any other place in the Linux Wi-Fi + * stack. */ enum iwl_fw_error_dump_type { /* 0 is deprecated */ @@ -106,6 +109,7 @@ enum iwl_fw_error_dump_type { IWL_FW_ERROR_DUMP_PAGING = 12, IWL_FW_ERROR_DUMP_RADIO_REG = 13, IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14, + IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */ IWL_FW_ERROR_DUMP_MAX, }; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 37dc09e8b..1b1e045f8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -301,7 +301,8 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics - * @IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD: support p2p standalone U-APSD + * @IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD: supports U-APSD on p2p interface when it + * is standalone or with a BSS station interface in the same binding. * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different * sources for the MCC. This TLV bit is a future replacement to @@ -312,6 +313,9 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT + * @IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD: the firmware supports CSA + * countdown offloading. Beacon notifications are not sent to the host. 
+ * The fw also offloads TBTT alignment. * @IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what * antenna the beacon should be transmitted * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon @@ -326,6 +330,9 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared * memory addresses from the firmware. * @IWL_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement + * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger + * command size (command version 4) that supports toggling ACK TX + * power reduction. * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -347,7 +354,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19, IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22, - IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD = (__force iwl_ucode_tlv_capa_t)26, + IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD = (__force iwl_ucode_tlv_capa_t)26, IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28, IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29, IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30, @@ -356,6 +363,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65, IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67, IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT = (__force iwl_ucode_tlv_capa_t)68, + IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD = (__force iwl_ucode_tlv_capa_t)70, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71, IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72, IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73, @@ -365,6 +373,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80, IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81, + IWL_UCODE_TLV_CAPA_TX_POWER_ACK = (__force iwl_ucode_tlv_capa_t)84, NUM_IWL_UCODE_TLV_CAPA #ifdef __CHECKER__ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h index e461d6318..74ea68d10 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h @@ -67,7 +67,6 @@ #ifndef __iwl_fw_h__ #define __iwl_fw_h__ #include <linux/types.h> -#include <net/mac80211.h> #include "iwl-fw-file.h" #include "iwl-fw-error-dump.h" @@ -231,6 +230,16 @@ struct iwl_gscan_capabilities { }; /** + * enum iwl_fw_type - iwlwifi firmware type + * @IWL_FW_DVM: DVM firmware + * @IWL_FW_MVM: MVM firmware + */ +enum iwl_fw_type { + IWL_FW_DVM, + IWL_FW_MVM, +}; + +/** * struct iwl_fw - variables associated with the firmware * * @ucode_ver: ucode version from the ucode file @@ -244,7 +253,7 @@ struct iwl_gscan_capabilities { * @inst_evtlog_ptr: event log offset for runtime ucode. * @inst_evtlog_size: event log size for runtime ucode. * @inst_errlog_ptr: error log offfset for runtime ucode. - * @mvm_fw: indicates this is MVM firmware + * @type: firmware type (&enum iwl_fw_type) * @cipher_scheme: optional external cipher scheme. 
* @human_readable: human readable version * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until @@ -275,9 +284,9 @@ struct iwl_fw { u8 valid_tx_ant; u8 valid_rx_ant; - bool mvm_fw; + enum iwl_fw_type type; - struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; + struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS]; u8 human_readable[FW_VER_HUMAN_READABLE_SZ]; u32 sdio_adma_addr; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index 32c8f84ae..92c8b5f9a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -1,7 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project. * @@ -51,6 +51,14 @@ void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val) } IWL_EXPORT_SYMBOL(iwl_write32); +void iwl_write64(struct iwl_trans *trans, u64 ofs, u64 val) +{ + trace_iwlwifi_dev_iowrite64(trans->dev, ofs, val); + iwl_trans_write32(trans, ofs, val & 0xffffffff); + iwl_trans_write32(trans, ofs + 4, val >> 32); +} +IWL_EXPORT_SYMBOL(iwl_write64); + u32 iwl_read32(struct iwl_trans *trans, u32 ofs) { u32 val = iwl_trans_read32(trans, ofs); @@ -102,6 +110,17 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value) } IWL_EXPORT_SYMBOL(iwl_write_direct32); +void iwl_write_direct64(struct iwl_trans *trans, u64 reg, u64 value) +{ + unsigned long flags; + + if (iwl_trans_grab_nic_access(trans, &flags)) { + iwl_write64(trans, reg, value); + iwl_trans_release_nic_access(trans, &flags); + } +} +IWL_EXPORT_SYMBOL(iwl_write_direct64); + int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, int timeout) { @@ -133,6 +152,14 @@ void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val) } IWL_EXPORT_SYMBOL(iwl_write_prph_no_grab); +void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val) +{ + trace_iwlwifi_dev_iowrite_prph64(trans->dev, ofs, val); + iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff); + iwl_write_prph_no_grab(trans, ofs + 4, val >> 32); +} +IWL_EXPORT_SYMBOL(iwl_write_prph64_no_grab); + u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs) { unsigned long flags; @@ -228,9 +255,117 @@ void iwl_force_nmi(struct iwl_trans *trans) } IWL_EXPORT_SYMBOL(iwl_force_nmi); -static const char *get_fh_string(int cmd) +static const char *get_rfh_string(int cmd) { #define IWL_CMD(x) case x: return #x +#define IWL_CMD_MQ(arg, reg, q) { if (arg == reg(q)) return #reg; } + + int i; + + for (i = 0; i < IWL_MAX_RX_HW_QUEUES; i++) { + IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_BA_LSB, i); + IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_WIDX, i); + IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_RIDX, i); + IWL_CMD_MQ(cmd, RFH_Q_URBD_STTS_WPTR_LSB, i); + }; + + switch (cmd) { + IWL_CMD(RFH_RXF_DMA_CFG); + IWL_CMD(RFH_GEN_CFG); + IWL_CMD(RFH_GEN_STATUS); + IWL_CMD(FH_TSSR_TX_STATUS_REG); + IWL_CMD(FH_TSSR_TX_ERROR_REG); + default: + return "UNKNOWN"; + } +#undef IWL_CMD_MQ +} + +struct reg { + u32 addr; + bool is64; +}; + +static int iwl_dump_rfh(struct iwl_trans *trans, char **buf) +{ + int i, q; + int num_q = trans->num_rx_queues; + static const u32 rfh_tbl[] = { + RFH_RXF_DMA_CFG, + RFH_GEN_CFG, + RFH_GEN_STATUS, + FH_TSSR_TX_STATUS_REG, + FH_TSSR_TX_ERROR_REG, + }; + static const struct reg rfh_mq_tbl[] = { + { RFH_Q0_FRBDCB_BA_LSB, true 
}, + { RFH_Q0_FRBDCB_WIDX, false }, + { RFH_Q0_FRBDCB_RIDX, false }, + { RFH_Q0_URBD_STTS_WPTR_LSB, true }, + }; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (buf) { + int pos = 0; + /* + * Register (up to 34 for name + 8 blank/q for MQ): 40 chars + * Colon + space: 2 characters + * 0X%08x: 10 characters + * New line: 1 character + * Total of 53 characters + */ + size_t bufsz = ARRAY_SIZE(rfh_tbl) * 53 + + ARRAY_SIZE(rfh_mq_tbl) * 53 * num_q + 40; + + *buf = kmalloc(bufsz, GFP_KERNEL); + if (!*buf) + return -ENOMEM; + + pos += scnprintf(*buf + pos, bufsz - pos, + "RFH register values:\n"); + + for (i = 0; i < ARRAY_SIZE(rfh_tbl); i++) + pos += scnprintf(*buf + pos, bufsz - pos, + "%40s: 0X%08x\n", + get_rfh_string(rfh_tbl[i]), + iwl_read_prph(trans, rfh_tbl[i])); + + for (i = 0; i < ARRAY_SIZE(rfh_mq_tbl); i++) + for (q = 0; q < num_q; q++) { + u32 addr = rfh_mq_tbl[i].addr; + + addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4); + pos += scnprintf(*buf + pos, bufsz - pos, + "%34s(q %2d): 0X%08x\n", + get_rfh_string(addr), q, + iwl_read_prph(trans, addr)); + } + + return pos; + } +#endif + + IWL_ERR(trans, "RFH register values:\n"); + for (i = 0; i < ARRAY_SIZE(rfh_tbl); i++) + IWL_ERR(trans, " %34s: 0X%08x\n", + get_rfh_string(rfh_tbl[i]), + iwl_read_prph(trans, rfh_tbl[i])); + + for (i = 0; i < ARRAY_SIZE(rfh_mq_tbl); i++) + for (q = 0; q < num_q; q++) { + u32 addr = rfh_mq_tbl[i].addr; + + addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4); + IWL_ERR(trans, " %34s(q %d): 0X%08x\n", + get_rfh_string(addr), q, + iwl_read_prph(trans, addr)); + } + + return 0; +} + +static const char *get_fh_string(int cmd) +{ switch (cmd) { IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); @@ -262,6 +397,9 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf) FH_TSSR_TX_ERROR_REG }; + if (trans->cfg->mq_rx_supported) + return iwl_dump_rfh(trans, buf); + #ifdef CONFIG_IWLWIFI_DEBUGFS if (buf) { int pos = 0; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h index a9bcc788c..5c8c0e130 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h @@ -34,6 +34,7 @@ void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val); void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val); +void iwl_write64(struct iwl_trans *trans, u64 ofs, u64 val); u32 iwl_read32(struct iwl_trans *trans, u32 ofs); static inline void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask) @@ -53,11 +54,13 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg); void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value); +void iwl_write_direct64(struct iwl_trans *trans, u64 reg, u64 value); u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs); u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs); void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val); +void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val); void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val); int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr, u32 bits, u32 mask, int timeout); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index 6c5c2f9f7..4d32b10fe 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -66,7 +66,6 @@ #include <linux/types.h> #include <linux/spinlock.h> #include <linux/gfp.h> 
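iwl_write64() and iwl_write_prph64_no_grab() above both use the same split for 64-bit registers on a 32-bit register interface: the low dword goes out first at offs, then the high dword at offs + 4. A generic sketch of the pattern, with illustrative names that are not from the patch:

#include <linux/types.h>

/* Illustrative only: a 64-bit register write expressed as two 32-bit
 * writes in the same low-then-high order used by iwl_write64(). */
static void write64_split(void (*write32)(u32 ofs, u32 val),
			  u32 ofs, u64 val)
{
	write32(ofs, (u32)(val & 0xffffffff));	/* low dword first */
	write32(ofs + 4, (u32)(val >> 32));	/* then high dword */
}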
-#include <net/mac80211.h> extern struct iwl_mod_params iwlwifi_mod_params; @@ -87,9 +86,10 @@ enum iwl_disable_11n { }; enum iwl_amsdu_size { - IWL_AMSDU_4K = 0, - IWL_AMSDU_8K = 1, - IWL_AMSDU_12K = 2, + IWL_AMSDU_DEF = 0, + IWL_AMSDU_4K = 1, + IWL_AMSDU_8K = 2, + IWL_AMSDU_12K = 3, }; enum iwl_uapsd_disable { @@ -105,7 +105,7 @@ enum iwl_uapsd_disable { * @sw_crypto: using hardware encryption, default = 0 * @disable_11n: disable 11n capabilities, default = 0, * use IWL_[DIS,EN]ABLE_HT_* constants - * @amsdu_size: enable 8K amsdu size, default = 4K. enum iwl_amsdu_size. + * @amsdu_size: See &enum iwl_amsdu_size. * @restart_fw: restart firmware, default = 1 * @bt_coex_active: enable bt coex, default = true * @led_mode: system default, default = 0 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 21653fee8..43f8f7d45 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -397,6 +397,13 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; switch (iwlwifi_mod_params.amsdu_size) { + case IWL_AMSDU_DEF: + if (cfg->mq_rx_supported) + vht_cap->cap |= + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; + else + vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; + break; case IWL_AMSDU_4K: vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; break; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 6c1d20ded..459bf736f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -417,5 +417,6 @@ enum { }; #define UREG_CHICK (0xA05C00) +#define UREG_CHICK_MSI_ENABLE BIT(24) #define UREG_CHICK_MSIX_ENABLE BIT(25) #endif /* __iwl_prph_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 8193d36ae..5535e2238 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -211,6 +211,9 @@ struct iwl_cmd_header_wide { #define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */ #define FH_RSCSR_FRAME_INVALID 0x55550000 #define FH_RSCSR_FRAME_ALIGN 0x40 +#define FH_RSCSR_RPA_EN BIT(25) +#define FH_RSCSR_RXQ_POS 16 +#define FH_RSCSR_RXQ_MASK 0x3F0000 struct iwl_rx_packet { /* @@ -220,7 +223,13 @@ struct iwl_rx_packet { * 31: flag flush RB request * 30: flag ignore TC (terminal counter) request * 29: flag fast IRQ request - * 28-14: Reserved + * 28-26: Reserved + * 25: Offload enabled + * 24: RPF enabled + * 23: RSS enabled + * 22: Checksum enabled + * 21-16: RX queue + * 15-14: Reserved * 13-00: RX frame size */ __le32 len_n_flags; @@ -383,11 +392,6 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r) #define MAX_NO_RECLAIM_CMDS 6 -/* - * The first entry in driver_data array in ieee80211_tx_info - * that can be used by the transport. 
- */ -#define IWL_TRANS_FIRST_DRIVER_DATA 2 #define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) /* @@ -491,6 +495,8 @@ struct iwl_hcmd_arr { * @command_groups_size: number of command groups, to avoid illegal access * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until * we get the ALIVE from the uCode + * @cb_data_offs: offset inside skb->cb to store transport data at, must have + * space for at least two pointers */ struct iwl_trans_config { struct iwl_op_mode *op_mode; @@ -510,6 +516,8 @@ struct iwl_trans_config { int command_groups_size; u32 sdio_adma_addr; + + u8 cb_data_offs; }; struct iwl_trans_dump_data { @@ -574,6 +582,7 @@ struct iwl_trans_txq_scd_cfg { * configured. May sleep. * @txq_disable: de-configure a Tx queue to send AMPDUs * Must be atomic + * @txq_set_shared_mode: change Tx queue shared/unshared marking * @wait_tx_queue_empty: wait until tx queues are empty. May sleep. * @freeze_txq_timer: prevents the timer of the queue from firing until the * queue is set to awake. Must be atomic. @@ -637,6 +646,9 @@ struct iwl_trans_ops { void (*txq_disable)(struct iwl_trans *trans, int queue, bool configure_scd); + void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id, + bool shared); + int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm); void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs, bool freeze); @@ -749,6 +761,7 @@ enum iwl_plat_pm_mode { * @ops - pointer to iwl_trans_ops * @op_mode - pointer to the op_mode * @cfg - pointer to the configuration + * @drv - pointer to iwl_drv * @status: a bit-mask of transport status flags * @dev - pointer to struct device * that represents the device * @max_skb_frags: maximum number of fragments an SKB can have when transmitted. 
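The iwl_rx_packet hunk above documents how len_n_flags now packs the RX queue (bits 21-16) alongside the frame size (bits 13-0); both fields fall out with a mask and shift. A minimal sketch using the FH_RSCSR_* masks defined in that hunk; the helper names are illustrative, not part of the patch:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative only: decode iwl_rx_packet.len_n_flags with the masks
 * from the hunk above. */
static inline u32 rx_pkt_len(__le32 len_n_flags)
{
	return le32_to_cpu(len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 rx_pkt_queue(__le32 len_n_flags)
{
	return (le32_to_cpu(len_n_flags) & FH_RSCSR_RXQ_MASK) >>
		FH_RSCSR_RXQ_POS;
}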
@@ -792,6 +805,7 @@ struct iwl_trans { const struct iwl_trans_ops *ops; struct iwl_op_mode *op_mode; const struct iwl_cfg *cfg; + struct iwl_drv *drv; enum iwl_trans_state state; unsigned long status; @@ -1052,6 +1066,13 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout); } +static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans, + int queue, bool shared_mode) +{ + if (trans->ops->txq_set_shared_mode) + trans->ops->txq_set_shared_mode(trans, queue, shared_mode); +} + static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue, int fifo, int sta_id, int tid, int frame_limit, u16 ssn, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index a63f5bbb1..5bdb6c2c8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -142,7 +142,7 @@ static const __le64 iwl_ci_mask[][3] = { cpu_to_le64(0x0) }, { - cpu_to_le64(0xFFC0000000ULL), + cpu_to_le64(0xFE00000000ULL), cpu_to_le64(0x0ULL), cpu_to_le64(0x0ULL) }, @@ -615,8 +615,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, * don't reduce the Tx power if one of these is true: * we are in LOOSE * single share antenna product - * BT is active - * we are associated + * BT is inactive + * we are not associated */ if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT || mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc || diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 406cf1cb9..b34489817 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1020,6 +1020,8 @@ static ssize_t iwl_dbgfs_max_amsdu_len_write(struct iwl_mvm *mvm, int ret; ret = kstrtouint(buf, 0, &max_amsdu_len); + if (ret) + return ret; if (max_amsdu_len > IEEE80211_MAX_MPDU_LEN_VHT_11454) return -EINVAL; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h index 2a33b694b..204c1b139 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h @@ -70,85 +70,6 @@ #define BITS(nb) (BIT(nb) - 1) -/** - * enum iwl_bt_coex_flags - flags for BT_COEX command - * @BT_COEX_MODE_POS: - * @BT_COEX_MODE_MSK: - * @BT_COEX_DISABLE_OLD: - * @BT_COEX_2W_OLD: - * @BT_COEX_3W_OLD: - * @BT_COEX_NW_OLD: - * @BT_COEX_AUTO_OLD: - * @BT_COEX_BT_OLD: Antenna is for BT (manufacuring tests) - * @BT_COEX_WIFI_OLD: Antenna is for BT (manufacuring tests) - * @BT_COEX_SYNC2SCO: - * @BT_COEX_CORUNNING: - * @BT_COEX_MPLUT: - * @BT_COEX_TTC: - * @BT_COEX_RRC: - * - * The COEX_MODE must be set for each command. Even if it is not changed. - */ -enum iwl_bt_coex_flags { - BT_COEX_MODE_POS = 3, - BT_COEX_MODE_MSK = BITS(3) << BT_COEX_MODE_POS, - BT_COEX_DISABLE_OLD = 0x0 << BT_COEX_MODE_POS, - BT_COEX_2W_OLD = 0x1 << BT_COEX_MODE_POS, - BT_COEX_3W_OLD = 0x2 << BT_COEX_MODE_POS, - BT_COEX_NW_OLD = 0x3 << BT_COEX_MODE_POS, - BT_COEX_AUTO_OLD = 0x5 << BT_COEX_MODE_POS, - BT_COEX_BT_OLD = 0x6 << BT_COEX_MODE_POS, - BT_COEX_WIFI_OLD = 0x7 << BT_COEX_MODE_POS, - BT_COEX_SYNC2SCO = BIT(7), - BT_COEX_CORUNNING = BIT(8), - BT_COEX_MPLUT = BIT(9), - BT_COEX_TTC = BIT(20), - BT_COEX_RRC = BIT(21), -}; - -/* - * indicates what has changed in the BT_COEX command. - * BT_VALID_ENABLE must be set for each command. 
Commands without this bit will - * discarded by the firmware - */ -enum iwl_bt_coex_valid_bit_msk { - BT_VALID_ENABLE = BIT(0), - BT_VALID_BT_PRIO_BOOST = BIT(1), - BT_VALID_MAX_KILL = BIT(2), - BT_VALID_3W_TMRS = BIT(3), - BT_VALID_KILL_ACK = BIT(4), - BT_VALID_KILL_CTS = BIT(5), - BT_VALID_REDUCED_TX_POWER = BIT(6), - BT_VALID_LUT = BIT(7), - BT_VALID_WIFI_RX_SW_PRIO_BOOST = BIT(8), - BT_VALID_WIFI_TX_SW_PRIO_BOOST = BIT(9), - BT_VALID_MULTI_PRIO_LUT = BIT(10), - BT_VALID_TRM_KICK_FILTER = BIT(11), - BT_VALID_CORUN_LUT_20 = BIT(12), - BT_VALID_CORUN_LUT_40 = BIT(13), - BT_VALID_ANT_ISOLATION = BIT(14), - BT_VALID_ANT_ISOLATION_THRS = BIT(15), - BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16), - BT_VALID_TXRX_MAX_FREQ_0 = BIT(17), - BT_VALID_SYNC_TO_SCO = BIT(18), - BT_VALID_TTC = BIT(20), - BT_VALID_RRC = BIT(21), -}; - -/** - * enum iwl_bt_reduced_tx_power - allows to reduce txpower for WiFi frames. - * @BT_REDUCED_TX_POWER_CTL: reduce Tx power for control frames - * @BT_REDUCED_TX_POWER_DATA: reduce Tx power for data frames - * - * This mechanism allows to have BT and WiFi run concurrently. Since WiFi - * reduces its Tx power, it can work along with BT, hence reducing the amount - * of WiFi frames being killed by BT. - */ -enum iwl_bt_reduced_tx_power { - BT_REDUCED_TX_POWER_CTL = BIT(0), - BT_REDUCED_TX_POWER_DATA = BIT(1), -}; - enum iwl_bt_coex_lut_type { BT_COEX_TIGHT_LUT = 0, BT_COEX_LOOSE_LUT, @@ -158,64 +79,9 @@ enum iwl_bt_coex_lut_type { BT_COEX_INVALID_LUT = 0xff, }; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */ -#define BT_COEX_LUT_SIZE (12) #define BT_COEX_CORUN_LUT_SIZE (32) -#define BT_COEX_MULTI_PRIO_LUT_SIZE (2) -#define BT_COEX_BOOST_SIZE (4) #define BT_REDUCED_TX_POWER_BIT BIT(7) -/** - * struct iwl_bt_coex_cmd_old - bt coex configuration command - * @flags:&enum iwl_bt_coex_flags - * @max_kill: - * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power - * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT - * should be set by default - * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT - * should be set by default - * @bt4_antenna_isolation: antenna isolation - * @bt4_antenna_isolation_thr: antenna threshold value - * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency - * @bt4_tx_rx_max_freq0: TxRx max frequency - * @bt_prio_boost: BT priority boost registers - * @wifi_tx_prio_boost: SW boost of wifi tx priority - * @wifi_rx_prio_boost: SW boost of wifi rx priority - * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK. - * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS. - * @decision_lut: PTA decision LUT, per Prio-Ch - * @bt4_multiprio_lut: multi priority LUT configuration - * @bt4_corun_lut20: co-running 20 MHz LUT configuration - * @bt4_corun_lut40: co-running 40 MHz LUT configuration - * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk - * - * The structure is used for the BT_COEX command. 
- */ -struct iwl_bt_coex_cmd_old { - __le32 flags; - u8 max_kill; - u8 bt_reduced_tx_power; - u8 override_primary_lut; - u8 override_secondary_lut; - - u8 bt4_antenna_isolation; - u8 bt4_antenna_isolation_thr; - u8 bt4_tx_tx_delta_freq_thr; - u8 bt4_tx_rx_max_freq0; - - __le32 bt_prio_boost[BT_COEX_BOOST_SIZE]; - __le32 wifi_tx_prio_boost; - __le32 wifi_rx_prio_boost; - __le32 kill_ack_msk; - __le32 kill_cts_msk; - - __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE]; - __le32 bt4_multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE]; - __le32 bt4_corun_lut20[BT_COEX_CORUN_LUT_SIZE]; - __le32 bt4_corun_lut40[BT_COEX_CORUN_LUT_SIZE]; - - __le32 valid_bit_msk; -} __packed; /* BT_COEX_CMD_API_S_VER_5 */ - enum iwl_bt_coex_mode { BT_COEX_DISABLE = 0x0, BT_COEX_NW = 0x1, @@ -385,92 +251,4 @@ struct iwl_bt_coex_profile_notif { u8 reserved[3]; } __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */ -enum iwl_bt_coex_prio_table_event { - BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0, - BT_COEX_PRIO_TBL_EVT_INIT_CALIB2 = 1, - BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1 = 2, - BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2 = 3, - BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1 = 4, - BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2 = 5, - BT_COEX_PRIO_TBL_EVT_DTIM = 6, - BT_COEX_PRIO_TBL_EVT_SCAN52 = 7, - BT_COEX_PRIO_TBL_EVT_SCAN24 = 8, - BT_COEX_PRIO_TBL_EVT_IDLE = 9, - BT_COEX_PRIO_TBL_EVT_MAX = 16, -}; /* BT_COEX_PRIO_TABLE_EVENTS_API_E_VER_1 */ - -enum iwl_bt_coex_prio_table_prio { - BT_COEX_PRIO_TBL_DISABLED = 0, - BT_COEX_PRIO_TBL_PRIO_LOW = 1, - BT_COEX_PRIO_TBL_PRIO_HIGH = 2, - BT_COEX_PRIO_TBL_PRIO_BYPASS = 3, - BT_COEX_PRIO_TBL_PRIO_COEX_OFF = 4, - BT_COEX_PRIO_TBL_PRIO_COEX_ON = 5, - BT_COEX_PRIO_TBL_PRIO_COEX_IDLE = 6, - BT_COEX_PRIO_TBL_MAX = 8, -}; /* BT_COEX_PRIO_TABLE_PRIORITIES_API_E_VER_1 */ - -#define BT_COEX_PRIO_TBL_SHRD_ANT_POS (0) -#define BT_COEX_PRIO_TBL_PRIO_POS (1) -#define BT_COEX_PRIO_TBL_RESERVED_POS (4) - -/** - * struct iwl_bt_coex_prio_tbl_cmd - priority table for BT coex - * @prio_tbl: - */ -struct iwl_bt_coex_prio_tbl_cmd { - u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX]; -} __packed; - -/** - * struct iwl_bt_coex_ci_cmd_old - bt coex channel inhibition command - * @bt_primary_ci: - * @bt_secondary_ci: - * @co_run_bw_primary: - * @co_run_bw_secondary: - * @primary_ch_phy_id: - * @secondary_ch_phy_id: - * - * Used for BT_COEX_CI command - */ -struct iwl_bt_coex_ci_cmd_old { - __le64 bt_primary_ci; - __le64 bt_secondary_ci; - - u8 co_run_bw_primary; - u8 co_run_bw_secondary; - u8 primary_ch_phy_id; - u8 secondary_ch_phy_id; -} __packed; /* BT_CI_MSG_API_S_VER_1 */ - -/** - * struct iwl_bt_coex_profile_notif_old - notification about BT coex - * @mbox_msg: message from BT to WiFi - * @msg_idx: the index of the message - * @bt_status: 0 - off, 1 - on - * @bt_open_conn: number of BT connections open - * @bt_traffic_load: load of BT traffic - * @bt_agg_traffic_load: aggregated load of BT traffic - * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant - * @primary_ch_lut: LUT used for primary channel - * @secondary_ch_lut: LUT used for secondary channel - * @bt_activity_grading: the activity of BT enum %iwl_bt_activity_grading - */ -struct iwl_bt_coex_profile_notif_old { - __le32 mbox_msg[4]; - __le32 msg_idx; - u8 bt_status; - u8 bt_open_conn; - u8 bt_traffic_load; - u8 bt_agg_traffic_load; - u8 bt_ci_compliance; - u8 ttc_enabled; - u8 rrc_enabled; - u8 reserved; - - __le32 primary_ch_lut; - __le32 secondary_ch_lut; - __le32 bt_activity_grading; -} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_3 */ - #endif /* 
__fw_api_bt_coex_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h index 95ac59d08..0246506ab 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h @@ -72,6 +72,9 @@ #define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX #define NUM_MAC_INDEX (MAC_INDEX_AUX + 1) +#define IWL_MVM_STATION_COUNT 16 +#define IWL_MVM_TDLS_STA_COUNT 4 + enum iwl_ac { AC_BK, AC_BE, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h index 65a7c8a4c..404b0de9e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -310,7 +310,8 @@ enum iwl_dev_tx_power_cmd_mode { IWL_TX_POWER_MODE_SET_MAC = 0, IWL_TX_POWER_MODE_SET_DEVICE = 1, IWL_TX_POWER_MODE_SET_CHAINS = 2, -}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_2 */; + IWL_TX_POWER_MODE_SET_ACK = 3, +}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_4 */; /** * struct iwl_dev_tx_power_cmd_v2 - TX power reduction command @@ -338,7 +339,7 @@ struct iwl_dev_tx_power_cmd_v2 { * @v2: version 2 of the command, embedded here for easier software handling * @per_chain_restriction: per chain restrictions */ -struct iwl_dev_tx_power_cmd { +struct iwl_dev_tx_power_cmd_v3 { /* v3 is just an extension of v2 - keep this here */ struct iwl_dev_tx_power_cmd_v2 v2; __le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; @@ -347,6 +348,19 @@ struct iwl_dev_tx_power_cmd { #define IWL_DEV_MAX_TX_POWER 0x7FFF /** + * struct iwl_dev_tx_power_cmd - TX power reduction command + * @v3: version 3 of the command, embedded here for easier software handling + * @enable_ack_reduction: enable or disable close range ack TX power + * reduction. 
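+ * @reserved: reserved (padding to keep the command DWORD aligned)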
+ */ +struct iwl_dev_tx_power_cmd { + /* v4 is just an extension of v3 - keep this here */ + struct iwl_dev_tx_power_cmd_v3 v3; + u8 enable_ack_reduction; + u8 reserved[3]; +} __packed; /* TX_REDUCED_POWER_API_S_VER_4 */ + +/** * struct iwl_beacon_filter_cmd * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) * @id_and_color: MAC contex identifier diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h index 1ca8e4988..acc5cd53e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h @@ -296,7 +296,7 @@ enum iwl_rx_mpdu_status { IWL_RX_MPDU_STATUS_OVERRUN_OK = BIT(1), IWL_RX_MPDU_STATUS_SRC_STA_FOUND = BIT(2), IWL_RX_MPDU_STATUS_KEY_VALID = BIT(3), - IWL_RX_MPDU_STATUS_KEY_ERROR = BIT(4), + IWL_RX_MPDU_STATUS_KEY_PARAM_OK = BIT(4), IWL_RX_MPDU_STATUS_ICV_OK = BIT(5), IWL_RX_MPDU_STATUS_MIC_OK = BIT(6), IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), @@ -311,7 +311,7 @@ enum iwl_rx_mpdu_status { IWL_RX_MPDU_STATUS_WEP_MATCH = BIT(12), IWL_RX_MPDU_STATUS_EXT_IV_MATCH = BIT(13), IWL_RX_MPDU_STATUS_KEY_ID_MATCH = BIT(14), - IWL_RX_MPDU_STATUS_KEY_COLOR = BIT(15), + IWL_RX_MPDU_STATUS_ROBUST_MNG_FRAME = BIT(15), }; enum iwl_rx_mpdu_hash_filter { @@ -336,6 +336,18 @@ enum iwl_rx_mpdu_reorder_data { IWL_RX_MPDU_REORDER_BA_OLD_SN = 0x80000000, }; +enum iwl_rx_mpdu_phy_info { + IWL_RX_MPDU_PHY_AMPDU = BIT(5), + IWL_RX_MPDU_PHY_AMPDU_TOGGLE = BIT(6), + IWL_RX_MPDU_PHY_SHORT_PREAMBLE = BIT(7), + IWL_RX_MPDU_PHY_TSF_OVERLOAD = BIT(8), +}; + +enum iwl_rx_mpdu_mac_info { + IWL_RX_MPDU_PHY_MAC_INDEX_MASK = 0x0f, + IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0, +}; + struct iwl_rx_mpdu_desc { /* DW2 */ __le16 mpdu_len; @@ -343,9 +355,9 @@ struct iwl_rx_mpdu_desc { u8 mac_flags2; /* DW3 */ u8 amsdu_info; - __le16 reserved_for_software; + __le16 phy_info; u8 mac_phy_idx; - /* DW4 */ + /* DW4 - carries csum data only when rpa_en == 1 */ __le16 raw_csum; /* alledgedly unreliable */ __le16 l3l4_flags; /* DW5 */ @@ -354,17 +366,17 @@ struct iwl_rx_mpdu_desc { u8 sta_id_flags; /* DW6 */ __le32 reorder_data; - /* DW7 */ + /* DW7 - carries rss_hash only when rpa_en == 1 */ __le32 rss_hash; - /* DW8 */ + /* DW8 - carries filter_match only when rpa_en == 1 */ __le32 filter_match; /* DW9 */ __le32 rate_n_flags; /* DW10 */ - u8 energy_a, energy_b, channel, reserved; + u8 energy_a, energy_b, channel, mac_context; /* DW11 */ __le32 gp2_on_air_rise; - /* DW12 & DW13 */ + /* DW12 & DW13 - carries TSF only TSF_OVERLOAD bit == 0 */ __le64 tsf_on_air_rise; } __packed; @@ -435,26 +447,26 @@ struct iwl_rxq_sync_notification { } __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */ /** -* Internal message identifier -* -* @IWL_MVM_RXQ_EMPTY: empty sync notification -* @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA -*/ + * Internal message identifier + * + * @IWL_MVM_RXQ_EMPTY: empty sync notification + * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA + */ enum iwl_mvm_rxq_notif_type { IWL_MVM_RXQ_EMPTY, IWL_MVM_RXQ_NOTIF_DEL_BA, }; /** -* struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent -* in &iwl_rxq_sync_cmd. Should be DWORD aligned. -* FW is agnostic to the payload, so there are no endianity requirements. 
-* -* @type: value from &iwl_mvm_rxq_notif_type -* @sync: ctrl path is waiting for all notifications to be received -* @cookie: internal cookie to identify old notifications -* @data: payload -*/ + * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent + * in &iwl_rxq_sync_cmd. Should be DWORD aligned. + * FW is agnostic to the payload, so there are no endianity requirements. + * + * @type: value from &iwl_mvm_rxq_notif_type + * @sync: ctrl path is waiting for all notifications to be received + * @cookie: internal cookie to identify old notifications + * @data: payload + */ struct iwl_mvm_internal_rxq_notif { u16 type; u16 sync; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h index 38b1d045b..d1c4fb849 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h @@ -141,6 +141,7 @@ enum iwl_sta_flags { * @STA_KEY_FLG_CCM: CCMP encryption algorithm * @STA_KEY_FLG_TKIP: TKIP encryption algorithm * @STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support) + * @STA_KEY_FLG_GCMP: GCMP encryption algorithm * @STA_KEY_FLG_CMAC: CMAC encryption algorithm * @STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm * @STA_KEY_FLG_EN_MSK: mask for encryption algorithmi value @@ -149,6 +150,7 @@ enum iwl_sta_flags { * @STA_KEY_FLG_KEYID_MSK: the index of the key * @STA_KEY_NOT_VALID: key is invalid * @STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key + * @STA_KEY_FLG_KEY_32BYTES for non-wep key set for 32 bytes key * @STA_KEY_MULTICAST: set for multical key * @STA_KEY_MFP: key is used for Management Frame Protection */ @@ -158,6 +160,7 @@ enum iwl_sta_key_flag { STA_KEY_FLG_CCM = (2 << 0), STA_KEY_FLG_TKIP = (3 << 0), STA_KEY_FLG_EXT = (4 << 0), + STA_KEY_FLG_GCMP = (5 << 0), STA_KEY_FLG_CMAC = (6 << 0), STA_KEY_FLG_ENC_UNKNOWN = (7 << 0), STA_KEY_FLG_EN_MSK = (7 << 0), @@ -167,6 +170,7 @@ enum iwl_sta_key_flag { STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS), STA_KEY_NOT_VALID = BIT(11), STA_KEY_FLG_WEP_13BYTES = BIT(12), + STA_KEY_FLG_KEY_32BYTES = BIT(12), STA_KEY_MULTICAST = BIT(14), STA_KEY_MFP = BIT(15), }; @@ -388,7 +392,6 @@ struct iwl_mvm_add_sta_cmd { * @key_offset: key offset in key storage * @key_flags: type %iwl_sta_key_flag * @key: key material data - * @key2: key material data * @rx_secur_seq_cnt: RX security sequence counter for the key * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx @@ -397,8 +400,7 @@ struct iwl_mvm_add_sta_key_cmd { u8 sta_id; u8 key_offset; __le16 key_flags; - u8 key[16]; - u8 key2[16]; + u8 key[32]; u8 rx_secur_seq_cnt[16]; u8 tkip_rx_tsc_byte2; u8 reserved; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h index 438665a54..4e638a44b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -252,6 +253,20 @@ struct mvm_statistics_general_v8 { u8 reserved[4 - (NUM_MAC_INDEX % 4)]; } __packed; /* STATISTICS_GENERAL_API_S_VER_8 */ +/** + * struct mvm_statistics_load - RX statistics for multi-queue devices + * @air_time: accumulated air time, per mac + * @byte_count: accumulated byte count, per mac + * @pkt_count: accumulated packet count, per mac + * @avg_energy: average RSSI, per station + */ +struct mvm_statistics_load { + __le32 air_time[NUM_MAC_INDEX]; + __le32 byte_count[NUM_MAC_INDEX]; + __le32 pkt_count[NUM_MAC_INDEX]; + u8 avg_energy[IWL_MVM_STATION_COUNT]; +} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */ + struct mvm_statistics_rx { struct mvm_statistics_rx_phy ofdm; struct mvm_statistics_rx_phy cck; @@ -266,7 +281,6 @@ struct mvm_statistics_rx { * while associated. To disable this behavior, set DISABLE_NOTIF flag in the * STATISTICS_CMD (0x9c), below. */ - struct iwl_notif_statistics_v10 { __le32 flag; struct mvm_statistics_rx rx; @@ -274,6 +288,14 @@ struct iwl_notif_statistics_v10 { struct mvm_statistics_general_v8 general; } __packed; /* STATISTICS_NTFY_API_S_VER_10 */ +struct iwl_notif_statistics_v11 { + __le32 flag; + struct mvm_statistics_rx rx; + struct mvm_statistics_tx tx; + struct mvm_statistics_general_v8 general; + struct mvm_statistics_load load_stats; +} __packed; /* STATISTICS_NTFY_API_S_VER_11 */ + #define IWL_STATISTICS_FLG_CLEAR 0x1 #define IWL_STATISTICS_FLG_DISABLE_NOTIF 0x2 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h index dadcccd88..4144623e1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h @@ -137,17 +137,32 @@ enum iwl_tx_pm_timeouts { PM_FRAME_ASSOC = 3, }; -/* - * TX command security control - */ -#define TX_CMD_SEC_WEP 0x01 -#define TX_CMD_SEC_CCM 0x02 -#define TX_CMD_SEC_TKIP 0x03 -#define TX_CMD_SEC_EXT 0x04 #define TX_CMD_SEC_MSK 0x07 #define TX_CMD_SEC_WEP_KEY_IDX_POS 6 #define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0 -#define TX_CMD_SEC_KEY128 0x08 + +/** + * enum iwl_tx_cmd_sec_ctrl - bitmasks for security control in TX command + * @TX_CMD_SEC_WEP: WEP encryption algorithm. + * @TX_CMD_SEC_CCM: CCM encryption algorithm. + * @TX_CMD_SEC_TKIP: TKIP encryption algorithm. + * @TX_CMD_SEC_EXT: extended cipher algorithm. + * @TX_CMD_SEC_GCMP: GCMP encryption algorithm. + * @TX_CMD_SEC_KEY128: set for 104 bits WEP key. + * @TC_CMD_SEC_KEY_FROM_TABLE: for a non-WEP key, set if the key should be taken + * from the table instead of from the TX command. + * If the key is taken from the key table its index should be given by the + * first byte of the TX command key field. + */ +enum iwl_tx_cmd_sec_ctrl { + TX_CMD_SEC_WEP = 0x01, + TX_CMD_SEC_CCM = 0x02, + TX_CMD_SEC_TKIP = 0x03, + TX_CMD_SEC_EXT = 0x04, + TX_CMD_SEC_GCMP = 0x05, + TX_CMD_SEC_KEY128 = 0x08, + TC_CMD_SEC_KEY_FROM_TABLE = 0x08, +}; /* TODO: how does these values are OK with only 16 bit variable??? 
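 * (Editor's note: the duplicated 0x08 looks intentional; TX_CMD_SEC_KEY128
 * is only meaningful for WEP keys, while TC_CMD_SEC_KEY_FROM_TABLE applies
 * only to non-WEP ciphers, so the shared bit is never ambiguous.)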
*/ /* @@ -562,8 +577,8 @@ struct iwl_mvm_ba_notif { u8 reserved1; } __packed; -/* - * struct iwl_mac_beacon_cmd - beacon template command +/** + * struct iwl_mac_beacon_cmd_v6 - beacon template command * @tx: the tx commands associated with the beacon frame * @template_id: currently equal to the mac context id of the coresponding * mac. @@ -571,13 +586,34 @@ struct iwl_mvm_ba_notif { * @tim_size: the length of the tim IE * @frame: the template of the beacon frame */ +struct iwl_mac_beacon_cmd_v6 { + struct iwl_tx_cmd tx; + __le32 template_id; + __le32 tim_idx; + __le32 tim_size; + struct ieee80211_hdr frame[0]; +} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */ + +/** + * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA + * @tx: the tx commands associated with the beacon frame + * @template_id: currently equal to the mac context id of the coresponding + * mac. + * @tim_idx: the offset of the tim IE in the beacon + * @tim_size: the length of the tim IE + * @ecsa_offset: offset to the ECSA IE if present + * @csa_offset: offset to the CSA IE if present + * @frame: the template of the beacon frame + */ struct iwl_mac_beacon_cmd { struct iwl_tx_cmd tx; __le32 template_id; __le32 tim_idx; __le32 tim_size; + __le32 ecsa_offset; + __le32 csa_offset; struct ieee80211_hdr frame[0]; -} __packed; +} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */ struct iwl_beacon_notif { struct iwl_mvm_tx_resp beacon_notify_hdr; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 41b80ae2d..71076f027 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -90,6 +90,7 @@ enum { * DQA queue numbers * * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW + * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure @@ -108,6 +109,7 @@ enum { */ enum iwl_mvm_dqa_txq { IWL_MVM_DQA_CMD_QUEUE = 0, + IWL_MVM_DQA_AUX_QUEUE = 1, IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2, IWL_MVM_DQA_GCAST_QUEUE = 3, IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, @@ -127,9 +129,6 @@ enum iwl_mvm_tx_fifo { IWL_MVM_TX_FIFO_CMD = 7, }; -#define IWL_MVM_STATION_COUNT 16 - -#define IWL_MVM_TDLS_STA_COUNT 4 /* commands */ enum { @@ -314,6 +313,7 @@ enum { enum iwl_mac_conf_subcmd_ids { LINK_QUALITY_MEASUREMENT_CMD = 0x1, LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE, + CHANNEL_SWITCH_NOA_NOTIF = 0xFF, }; enum iwl_phy_ops_subcmd_ids { @@ -329,6 +329,7 @@ enum iwl_system_subcmd_ids { }; enum iwl_data_path_subcmd_ids { + DQA_ENABLE_CMD = 0x0, UPDATE_MU_GROUPS_CMD = 0x1, TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, MU_GROUP_MGMT_NOTIF = 0xFE, @@ -359,6 +360,14 @@ struct iwl_cmd_response { }; /* + * struct iwl_dqa_enable_cmd + * @cmd_queue: the TXQ number of the command queue + */ +struct iwl_dqa_enable_cmd { + __le32 cmd_queue; +} __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */ + +/* * struct iwl_tx_ant_cfg_cmd * @valid: valid antenna configuration */ @@ -732,7 +741,7 @@ enum iwl_time_event_type { /* P2P GO Events */ TE_P2P_GO_ASSOC_PROT, - TE_P2P_GO_REPETITIVE_NOA, + TE_P2P_GO_REPETITIVET_NOA, TE_P2P_GO_CT_WINDOW, /* WiDi Sync Events */ @@ -2111,4 +2120,13 @@ struct iwl_link_qual_msrmnt_notif { __le32 reserved[3]; } __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */ +/** + * Channel switch NOA 
notification + * + * @id_and_color: ID and color of the MAC + */ +struct iwl_channel_switch_noa_notif { + __le32 id_and_color; +} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */ + #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index e1b6b2c66..46b52bf70 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -288,7 +288,8 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, fifo_hdr->fifo_num = cpu_to_le32(i); /* Mark the number of TXF we're pulling now */ - iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i); + iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i + + ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size)); fifo_hdr->available_bytes = cpu_to_le32(iwl_trans_read_prph(mvm->trans, @@ -959,5 +960,6 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) } mvm->fw_dbg_conf = conf_id; - return ret; + + return 0; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h index f7dff7612..e9f1be9da 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h @@ -105,7 +105,8 @@ iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig, { u32 trig_vif = le32_to_cpu(trig->vif_type); - return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || vif->type == trig_vif; + return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || + ieee80211_vif_type_p2p(vif) == trig_vif; } static inline bool diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 7057f35cb..7e0cdbf8b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -65,6 +65,7 @@ *****************************************************************************/ #include <net/mac80211.h> #include <linux/netdevice.h> +#include <linux/acpi.h> #include "iwl-trans.h" #include "iwl-op-mode.h" @@ -122,6 +123,9 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, }; + if (mvm->trans->num_rx_queues == 1) + return 0; + /* Do not direct RSS traffic to Q 0 which is our fallback queue */ for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) cmd.indirection_table[i] = @@ -131,6 +135,23 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); } +static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm) +{ + struct iwl_dqa_enable_cmd dqa_cmd = { + .cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE), + }; + u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0); + int ret; + + ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd); + if (ret) + IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret); + else + IWL_DEBUG_FW(mvm, "Working in DQA mode\n"); + + return ret; +} + void iwl_free_fw_paging(struct iwl_mvm *mvm) { int i; @@ -139,17 +160,21 @@ void iwl_free_fw_paging(struct iwl_mvm *mvm) return; for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) { - if (!mvm->fw_paging_db[i].fw_paging_block) { + struct iwl_fw_paging *paging = &mvm->fw_paging_db[i]; + + if (!paging->fw_paging_block) { IWL_DEBUG_FW(mvm, "Paging: block %d already freed, continue to next page\n", i); continue; } + dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys, + paging->fw_paging_size, DMA_BIDIRECTIONAL); - __free_pages(mvm->fw_paging_db[i].fw_paging_block, - get_order(mvm->fw_paging_db[i].fw_paging_size)); - 
mvm->fw_paging_db[i].fw_paging_block = NULL; + __free_pages(paging->fw_paging_block, + get_order(paging->fw_paging_size)); + paging->fw_paging_block = NULL; } kfree(mvm->trans->paging_download_buf); mvm->trans->paging_download_buf = NULL; @@ -882,6 +907,177 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) sizeof(cmd), &cmd); } +#define ACPI_WRDS_METHOD "WRDS" +#define ACPI_WRDS_WIFI (0x07) +#define ACPI_WRDS_TABLE_SIZE 10 + +struct iwl_mvm_sar_table { + bool enabled; + u8 values[ACPI_WRDS_TABLE_SIZE]; +}; + +#ifdef CONFIG_ACPI +static int iwl_mvm_sar_get_wrds(struct iwl_mvm *mvm, union acpi_object *wrds, + struct iwl_mvm_sar_table *sar_table) +{ + union acpi_object *data_pkg; + u32 i; + + /* We need at least two packages, one for the revision and one + * for the data itself. Also check that the revision is valid + * (i.e. it is an integer set to 0). + */ + if (wrds->type != ACPI_TYPE_PACKAGE || + wrds->package.count < 2 || + wrds->package.elements[0].type != ACPI_TYPE_INTEGER || + wrds->package.elements[0].integer.value != 0) { + IWL_DEBUG_RADIO(mvm, "Unsupported wrds structure\n"); + return -EINVAL; + } + + /* loop through all the packages to find the one for WiFi */ + for (i = 1; i < wrds->package.count; i++) { + union acpi_object *domain; + + data_pkg = &wrds->package.elements[i]; + + /* Skip anything that is not a package with the right + * amount of elements (i.e. domain_type, + * enabled/disabled plus the sar table size. + */ + if (data_pkg->type != ACPI_TYPE_PACKAGE || + data_pkg->package.count != ACPI_WRDS_TABLE_SIZE + 2) + continue; + + domain = &data_pkg->package.elements[0]; + if (domain->type == ACPI_TYPE_INTEGER && + domain->integer.value == ACPI_WRDS_WIFI) + break; + + data_pkg = NULL; + } + + if (!data_pkg) + return -ENOENT; + + if (data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) + return -EINVAL; + + sar_table->enabled = !!(data_pkg->package.elements[1].integer.value); + + for (i = 0; i < ACPI_WRDS_TABLE_SIZE; i++) { + union acpi_object *entry; + + entry = &data_pkg->package.elements[i + 2]; + if ((entry->type != ACPI_TYPE_INTEGER) || + (entry->integer.value > U8_MAX)) + return -EINVAL; + + sar_table->values[i] = entry->integer.value; + } + + return 0; +} + +static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm, + struct iwl_mvm_sar_table *sar_table) +{ + acpi_handle root_handle; + acpi_handle handle; + struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL}; + acpi_status status; + int ret; + + root_handle = ACPI_HANDLE(mvm->dev); + if (!root_handle) { + IWL_DEBUG_RADIO(mvm, + "Could not retrieve root port ACPI handle\n"); + return -ENOENT; + } + + /* Get the method's handle */ + status = acpi_get_handle(root_handle, (acpi_string)ACPI_WRDS_METHOD, + &handle); + if (ACPI_FAILURE(status)) { + IWL_DEBUG_RADIO(mvm, "WRDS method not found\n"); + return -ENOENT; + } + + /* Call WRDS with no arguments */ + status = acpi_evaluate_object(handle, NULL, NULL, &wrds); + if (ACPI_FAILURE(status)) { + IWL_DEBUG_RADIO(mvm, "WRDS invocation failed (0x%x)\n", status); + return -ENOENT; + } + + ret = iwl_mvm_sar_get_wrds(mvm, wrds.pointer, sar_table); + kfree(wrds.pointer); + + return ret; +} +#else /* CONFIG_ACPI */ +static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm, + struct iwl_mvm_sar_table *sar_table) +{ + return -ENOENT; +} +#endif /* CONFIG_ACPI */ + +static int iwl_mvm_sar_init(struct iwl_mvm *mvm) +{ + struct iwl_mvm_sar_table sar_table; + struct iwl_dev_tx_power_cmd cmd = { + .v3.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS), + }; + int ret, i, j, idx; + 
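        /* Editor's note: the flow below reads the SAR table from the ACPI
         * WRDS method and, if a valid table is found and enabled, sends it
         * to the firmware as a per-chain REDUCE_TX_POWER_CMD; len is
         * trimmed to v3 when the FW lacks the ACK-power (v4) extension. */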
int len = sizeof(cmd); + + /* we can't do anything with the table if the FW doesn't support it */ + if (!fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_TX_POWER_CHAIN)) { + IWL_DEBUG_RADIO(mvm, + "FW doesn't support per-chain TX power settings.\n"); + return 0; + } + + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) + len = sizeof(cmd.v3); + + ret = iwl_mvm_sar_get_table(mvm, &sar_table); + if (ret < 0) { + IWL_DEBUG_RADIO(mvm, + "SAR BIOS table invalid or unavailable. (%d)\n", + ret); + /* we don't fail if the table is not available */ + return 0; + } + + if (!sar_table.enabled) + return 0; + + IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n"); + + BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS != + ACPI_WRDS_TABLE_SIZE); + + for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { + IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i); + for (j = 0; j < IWL_NUM_SUB_BANDS; j++) { + idx = (i * IWL_NUM_SUB_BANDS) + j; + cmd.v3.per_chain_restriction[i][j] = + cpu_to_le16(sar_table.values[idx]); + IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n", + j, sar_table.values[idx]); + } + } + + ret = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); + if (ret) + IWL_ERR(mvm, "failed to set per-chain TX power: %d\n", ret); + + return ret; +} + int iwl_mvm_up(struct iwl_mvm *mvm) { int ret, i; @@ -976,6 +1172,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm) /* reset quota debouncing buffer - 0xff will yield invalid data */ memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); + /* Enable DQA-mode if required */ + if (iwl_mvm_is_dqa_supported(mvm)) { + ret = iwl_mvm_send_dqa_cmd(mvm); + if (ret) + goto error; + } else { + IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n"); + } + /* Add auxiliary station for scanning */ ret = iwl_mvm_add_aux_sta(mvm); if (ret) @@ -1048,6 +1253,10 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); + ret = iwl_mvm_sar_init(mvm); + if (ret) + goto error; + IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); return 0; error: diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 7aae068c0..69c42ce45 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -1006,7 +1006,7 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm, } static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, - struct iwl_mac_beacon_cmd *beacon_cmd, + struct iwl_mac_beacon_cmd_v6 *beacon_cmd, u8 *beacon, u32 frame_size) { u32 tim_idx; @@ -1030,6 +1030,23 @@ static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm, } } +static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size) +{ + struct ieee80211_mgmt *mgmt = (void *)beacon; + const u8 *ie; + + if (WARN_ON_ONCE(frame_size <= (mgmt->u.beacon.variable - beacon))) + return 0; + + frame_size -= mgmt->u.beacon.variable - beacon; + + ie = cfg80211_find_ie(eid, mgmt->u.beacon.variable, frame_size); + if (!ie) + return 0; + + return ie - beacon; +} + static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon) @@ -1039,7 +1056,10 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, .id = BEACON_TEMPLATE_CMD, .flags = CMD_ASYNC, }; - struct iwl_mac_beacon_cmd beacon_cmd = {}; + union { + struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6; + struct iwl_mac_beacon_cmd beacon_cmd; + } u = {}; struct ieee80211_tx_info *info; u32 
beacon_skb_len; u32 rate, tx_flags; @@ -1051,18 +1071,18 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, /* TODO: for now the beacon template id is set to be the mac context id. * Might be better to handle it as another resource ... */ - beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + u.beacon_cmd_v6.template_id = cpu_to_le32((u32)mvmvif->id); info = IEEE80211_SKB_CB(beacon); /* Set up TX command fields */ - beacon_cmd.tx.len = cpu_to_le16((u16)beacon_skb_len); - beacon_cmd.tx.sta_id = mvmvif->bcast_sta.sta_id; - beacon_cmd.tx.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); + u.beacon_cmd_v6.tx.len = cpu_to_le16((u16)beacon_skb_len); + u.beacon_cmd_v6.tx.sta_id = mvmvif->bcast_sta.sta_id; + u.beacon_cmd_v6.tx.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF; tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) << TX_CMD_FLG_BT_PRIO_POS; - beacon_cmd.tx.tx_flags = cpu_to_le32(tx_flags); + u.beacon_cmd_v6.tx.tx_flags = cpu_to_le32(tx_flags); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) { @@ -1071,7 +1091,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, mvm->mgmt_last_antenna_idx); } - beacon_cmd.tx.rate_n_flags = + u.beacon_cmd_v6.tx.rate_n_flags = cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS); @@ -1079,20 +1099,37 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, rate = IWL_FIRST_OFDM_RATE; } else { rate = IWL_FIRST_CCK_RATE; - beacon_cmd.tx.rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK); + u.beacon_cmd_v6.tx.rate_n_flags |= + cpu_to_le32(RATE_MCS_CCK_MSK); } - beacon_cmd.tx.rate_n_flags |= + u.beacon_cmd_v6.tx.rate_n_flags |= cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate)); /* Set up TX beacon command fields */ if (vif->type == NL80211_IFTYPE_AP) - iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd, + iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6, beacon->data, beacon_skb_len); /* Submit command */ - cmd.len[0] = sizeof(beacon_cmd); - cmd.data[0] = &beacon_cmd; + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) { + u.beacon_cmd.csa_offset = + cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_CHANNEL_SWITCH, + beacon_skb_len)); + u.beacon_cmd.ecsa_offset = + cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_EXT_CHANSWITCH_ANN, + beacon_skb_len)); + + cmd.len[0] = sizeof(u.beacon_cmd); + } else { + cmd.len[0] = sizeof(u.beacon_cmd_v6); + } + + cmd.data[0] = &u; cmd.dataflags[0] = 0; cmd.len[1] = beacon_skb_len; cmd.data[1] = beacon->data; @@ -1538,3 +1575,48 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, /* pass it as regular rx to mac80211 */ ieee80211_rx_napi(mvm->hw, NULL, skb, NULL); } + +void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_channel_switch_noa_notif *notif = (void *)pkt->data; + struct ieee80211_vif *csa_vif; + struct iwl_mvm_vif *mvmvif; + int len = iwl_rx_packet_payload_len(pkt); + u32 id_n_color; + + if (WARN_ON_ONCE(len < sizeof(*notif))) + return; + + rcu_read_lock(); + + csa_vif = rcu_dereference(mvm->csa_vif); + if (WARN_ON(!csa_vif || !csa_vif->csa_active)) + goto out_unlock; + + id_n_color = le32_to_cpu(notif->id_and_color); + + mvmvif = iwl_mvm_vif_from_mac80211(csa_vif); + if (WARN(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color) != id_n_color, + "channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)", + 
FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color), id_n_color)) + goto out_unlock; + + IWL_DEBUG_INFO(mvm, "Channel Switch Started Notification\n"); + + queue_delayed_work(system_wq, &mvm->cs_tx_unblock_dwork, + msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT * + csa_vif->bss_conf.beacon_int)); + + ieee80211_csa_finish(csa_vif); + + rcu_read_unlock(); + + RCU_INIT_POINTER(mvm->csa_vif, NULL); + + return; + +out_unlock: + rcu_read_unlock(); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 18a8474b5..5dd77e336 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -465,11 +465,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; - BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2); + BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 4); memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers); hw->wiphy->cipher_suites = mvm->ciphers; + if (iwl_mvm_has_new_rx_api(mvm)) { + mvm->ciphers[hw->wiphy->n_cipher_suites] = + WLAN_CIPHER_SUITE_GCMP; + hw->wiphy->n_cipher_suites++; + mvm->ciphers[hw->wiphy->n_cipher_suites] = + WLAN_CIPHER_SUITE_GCMP_256; + hw->wiphy->n_cipher_suites++; + } + /* * Enable 11w if advertised by firmware and software crypto * is not enabled (as the firmware will interpret some mgmt @@ -485,10 +494,23 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) /* currently FW API supports only one optional cipher scheme */ if (mvm->fw->cs[0].cipher) { + const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0]; + struct ieee80211_cipher_scheme *cs = &mvm->cs[0]; + mvm->hw->n_cipher_schemes = 1; - mvm->hw->cipher_schemes = &mvm->fw->cs[0]; - mvm->ciphers[hw->wiphy->n_cipher_suites] = - mvm->fw->cs[0].cipher; + + cs->cipher = le32_to_cpu(fwcs->cipher); + cs->iftype = BIT(NL80211_IFTYPE_STATION); + cs->hdr_len = fwcs->hdr_len; + cs->pn_len = fwcs->pn_len; + cs->pn_off = fwcs->pn_off; + cs->key_idx_off = fwcs->key_idx_off; + cs->key_idx_mask = fwcs->key_idx_mask; + cs->key_idx_shift = fwcs->key_idx_shift; + cs->mic_len = fwcs->mic_len; + + mvm->hw->cipher_schemes = mvm->cs; + mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher; hw->wiphy->n_cipher_suites++; } @@ -602,6 +624,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | NL80211_FEATURE_LOW_PRIORITY_SCAN | NL80211_FEATURE_P2P_GO_OPPPS | + NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | NL80211_FEATURE_DYNAMIC_SMPS | NL80211_FEATURE_STATIC_SMPS | NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; @@ -1011,11 +1034,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames)); memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained)); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); - memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old)); memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); - memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old)); - memset(&mvm->bt_ack_kill_msk, 0, sizeof(mvm->bt_ack_kill_msk)); - memset(&mvm->bt_cts_kill_msk, 0, sizeof(mvm->bt_cts_kill_msk)); ieee80211_wake_queues(mvm->hw); @@ -1199,6 +1218,8 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) flush_work(&mvm->async_handlers_wk); flush_work(&mvm->add_stream_wk); 
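        /* Editor's note: cancel_delayed_work_sync() both cancels a pending
         * item and waits for a running instance to finish, so the works
         * below are guaranteed idle before teardown continues. */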
cancel_delayed_work_sync(&mvm->fw_dump_wk); + cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); + cancel_delayed_work_sync(&mvm->scan_timeout_dwork); iwl_mvm_free_fw_dump_desc(mvm); mutex_lock(&mvm->mutex); @@ -1230,18 +1251,20 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, s16 tx_power) { struct iwl_dev_tx_power_cmd cmd = { - .v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), - .v2.mac_context_id = + .v3.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), + .v3.v2.mac_context_id = cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), - .v2.pwr_restriction = cpu_to_le16(8 * tx_power), + .v3.v2.pwr_restriction = cpu_to_le16(8 * tx_power), }; int len = sizeof(cmd); if (tx_power == IWL_DEFAULT_MAX_TX_POWER) - cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); + cmd.v3.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) + len = sizeof(cmd.v3); if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN)) - len = sizeof(cmd.v2); + len = sizeof(cmd.v3.v2); return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd); } @@ -2360,7 +2383,7 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) return; - if (vif->p2p && !iwl_mvm_is_p2p_standalone_uapsd_supported(mvm)) { + if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) { vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; return; } @@ -2719,6 +2742,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; break; case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; break; case WLAN_CIPHER_SUITE_AES_CMAC: @@ -2780,7 +2805,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, sta && iwl_mvm_has_new_rx_api(mvm) && key->flags & IEEE80211_KEY_FLAG_PAIRWISE && (key->cipher == WLAN_CIPHER_SUITE_CCMP || - key->cipher == WLAN_CIPHER_SUITE_GCMP)) { + key->cipher == WLAN_CIPHER_SUITE_GCMP || + key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { struct ieee80211_key_seq seq; int tid, q; @@ -2834,7 +2860,8 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, if (sta && iwl_mvm_has_new_rx_api(mvm) && key->flags & IEEE80211_KEY_FLAG_PAIRWISE && (key->cipher == WLAN_CIPHER_SUITE_CCMP || - key->cipher == WLAN_CIPHER_SUITE_GCMP)) { + key->cipher == WLAN_CIPHER_SUITE_GCMP || + key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { mvmsta = iwl_mvm_sta_from_mac80211(sta); ptk_pn = rcu_dereference_protected( mvmsta->ptk_pn[keyidx], @@ -3687,6 +3714,13 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, goto out_unlock; } + /* we still didn't unblock tx. 
prevent new CS meanwhile */ + if (rcu_dereference_protected(mvm->csa_tx_blocked_vif, + lockdep_is_held(&mvm->mutex))) { + ret = -EBUSY; + goto out_unlock; + } + rcu_assign_pointer(mvm->csa_vif, vif); if (WARN_ONCE(mvmvif->csa_countdown, @@ -3695,6 +3729,8 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, goto out_unlock; } + mvmvif->csa_target_freq = chsw->chandef.chan->center_freq; + break; case NL80211_IFTYPE_STATION: if (mvmvif->lqm_active) @@ -3898,6 +3934,11 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + if (mvmsta->avg_energy) { + sinfo->signal_avg = mvmsta->avg_energy; + sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG); + } + if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index ffbd41dcc..6a615bb73 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -452,6 +452,7 @@ struct iwl_mvm_vif { /* Indicates that CSA countdown may be started */ bool csa_countdown; bool csa_failed; + u16 csa_target_freq; /* TCP Checksum Offload */ netdev_features_t features; @@ -466,6 +467,8 @@ struct iwl_mvm_vif { static inline struct iwl_mvm_vif * iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif) { + if (!vif) + return NULL; return (void *)vif->drv_priv; } @@ -686,13 +689,28 @@ struct iwl_mvm_baid_data { * This is the state of a queue that has been fully configured (including * SCD pointers, etc), has a specific RA/TID assigned to it, and can be * used to send traffic. + * @IWL_MVM_QUEUE_SHARED: queue is shared, or in a process of becoming shared + * This is a state in which a single queue serves more than one TID, all of + * which are not aggregated. Note that the queue is only associated to one + * RA. + * @IWL_MVM_QUEUE_INACTIVE: queue is allocated but no traffic on it + * This is a state of a queue that has had traffic on it, but during the + * last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on + * it. In this state, when a new queue is needed to be allocated but no + * such free queue exists, an inactive queue might be freed and given to + * the new RA/TID. 
*/ enum iwl_mvm_queue_status { IWL_MVM_QUEUE_FREE, IWL_MVM_QUEUE_RESERVED, IWL_MVM_QUEUE_READY, + IWL_MVM_QUEUE_SHARED, + IWL_MVM_QUEUE_INACTIVE, }; +#define IWL_MVM_DQA_QUEUE_TIMEOUT (5 * HZ) +#define IWL_MVM_NUM_CIPHERS 8 + struct iwl_mvm { /* for logger access */ struct device *dev; @@ -731,6 +749,7 @@ struct iwl_mvm { struct iwl_sf_region sf_space; u32 ampdu_ref; + bool ampdu_toggle; struct iwl_notif_wait_data notif_wait; @@ -748,11 +767,16 @@ struct iwl_mvm { u32 hw_queue_to_mac80211; u8 hw_queue_refcount; u8 ra_sta_id; /* The RA this queue is mapped to, if exists */ + bool reserved; /* Is this the TXQ reserved for a STA */ + u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */ u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */ + /* Timestamp for inactivation per TID of this queue */ + unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1]; enum iwl_mvm_queue_status status; } queue_info[IWL_MAX_HW_QUEUES]; spinlock_t queue_info_lock; /* For syncing queue mgmt operations */ struct work_struct add_stream_wk; /* To add streams to queues */ + atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES]; const char *nvm_file_name; @@ -787,7 +811,7 @@ struct iwl_mvm { struct iwl_mcast_filter_cmd *mcast_filter_cmd; enum iwl_mvm_scan_type scan_type; enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all; - struct timer_list scan_timer; + struct delayed_work scan_timeout_dwork; /* max number of simultaneous scans the FW supports */ unsigned int max_scans; @@ -910,11 +934,6 @@ struct iwl_mvm { wait_queue_head_t d0i3_exit_waitq; /* BT-Coex */ - u8 bt_ack_kill_msk[NUM_PHY_CTX]; - u8 bt_cts_kill_msk[NUM_PHY_CTX]; - - struct iwl_bt_coex_profile_notif_old last_bt_notif_old; - struct iwl_bt_coex_ci_cmd_old last_bt_ci_cmd_old; struct iwl_bt_coex_profile_notif last_bt_notif; struct iwl_bt_coex_ci_cmd last_bt_ci_cmd; @@ -994,7 +1013,8 @@ struct iwl_mvm { struct iwl_mvm_shared_mem_cfg shared_mem_cfg; - u32 ciphers[6]; + u32 ciphers[IWL_MVM_NUM_CIPHERS]; + struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; struct iwl_mvm_tof_data tof_data; struct ieee80211_vif *nan_vif; @@ -1006,6 +1026,8 @@ struct iwl_mvm { * clients. 
*/ bool drop_bcn_ap_mode; + + struct delayed_work cs_tx_unblock_dwork; }; /* Extract MVM priv from op_mode and _hw */ @@ -1158,10 +1180,10 @@ static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm) } static inline -bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm) +bool iwl_mvm_is_p2p_scm_uapsd_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) && + IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD) && !(iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_P2P_CLIENT); } @@ -1321,7 +1343,6 @@ bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb); -void iwl_mvm_rx_phy_cmd_mq(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, @@ -1381,6 +1402,8 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif); unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, struct ieee80211_vif *exclude_vif); +void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); /* Bindings */ int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); @@ -1397,7 +1420,7 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm); int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify); int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm); void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm); -void iwl_mvm_scan_timeout(unsigned long data); +void iwl_mvm_scan_timeout_wk(struct work_struct *work); /* Scheduled scan */ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, @@ -1613,7 +1636,7 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, */ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, u8 tid, u8 flags); -int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq); +int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq); /* Return a bitmask with all the hw supported queues, except for the * command queue, which can't be flushed. 
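Among the changes above, the scan timeout moves from a timer_list to a delayed_work, so the timeout handler runs in a context that may sleep. A minimal sketch of the same conversion, under assumed my_* names:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_scan_ctx {
        struct delayed_work timeout_dwork;
};

static void my_scan_timeout_wk(struct work_struct *work)
{
        struct my_scan_ctx *ctx =
                container_of(work, struct my_scan_ctx, timeout_dwork.work);

        /* Process context: may sleep, take mutexes and send synchronous
         * firmware commands, none of which a timer callback may do. */
        (void)ctx;
}

static void my_scan_init(struct my_scan_ctx *ctx)
{
        INIT_DELAYED_WORK(&ctx->timeout_dwork, my_scan_timeout_wk);
}

static void my_scan_arm(struct my_scan_ctx *ctx, unsigned int timeout_ms)
{
        schedule_delayed_work(&ctx->timeout_dwork,
                              msecs_to_jiffies(timeout_ms));
}

static void my_scan_stop(struct my_scan_ctx *ctx)
{
        /* cancels a pending timeout and waits out a running one */
        cancel_delayed_work_sync(&ctx->timeout_dwork);
}

Since cancel_delayed_work_sync() may sleep, it belongs in stop paths such as iwl_mvm_mac_stop(), never in atomic context.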
@@ -1720,6 +1743,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, void iwl_mvm_reorder_timer_expired(unsigned long data); struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm); +void iwl_mvm_inactivity_check(struct iwl_mvm *mvm); + void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error); unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 9a8cf2f41..fcfb7aac6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -66,7 +66,6 @@ *****************************************************************************/ #include <linux/firmware.h> #include <linux/rtnetlink.h> -#include <linux/pci.h> #include <linux/acpi.h> #include "iwl-trans.h" #include "iwl-csr.h" @@ -667,8 +666,7 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]), .source_id = (u8)src_id, }; - struct iwl_mcc_update_resp *mcc_resp, *resp_cp = NULL; - struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = NULL; + struct iwl_mcc_update_resp *resp_cp; struct iwl_rx_packet *pkt; struct iwl_host_cmd cmd = { .id = MCC_UPDATE_CMD, @@ -701,34 +699,36 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, /* Extract MCC response */ if (resp_v2) { - mcc_resp = (void *)pkt->data; + struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data; + n_channels = __le32_to_cpu(mcc_resp->n_channels); + resp_len = sizeof(struct iwl_mcc_update_resp) + + n_channels * sizeof(__le32); + resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL); } else { - mcc_resp_v1 = (void *)pkt->data; + struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = (void *)pkt->data; + n_channels = __le32_to_cpu(mcc_resp_v1->n_channels); + resp_len = sizeof(struct iwl_mcc_update_resp) + + n_channels * sizeof(__le32); + resp_cp = kzalloc(resp_len, GFP_KERNEL); + + if (resp_cp) { + resp_cp->status = mcc_resp_v1->status; + resp_cp->mcc = mcc_resp_v1->mcc; + resp_cp->cap = mcc_resp_v1->cap; + resp_cp->source_id = mcc_resp_v1->source_id; + resp_cp->n_channels = mcc_resp_v1->n_channels; + memcpy(resp_cp->channels, mcc_resp_v1->channels, + n_channels * sizeof(__le32)); + } } - resp_len = sizeof(struct iwl_mcc_update_resp) + n_channels * - sizeof(__le32); - - resp_cp = kzalloc(resp_len, GFP_KERNEL); if (!resp_cp) { ret = -ENOMEM; goto exit; } - if (resp_v2) { - memcpy(resp_cp, mcc_resp, resp_len); - } else { - resp_cp->status = mcc_resp_v1->status; - resp_cp->mcc = mcc_resp_v1->mcc; - resp_cp->cap = mcc_resp_v1->cap; - resp_cp->source_id = mcc_resp_v1->source_id; - resp_cp->n_channels = mcc_resp_v1->n_channels; - memcpy(resp_cp->channels, mcc_resp_v1->channels, - n_channels * sizeof(__le32)); - } - status = le32_to_cpu(resp_cp->status); mcc = le16_to_cpu(resp_cp->mcc); @@ -802,9 +802,8 @@ static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc) struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL}; acpi_status status; u32 mcc_val; - struct pci_dev *pdev = to_pci_dev(mvm->dev); - root_handle = ACPI_HANDLE(&pdev->dev); + root_handle = ACPI_HANDLE(mvm->dev); if (!root_handle) { IWL_DEBUG_LAR(mvm, "Could not retrieve root port ACPI handle\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index a68054f12..55d9096da 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -431,6 +431,7 @@ static const struct 
iwl_hcmd_names iwl_mvm_system_names[] = { static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = { HCMD_NAME(LINK_QUALITY_MEASUREMENT_CMD), HCMD_NAME(LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF), + HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF), }; /* Please keep this array *SORTED* by hex value. @@ -494,6 +495,29 @@ static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg) static void iwl_mvm_fw_error_dump_wk(struct work_struct *work); +static void iwl_mvm_tx_unblock_dwork(struct work_struct *work) +{ + struct iwl_mvm *mvm = + container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work); + struct ieee80211_vif *tx_blocked_vif; + struct iwl_mvm_vif *mvmvif; + + mutex_lock(&mvm->mutex); + + tx_blocked_vif = + rcu_dereference_protected(mvm->csa_tx_blocked_vif, + lockdep_is_held(&mvm->mutex)); + + if (!tx_blocked_vif) + goto unlock; + + mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); + iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false); + RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); +unlock: + mutex_unlock(&mvm->mutex); +} + static struct iwl_op_mode * iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) @@ -553,18 +577,21 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0; - mvm->aux_queue = 15; if (!iwl_mvm_is_dqa_supported(mvm)) { - mvm->first_agg_queue = 16; mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1; + + if (mvm->cfg->base_params->num_of_queues == 16) { + mvm->aux_queue = 11; + mvm->first_agg_queue = 12; + } else { + mvm->aux_queue = 15; + mvm->first_agg_queue = 16; + } } else { + mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE; mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE; } - if (mvm->cfg->base_params->num_of_queues == 16) { - mvm->aux_queue = 11; - mvm->first_agg_queue = 12; - } mvm->sf_state = SF_UNINIT; mvm->cur_ucode = IWL_UCODE_INIT; mvm->drop_bcn_ap_mode = true; @@ -584,6 +611,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work); INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk); INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); + INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); spin_lock_init(&mvm->d0i3_tx_lock); @@ -595,6 +623,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev); + INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork); + /* * Populate the state variables that the transport layer needs * to know about. 
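The iwl_mvm_tx_unblock_dwork() worker added above uses the standard pattern for an RCU pointer whose updater is serialized by a mutex: rcu_dereference_protected() with lockdep_is_held() instead of a plain rcu_dereference(). A stripped-down sketch, with my_* names as placeholders:

#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct my_obj { int id; };

struct my_state {
        struct mutex mutex;
        struct my_obj __rcu *blocked;   /* written only under ->mutex */
};

static void my_unblock(struct my_state *st)
{
        struct my_obj *obj;

        mutex_lock(&st->mutex);
        /* Updater side: holding the mutex makes the dereference safe,
         * and lockdep_is_held() lets lockdep verify that at runtime. */
        obj = rcu_dereference_protected(st->blocked,
                                        lockdep_is_held(&st->mutex));
        if (obj)
                RCU_INIT_POINTER(st->blocked, NULL);
        mutex_unlock(&st->mutex);
}

Readers elsewhere would take rcu_read_lock() and use rcu_dereference(), as the channel-switch notification handler does with mvm->csa_vif.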
@@ -603,6 +633,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.no_reclaim_cmds = no_reclaim_cmds; trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); switch (iwlwifi_mod_params.amsdu_size) { + case IWL_AMSDU_DEF: case IWL_AMSDU_4K: trans_cfg.rx_buf_size = IWL_AMSDU_4K; break; @@ -617,6 +648,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwlwifi_mod_params.amsdu_size); trans_cfg.rx_buf_size = IWL_AMSDU_4K; } + + /* the hardware splits the A-MSDU */ + if (mvm->cfg->mq_rx_supported) + trans_cfg.rx_buf_size = IWL_AMSDU_4K; trans_cfg.wide_cmd_header = fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR); @@ -633,6 +668,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; trans_cfg.scd_set_active = true; + trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info, + driver_data[2]); + trans_cfg.sdio_adma_addr = fw->sdio_adma_addr; trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD; @@ -735,9 +773,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_tof_init(mvm); - setup_timer(&mvm->scan_timer, iwl_mvm_scan_timeout, - (unsigned long)mvm); - return op_mode; out_unregister: @@ -791,8 +826,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) iwl_mvm_tof_clean(mvm); - del_timer_sync(&mvm->scan_timer); - mutex_destroy(&mvm->mutex); mutex_destroy(&mvm->d0i3_suspend_mutex); @@ -936,8 +969,6 @@ static void iwl_mvm_rx(struct iwl_op_mode *op_mode, if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) iwl_mvm_rx_rx_mpdu(mvm, napi, rxb); - else if (pkt->hdr.cmd == FRAME_RELEASE) - iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD) iwl_mvm_rx_rx_phy_cmd(mvm, rxb); else @@ -953,11 +984,11 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0); - else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD) - iwl_mvm_rx_phy_cmd_mq(mvm, rxb); else if (unlikely(pkt->hdr.group_id == DATA_PATH_GROUP && pkt->hdr.cmd == RX_QUEUES_NOTIFICATION)) iwl_mvm_rx_queue_notif(mvm, rxb, 0); + else if (pkt->hdr.cmd == FRAME_RELEASE) + iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); else iwl_mvm_rx_common(mvm, rxb, pkt); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index 7b1f6ad60..ff85865b1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -308,7 +308,7 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, /* Allow U-APSD only if p2p is stand alone */ bool is_p2p_standalone = true; - if (!iwl_mvm_is_p2p_standalone_uapsd_supported(mvm)) + if (!iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) return false; ieee80211_iterate_active_interfaces_atomic(mvm->hw, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 81dd2f6a4..227c5ed9c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -211,6 +211,9 @@ static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (is_ht80(rate) && (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80)) return true; + if (is_ht160(rate) && (vht_cap->cap & + IEEE80211_VHT_CAP_SHORT_GI_160)) + return true; return false; } @@ -399,7 +402,7 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) static void rs_rate_scale_perform(struct iwl_mvm *mvm, struct 
ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, - int tid); + int tid, bool ndp); static void rs_fill_lq_cmd(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, @@ -445,6 +448,13 @@ static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = { {0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691}, }; +static const u16 expected_tpt_siso_160MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 191, 0, 244, 288, 298, 308, 313, 318, 323, 328, 330}, + {0, 0, 0, 0, 200, 0, 251, 293, 302, 312, 317, 322, 327, 332, 334}, + {0, 0, 0, 0, 439, 0, 875, 1307, 1736, 2584, 3419, 3831, 4240, 5049, 5581}, + {0, 0, 0, 0, 488, 0, 972, 1451, 1925, 2864, 3785, 4240, 4691, 5581, 6165}, +}; + static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = { {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0}, {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0}, @@ -466,6 +476,13 @@ static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = { {0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545}, }; +static const u16 expected_tpt_mimo2_160MHz[4][IWL_RATE_COUNT] = { + {0, 0, 0, 0, 240, 0, 278, 308, 313, 319, 322, 324, 328, 330, 334}, + {0, 0, 0, 0, 247, 0, 282, 310, 315, 320, 323, 325, 329, 332, 338}, + {0, 0, 0, 0, 875, 0, 1735, 2582, 3414, 5043, 6619, 7389, 8147, 9629, 10592}, + {0, 0, 0, 0, 971, 0, 1925, 2861, 3779, 5574, 7304, 8147, 8976, 10592, 11640}, +}; + /* mbps, mcs */ static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { { "1", "BPSK DSSS"}, @@ -901,7 +918,6 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, } } - WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_160); WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 && !is_vht(rate)); @@ -1161,7 +1177,7 @@ static u8 rs_get_tid(struct ieee80211_hdr *hdr) } void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, struct ieee80211_tx_info *info) + int tid, struct ieee80211_tx_info *info, bool ndp) { int legacy_success; int retries; @@ -1384,7 +1400,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, done: /* See if there's a better rate or modulation mode to try. 
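 * ([Ed.] The new @ndp argument, threaded through rs_rate_scale_perform()
 *  and iwl_mvm_rs_tx_status(), flags QoS-null frames; it is checked below
 *  so that an NDP never kicks off a new TX aggregation session.)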
*/ if (sta->supp_rates[info->band]) - rs_rate_scale_perform(mvm, sta, lq_sta, tid); + rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp); } /* @@ -1407,7 +1423,8 @@ static void rs_mac80211_tx_status(void *mvm_r, info->flags & IEEE80211_TX_CTL_NO_ACK) return; - iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info); + iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info, + ieee80211_is_qos_nullfunc(hdr->frame_control)); } /* @@ -1494,6 +1511,9 @@ static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta, case RATE_MCS_CHAN_WIDTH_80: ht_tbl_pointer = expected_tpt_siso_80MHz; break; + case RATE_MCS_CHAN_WIDTH_160: + ht_tbl_pointer = expected_tpt_siso_160MHz; + break; default: WARN_ON_ONCE(1); } @@ -1508,6 +1528,9 @@ static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta, case RATE_MCS_CHAN_WIDTH_80: ht_tbl_pointer = expected_tpt_mimo2_80MHz; break; + case RATE_MCS_CHAN_WIDTH_160: + ht_tbl_pointer = expected_tpt_mimo2_160MHz; + break; default: WARN_ON_ONCE(1); } @@ -1582,12 +1605,17 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm, static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta) { - if (sta->bandwidth >= IEEE80211_STA_RX_BW_80) + switch (sta->bandwidth) { + case IEEE80211_STA_RX_BW_160: + return RATE_MCS_CHAN_WIDTH_160; + case IEEE80211_STA_RX_BW_80: return RATE_MCS_CHAN_WIDTH_80; - else if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) + case IEEE80211_STA_RX_BW_40: return RATE_MCS_CHAN_WIDTH_40; - - return RATE_MCS_CHAN_WIDTH_20; + case IEEE80211_STA_RX_BW_20: + default: + return RATE_MCS_CHAN_WIDTH_20; + } } /* @@ -2213,7 +2241,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm, static void rs_rate_scale_perform(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_lq_sta *lq_sta, - int tid) + int tid, bool ndp) { int low = IWL_RATE_INVALID; int high = IWL_RATE_INVALID; @@ -2512,7 +2540,7 @@ lq_update: (lq_sta->tx_agg_tid_en & (1 << tid)) && (tid != IWL_MAX_TID_COUNT)) { tid_data = &sta_priv->tid_data[tid]; - if (tid_data->state == IWL_AGG_OFF) { + if (tid_data->state == IWL_AGG_OFF && !ndp) { IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid); @@ -2565,6 +2593,9 @@ static const struct rs_init_rate_info rs_optimal_rates_ht[] = { { S8_MIN, IWL_RATE_MCS_0_INDEX}, }; +/* MCS index 9 is not valid for 20MHz VHT channel width, + * but is ok for 40, 80 and 160MHz channels. 
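+ * ([Ed.] This matches the 802.11ac rate tables: at 20 MHz with one or two
+ *  spatial streams, MCS 9 would not yield an integer number of data bits
+ *  per OFDM symbol, so the standard leaves that combination undefined.)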
+ */ static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = { { -60, IWL_RATE_MCS_8_INDEX }, { -64, IWL_RATE_MCS_7_INDEX }, @@ -2577,7 +2608,7 @@ static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = { { S8_MIN, IWL_RATE_MCS_0_INDEX}, }; -static const struct rs_init_rate_info rs_optimal_rates_vht_40_80mhz[] = { +static const struct rs_init_rate_info rs_optimal_rates_vht[] = { { -60, IWL_RATE_MCS_9_INDEX }, { -64, IWL_RATE_MCS_8_INDEX }, { -68, IWL_RATE_MCS_7_INDEX }, @@ -2640,9 +2671,9 @@ static void rs_init_optimal_rate(struct iwl_mvm *mvm, lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz); } else { - lq_sta->optimal_rates = rs_optimal_rates_vht_40_80mhz; + lq_sta->optimal_rates = rs_optimal_rates_vht; lq_sta->optimal_nentries = - ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz); + ARRAY_SIZE(rs_optimal_rates_vht); } } else if (is_ht(rate)) { lq_sta->optimal_rates = rs_optimal_rates_ht; @@ -2734,23 +2765,25 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm, */ if (sta->vht_cap.vht_supported && best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { - if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { - initial_rates = rs_optimal_rates_vht_40_80mhz; - nentries = ARRAY_SIZE(rs_optimal_rates_vht_40_80mhz); - if (sta->bandwidth >= IEEE80211_STA_RX_BW_80) - rate->bw = RATE_MCS_CHAN_WIDTH_80; - else - rate->bw = RATE_MCS_CHAN_WIDTH_40; - } else if (sta->bandwidth == IEEE80211_STA_RX_BW_20) { + switch (sta->bandwidth) { + case IEEE80211_STA_RX_BW_160: + case IEEE80211_STA_RX_BW_80: + case IEEE80211_STA_RX_BW_40: + initial_rates = rs_optimal_rates_vht; + nentries = ARRAY_SIZE(rs_optimal_rates_vht); + break; + case IEEE80211_STA_RX_BW_20: initial_rates = rs_optimal_rates_vht_20mhz; nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz); - rate->bw = RATE_MCS_CHAN_WIDTH_20; - } else { + break; + default: IWL_ERR(mvm, "Invalid BW %d\n", sta->bandwidth); goto out; } + active_rate = lq_sta->active_siso_rate; rate->type = LQ_VHT_SISO; + rate->bw = rs_bw_from_sta_bw(sta); } else if (sta->ht_cap.ht_supported && best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { initial_rates = rs_optimal_rates_ht; @@ -3057,6 +3090,9 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg) case RATE_MCS_CHAN_WIDTH_80: mvm->drv_rx_stats.bw_80_frames++; break; + case RATE_MCS_CHAN_WIDTH_160: + mvm->drv_rx_stats.bw_160_frames++; + break; default: WARN_ONCE(1, "bad BW. rate 0x%x", rate); } @@ -3705,7 +3741,8 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, desc += sprintf(buff + desc, " %s", (is_ht20(rate)) ? "20MHz" : (is_ht40(rate)) ? "40MHz" : - (is_ht80(rate)) ? "80Mhz" : "BAD BW"); + (is_ht80(rate)) ? "80MHz" : + (is_ht160(rate)) ? "160MHz" : "BAD BW"); desc += sprintf(buff + desc, " %s %s %s %s\n", (rate->sgi) ? "SGI" : "NGI", (rate->ldpc) ? "LDPC" : "BCC", @@ -3787,9 +3824,10 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file, lq_sta->active_tbl == i ? "*" : "x", rate->type, rate->sgi, - is_ht20(rate) ? "20Mhz" : - is_ht40(rate) ? "40Mhz" : - is_ht80(rate) ? "80Mhz" : "ERR", + is_ht20(rate) ? "20MHz" : + is_ht40(rate) ? "40MHz" : + is_ht80(rate) ? "80MHz" : + is_ht160(rate) ? 
"160MHz" : "ERR", rate->index); for (j = 0; j < IWL_RATE_COUNT; j++) { desc += sprintf(buff+desc, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index 90d046fb2..ee207f2c0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -205,6 +205,7 @@ struct rs_rate { #define is_ht20(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_20) #define is_ht40(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_40) #define is_ht80(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_80) +#define is_ht160(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_160) #define IWL_MAX_MCS_DISPLAY_SIZE 12 @@ -362,7 +363,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, /* Notify RS about Tx status */ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, struct ieee80211_tx_info *info); + int tid, struct ieee80211_tx_info *info, bool ndp); /** * iwl_rate_control_register - Register the rate control algorithm callbacks diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index ab7f7eda9..0e60e38b2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -101,7 +101,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, struct napi_struct *napi, struct sk_buff *skb, struct ieee80211_hdr *hdr, u16 len, - u32 ampdu_status, u8 crypt_len, + u8 crypt_len, struct iwl_rx_cmd_buffer *rxb) { unsigned int hdrlen, fraglen; @@ -268,7 +268,6 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct ieee80211_sta *sta = NULL; struct sk_buff *skb; u32 len; - u32 ampdu_status; u32 rate_n_flags; u32 rx_pkt_status; u8 crypt_len = 0; @@ -354,13 +353,22 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, if (sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_vif *tx_blocked_vif = + rcu_dereference(mvm->csa_tx_blocked_vif); /* We have tx blocked stations (with CS bit). If we heard * frames from a blocked station on a new channel we can * TX to it again. */ - if (unlikely(mvm->csa_tx_block_bcn_timeout)) - iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false); + if (unlikely(tx_blocked_vif) && + mvmsta->vif == tx_blocked_vif) { + struct iwl_mvm_vif *mvmvif = + iwl_mvm_vif_from_mac80211(tx_blocked_vif); + + if (mvmvif->csa_target_freq == rx_status->freq) + iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, + false); + } rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status); @@ -471,7 +479,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, iwl_mvm_ref(mvm, IWL_MVM_REF_RX); iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len, - ampdu_status, crypt_len, rxb); + crypt_len, rxb); if (take_ref) iwl_mvm_unref(mvm, IWL_MVM_REF_RX); @@ -490,6 +498,7 @@ struct iwl_mvm_stat_data { __le32 mac_id; u8 beacon_filter_average_energy; struct mvm_statistics_general_v8 *general; + struct mvm_statistics_load *load; }; static void iwl_mvm_stat_iterator(void *_data, u8 *mac, @@ -606,13 +615,15 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { - struct iwl_notif_statistics_v10 *stats = (void *)&pkt->data; + struct iwl_notif_statistics_v11 *stats = (void *)&pkt->data; struct iwl_mvm_stat_data data = { .mvm = mvm, }; + int expected_size = iwl_mvm_has_new_rx_api(mvm) ? 
sizeof(*stats) : + sizeof(struct iwl_notif_statistics_v10); u32 temperature; - if (iwl_rx_packet_payload_len(pkt) != sizeof(*stats)) + if (iwl_rx_packet_payload_len(pkt) != expected_size) goto invalid; temperature = le32_to_cpu(stats->general.radio_temperature); @@ -630,6 +641,25 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, le64_to_cpu(stats->general.on_time_scan); data.general = &stats->general; + if (iwl_mvm_has_new_rx_api(mvm)) { + int i; + + data.load = &stats->load_stats; + + rcu_read_lock(); + for (i = 0; i < IWL_MVM_STATION_COUNT; i++) { + struct iwl_mvm_sta *sta; + + if (!data.load->avg_energy[i]) + continue; + + sta = iwl_mvm_sta_from_staid_rcu(mvm, i); + if (!sta) + continue; + sta->avg_energy = data.load->avg_energy[i]; + } + rcu_read_unlock(); + } iwl_mvm_rx_stats_check_trigger(mvm, pkt); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 2c61516d0..df6c32caa 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -65,19 +65,6 @@ #include "fw-api.h" #include "fw-dbg.h" -void iwl_mvm_rx_phy_cmd_mq(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) -{ - mvm->ampdu_ref++; - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) { - spin_lock(&mvm->drv_stats_lock); - mvm->drv_rx_stats.ampdu_count++; - spin_unlock(&mvm->drv_stats_lock); - } -#endif -} - static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, int queue, struct ieee80211_sta *sta) { @@ -489,6 +476,9 @@ void iwl_mvm_reorder_timer_expired(unsigned long data) rcu_read_lock(); sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[buf->sta_id]); /* SN is set to the last expired frame + 1 */ + IWL_DEBUG_HT(buf->mvm, + "Releasing expired frames for sta %u, sn %d\n", + buf->sta_id, sn); iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn); rcu_read_unlock(); } else if (buf->num_stored) { @@ -587,6 +577,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, struct sk_buff *tail; u32 reorder = le32_to_cpu(desc->reorder_data); bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU; + bool last_subframe = + desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME; u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; u8 sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; @@ -653,7 +645,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, /* release immediately if allowed by nssn and no stored frames */ if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) { if (iwl_mvm_is_sn_less(buffer->head_sn, nssn, - buffer->buf_size)) + buffer->buf_size) && + (!amsdu || last_subframe)) buffer->head_sn = nssn; /* No need to update AMSDU last SN - we are moving the head */ spin_unlock_bh(&buffer->lock); @@ -687,7 +680,20 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, buffer->last_sub_index = sub_frame_idx; } - iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn); + /* + * We cannot trust NSSN for AMSDU sub-frames that are not the last. + * The reason is that NSSN advances on the first sub-frame, and may + * cause the reorder buffer to advance before all the sub-frames arrive. + * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with + * SN 1. NSSN for first sub frame will be 3 with the result of driver + * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is + * already ahead and it will be dropped. 
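+ * ([Ed.] NSSN here is the firmware's "next SN to be released" cursor; the
+ *  reorder buffer head may only advance up to it.)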
+ * If the last sub-frame is not on this queue - we will get frame + * release notification with up to date NSSN. + */ + if (!amsdu || last_subframe) + iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn); + spin_unlock_bh(&buffer->lock); return true; @@ -736,6 +742,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc)); u32 len = le16_to_cpu(desc->mpdu_len); u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags); + u16 phy_info = le16_to_cpu(desc->phy_info); struct ieee80211_sta *sta = NULL; struct sk_buff *skb; u8 crypt_len = 0; @@ -766,16 +773,34 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, le16_to_cpu(desc->status)); rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; } - - rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise); + /* set the preamble flag if appropriate */ + if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE) + rx_status->flag |= RX_FLAG_SHORTPRE; + + if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) { + rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise); + /* TSF as indicated by the firmware is at INA time */ + rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; + } rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise); rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; rx_status->freq = ieee80211_channel_to_frequency(desc->channel, rx_status->band); iwl_mvm_get_signal_strength(mvm, desc, rx_status); - /* TSF as indicated by the firmware is at INA time */ - rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; + + /* update aggregation data for monitor sake on default queue */ + if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) { + bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; + + rx_status->flag |= RX_FLAG_AMPDU_DETAILS; + rx_status->ampdu_reference = mvm->ampdu_ref; + /* toggle is switched whenever new aggregation starts */ + if (toggle_bit != mvm->ampdu_toggle) { + mvm->ampdu_ref++; + mvm->ampdu_toggle = toggle_bit; + } + } rcu_read_lock(); @@ -797,6 +822,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_vif *tx_blocked_vif = + rcu_dereference(mvm->csa_tx_blocked_vif); u8 baid = (u8)((le32_to_cpu(desc->reorder_data) & IWL_RX_MPDU_REORDER_BAID_MASK) >> IWL_RX_MPDU_REORDER_BAID_SHIFT); @@ -806,8 +833,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, * frames from a blocked station on a new channel we can * TX to it again. */ - if (unlikely(mvm->csa_tx_block_bcn_timeout)) - iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, false); + if (unlikely(tx_blocked_vif) && + tx_blocked_vif == mvmsta->vif) { + struct iwl_mvm_vif *mvmvif = + iwl_mvm_vif_from_mac80211(tx_blocked_vif); + + if (mvmvif->csa_target_freq == rx_status->freq) + iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, + false); + } rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status); @@ -830,8 +864,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL); } - /* TODO: multi queue TCM */ - if (ieee80211_is_data(hdr->frame_control)) iwl_mvm_rx_csum(sta, skb, desc); @@ -856,14 +888,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, iwl_mvm_agg_rx_received(mvm, baid); } - /* - * TODO: PHY info. - * Verify we don't have the information in the MPDU descriptor and - * that it is not needed. 
- * Make sure for monitor mode that we are on default queue, update - * ampdu_ref and the rest of phy info then - */ - /* Set up the HT phy flags */ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: @@ -907,8 +931,18 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->band); } - /* TODO: PHY info - update ampdu queue statistics (for debugfs) */ - /* TODO: PHY info - gscan */ + /* management stuff on default queue */ + if (!queue) { + if (unlikely((ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control)) && + mvm->sched_scan_pass_all == + SCHED_SCAN_PASS_ALL_ENABLED)) + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND; + + if (unlikely(ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control))) + rx_status->boottime_ns = ktime_get_boot_ns(); + } iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb); if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc)) @@ -927,6 +961,9 @@ void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, int baid = release->baid; + IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n", + release->baid, le16_to_cpu(release->nssn)); + if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index e78fc567f..dac120f88 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -391,15 +391,18 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) { + struct cfg80211_scan_info info = { + .aborted = aborted, + }; + IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n", aborted ? 
"aborted" : "completed", iwl_mvm_ebs_status_str(scan_notif->ebs_status)); mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR; - ieee80211_scan_completed(mvm->hw, - scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED); + ieee80211_scan_completed(mvm->hw, &info); iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - del_timer(&mvm->scan_timer); + cancel_delayed_work(&mvm->scan_timeout_dwork); } else { IWL_ERR(mvm, "got scan complete notification but no scan is running\n"); @@ -1222,15 +1225,16 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) return -EIO; } -#define SCAN_TIMEOUT (20 * HZ) +#define SCAN_TIMEOUT 20000 -void iwl_mvm_scan_timeout(unsigned long data) +void iwl_mvm_scan_timeout_wk(struct work_struct *work) { - struct iwl_mvm *mvm = (struct iwl_mvm *)data; + struct delayed_work *delayed_work = to_delayed_work(work); + struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm, + scan_timeout_dwork); IWL_ERR(mvm, "regular scan timed out\n"); - del_timer(&mvm->scan_timer); iwl_force_nmi(mvm->trans); } @@ -1313,7 +1317,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, mvm->scan_status |= IWL_MVM_SCAN_REGULAR; iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN); - mod_timer(&mvm->scan_timer, jiffies + SCAN_TIMEOUT); + queue_delayed_work(system_wq, &mvm->scan_timeout_dwork, + msecs_to_jiffies(SCAN_TIMEOUT)); return 0; } @@ -1430,9 +1435,13 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, /* if the scan is already stopping, we don't need to notify mac80211 */ if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) { - ieee80211_scan_completed(mvm->hw, aborted); + struct cfg80211_scan_info info = { + .aborted = aborted, + }; + + ieee80211_scan_completed(mvm->hw, &info); iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - del_timer(&mvm->scan_timer); + cancel_delayed_work(&mvm->scan_timeout_dwork); } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; @@ -1564,7 +1573,11 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR); if (uid >= 0) { - ieee80211_scan_completed(mvm->hw, true); + struct cfg80211_scan_info info = { + .aborted = true, + }; + + ieee80211_scan_completed(mvm->hw, &info); mvm->scan_uid_status[uid] = 0; } uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED); @@ -1585,8 +1598,13 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) mvm->scan_uid_status[i] = 0; } } else { - if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) - ieee80211_scan_completed(mvm->hw, true); + if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + + ieee80211_scan_completed(mvm->hw, &info); + } /* Sched scan will be restarted by mac80211 in * restart_hw, so do not report if FW is about to be @@ -1628,9 +1646,14 @@ out: * to release the scan reference here. 
*/ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); - del_timer(&mvm->scan_timer); - if (notify) - ieee80211_scan_completed(mvm->hw, true); + cancel_delayed_work(&mvm->scan_timeout_dwork); + if (notify) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + + ieee80211_scan_completed(mvm->hw, &info); + } } else if (notify) { ieee80211_sched_scan_stopped(mvm->hw); mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c index 443a42855..101fb04a8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c @@ -215,7 +215,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, enum iwl_sf_state new_state) { struct iwl_sf_cfg_cmd sf_cmd = { - .state = cpu_to_le32(SF_FULL_ON), + .state = cpu_to_le32(new_state), }; struct ieee80211_sta *sta; int ret = 0; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index b23ab4a45..3130b9c68 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -310,6 +310,304 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm, iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0); } +/* Disable aggregations for a bitmap of TIDs for a given station */ +static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, + unsigned long disable_agg_tids, + bool remove_queue) +{ + struct iwl_mvm_add_sta_cmd cmd = {}; + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + u32 status; + u8 sta_id; + int ret; + + spin_lock_bh(&mvm->queue_info_lock); + sta_id = mvm->queue_info[queue].ra_sta_id; + spin_unlock_bh(&mvm->queue_info_lock); + + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { + rcu_read_unlock(); + return -EINVAL; + } + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + + mvmsta->tid_disable_agg |= disable_agg_tids; + + cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); + cmd.sta_id = mvmsta->sta_id; + cmd.add_modify = STA_MODE_MODIFY; + cmd.modify_mask = STA_MODIFY_QUEUES; + if (disable_agg_tids) + cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; + if (remove_queue) + cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL; + cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk); + cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg); + + rcu_read_unlock(); + + /* Notify FW of queue removal from the STA queues */ + status = ADD_STA_SUCCESS; + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), + &cmd, &status); + + return ret; +} + +static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue) +{ + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + unsigned long tid_bitmap; + unsigned long agg_tids = 0; + s8 sta_id; + int tid; + + lockdep_assert_held(&mvm->mutex); + + spin_lock_bh(&mvm->queue_info_lock); + sta_id = mvm->queue_info[queue].ra_sta_id; + tid_bitmap = mvm->queue_info[queue].tid_bitmap; + spin_unlock_bh(&mvm->queue_info_lock); + + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) + return -EINVAL; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + + spin_lock_bh(&mvmsta->lock); + for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { + if (mvmsta->tid_data[tid].state == IWL_AGG_ON) + agg_tids |= BIT(tid); + } + spin_unlock_bh(&mvmsta->lock); + + return agg_tids; +} + +/* + * Remove 
a queue from a station's resources. + * Note that this only marks as free. It DOESN'T delete a BA agreement, and + * doesn't disable the queue + */ +static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) +{ + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + unsigned long tid_bitmap; + unsigned long disable_agg_tids = 0; + u8 sta_id; + int tid; + + lockdep_assert_held(&mvm->mutex); + + spin_lock_bh(&mvm->queue_info_lock); + sta_id = mvm->queue_info[queue].ra_sta_id; + tid_bitmap = mvm->queue_info[queue].tid_bitmap; + spin_unlock_bh(&mvm->queue_info_lock); + + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { + rcu_read_unlock(); + return 0; + } + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + + spin_lock_bh(&mvmsta->lock); + /* Unmap MAC queues and TIDs from this queue */ + for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { + if (mvmsta->tid_data[tid].state == IWL_AGG_ON) + disable_agg_tids |= BIT(tid); + mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE; + } + + mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */ + spin_unlock_bh(&mvmsta->lock); + + rcu_read_unlock(); + + spin_lock_bh(&mvm->queue_info_lock); + /* Unmap MAC queues and TIDs from this queue */ + mvm->queue_info[queue].hw_queue_to_mac80211 = 0; + mvm->queue_info[queue].hw_queue_refcount = 0; + mvm->queue_info[queue].tid_bitmap = 0; + spin_unlock_bh(&mvm->queue_info_lock); + + return disable_agg_tids; +} + +static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, + unsigned long tfd_queue_mask, u8 ac) +{ + int queue = 0; + u8 ac_to_queue[IEEE80211_NUM_ACS]; + int i; + + lockdep_assert_held(&mvm->queue_info_lock); + + memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue)); + + /* See what ACs the existing queues for this STA have */ + for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) { + /* Only DATA queues can be shared */ + if (i < IWL_MVM_DQA_MIN_DATA_QUEUE && + i != IWL_MVM_DQA_BSS_CLIENT_QUEUE) + continue; + + ac_to_queue[mvm->queue_info[i].mac80211_ac] = i; + } + + /* + * The queue to share is chosen only from DATA queues as follows (in + * descending priority): + * 1. An AC_BE queue + * 2. Same AC queue + * 3. Highest AC queue that is lower than new AC + * 4. 
Any existing AC (there always is at least 1 DATA queue) + */ + + /* Priority 1: An AC_BE queue */ + if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE) + queue = ac_to_queue[IEEE80211_AC_BE]; + /* Priority 2: Same AC queue */ + else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE) + queue = ac_to_queue[ac]; + /* Priority 3a: If new AC is VO and VI exists - use VI */ + else if (ac == IEEE80211_AC_VO && + ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) + queue = ac_to_queue[IEEE80211_AC_VI]; + /* Priority 3b: No BE so only AC less than the new one is BK */ + else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE) + queue = ac_to_queue[IEEE80211_AC_BK]; + /* Priority 4a: No BE nor BK - use VI if exists */ + else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) + queue = ac_to_queue[IEEE80211_AC_VI]; + /* Priority 4b: No BE, BK nor VI - use VO if exists */ + else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE) + queue = ac_to_queue[IEEE80211_AC_VO]; + + /* Make sure queue found (or not) is legal */ + if (!((queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE && + queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE) || + (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE && + queue <= IWL_MVM_DQA_MAX_DATA_QUEUE) || + (queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE))) { + IWL_ERR(mvm, "No DATA queues available to share\n"); + queue = -ENOSPC; + } + + return queue; +} + +/* + * If a given queue has a higher AC than the TID stream that is being added to + * it, the queue needs to be redirected to the lower AC. This function does that + * in such a case, otherwise - if no redirection required - it does nothing, + * unless the %force param is true. + */ +static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, + int ac, int ssn, unsigned int wdg_timeout, + bool force) +{ + struct iwl_scd_txq_cfg_cmd cmd = { + .scd_queue = queue, + .enable = 0, + }; + bool shared_queue; + unsigned long mq; + int ret; + + /* + * If the AC is lower than current one - FIFO needs to be redirected to + * the lowest one of the streams in the queue. Check if this is needed + * here. + * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with + * value 3 and VO with value 0, so to check if ac X is lower than ac Y + * we need to check if the numerical value of X is LARGER than of Y. 
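+ * ([Ed.] For reference, mac80211's enum ieee80211_ac_numbers is
+ *  IEEE80211_AC_VO = 0, IEEE80211_AC_VI = 1, IEEE80211_AC_BE = 2,
+ *  IEEE80211_AC_BK = 3 - hence "lower AC" means a larger number.)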
+ */ + spin_lock_bh(&mvm->queue_info_lock); + if (ac <= mvm->queue_info[queue].mac80211_ac && !force) { + spin_unlock_bh(&mvm->queue_info_lock); + + IWL_DEBUG_TX_QUEUES(mvm, + "No redirection needed on TXQ #%d\n", + queue); + return 0; + } + + cmd.sta_id = mvm->queue_info[queue].ra_sta_id; + cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; + mq = mvm->queue_info[queue].hw_queue_to_mac80211; + shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1); + spin_unlock_bh(&mvm->queue_info_lock); + + IWL_DEBUG_TX_QUEUES(mvm, "Redirecting shared TXQ #%d to FIFO #%d\n", + queue, iwl_mvm_ac_to_tx_fifo[ac]); + + /* Stop MAC queues and wait for this queue to empty */ + iwl_mvm_stop_mac_queues(mvm, mq); + ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue)); + if (ret) { + IWL_ERR(mvm, "Error draining queue %d before reconfig\n", + queue); + ret = -EIO; + goto out; + } + + /* Before redirecting the queue we need to de-activate it */ + iwl_trans_txq_disable(mvm->trans, queue, false); + ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue, + ret); + + /* Make sure the SCD wrptr is correctly set before reconfiguring */ + iwl_trans_txq_enable(mvm->trans, queue, iwl_mvm_ac_to_tx_fifo[ac], + cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF, + ssn, wdg_timeout); + + /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */ + + /* Redirect to lower AC */ + iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac], + cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF, + ssn); + + /* Update AC marking of the queue */ + spin_lock_bh(&mvm->queue_info_lock); + mvm->queue_info[queue].mac80211_ac = ac; + spin_unlock_bh(&mvm->queue_info_lock); + + /* + * Mark queue as shared in transport if shared + * Note this has to be done after queue enablement because enablement + * can also set this value, and there is no indication there to shared + * queues + */ + if (shared_queue) + iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); + +out: + /* Continue using the MAC queues */ + iwl_mvm_start_mac_queues(mvm, mq); + + return ret; +} + static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, struct ieee80211_sta *sta, u8 ac, int tid, struct ieee80211_hdr *hdr) @@ -325,11 +623,20 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); u8 mac_queue = mvmsta->vif->hw_queue[ac]; int queue = -1; + bool using_inactive_queue = false; + unsigned long disable_agg_tids = 0; + enum iwl_mvm_agg_state queue_state; + bool shared_queue = false; int ssn; + unsigned long tfd_queue_mask; int ret; lockdep_assert_held(&mvm->mutex); + spin_lock_bh(&mvmsta->lock); + tfd_queue_mask = mvmsta->tfd_queue_msk; + spin_unlock_bh(&mvmsta->lock); + spin_lock_bh(&mvm->queue_info_lock); /* @@ -338,7 +645,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, */ if (!ieee80211_is_data_qos(hdr->frame_control) || ieee80211_is_qos_nullfunc(hdr->frame_control)) { - queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE, + queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, + IWL_MVM_DQA_MIN_MGMT_QUEUE, IWL_MVM_DQA_MAX_MGMT_QUEUE); if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n", @@ -347,29 +655,62 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, /* If no such queue is found, we'll use a DATA queue instead */ } - if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { + if ((queue < 0 
&& mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) && + (mvm->queue_info[mvmsta->reserved_queue].status == + IWL_MVM_QUEUE_RESERVED || + mvm->queue_info[mvmsta->reserved_queue].status == + IWL_MVM_QUEUE_INACTIVE)) { queue = mvmsta->reserved_queue; + mvm->queue_info[queue].reserved = true; IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue); } if (queue < 0) - queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE, + queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, + IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); /* + * Check if this queue is already allocated but inactive. + * In such a case, we'll need to first free this queue before enabling + * it again, so we'll mark it as reserved to make sure no new traffic + * arrives on it + */ + if (queue > 0 && + mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) { + mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; + using_inactive_queue = true; + IWL_DEBUG_TX_QUEUES(mvm, + "Re-assigning TXQ %d: sta_id=%d, tid=%d\n", + queue, mvmsta->sta_id, tid); + } + + /* No free queue - we'll have to share */ + if (queue <= 0) { + queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac); + if (queue > 0) { + shared_queue = true; + mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED; + } + } + + /* * Mark TXQ as ready, even though it hasn't been fully configured yet, * to make sure no one else takes it. * This will allow avoiding re-acquiring the lock at the end of the * configuration. On error we'll mark it back as free. */ - if (queue >= 0) + if ((queue > 0) && !shared_queue) mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; spin_unlock_bh(&mvm->queue_info_lock); - /* TODO: support shared queues for same RA */ - if (queue < 0) + /* This shouldn't happen - out of queues */ + if (WARN_ON(queue <= 0)) { + IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", + tid, cfg.sta_id); return -ENOSPC; + } /* * Actual en/disablement of aggregations is through the ADD_STA HCMD, @@ -380,24 +721,103 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE); - IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n", - queue, mvmsta->sta_id, tid); + /* + * If this queue was previously inactive (idle) - we need to free it + * first + */ + if (using_inactive_queue) { + struct iwl_scd_txq_cfg_cmd cmd = { + .scd_queue = queue, + .enable = 0, + }; + u8 ac; + + disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); + + spin_lock_bh(&mvm->queue_info_lock); + ac = mvm->queue_info[queue].mac80211_ac; + cmd.sta_id = mvm->queue_info[queue].ra_sta_id; + cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac]; + spin_unlock_bh(&mvm->queue_info_lock); + + /* Disable the queue */ + iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, + true); + iwl_trans_txq_disable(mvm->trans, queue, false); + ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), + &cmd); + if (ret) { + IWL_ERR(mvm, + "Failed to free inactive queue %d (ret=%d)\n", + queue, ret); + + /* Re-mark the inactive queue as inactive */ + spin_lock_bh(&mvm->queue_info_lock); + mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE; + spin_unlock_bh(&mvm->queue_info_lock); + + return ret; + } + } + + IWL_DEBUG_TX_QUEUES(mvm, + "Allocating %squeue #%d to sta %d on tid %d\n", + shared_queue ? 
"shared " : "", queue, + mvmsta->sta_id, tid); + + if (shared_queue) { + /* Disable any open aggs on this queue */ + disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue); + + if (disable_agg_tids) { + IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n", + queue); + iwl_mvm_invalidate_sta_queue(mvm, queue, + disable_agg_tids, false); + } + } ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg, wdg_timeout); + /* + * Mark queue as shared in transport if shared + * Note this has to be done after queue enablement because enablement + * can also set this value, and there is no indication there to shared + * queues + */ + if (shared_queue) + iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); + spin_lock_bh(&mvmsta->lock); mvmsta->tid_data[tid].txq_id = queue; + mvmsta->tid_data[tid].is_tid_active = true; mvmsta->tfd_queue_msk |= BIT(queue); + queue_state = mvmsta->tid_data[tid].state; if (mvmsta->reserved_queue == queue) mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE; spin_unlock_bh(&mvmsta->lock); - ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); - if (ret) - goto out_err; + if (!shared_queue) { + ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); + if (ret) + goto out_err; + + /* If we need to re-enable aggregations... */ + if (queue_state == IWL_AGG_ON) { + ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); + if (ret) + goto out_err; + } + } else { + /* Redirect queue, if needed */ + ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn, + wdg_timeout, false); + if (ret) + goto out_err; + } return 0; @@ -476,6 +896,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) unsigned long deferred_tid_traffic; int sta_id, tid; + /* Check inactivity of queues */ + iwl_mvm_inactivity_check(mvm); + mutex_lock(&mvm->mutex); /* Go over all stations with deferred traffic */ @@ -505,6 +928,12 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); int queue; + /* + * Check for inactive queues, so we don't reach a situation where we + * can't add a STA due to a shortage in queues that doesn't really exist + */ + iwl_mvm_inactivity_check(mvm); + spin_lock_bh(&mvm->queue_info_lock); /* Make sure we have free resources for this STA */ @@ -514,7 +943,8 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, IWL_MVM_QUEUE_FREE)) queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; else - queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE, + queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, + IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (queue < 0) { spin_unlock_bh(&mvm->queue_info_lock); @@ -568,8 +998,11 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */ mvm_sta->tfd_queue_msk = 0; - /* allocate new queues for a TDLS station */ - if (sta->tdls) { + /* + * Allocate new queues for a TDLS station, unless we're in DQA mode, + * and then they'll be allocated dynamically + */ + if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) { ret = iwl_mvm_tdls_sta_init(mvm, sta); if (ret) return ret; @@ -633,7 +1066,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, return 0; err: - iwl_mvm_tdls_sta_deinit(mvm, sta); + if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) + iwl_mvm_tdls_sta_deinit(mvm, sta); return ret; } @@ -819,8 +1253,9 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, if (iwl_mvm_has_new_rx_api(mvm)) kfree(mvm_sta->dup_data); - if (vif->type == NL80211_IFTYPE_STATION && - 
mvmvif->ap_sta_id == mvm_sta->sta_id) { + if ((vif->type == NL80211_IFTYPE_STATION && + mvmvif->ap_sta_id == mvm_sta->sta_id) || + iwl_mvm_is_dqa_supported(mvm)){ ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); if (ret) return ret; @@ -838,16 +1273,19 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, if (iwl_mvm_is_dqa_supported(mvm)) iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta); - /* if we are associated - we can't remove the AP STA now */ - if (vif->bss_conf.assoc) - return ret; + if (vif->type == NL80211_IFTYPE_STATION && + mvmvif->ap_sta_id == mvm_sta->sta_id) { + /* if associated - we can't remove the AP STA now */ + if (vif->bss_conf.assoc) + return ret; - /* unassoc - go ahead - remove the AP STA now */ - mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; + /* unassoc - go ahead - remove the AP STA now */ + mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; - /* clear d0i3_ap_sta_id if no longer relevant */ - if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id) - mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; + /* clear d0i3_ap_sta_id if no longer relevant */ + if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id) + mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; + } } /* @@ -885,7 +1323,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, } else { spin_unlock_bh(&mvm_sta->lock); - if (sta->tdls) + if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) iwl_mvm_tdls_sta_deinit(mvm, sta); ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); @@ -983,8 +1421,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); /* Map Aux queue to fifo - needs to happen before adding Aux station */ - iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue, - IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); + if (!iwl_mvm_is_dqa_supported(mvm)) + iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue, + IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); /* Allocate aux station and assign to it the aux queue */ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), @@ -992,6 +1431,19 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) if (ret) return ret; + if (iwl_mvm_is_dqa_supported(mvm)) { + struct iwl_trans_txq_scd_cfg cfg = { + .fifo = IWL_MVM_TX_FIFO_MCAST, + .sta_id = mvm->aux_sta.sta_id, + .tid = IWL_MAX_TID_COUNT, + .aggregate = false, + .frame_limit = IWL_FRAME_LIMIT, + }; + + iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg, + wdg_timeout); + } + ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, MAC_INDEX_AUX, 0); @@ -1316,8 +1768,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: - IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n", - start ? "start" : "stopp"); + IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", + start ? 
"start" : "stopp"); break; case ADD_STA_IMMEDIATE_BA_FAILURE: IWL_WARN(mvm, "RX BA Session refused by fw\n"); @@ -1372,13 +1824,16 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, * supposed to happen) and we will free the session data while * RX is being processed in parallel */ + IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n", + mvm_sta->sta_id, tid, baid); WARN_ON(rcu_access_pointer(mvm->baid_map[baid])); rcu_assign_pointer(mvm->baid_map[baid], baid_data); - } else if (mvm->rx_ba_sessions > 0) { + } else { u8 baid = mvm_sta->tid_to_baid[tid]; - /* check that restart flow didn't zero the counter */ - mvm->rx_ba_sessions--; + if (mvm->rx_ba_sessions > 0) + /* check that restart flow didn't zero the counter */ + mvm->rx_ba_sessions--; if (!iwl_mvm_has_new_rx_api(mvm)) return 0; @@ -1394,6 +1849,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, del_timer_sync(&baid_data->session_timer); RCU_INIT_POINTER(mvm->baid_map[baid], NULL); kfree_rcu(baid_data, rcu_head); + IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid); } return 0; @@ -1402,8 +1858,8 @@ out_free: return ret; } -static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u8 queue, bool start) +int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int tid, u8 queue, bool start) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = {}; @@ -1458,6 +1914,7 @@ const u8 tid_to_mac80211_ac[] = { IEEE80211_AC_VI, IEEE80211_AC_VO, IEEE80211_AC_VO, + IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */ }; static const u8 tid_to_ucode_ac[] = { @@ -1512,7 +1969,8 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, txq_id = mvmsta->tid_data[tid].txq_id; if (!iwl_mvm_is_dqa_supported(mvm) || mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) { - txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue, + txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, + mvm->first_agg_queue, mvm->last_agg_queue); if (txq_id < 0) { ret = txq_id; @@ -1907,6 +2365,13 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); memcpy(cmd.key + 3, keyconf->key, keyconf->keylen); break; + case WLAN_CIPHER_SUITE_GCMP_256: + key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES); + /* fall through */ + case WLAN_CIPHER_SUITE_GCMP: + key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP); + memcpy(cmd.key, keyconf->key, keyconf->keylen); + break; default: key_flags |= cpu_to_le16(STA_KEY_FLG_EXT); memcpy(cmd.key, keyconf->key, keyconf->keylen); @@ -2035,6 +2500,8 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, 0, NULL, 0, key_offset); break; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index d2c58f134..bbc1cab2c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -321,6 +321,9 @@ enum iwl_mvm_agg_state { * Basically when next_reclaimed reaches ssn, we can tell mac80211 that * we are ready to finish the Tx AGG stop / start flow. * @tx_time: medium time consumed by this A-MPDU + * @is_tid_active: has this TID sent traffic in the last + * %IWL_MVM_DQA_QUEUE_TIMEOUT time period. 
If %txq_id is invalid, this + * field should be ignored. */ struct iwl_mvm_tid_data { struct sk_buff_head deferred_tx_frames; @@ -333,6 +336,7 @@ struct iwl_mvm_tid_data { u16 txq_id; u16 ssn; u16 tx_time; + bool is_tid_active; }; static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data) @@ -434,6 +438,7 @@ struct iwl_mvm_sta { bool tlc_amsdu; u8 agg_tids; u8 sleep_tx_count; + u8 avg_energy; }; static inline struct iwl_mvm_sta * @@ -509,6 +514,9 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); +int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + int tid, u8 queue, bool start); + int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm); void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index b92b75fea..b3a87a31d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -138,28 +138,19 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb, protocol = ipv6h->nexthdr; while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) { + struct ipv6_opt_hdr *hp; + /* only supported extension headers */ if (protocol != NEXTHDR_ROUTING && protocol != NEXTHDR_HOP && - protocol != NEXTHDR_DEST && - protocol != NEXTHDR_FRAGMENT) { + protocol != NEXTHDR_DEST) { skb_checksum_help(skb); return; } - if (protocol == NEXTHDR_FRAGMENT) { - struct frag_hdr *hp = - OPT_HDR(struct frag_hdr, skb, off); - - protocol = hp->nexthdr; - off += sizeof(struct frag_hdr); - } else { - struct ipv6_opt_hdr *hp = - OPT_HDR(struct ipv6_opt_hdr, skb, off); - - protocol = hp->nexthdr; - off += ipv6_optlen(hp); - } + hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); + protocol = hp->nexthdr; + off += ipv6_optlen(hp); } /* if we get here - protocol now should be TCP/UDP */ #endif @@ -388,6 +379,23 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags); } +static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info, + u8 *crypto_hdr) +{ + struct ieee80211_key_conf *keyconf = info->control.hw_key; + u64 pn; + + pn = atomic64_inc_return(&keyconf->tx_pn); + crypto_hdr[0] = pn; + crypto_hdr[2] = 0; + crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6); + crypto_hdr[1] = pn >> 8; + crypto_hdr[4] = pn >> 16; + crypto_hdr[5] = pn >> 24; + crypto_hdr[6] = pn >> 32; + crypto_hdr[7] = pn >> 40; +} + /* * Sets the fields in the Tx cmd that are crypto related */ @@ -405,15 +413,7 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd); - pn = atomic64_inc_return(&keyconf->tx_pn); - crypto_hdr[0] = pn; - crypto_hdr[2] = 0; - crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6); - crypto_hdr[1] = pn >> 8; - crypto_hdr[4] = pn >> 16; - crypto_hdr[5] = pn >> 24; - crypto_hdr[6] = pn >> 32; - crypto_hdr[7] = pn >> 40; + iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); break; case WLAN_CIPHER_SUITE_TKIP: @@ -433,6 +433,18 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen); break; + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: + /* TODO: Taking the key from the table might introduce a race + * when PTK rekeying is done, having an old packets with a PN + * based 
on the old key but the message encrypted with a new + * one. + * Need to handle this. + */ + tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TC_CMD_SEC_KEY_FROM_TABLE; + tx_cmd->key[0] = keyconf->hw_key_idx; + iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); + break; default: tx_cmd->sec_ctl |= TX_CMD_SEC_EXT; } @@ -533,6 +545,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) * (this is not possible for unicast packets as a TLDS discovery * response are sent without a station entry); otherwise use the * AUX station. + * In DQA mode, if vif is of type STATION and frames are not multicast, + * they should be sent from the BSS queue. For example, TDLS setup + * frames should be sent on this queue, as they go through the AP. */ sta_id = mvm->aux_sta.sta_id; if (info.control.vif) { @@ -550,6 +565,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (ap_sta_id != IWL_MVM_STATION_COUNT) sta_id = ap_sta_id; + } else if (iwl_mvm_is_dqa_supported(mvm) && + info.control.vif->type == NL80211_IFTYPE_STATION) { + queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; } } @@ -883,15 +901,17 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, * nullfunc frames should go to the MGMT queue regardless of QOS */ tid = IWL_MAX_TID_COUNT; - txq_id = mvmsta->tid_data[tid].txq_id; } + if (iwl_mvm_is_dqa_supported(mvm)) + txq_id = mvmsta->tid_data[tid].txq_id; + /* Copy MAC header from skb into command buffer */ memcpy(tx_cmd->hdr, hdr, hdrlen); WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); - if (sta->tdls) { + if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) { /* default to TID 0 for non-QoS packets */ u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid; @@ -904,9 +924,12 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, txq_id = mvmsta->tid_data[tid].txq_id; } - if (iwl_mvm_is_dqa_supported(mvm)) { - if (unlikely(mvmsta->tid_data[tid].txq_id == - IEEE80211_INVAL_HW_QUEUE)) { + /* Check if TXQ needs to be allocated or re-activated */ + if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE || + !mvmsta->tid_data[tid].is_tid_active) && + iwl_mvm_is_dqa_supported(mvm)) { + /* If TXQ needs to be allocated... 
*/ + if (txq_id == IEEE80211_INVAL_HW_QUEUE) { iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); /* @@ -916,11 +939,22 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); return 0; + } - txq_id = mvmsta->tid_data[tid].txq_id; + /* If we are here - TXQ exists and needs to be re-activated */ + spin_lock(&mvm->queue_info_lock); + mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; + mvmsta->tid_data[tid].is_tid_active = true; + spin_unlock(&mvm->queue_info_lock); + + IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n", + txq_id); } + /* Keep track of the time of the last frame for this RA/TID */ + mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; + IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id, tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number)); @@ -1312,7 +1346,15 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, bool send_eosp_ndp = false; spin_lock_bh(&mvmsta->lock); - txq_agg = (mvmsta->tid_data[tid].state == IWL_AGG_ON); + if (iwl_mvm_is_dqa_supported(mvm)) { + enum iwl_mvm_agg_state state; + + state = mvmsta->tid_data[tid].state; + txq_agg = (state == IWL_AGG_ON || + state == IWL_EMPTYING_HW_QUEUE_DELBA); + } else { + txq_agg = txq_id >= mvm->first_agg_queue; + } if (!is_ndp) { tid_data->next_reclaimed = next_reclaimed; @@ -1643,7 +1685,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data); IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n"); - iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info); + iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info, false); } out: diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 161b99efd..68f4e7fdf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -579,17 +579,29 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) iwl_mvm_dump_umac_error_log(mvm); } -int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq) +int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq) { int i; lockdep_assert_held(&mvm->queue_info_lock); + /* Start by looking for a free queue */ for (i = minq; i <= maxq; i++) if (mvm->queue_info[i].hw_queue_refcount == 0 && mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) return i; + /* + * If no free queue found - settle for an inactive one to reconfigure + * Make sure that the inactive queue either already belongs to this STA, + * or that if it belongs to another one - it isn't the reserved queue + */ + for (i = minq; i <= maxq; i++) + if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE && + (sta_id == mvm->queue_info[i].ra_sta_id || + !mvm->queue_info[i].reserved)) + return i; + return -ENOSPC; } @@ -643,13 +655,21 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, } /* Update mappings and refcounts */ + if (mvm->queue_info[queue].hw_queue_refcount > 0) + enable_queue = false; + mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue); mvm->queue_info[queue].hw_queue_refcount++; - if (mvm->queue_info[queue].hw_queue_refcount > 1) - enable_queue = false; - else - mvm->queue_info[queue].ra_sta_id = cfg->sta_id; mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid); + mvm->queue_info[queue].ra_sta_id = cfg->sta_id; + + if (enable_queue) { + if (cfg->tid != IWL_MAX_TID_COUNT) + mvm->queue_info[queue].mac80211_ac = + 
tid_to_mac80211_ac[cfg->tid]; + else + mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO; + } IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", @@ -671,6 +691,10 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, .tid = cfg->tid, }; + /* Set sta_id in the command, if it exists */ + if (iwl_mvm_is_dqa_supported(mvm)) + cmd.sta_id = cfg->sta_id; + iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), @@ -752,6 +776,9 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, mvm->queue_info[queue].tid_bitmap = 0; mvm->queue_info[queue].hw_queue_to_mac80211 = 0; + /* Regardless of whether this is a reserved TXQ for a STA - mark it as false */ + mvm->queue_info[queue].reserved = false; + spin_unlock_bh(&mvm->queue_info_lock); iwl_trans_txq_disable(mvm->trans, queue, false); @@ -1039,6 +1066,155 @@ out: ieee80211_connection_loss(vif); } +/* + * Remove inactive TIDs of a given queue. + * If all queue TIDs are inactive - mark the queue as inactive + * If only some of the queue TIDs are inactive - unmap them from the queue + */ +static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, int queue, + unsigned long tid_bitmap) +{ + int tid; + + lockdep_assert_held(&mvmsta->lock); + lockdep_assert_held(&mvm->queue_info_lock); + + /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */ + for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { + /* If some TFDs are still queued - don't mark TID as inactive */ + if (iwl_mvm_tid_queued(&mvmsta->tid_data[tid])) + tid_bitmap &= ~BIT(tid); + } + + /* If all TIDs in the queue are inactive - mark queue as inactive. */ + if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) { + mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE; + + for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) + mvmsta->tid_data[tid].is_tid_active = false; + + IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n", + queue); + return; + } + + /* + * If we are here, this is a shared queue and not all TIDs timed-out. + * Remove the ones that did. 
+ */ + for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { + int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]; + + mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE; + mvm->queue_info[queue].hw_queue_to_mac80211 &= ~BIT(mac_queue); + mvm->queue_info[queue].hw_queue_refcount--; + mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); + mvmsta->tid_data[tid].is_tid_active = false; + + IWL_DEBUG_TX_QUEUES(mvm, + "Removing inactive TID %d from shared Q:%d\n", + tid, queue); + } + + IWL_DEBUG_TX_QUEUES(mvm, + "TXQ #%d left with tid bitmap 0x%x\n", queue, + mvm->queue_info[queue].tid_bitmap); + + /* + * There may be different TIDs with the same mac queues, so make + * sure all TIDs have existing corresponding mac queues enabled + */ + tid_bitmap = mvm->queue_info[queue].tid_bitmap; + for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { + mvm->queue_info[queue].hw_queue_to_mac80211 |= + BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]); + } + + /* TODO: if queue was shared - need to re-enable AGGs */ +} + +void iwl_mvm_inactivity_check(struct iwl_mvm *mvm) +{ + unsigned long timeout_queues_map = 0; + unsigned long now = jiffies; + int i; + + spin_lock_bh(&mvm->queue_info_lock); + for (i = 0; i < IWL_MAX_HW_QUEUES; i++) + if (mvm->queue_info[i].hw_queue_refcount > 0) + timeout_queues_map |= BIT(i); + spin_unlock_bh(&mvm->queue_info_lock); + + rcu_read_lock(); + + /* + * If a queue times out - mark it as INACTIVE (don't remove right away + * if we don't have to.) This is an optimization in case traffic comes + * later, and we don't HAVE to use a currently-inactive queue + */ + for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) { + struct ieee80211_sta *sta; + struct iwl_mvm_sta *mvmsta; + u8 sta_id; + int tid; + unsigned long inactive_tid_bitmap = 0; + unsigned long queue_tid_bitmap; + + spin_lock_bh(&mvm->queue_info_lock); + queue_tid_bitmap = mvm->queue_info[i].tid_bitmap; + + /* If TXQ isn't in active use anyway - nothing to do here... */ + if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY && + mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) { + spin_unlock_bh(&mvm->queue_info_lock); + continue; + } + + /* Check to see if there are inactive TIDs on this queue */ + for_each_set_bit(tid, &queue_tid_bitmap, + IWL_MAX_TID_COUNT + 1) { + if (time_after(mvm->queue_info[i].last_frame_time[tid] + + IWL_MVM_DQA_QUEUE_TIMEOUT, now)) + continue; + + inactive_tid_bitmap |= BIT(tid); + } + spin_unlock_bh(&mvm->queue_info_lock); + + /* If all TIDs are active - finish check on this queue */ + if (!inactive_tid_bitmap) + continue; + + /* + * If we are here - the queue hasn't been served recently and is + * in use + */ + + sta_id = mvm->queue_info[i].ra_sta_id; + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + + /* + * If the STA doesn't exist anymore, it isn't an error. It could + * be that it was removed since getting the queues, and in this + * case it should've inactivated its queues anyway. 
+ */ + if (IS_ERR_OR_NULL(sta)) + continue; + + mvmsta = iwl_mvm_sta_from_mac80211(sta); + + spin_lock_bh(&mvmsta->lock); + spin_lock(&mvm->queue_info_lock); + iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, + inactive_tid_bitmap); + spin_unlock(&mvm->queue_info_lock); + spin_unlock_bh(&mvmsta->lock); + } + + rcu_read_unlock(); +} + int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif, enum iwl_lqm_cmd_operatrions operation, u32 duration, u32 timeout) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 6f020e4ec..78cf9a7f3 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -516,6 +516,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)}, {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)}, + +/* a000 Series */ + {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)}, #endif /* CONFIG_IWLMVM */ {0} @@ -607,7 +610,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) const struct iwl_cfg *cfg_7265d __maybe_unused = NULL; const struct iwl_cfg *cfg_9260lc __maybe_unused = NULL; struct iwl_trans *iwl_trans; - struct iwl_trans_pcie *trans_pcie; int ret; iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg); @@ -645,12 +647,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #endif pci_set_drvdata(pdev, iwl_trans); + iwl_trans->drv = iwl_drv_start(iwl_trans, cfg); - trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); - trans_pcie->drv = iwl_drv_start(iwl_trans, cfg); - - if (IS_ERR(trans_pcie->drv)) { - ret = PTR_ERR(trans_pcie->drv); + if (IS_ERR(iwl_trans->drv)) { + ret = PTR_ERR(iwl_trans->drv); goto out_free_trans; } @@ -689,7 +689,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; out_free_drv: - iwl_drv_stop(trans_pcie->drv); + iwl_drv_stop(iwl_trans->drv); out_free_trans: iwl_trans_pcie_free(iwl_trans); return ret; @@ -698,7 +698,6 @@ out_free_trans: static void iwl_pci_remove(struct pci_dev *pdev) { struct iwl_trans *trans = pci_get_drvdata(pdev); - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); /* if RTPM was in use, restore it to the state before probe */ if (trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) { @@ -709,7 +708,7 @@ static void iwl_pci_remove(struct pci_dev *pdev) pm_runtime_forbid(trans->dev); } - iwl_drv_stop(trans_pcie->drv); + iwl_drv_stop(trans->drv); iwl_trans_pcie_free(trans); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 2d8cce290..11e347dd4 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -68,12 +68,14 @@ struct iwl_host_cmd; * struct iwl_rx_mem_buffer * @page_dma: bus address of rxb page * @page: driver's pointer to the rxb page + * @invalid: rxb is in driver ownership - not owned by HW * @vid: index of this rxb in the global table */ struct iwl_rx_mem_buffer { dma_addr_t page_dma; struct page *page; u16 vid; + bool invalid; struct list_head list; }; @@ -230,15 +232,16 @@ struct iwl_queue { #define TFD_CMD_SLOTS 32 /* - * The FH will write back to the first TB only, so we need - * to copy some data into the buffer regardless of whether - * it should be mapped or not. This indicates how big the - * first TB must be to include the scratch buffer. 
Since - * the scratch is 4 bytes at offset 12, it's 16 now. If we - * make it bigger then allocations will be bigger and copy - * slower, so that's probably not useful. + * The FH will write back to the first TB only, so we need to copy some data + * into the buffer regardless of whether it should be mapped or not. + * This indicates how big the first TB must be to include the scratch buffer + * and the assigned PN. + * Since PN location is 16 bytes at offset 24, it's 40 now. + * If we make it bigger then allocations will be bigger and copy slower, so + * that's probably not useful. */ -#define IWL_HCMD_SCRATCHBUF_SIZE 16 +#define IWL_FIRST_TB_SIZE 40 +#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64) struct iwl_pcie_txq_entry { struct iwl_device_cmd *cmd; @@ -248,20 +251,18 @@ struct iwl_pcie_txq_entry { struct iwl_cmd_meta meta; }; -struct iwl_pcie_txq_scratch_buf { - struct iwl_cmd_header hdr; - u8 buf[8]; - __le32 scratch; +struct iwl_pcie_first_tb_buf { + u8 buf[IWL_FIRST_TB_SIZE_ALIGN]; }; /** * struct iwl_txq - Tx Queue for DMA * @q: generic Rx/Tx queue descriptor * @tfds: transmit frame descriptors (DMA memory) - * @scratchbufs: start of command headers, including scratch buffers, for + * @first_tb_bufs: start of command headers, including scratch buffers, for * the writeback -- this is DMA memory and an array holding one buffer * for each command on the queue - * @scratchbufs_dma: DMA address for the scratchbufs start + * @first_tb_dma: DMA address for the first_tb_bufs start * @entries: transmit entries (driver state) * @lock: queue lock * @stuck_timer: timer that fires if queue gets stuck @@ -279,8 +280,8 @@ struct iwl_pcie_txq_scratch_buf { struct iwl_txq { struct iwl_queue q; struct iwl_tfd *tfds; - struct iwl_pcie_txq_scratch_buf *scratchbufs; - dma_addr_t scratchbufs_dma; + struct iwl_pcie_first_tb_buf *first_tb_bufs; + dma_addr_t first_tb_dma; struct iwl_pcie_txq_entry *entries; spinlock_t lock; unsigned long frozen_expiry_remainder; @@ -296,10 +297,10 @@ struct iwl_txq { }; static inline dma_addr_t -iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) +iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx) { - return txq->scratchbufs_dma + - sizeof(struct iwl_pcie_txq_scratch_buf) * idx; + return txq->first_tb_dma + + sizeof(struct iwl_pcie_first_tb_buf) * idx; } struct iwl_tso_hdr_page { @@ -313,7 +314,6 @@ struct iwl_tso_hdr_page { * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues * @global_table: table mapping received VID from hw to rxb * @rba: allocator for RX replenishing - * @drv - pointer to iwl_drv * @trans: pointer to the generic transport area * @scd_base_addr: scheduler sram base address in SRAM * @scd_bc_tbls: pointer to the byte count table of the scheduler @@ -351,7 +351,6 @@ struct iwl_trans_pcie { struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE]; struct iwl_rb_allocator rba; struct iwl_trans *trans; - struct iwl_drv *drv; struct net_device napi_dev; @@ -385,6 +384,8 @@ struct iwl_trans_pcie { wait_queue_head_t wait_command_queue; wait_queue_head_t d0i3_waitq; + u8 page_offs, dev_cmd_offs; + u8 cmd_queue; u8 cmd_fifo; unsigned int cmd_q_wdg_timeout; @@ -471,6 +472,10 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn, unsigned int wdg_timeout); void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue, bool configure_scd); +void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, + bool shared_mode); +void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, + 
struct iwl_txq *txq); int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int txq_id); void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); @@ -690,4 +695,6 @@ static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans); int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans); +void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable); + #endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index aaaf2ad6e..5c36e6d00 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -161,21 +161,21 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) return cpu_to_le32((u32)(dma_addr >> 8)); } -static void iwl_pcie_write_prph_64_no_grab(struct iwl_trans *trans, u64 ofs, - u64 val) -{ - iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff); - iwl_write_prph_no_grab(trans, ofs + 4, val >> 32); -} - /* * iwl_pcie_rx_stop - stops the Rx DMA */ int iwl_pcie_rx_stop(struct iwl_trans *trans) { - iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); - return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, - FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); + if (trans->cfg->mq_rx_supported) { + iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); + return iwl_poll_prph_bit(trans, RFH_GEN_STATUS, + RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); + } else { + iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); + return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, + FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, + 1000); + } } /* @@ -211,12 +211,8 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, if (trans->cfg->mq_rx_supported) iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), rxq->write_actual); - /* - * write to FH_RSCSR_CHNL0_WPTR register even in MQ as a W/A to - * hardware shadow registers bug - writing to RFH_Q_FRBDCB_WIDX will - * not wake the NIC. 
- */ - iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); + else + iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); } static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) @@ -237,10 +233,10 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) } /* - * iwl_pcie_rxq_mq_restock - restock implementation for multi-queue rx + * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx */ -static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans, - struct iwl_rxq *rxq) +static void iwl_pcie_rxmq_restock(struct iwl_trans *trans, + struct iwl_rxq *rxq) { struct iwl_rx_mem_buffer *rxb; @@ -263,7 +259,7 @@ static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans, rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, list); list_del(&rxb->list); - + rxb->invalid = false; /* 12 first bits are expected to be empty */ WARN_ON(rxb->page_dma & DMA_BIT_MASK(12)); /* Point to Rx buffer via next RBD in circular buffer */ @@ -285,10 +281,10 @@ static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans, } /* - * iwl_pcie_rxq_sq_restock - restock implementation for single queue rx + * iwl_pcie_rxsq_restock - restock implementation for single queue rx */ -static void iwl_pcie_rxq_sq_restock(struct iwl_trans *trans, - struct iwl_rxq *rxq) +static void iwl_pcie_rxsq_restock(struct iwl_trans *trans, + struct iwl_rxq *rxq) { struct iwl_rx_mem_buffer *rxb; @@ -314,6 +310,7 @@ static void iwl_pcie_rxq_sq_restock(struct iwl_trans *trans, rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, list); list_del(&rxb->list); + rxb->invalid = false; /* Point to Rx buffer via next RBD in circular buffer */ bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); @@ -347,9 +344,9 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) { if (trans->cfg->mq_rx_supported) - iwl_pcie_rxq_mq_restock(trans, rxq); + iwl_pcie_rxmq_restock(trans, rxq); else - iwl_pcie_rxq_sq_restock(trans, rxq); + iwl_pcie_rxsq_restock(trans, rxq); } /* @@ -764,6 +761,23 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); } +void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable) +{ + /* + * Turn on the chicken-bits that cause MAC wakeup for RX-related + * values. 
+ * This costs some power, but is needed as a W/A for a 9000 integrated A-step + * bug where shadow registers are not in the retention list and their + * value is lost when NIC powers down + */ + if (trans->cfg->integrated) { + iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, + CSR_MAC_SHADOW_REG_CTRL_RX_WAKE); + iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2, + CSR_MAC_SHADOW_REG_CTL2_RX_WAKE); + } +} + static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -796,17 +810,17 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) for (i = 0; i < trans->num_rx_queues; i++) { /* Tell device where to find RBD free table in DRAM */ - iwl_pcie_write_prph_64_no_grab(trans, - RFH_Q_FRBDCB_BA_LSB(i), - trans_pcie->rxq[i].bd_dma); + iwl_write_prph64_no_grab(trans, + RFH_Q_FRBDCB_BA_LSB(i), + trans_pcie->rxq[i].bd_dma); /* Tell device where to find RBD used table in DRAM */ - iwl_pcie_write_prph_64_no_grab(trans, - RFH_Q_URBDCB_BA_LSB(i), - trans_pcie->rxq[i].used_bd_dma); + iwl_write_prph64_no_grab(trans, + RFH_Q_URBDCB_BA_LSB(i), + trans_pcie->rxq[i].used_bd_dma); /* Tell device where in DRAM to update its Rx status */ - iwl_pcie_write_prph_64_no_grab(trans, - RFH_Q_URBD_STTS_WPTR_LSB(i), - trans_pcie->rxq[i].rb_stts_dma); + iwl_write_prph64_no_grab(trans, + RFH_Q_URBD_STTS_WPTR_LSB(i), + trans_pcie->rxq[i].rb_stts_dma); /* Reset device index tables */ iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0); iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0); @@ -815,33 +829,32 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) enabled |= BIT(i) | BIT(i + 16); } - /* restock default queue */ - iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]); - /* * Enable Rx DMA - * Single frame mode * Rx buffer size 4 or 8k or 12k * Min RB size 4 or 8 * Drop frames that exceed RB size * 512 RBDs */ iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, - RFH_DMA_EN_ENABLE_VAL | - rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK | + RFH_DMA_EN_ENABLE_VAL | rb_size | RFH_RXF_DMA_MIN_RB_4_8 | RFH_RXF_DMA_DROP_TOO_LARGE_MASK | RFH_RXF_DMA_RBDCB_SIZE_512); /* * Activate DMA snooping. - * Set RX DMA chunk size to 64B + * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe * Default queue is 0 */ iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP | (DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) | - RFH_GEN_CFG_SERVICE_DMA_SNOOP); + RFH_GEN_CFG_SERVICE_DMA_SNOOP | + (trans->cfg->integrated ? 
+ RFH_GEN_CFG_RB_CHUNK_SIZE_64 : + RFH_GEN_CFG_RB_CHUNK_SIZE_128) << + RFH_GEN_CFG_RB_CHUNK_SIZE_POS); /* Enable the relevant rx queues */ iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled); @@ -849,6 +862,8 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) /* Set interrupt coalescing timer to default (2048 usecs) */ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); + + iwl_pcie_enable_rx_wake(trans, true); } static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) @@ -939,16 +954,18 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) else list_add(&rxb->list, &def_rxq->rx_used); trans_pcie->global_table[i] = rxb; - rxb->vid = (u16)i; + rxb->vid = (u16)(i + 1); + rxb->invalid = true; } iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); - if (trans->cfg->mq_rx_supported) { + + if (trans->cfg->mq_rx_supported) iwl_pcie_rx_mq_hw_init(trans); - } else { - iwl_pcie_rxq_sq_restock(trans, def_rxq); + else iwl_pcie_rx_hw_init(trans, def_rxq); - } + + iwl_pcie_rxq_restock(trans, def_rxq); spin_lock(&def_rxq->lock); iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq); @@ -1087,6 +1104,9 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) break; + WARN_ON((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >> + FH_RSCSR_RXQ_POS != rxq->id); + IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n", rxcb._offset, @@ -1224,10 +1244,19 @@ restart: */ u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF; - if (WARN(vid >= ARRAY_SIZE(trans_pcie->global_table), - "Invalid rxb index from HW %u\n", (u32)vid)) + if (WARN(!vid || + vid > ARRAY_SIZE(trans_pcie->global_table), + "Invalid rxb index from HW %u\n", (u32)vid)) { + iwl_force_nmi(trans); goto out; - rxb = trans_pcie->global_table[vid]; + } + rxb = trans_pcie->global_table[vid - 1]; + if (WARN(rxb->invalid, + "Invalid rxb from HW %u\n", (u32)vid)) { + iwl_force_nmi(trans); + goto out; + } + rxb->invalid = true; } else { rxb = rxq->queue[i]; rxq->queue[i] = NULL; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index d9f139462..74f2f035b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -608,18 +608,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) /* * ucode */ -static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr, - dma_addr_t phy_addr, u32 byte_cnt) +static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans, + u32 dst_addr, dma_addr_t phy_addr, + u32 byte_cnt) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - unsigned long flags; - int ret; - - trans_pcie->ucode_write_complete = false; - - if (!iwl_trans_grab_nic_access(trans, &flags)) - return -EIO; - iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); @@ -642,7 +634,50 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr, FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); +} + +static void iwl_pcie_load_firmware_chunk_tfh(struct iwl_trans *trans, + u32 dst_addr, dma_addr_t phy_addr, + u32 byte_cnt) +{ + /* Stop DMA channel */ + iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, 0); + + /* Configure SRAM address */ + iwl_write32(trans, TFH_SRV_DMA_CHNL0_SRAM_ADDR, + dst_addr); + + /* Configure DRAM address - 64 bit */ + iwl_write64(trans, 
TFH_SRV_DMA_CHNL0_DRAM_ADDR, phy_addr); + /* Configure byte count to transfer */ + iwl_write32(trans, TFH_SRV_DMA_CHNL0_BC, byte_cnt); + + /* Enable the DRAM2SRAM to start */ + iwl_write32(trans, TFH_SRV_DMA_CHNL0_CTRL, TFH_SRV_DMA_SNOOP | + TFH_SRV_DMA_TO_DRIVER | + TFH_SRV_DMA_START); +} + +static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, + u32 dst_addr, dma_addr_t phy_addr, + u32 byte_cnt) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + unsigned long flags; + int ret; + + trans_pcie->ucode_write_complete = false; + + if (!iwl_trans_grab_nic_access(trans, &flags)) + return -EIO; + + if (trans->cfg->use_tfh) + iwl_pcie_load_firmware_chunk_tfh(trans, dst_addr, phy_addr, + byte_cnt); + else + iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr, + byte_cnt); iwl_trans_release_nic_access(trans, &flags); ret = wait_event_timeout(trans_pcie->ucode_write_waitq, @@ -1285,6 +1320,8 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + iwl_pcie_enable_rx_wake(trans, false); + if (reset) { /* * reset TX queues -- some of their registers reset during S3 @@ -1310,6 +1347,8 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return 0; } + iwl_pcie_enable_rx_wake(trans, true); + /* * Also enables interrupts - none will happen as the device doesn't * know we're waking it up, only when the opmode actually tells it @@ -1388,8 +1427,12 @@ static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) max_rx_vector = trans_pcie->allocated_vector - 1; - if (!trans_pcie->msix_enabled) + if (!trans_pcie->msix_enabled) { + if (trans->cfg->mq_rx_supported) + iwl_write_prph(trans, UREG_CHICK, + UREG_CHICK_MSI_ENABLE); return; + } iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE); @@ -1634,6 +1677,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, trans_pcie->scd_set_active = trans_cfg->scd_set_active; trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx; + trans_pcie->page_offs = trans_cfg->cb_data_offs; + trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); + trans->command_groups = trans_cfg->command_groups; trans->command_groups_size = trans_cfg->command_groups_size; @@ -1904,6 +1950,48 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) #define IWL_FLUSH_WAIT_MS 2000 +void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 scd_sram_addr; + u8 buf[16]; + int cnt; + + IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", + txq->q.read_ptr, txq->q.write_ptr); + + scd_sram_addr = trans_pcie->scd_base_addr + + SCD_TX_STTS_QUEUE_OFFSET(txq->q.id); + iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); + + iwl_print_hex_error(trans, buf, sizeof(buf)); + + for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++) + IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt, + iwl_read_direct32(trans, FH_TX_TRB_REG(cnt))); + + for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { + u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt)); + u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; + bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); + u32 tbl_dw = + iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr + + SCD_TRANS_TBL_OFFSET_QUEUE(cnt)); + + if (cnt & 0x1) + tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; + else + tbl_dw = tbl_dw & 0x0000FFFF; + + IWL_ERR(trans, + "Q %d 
is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", + cnt, active ? "" : "in", fifo, tbl_dw, + iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) & + (TFD_QUEUE_SIZE_MAX - 1), + iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt))); + } +} + static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1911,8 +1999,6 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) struct iwl_queue *q; int cnt; unsigned long now = jiffies; - u32 scd_sram_addr; - u8 buf[16]; int ret = 0; /* waiting for all the tx frames complete might take a while */ @@ -1952,42 +2038,8 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt); } - if (!ret) - return 0; - - IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", - txq->q.read_ptr, txq->q.write_ptr); - - scd_sram_addr = trans_pcie->scd_base_addr + - SCD_TX_STTS_QUEUE_OFFSET(txq->q.id); - iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); - - iwl_print_hex_error(trans, buf, sizeof(buf)); - - for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++) - IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt, - iwl_read_direct32(trans, FH_TX_TRB_REG(cnt))); - - for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) { - u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt)); - u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; - bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); - u32 tbl_dw = - iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr + - SCD_TRANS_TBL_OFFSET_QUEUE(cnt)); - - if (cnt & 0x1) - tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; - else - tbl_dw = tbl_dw & 0x0000FFFF; - - IWL_ERR(trans, - "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", - cnt, active ? "" : "in", fifo, tbl_dw, - iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) & - (TFD_QUEUE_SIZE_MAX - 1), - iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt))); - } + if (ret) + iwl_trans_pcie_log_scd_error(trans, txq); return ret; } @@ -2736,6 +2788,8 @@ static const struct iwl_trans_ops trans_ops_pcie = { .txq_disable = iwl_trans_pcie_txq_disable, .txq_enable = iwl_trans_pcie_txq_enable, + .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode, + .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty, .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer, .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index d6beac9af..18650dccd 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -70,6 +70,7 @@ * Tx queue resumed. 
* ***************************************************/ + static int iwl_queue_space(const struct iwl_queue *q) { unsigned int max; @@ -154,10 +155,6 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data) struct iwl_txq *txq = (void *)data; struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); - u32 scd_sram_addr = trans_pcie->scd_base_addr + - SCD_TX_STTS_QUEUE_OFFSET(txq->q.id); - u8 buf[16]; - int i; spin_lock(&txq->lock); /* check if triggered erroneously */ @@ -169,38 +166,8 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data) IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, jiffies_to_msecs(txq->wd_timeout)); - IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", - txq->q.read_ptr, txq->q.write_ptr); - - iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); - - iwl_print_hex_error(trans, buf, sizeof(buf)); - - for (i = 0; i < FH_TCSR_CHNL_NUM; i++) - IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i, - iwl_read_direct32(trans, FH_TX_TRB_REG(i))); - - for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { - u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i)); - u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; - bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); - u32 tbl_dw = - iwl_trans_read_mem32(trans, - trans_pcie->scd_base_addr + - SCD_TRANS_TBL_OFFSET_QUEUE(i)); - - if (i & 0x1) - tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; - else - tbl_dw = tbl_dw & 0x0000FFFF; - IWL_ERR(trans, - "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", - i, active ? "" : "in", fifo, tbl_dw, - iwl_read_prph(trans, SCD_QUEUE_RDPTR(i)) & - (TFD_QUEUE_SIZE_MAX - 1), - iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); - } + iwl_trans_pcie_log_scd_error(trans, txq); iwl_force_nmi(trans); } @@ -393,7 +360,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, return; } - /* first TB is never freed - it's the scratchbuf data */ + /* first TB is never freed - it's the bidirectional DMA data */ for (i = 1; i < num_tbs; i++) { if (meta->flags & BIT(i + CMD_TB_BITMAP_POS)) @@ -491,7 +458,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; - size_t scratchbuf_sz; + size_t tb0_buf_sz; int i; if (WARN_ON(txq->entries || txq->tfds)) @@ -526,17 +493,14 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans, if (!txq->tfds) goto error; - BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs)); - BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) != - sizeof(struct iwl_cmd_header) + - offsetof(struct iwl_tx_cmd, scratch)); + BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs)); - scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num; + tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; - txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz, - &txq->scratchbufs_dma, + txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, + &txq->first_tb_dma, GFP_KERNEL); - if (!txq->scratchbufs) + if (!txq->first_tb_bufs) goto err_free_tfds; txq->q.id = txq_id; @@ -578,22 +542,27 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, * Tell nic where to find circular buffer of Tx Frame Descriptors for * given Tx queue, and enable the DMA channel used for that queue. 
* Circular buffer (TFD queue in DRAM) physical base address */ - iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id), - txq->q.dma_addr >> 8); + if (trans->cfg->use_tfh) + iwl_write_direct64(trans, + FH_MEM_CBBC_QUEUE(trans, txq_id), + txq->q.dma_addr); + else + iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), + txq->q.dma_addr >> 8); return 0; } -static void iwl_pcie_free_tso_page(struct sk_buff *skb) +static void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, + struct sk_buff *skb) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct page **page_ptr; - if (info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA]) { - struct page *page = - info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA]; + page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); - __free_page(page); - info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA] = NULL; + if (*page_ptr) { + __free_page(*page_ptr); + *page_ptr = NULL; } } @@ -639,7 +608,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) if (WARN_ON_ONCE(!skb)) continue; - iwl_pcie_free_tso_page(skb); + iwl_pcie_free_tso_page(trans_pcie, skb); } iwl_pcie_txq_free_tfd(trans, txq); q->read_ptr = iwl_queue_inc_wrap(q->read_ptr); @@ -708,8 +677,8 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) txq->tfds = NULL; dma_free_coherent(dev, - sizeof(*txq->scratchbufs) * txq->q.n_window, - txq->scratchbufs, txq->scratchbufs_dma); + sizeof(*txq->first_tb_bufs) * txq->q.n_window, + txq->first_tb_bufs, txq->first_tb_dma); } kfree(txq->entries); @@ -786,9 +755,14 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; txq_id++) { struct iwl_txq *txq = &trans_pcie->txq[txq_id]; - - iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id), - txq->q.dma_addr >> 8); + if (trans->cfg->use_tfh) + iwl_write_direct64(trans, + FH_MEM_CBBC_QUEUE(trans, txq_id), + txq->q.dma_addr); + else + iwl_write_direct32(trans, + FH_MEM_CBBC_QUEUE(trans, txq_id), + txq->q.dma_addr >> 8); iwl_pcie_txq_unmap(trans, txq_id); txq->q.read_ptr = 0; txq->q.write_ptr = 0; @@ -996,6 +970,12 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) } } + if (trans->cfg->use_tfh) + iwl_write_direct32(trans, TFH_TRANSFER_MODE, + TFH_TRANSFER_MAX_PENDING_REQ | + TFH_CHUNK_SIZE_128 | + TFH_CHUNK_SPLIT_MODE); + iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); if (trans->cfg->base_params->num_of_queues > 20) iwl_set_bits_prph(trans, SCD_GP_CTRL, @@ -1084,7 +1064,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, if (WARN_ON_ONCE(!skb)) continue; - iwl_pcie_free_tso_page(skb); + iwl_pcie_free_tso_page(trans_pcie, skb); __skb_queue_tail(skbs, skb); @@ -1115,17 +1095,17 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, while (!skb_queue_empty(&overflow_skbs)) { struct sk_buff *skb = __skb_dequeue(&overflow_skbs); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - u8 dev_cmd_idx = IWL_TRANS_FIRST_DRIVER_DATA + 1; - struct iwl_device_cmd *dev_cmd = - info->driver_data[dev_cmd_idx]; + struct iwl_device_cmd *dev_cmd_ptr; + + dev_cmd_ptr = *(void **)((u8 *)skb->cb + + trans_pcie->dev_cmd_offs); /* * Note that we can very well be overflowing again. * In that case, iwl_queue_space will be small again * and we won't wake mac80211's queue. 
*/ - iwl_trans_pcie_tx(trans, skb, dev_cmd, txq_id); + iwl_trans_pcie_tx(trans, skb, dev_cmd_ptr, txq_id); } spin_lock_bh(&txq->lock); @@ -1354,6 +1334,15 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, txq->active = true; } +void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, + bool shared_mode) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = &trans_pcie->txq[txq_id]; + + txq->ampdu = !shared_mode; +} + void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, bool configure_scd) { @@ -1413,7 +1402,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, void *dup_buf = NULL; dma_addr_t phys_addr; int idx; - u16 copy_size, cmd_size, scratch_size; + u16 copy_size, cmd_size, tb0_size; bool had_nocopy = false; u8 group_id = iwl_cmd_groupid(cmd->id); int i, ret; @@ -1444,9 +1433,9 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, if (!cmd->len[i]) continue; - /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */ - if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { - int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; + /* need at least IWL_FIRST_TB_SIZE copied */ + if (copy_size < IWL_FIRST_TB_SIZE) { + int copy = IWL_FIRST_TB_SIZE - copy_size; if (copy > cmdlen[i]) copy = cmdlen[i]; @@ -1567,8 +1556,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, } /* - * Otherwise we need at least IWL_HCMD_SCRATCHBUF_SIZE copied - * in total (for the scratchbuf handling), but copy up to what + * Otherwise we need at least IWL_FIRST_TB_SIZE copied + * in total (for bi-directional DMA), but copy up to what * we can fit into the payload for debug dump purposes. */ copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); @@ -1577,8 +1566,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, cmd_pos += copy; /* However, treat copy_size the proper way, we need it below */ - if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) { - copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size; + if (copy_size < IWL_FIRST_TB_SIZE) { + copy = IWL_FIRST_TB_SIZE - copy_size; if (copy > cmd->len[i]) copy = cmd->len[i]; @@ -1593,18 +1582,18 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, le16_to_cpu(out_cmd->hdr.sequence), cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); - /* start the TFD with the scratchbuf */ - scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE); - memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size); + /* start the TFD with the minimum copy bytes */ + tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); + memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); iwl_pcie_txq_build_tfd(trans, txq, - iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr), - scratch_size, true); + iwl_pcie_get_first_tb_dma(txq, idx), + tb0_size, true); /* map first command fragment, if any remains */ - if (copy_size > scratch_size) { + if (copy_size > tb0_size) { phys_addr = dma_map_single(trans->dev, - ((u8 *)&out_cmd->hdr) + scratch_size, - copy_size - scratch_size, + ((u8 *)&out_cmd->hdr) + tb0_size, + copy_size - tb0_size, DMA_TO_DEVICE); if (dma_mapping_error(trans->dev, phys_addr)) { iwl_pcie_tfd_unmap(trans, out_meta, @@ -1614,7 +1603,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, } iwl_pcie_txq_build_tfd(trans, txq, phys_addr, - copy_size - scratch_size, false); + copy_size - tb0_size, false); } /* map the remaining (adjusted) nocopy/dup fragments */ @@ -1959,7 +1948,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct 
sk_buff *skb, trace_iwlwifi_dev_tx(trans->dev, skb, &txq->tfds[txq->q.write_ptr], sizeof(struct iwl_tfd), - &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len, + &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, skb->data + hdr_len, tb2_len); trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len, skb->len - hdr_len); @@ -2015,7 +2004,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, struct iwl_device_cmd *dev_cmd, u16 tb1_len) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; @@ -2024,6 +2012,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, u16 length, iv_len, amsdu_pad; u8 *start_hdr; struct iwl_tso_hdr_page *hdr_page; + struct page **page_ptr; int ret; struct tso_t tso; @@ -2035,7 +2024,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, trace_iwlwifi_dev_tx(trans->dev, skb, &txq->tfds[txq->q.write_ptr], sizeof(struct iwl_tfd), - &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len, + &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, NULL, 0); ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); @@ -2054,7 +2043,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, get_page(hdr_page->page); start_hdr = hdr_page->pos; - info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA] = hdr_page->page; + page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); + *page_ptr = hdr_page->page; memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); hdr_page->pos += iv_len; @@ -2264,10 +2254,12 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* don't put the packet on the ring, if there is no room */ if (unlikely(iwl_queue_space(q) < 3)) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct iwl_device_cmd **dev_cmd_ptr; + + dev_cmd_ptr = (void *)((u8 *)skb->cb + + trans_pcie->dev_cmd_offs); - info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA + 1] = - dev_cmd; + *dev_cmd_ptr = dev_cmd; __skb_queue_tail(&txq->overflow_q, skb); spin_unlock(&txq->lock); @@ -2294,7 +2286,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(q->write_ptr))); - tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr); + tb0_phys = iwl_pcie_get_first_tb_dma(txq, q->write_ptr); scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + offsetof(struct iwl_tx_cmd, scratch); @@ -2312,7 +2304,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, * setup of the first TB) */ len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + - hdr_len - IWL_HCMD_SCRATCHBUF_SIZE; + hdr_len - IWL_FIRST_TB_SIZE; /* do not align A-MSDU to dword as the subframe header aligns it */ amsdu = ieee80211_is_data_qos(fc) && (*ieee80211_get_qos_ctl(hdr) & @@ -2326,17 +2318,17 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, tb1_len = len; } - /* The first TB points to the scratchbuf data - min_copy bytes */ - memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr, - IWL_HCMD_SCRATCHBUF_SIZE); + /* The first TB points to bi-directional DMA data */ + memcpy(&txq->first_tb_bufs[q->write_ptr], &dev_cmd->hdr, + IWL_FIRST_TB_SIZE); iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, - IWL_HCMD_SCRATCHBUF_SIZE, true); + IWL_FIRST_TB_SIZE, true); /* there must be data left over for TB1 or this 
code must be changed */ - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE); /* map the data for TB1 */ - tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE; + tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) goto out_err; diff --git a/drivers/net/wireless/intersil/orinoco/scan.c b/drivers/net/wireless/intersil/orinoco/scan.c index d0ceb06c7..6d1d08485 100644 --- a/drivers/net/wireless/intersil/orinoco/scan.c +++ b/drivers/net/wireless/intersil/orinoco/scan.c @@ -237,7 +237,11 @@ void orinoco_add_hostscan_results(struct orinoco_private *priv, scan_abort: if (priv->scan_request) { - cfg80211_scan_done(priv->scan_request, abort); + struct cfg80211_scan_info info = { + .aborted = abort, + }; + + cfg80211_scan_done(priv->scan_request, &info); priv->scan_request = NULL; } } @@ -245,7 +249,11 @@ void orinoco_add_hostscan_results(struct orinoco_private *priv, void orinoco_scan_done(struct orinoco_private *priv, bool abort) { if (priv->scan_request) { - cfg80211_scan_done(priv->scan_request, abort); + struct cfg80211_scan_info info = { + .aborted = abort, + }; + + cfg80211_scan_done(priv->scan_request, &info); priv->scan_request = NULL; } } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 4dd5adcdd..8c35ac838 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -30,6 +30,8 @@ #include <linux/module.h> #include <linux/ktime.h> #include <net/genetlink.h> +#include <net/net_namespace.h> +#include <net/netns/generic.h> #include "mac80211_hwsim.h" #define WARN_QUEUE 100 @@ -39,8 +41,6 @@ MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211"); MODULE_LICENSE("GPL"); -static u32 wmediumd_portid; - static int radios = 2; module_param(radios, int, 0444); MODULE_PARM_DESC(radios, "Number of simulated radios"); @@ -250,6 +250,43 @@ static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c) cp->magic = 0; } +static int hwsim_net_id; + +static int hwsim_netgroup; + +struct hwsim_net { + int netgroup; + u32 wmediumd; +}; + +static inline int hwsim_net_get_netgroup(struct net *net) +{ + struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); + + return hwsim_net->netgroup; +} + +static inline void hwsim_net_set_netgroup(struct net *net) +{ + struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); + + hwsim_net->netgroup = hwsim_netgroup++; +} + +static inline u32 hwsim_net_get_wmediumd(struct net *net) +{ + struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); + + return hwsim_net->wmediumd; +} + +static inline void hwsim_net_set_wmediumd(struct net *net, u32 portid) +{ + struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); + + hwsim_net->wmediumd = portid; +} + static struct class *hwsim_class; static struct net_device *hwsim_mon; /* global monitor netdev */ @@ -420,10 +457,6 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = { { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) } }; -static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = { - { .max = 8, .types = BIT(NL80211_IFTYPE_AP) }, -}; - static const struct ieee80211_iface_combination hwsim_if_comb[] = { { .limits = hwsim_if_limits, @@ -431,18 +464,12 @@ static const struct ieee80211_iface_combination hwsim_if_comb[] = { 
.n_limits = ARRAY_SIZE(hwsim_if_limits) - 1, .max_interfaces = 2048, .num_different_channels = 1, - }, - { - .limits = hwsim_if_dfs_limits, - .n_limits = ARRAY_SIZE(hwsim_if_dfs_limits), - .max_interfaces = 8, - .num_different_channels = 1, .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | BIT(NL80211_CHAN_WIDTH_40) | BIT(NL80211_CHAN_WIDTH_80) | BIT(NL80211_CHAN_WIDTH_160), - } + }, }; static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = { @@ -451,18 +478,12 @@ static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = { .n_limits = ARRAY_SIZE(hwsim_if_limits), .max_interfaces = 2048, .num_different_channels = 1, - }, - { - .limits = hwsim_if_dfs_limits, - .n_limits = ARRAY_SIZE(hwsim_if_dfs_limits), - .max_interfaces = 8, - .num_different_channels = 1, .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | BIT(NL80211_CHAN_WIDTH_40) | BIT(NL80211_CHAN_WIDTH_80) | BIT(NL80211_CHAN_WIDTH_160), - } + }, }; static spinlock_t hwsim_radio_lock; @@ -526,6 +547,11 @@ struct mac80211_hwsim_data { */ u64 group; + /* group shared by radios created in the same netns */ + int netgroup; + /* wmediumd portid responsible for netgroup of this radio */ + u32 wmediumd; + int power_level; /* difference between this hw's clock and the real clock, in usecs */ @@ -568,6 +594,7 @@ static struct genl_family hwsim_genl_family = { .name = "MAC80211_HWSIM", .version = 1, .maxattr = HWSIM_ATTR_MAX, + .netnsok = true, }; enum hwsim_multicast_groups { @@ -955,6 +982,29 @@ static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data, return true; } +static int hwsim_unicast_netgroup(struct mac80211_hwsim_data *data, + struct sk_buff *skb, int portid) +{ + struct net *net; + bool found = false; + int res = -ENOENT; + + rcu_read_lock(); + for_each_net_rcu(net) { + if (data->netgroup == hwsim_net_get_netgroup(net)) { + res = genlmsg_unicast(net, skb, portid); + found = true; + break; + } + } + rcu_read_unlock(); + + if (!found) + nlmsg_free(skb); + + return res; +} + static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, struct sk_buff *my_skb, int dst_portid) @@ -1034,7 +1084,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, goto nla_put_failure; genlmsg_end(skb, msg_head); - if (genlmsg_unicast(&init_net, skb, dst_portid)) + if (hwsim_unicast_netgroup(data, skb, dst_portid)) goto err_free_txskb; /* Enqueue the packet */ @@ -1202,6 +1252,9 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, if (!(data->group & data2->group)) continue; + if (data->netgroup != data2->netgroup) + continue; + if (!hwsim_chans_compat(chan, data2->tmp_chan) && !hwsim_chans_compat(chan, data2->channel)) { ieee80211_iterate_active_interfaces_atomic( @@ -1324,7 +1377,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, mac80211_hwsim_monitor_rx(hw, skb, channel); /* wmediumd mode check */ - _portid = ACCESS_ONCE(wmediumd_portid); + _portid = ACCESS_ONCE(data->wmediumd); if (_portid) return mac80211_hwsim_tx_frame_nl(hw, skb, _portid); @@ -1420,7 +1473,8 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_channel *chan) { - u32 _pid = ACCESS_ONCE(wmediumd_portid); + struct mac80211_hwsim_data *data = hw->priv; + u32 _pid = ACCESS_ONCE(data->wmediumd); if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) { struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); @@ -1887,8 +1941,12 @@ static void hw_scan_work(struct work_struct *work) 
mutex_lock(&hwsim->mutex); if (hwsim->scan_chan_idx >= req->n_channels) { + struct cfg80211_scan_info info = { + .aborted = false, + }; + wiphy_debug(hwsim->hw->wiphy, "hw scan complete\n"); - ieee80211_scan_completed(hwsim->hw, false); + ieee80211_scan_completed(hwsim->hw, &info); hwsim->hw_scan_request = NULL; hwsim->hw_scan_vif = NULL; hwsim->tmp_chan = NULL; @@ -1973,13 +2031,16 @@ static void mac80211_hwsim_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *hwsim = hw->priv; + struct cfg80211_scan_info info = { + .aborted = true, + }; wiphy_debug(hw->wiphy, "hwsim cancel_hw_scan\n"); cancel_delayed_work_sync(&hwsim->hw_scan); mutex_lock(&hwsim->mutex); - ieee80211_scan_completed(hwsim->hw, true); + ieee80211_scan_completed(hwsim->hw, &info); hwsim->tmp_chan = NULL; hwsim->hw_scan_request = NULL; hwsim->hw_scan_vif = NULL; @@ -2349,6 +2410,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, struct ieee80211_hw *hw; enum nl80211_band band; const struct ieee80211_ops *ops = &mac80211_hwsim_ops; + struct net *net; int idx; if (WARN_ON(param->channels > 1 && !param->use_chanctx)) @@ -2366,6 +2428,13 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, err = -ENOMEM; goto failed; } + + if (info) + net = genl_info_net(info); + else + net = &init_net; + wiphy_net_set(hw->wiphy, net); + data = hw->priv; data->hw = hw; @@ -2409,13 +2478,14 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, hw->wiphy->max_scan_ssids = 255; hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; hw->wiphy->max_remain_on_channel_duration = 1000; - /* For channels > 1 DFS is not allowed */ - hw->wiphy->n_iface_combinations = 1; hw->wiphy->iface_combinations = &data->if_combination; if (param->p2p_device) data->if_combination = hwsim_if_comb_p2p_dev[0]; else data->if_combination = hwsim_if_comb[0]; + hw->wiphy->n_iface_combinations = 1; + /* For channels > 1 DFS is not allowed */ + data->if_combination.radar_detect_widths = 0; data->if_combination.num_different_channels = data->channels; } else if (param->p2p_device) { hw->wiphy->iface_combinations = hwsim_if_comb_p2p_dev; @@ -2541,6 +2611,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, data->group = 1; mutex_init(&data->mutex); + data->netgroup = hwsim_net_get_netgroup(net); + /* Enable frame retransmissions for lossy channels */ hw->max_rates = 4; hw->max_rate_tries = 11; @@ -2755,6 +2827,20 @@ static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr) return data; } +static void hwsim_register_wmediumd(struct net *net, u32 portid) +{ + struct mac80211_hwsim_data *data; + + hwsim_net_set_wmediumd(net, portid); + + spin_lock_bh(&hwsim_radio_lock); + list_for_each_entry(data, &hwsim_radios, list) { + if (data->netgroup == hwsim_net_get_netgroup(net)) + data->wmediumd = portid; + } + spin_unlock_bh(&hwsim_radio_lock); +} + static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, struct genl_info *info) { @@ -2770,9 +2856,6 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, int i; bool found = false; - if (info->snd_portid != wmediumd_portid) - return -EINVAL; - if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] || !info->attrs[HWSIM_ATTR_FLAGS] || !info->attrs[HWSIM_ATTR_COOKIE] || @@ -2788,6 +2871,12 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, if (!data2) goto out; + if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup) + goto out; + + if (info->snd_portid != data2->wmediumd) + goto out; + /* 
look for the skb matching the cookie passed back from user */ skb_queue_walk_safe(&data2->pending, skb, tmp) { u64 skb_cookie; @@ -2851,9 +2940,6 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, void *frame_data; struct sk_buff *skb = NULL; - if (info->snd_portid != wmediumd_portid) - return -EINVAL; - if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] || !info->attrs[HWSIM_ATTR_FRAME] || !info->attrs[HWSIM_ATTR_RX_RATE] || @@ -2879,6 +2965,12 @@ static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, if (!data2) goto out; + if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup) + goto out; + + if (info->snd_portid != data2->wmediumd) + goto out; + /* check if radio is configured properly */ if (data2->idle || !data2->started) @@ -2925,6 +3017,7 @@ out: static int hwsim_register_received_nl(struct sk_buff *skb_2, struct genl_info *info) { + struct net *net = genl_info_net(info); struct mac80211_hwsim_data *data; int chans = 1; @@ -2941,10 +3034,10 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2, if (chans > 1) return -EOPNOTSUPP; - if (wmediumd_portid) + if (hwsim_net_get_wmediumd(net)) return -EBUSY; - wmediumd_portid = info->snd_portid; + hwsim_register_wmediumd(net, info->snd_portid); printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, " "switching to wmediumd mode with pid %d\n", info->snd_portid); @@ -3014,6 +3107,9 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) continue; } + if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info))) + continue; + list_del(&data->list); spin_unlock_bh(&hwsim_radio_lock); mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), @@ -3040,6 +3136,9 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) if (data->idx != idx) continue; + if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info))) + continue; + skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) { res = -ENOMEM; @@ -3079,6 +3178,9 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb, if (data->idx < idx) continue; + if (!net_eq(wiphy_net(data->hw->wiphy), sock_net(skb->sk))) + continue; + res = mac80211_hwsim_get_radio(skb, data, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb, @@ -3102,7 +3204,7 @@ static const struct genl_ops hwsim_ops[] = { .cmd = HWSIM_CMD_REGISTER, .policy = hwsim_genl_policy, .doit = hwsim_register_received_nl, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = HWSIM_CMD_FRAME, @@ -3118,13 +3220,13 @@ static const struct genl_ops hwsim_ops[] = { .cmd = HWSIM_CMD_NEW_RADIO, .policy = hwsim_genl_policy, .doit = hwsim_new_radio_nl, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = HWSIM_CMD_DEL_RADIO, .policy = hwsim_genl_policy, .doit = hwsim_del_radio_nl, - .flags = GENL_ADMIN_PERM, + .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = HWSIM_CMD_GET_RADIO, @@ -3168,10 +3270,10 @@ static int mac80211_hwsim_netlink_notify(struct notifier_block *nb, remove_user_radios(notify->portid); - if (notify->portid == wmediumd_portid) { + if (notify->portid == hwsim_net_get_wmediumd(notify->net)) { printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink" " socket, switching to perfect channel medium\n"); - wmediumd_portid = 0; + hwsim_register_wmediumd(notify->net, 0); } return NOTIFY_DONE; @@ -3206,6 +3308,40 @@ failure: return -EINVAL; } +static __net_init int hwsim_init_net(struct net *net) +{ + hwsim_net_set_netgroup(net); + + return 0; +} + +static void __net_exit hwsim_exit_net(struct net *net) +{ + struct 
mac80211_hwsim_data *data, *tmp; + + spin_lock_bh(&hwsim_radio_lock); + list_for_each_entry_safe(data, tmp, &hwsim_radios, list) { + if (!net_eq(wiphy_net(data->hw->wiphy), net)) + continue; + + /* Radios created in init_net are returned to init_net. */ + if (data->netgroup == hwsim_net_get_netgroup(&init_net)) + continue; + + list_del(&data->list); + INIT_WORK(&data->destroy_work, destroy_radio); + schedule_work(&data->destroy_work); + } + spin_unlock_bh(&hwsim_radio_lock); +} + +static struct pernet_operations hwsim_net_ops = { + .init = hwsim_init_net, + .exit = hwsim_exit_net, + .id = &hwsim_net_id, + .size = sizeof(struct hwsim_net), +}; + static void hwsim_exit_netlink(void) { /* unregister the notifier */ @@ -3242,10 +3378,14 @@ static int __init init_mac80211_hwsim(void) spin_lock_init(&hwsim_radio_lock); INIT_LIST_HEAD(&hwsim_radios); - err = platform_driver_register(&mac80211_hwsim_driver); + err = register_pernet_device(&hwsim_net_ops); if (err) return err; + err = platform_driver_register(&mac80211_hwsim_driver); + if (err) + goto out_unregister_pernet; + hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim"); if (IS_ERR(hwsim_class)) { err = PTR_ERR(hwsim_class); @@ -3363,6 +3503,8 @@ out_free_radios: mac80211_hwsim_free(); out_unregister_driver: platform_driver_unregister(&mac80211_hwsim_driver); +out_unregister_pernet: + unregister_pernet_device(&hwsim_net_ops); return err; } module_init(init_mac80211_hwsim); @@ -3376,5 +3518,6 @@ static void __exit exit_mac80211_hwsim(void) mac80211_hwsim_free(); unregister_netdev(hwsim_mon); platform_driver_unregister(&mac80211_hwsim_driver); + unregister_pernet_device(&hwsim_net_ops); } module_exit(exit_mac80211_hwsim); diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c index 776b44bfd..7ff2efadc 100644 --- a/drivers/net/wireless/marvell/libertas/cfg.c +++ b/drivers/net/wireless/marvell/libertas/cfg.c @@ -796,10 +796,15 @@ void lbs_scan_done(struct lbs_private *priv) { WARN_ON(!priv->scan_req); - if (priv->internal_scan) + if (priv->internal_scan) { kfree(priv->scan_req); - else - cfg80211_scan_done(priv->scan_req, false); + } else { + struct cfg80211_scan_info info = { + .aborted = false, + }; + + cfg80211_scan_done(priv->scan_req, &info); + } priv->scan_req = NULL; } @@ -2039,8 +2044,8 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev) -int lbs_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, - bool enabled, int timeout) +static int lbs_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, + bool enabled, int timeout) { struct lbs_private *priv = wiphy_priv(wiphy); diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c index c95bf6dc9..c753e36c2 100644 --- a/drivers/net/wireless/marvell/libertas/cmdresp.c +++ b/drivers/net/wireless/marvell/libertas/cmdresp.c @@ -27,6 +27,8 @@ void lbs_mac_event_disconnected(struct lbs_private *priv, bool locally_generated) { + unsigned long flags; + if (priv->connect_status != LBS_CONNECTED) return; @@ -46,9 +48,11 @@ void lbs_mac_event_disconnected(struct lbs_private *priv, netif_carrier_off(priv->dev); /* Free Tx and Rx packets */ + spin_lock_irqsave(&priv->driver_lock, flags); kfree_skb(priv->currenttxskb); priv->currenttxskb = NULL; priv->tx_pending_len = 0; + spin_unlock_irqrestore(&priv->driver_lock, flags); priv->connect_status = LBS_DISCONNECTED; diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c 
b/drivers/net/wireless/marvell/libertas/if_sdio.c index 5b6cf1a7b..7fd8b68e5 100644 --- a/drivers/net/wireless/marvell/libertas/if_sdio.c +++ b/drivers/net/wireless/marvell/libertas/if_sdio.c @@ -1215,7 +1215,7 @@ static int if_sdio_probe(struct sdio_func *func, } spin_lock_init(&card->lock); - card->workqueue = create_workqueue("libertas_sdio"); + card->workqueue = alloc_workqueue("libertas_sdio", WQ_MEM_RECLAIM, 0); INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker); init_waitqueue_head(&card->pwron_waitq); @@ -1313,7 +1313,6 @@ static void if_sdio_remove(struct sdio_func *func) lbs_stop_card(card->priv); lbs_remove_card(card->priv); - flush_workqueue(card->workqueue); destroy_workqueue(card->workqueue); while (card->packets) { diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c index d51293895..e1ee7e68e 100644 --- a/drivers/net/wireless/marvell/libertas/if_spi.c +++ b/drivers/net/wireless/marvell/libertas/if_spi.c @@ -1172,7 +1172,7 @@ static int if_spi_probe(struct spi_device *spi) priv->fw_ready = 1; /* Initialize interrupt handling stuff. */ - card->workqueue = create_workqueue("libertas_spi"); + card->workqueue = alloc_workqueue("libertas_spi", WQ_MEM_RECLAIM, 0); INIT_WORK(&card->packet_work, if_spi_host_to_card_worker); INIT_WORK(&card->resume_work, if_spi_resume_worker); @@ -1200,7 +1200,6 @@ static int if_spi_probe(struct spi_device *spi) release_irq: free_irq(spi->irq, card); terminate_workqueue: - flush_workqueue(card->workqueue); destroy_workqueue(card->workqueue); lbs_remove_card(priv); /* will call free_netdev */ free_card: @@ -1227,7 +1226,6 @@ static int libertas_spi_remove(struct spi_device *spi) lbs_remove_card(priv); /* will call free_netdev */ free_irq(spi->irq, card); - flush_workqueue(card->workqueue); destroy_workqueue(card->workqueue); if (card->pdata->teardown) card->pdata->teardown(spi); diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c index 0bf8916a0..54e426c1e 100644 --- a/drivers/net/wireless/marvell/libertas_tf/main.c +++ b/drivers/net/wireless/marvell/libertas_tf/main.c @@ -16,7 +16,6 @@ #include <linux/module.h> #include "libertas_tf.h" -#define DRIVER_RELEASE_VERSION "004.p0" /* thinfirm version: 5.132.X.pX */ #define LBTF_FW_VER_MIN 0x05840300 #define LBTF_FW_VER_MAX 0x0584ffff @@ -27,12 +26,6 @@ unsigned int lbtf_debug; EXPORT_SYMBOL_GPL(lbtf_debug); module_param_named(libertas_tf_debug, lbtf_debug, int, 0644); -static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION -#ifdef DEBUG - "-dbg" -#endif - ""; - struct workqueue_struct *lbtf_wq; static const struct ieee80211_channel lbtf_channels[] = { @@ -742,7 +735,7 @@ EXPORT_SYMBOL_GPL(lbtf_bcn_sent); static int __init lbtf_init_module(void) { lbtf_deb_enter(LBTF_DEB_MAIN); - lbtf_wq = create_workqueue("libertastf"); + lbtf_wq = alloc_workqueue("libertastf", WQ_MEM_RECLAIM, 0); if (lbtf_wq == NULL) { printk(KERN_ERR "libertastf: couldn't create workqueue\n"); return -ENOMEM; diff --git a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c index 1efef3b82..c47d63668 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/marvell/mwifiex/11n_aggr.c @@ -184,7 +184,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, tx_info_src = MWIFIEX_SKB_TXCB(skb_src); skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size, - GFP_ATOMIC | GFP_DMA); + GFP_ATOMIC); if 
(!skb_aggr) { spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); @@ -205,7 +205,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, do { /* Check if AMSDU can accommodate this MSDU */ - if (skb_tailroom(skb_aggr) < (skb_src->len + LLC_SNAP_LEN)) + if ((skb_aggr->len + skb_src->len + LLC_SNAP_LEN) > + adapter->tx_buf_size) break; skb_src = skb_dequeue(&pra_list->skb_head); diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index ff948a922..a8ff969c9 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -377,6 +377,29 @@ mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy, } /* + * CFG802.11 operation handler to get Tx power. + */ +static int +mwifiex_cfg80211_get_tx_power(struct wiphy *wiphy, + struct wireless_dev *wdev, + int *dbm) +{ + struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); + struct mwifiex_private *priv = mwifiex_get_priv(adapter, + MWIFIEX_BSS_ROLE_ANY); + int ret = mwifiex_send_cmd(priv, HostCmd_CMD_RF_TX_PWR, + HostCmd_ACT_GEN_GET, 0, NULL, true); + + if (ret < 0) + return ret; + + /* tx_power_level is set in HostCmd_CMD_RF_TX_PWR command handler */ + *dbm = priv->tx_power_level; + + return 0; +} + +/* * CFG802.11 operation handler to set Power Save option. * * The timeout value, if provided, is currently ignored. @@ -1672,6 +1695,9 @@ static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy, struct cfg80211_beacon_data *data) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + struct mwifiex_adapter *adapter = priv->adapter; + + mwifiex_cancel_scan(adapter); if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_UAP) { mwifiex_dbg(priv->adapter, ERROR, @@ -1804,6 +1830,21 @@ mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant) HostCmd_ACT_GEN_SET, 0, &ant_cfg, true); } +static int +mwifiex_cfg80211_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant) +{ + struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); + struct mwifiex_private *priv = mwifiex_get_priv(adapter, + MWIFIEX_BSS_ROLE_ANY); + mwifiex_send_cmd(priv, HostCmd_CMD_RF_ANTENNA, + HostCmd_ACT_GEN_GET, 0, NULL, true); + + *tx_ant = priv->tx_ant; + *rx_ant = priv->rx_ant; + + return 0; +} + /* cfg80211 operation handler for stop ap. * Function stops BSS running at uAP interface. 
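Both getters added above follow the same mwifiex round trip: send a blocking HostCmd with HostCmd_ACT_GEN_GET, let the command-response handler cache the firmware's answer in struct mwifiex_private, then hand the cached copy back to cfg80211. A minimal sketch of that idiom (my_get_cached_tx_power() is illustrative only, not part of the patch):

	/*
	 * Sketch of the query idiom behind mwifiex_cfg80211_get_tx_power():
	 * mwifiex_send_cmd() with sync == true blocks until the
	 * HostCmd_CMD_RF_TX_PWR response has been parsed, so the cached
	 * field is valid by the time it returns.
	 */
	static int my_get_cached_tx_power(struct mwifiex_private *priv, int *dbm)
	{
		int ret = mwifiex_send_cmd(priv, HostCmd_CMD_RF_TX_PWR,
					   HostCmd_ACT_GEN_GET, 0, NULL, true);

		if (ret < 0)
			return ret;

		/* Filled in by the HostCmd_CMD_RF_TX_PWR response handler. */
		*dbm = priv->tx_power_level;
		return 0;
	}

The same shape explains why mwifiex_cfg80211_get_antenna() can return priv->tx_ant and priv->rx_ant immediately after the synchronous send: the sta_cmdresp.c hunk further below is what stores them.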
*/ @@ -1895,10 +1936,9 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, mwifiex_set_uap_rates(bss_cfg, params); if (mwifiex_set_secure_params(priv, bss_cfg, params)) { - kfree(bss_cfg); mwifiex_dbg(priv->adapter, ERROR, "Failed to parse secuirty parameters!\n"); - return -1; + goto out; } mwifiex_set_ht_params(priv, bss_cfg, params); @@ -1927,7 +1967,7 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, if (mwifiex_11h_activate(priv, false)) { mwifiex_dbg(priv->adapter, ERROR, "Failed to disable 11h extensions!!"); - return -1; + goto out; } priv->state_11h.is_11h_active = false; } @@ -1935,12 +1975,11 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, if (mwifiex_config_start_uap(priv, bss_cfg)) { mwifiex_dbg(priv->adapter, ERROR, "Failed to start AP\n"); - kfree(bss_cfg); - return -1; + goto out; } if (mwifiex_set_mgmt_ies(priv, &params->beacon)) - return -1; + goto out; if (!netif_carrier_ok(priv->netdev)) netif_carrier_on(priv->netdev); @@ -1949,6 +1988,10 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, memcpy(&priv->bss_cfg, bss_cfg, sizeof(priv->bss_cfg)); kfree(bss_cfg); return 0; + +out: + kfree(bss_cfg); + return -1; } /* @@ -2209,6 +2252,9 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, return -EALREADY; } + if (priv->scan_block) + priv->scan_block = false; + if (adapter->surprise_removed || adapter->is_cmd_timedout) { mwifiex_dbg(adapter, ERROR, "%s: Ignore connection.\t" @@ -2427,6 +2473,9 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, return -EBUSY; } + if (!priv->wdev.current_bss && priv->scan_block) + priv->scan_block = false; + if (!mwifiex_stop_bg_scan(priv)) cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); @@ -2734,6 +2783,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, struct mwifiex_private *priv; struct net_device *dev; void *mdev_priv; + int ret; if (!adapter) return ERR_PTR(-EFAULT); @@ -2859,6 +2909,15 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, mwifiex_init_priv_params(priv, dev); priv->netdev = dev; + ret = mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE, + HostCmd_ACT_GEN_SET, 0, NULL, true); + if (ret) + return ERR_PTR(ret); + + ret = mwifiex_sta_init_cmd(priv, false, false); + if (ret) + return ERR_PTR(ret); + mwifiex_setup_ht_caps(&wiphy->bands[NL80211_BAND_2GHZ]->ht_cap, priv); if (adapter->is_hw_11ac_capable) mwifiex_setup_vht_caps( @@ -3262,7 +3321,10 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, struct mwifiex_ds_hs_cfg hs_cfg; int i, ret = 0, retry_num = 10; struct mwifiex_private *priv; + struct mwifiex_private *sta_priv = + mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); + sta_priv->scan_aborting = true; for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; mwifiex_abort_cac(priv); @@ -3291,21 +3353,21 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, if (!wowlan) { mwifiex_dbg(adapter, ERROR, "None of the WOWLAN triggers enabled\n"); - return 0; + ret = 0; + goto done; } - priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); - - if (!priv->media_connected && !wowlan->nd_config) { + if (!sta_priv->media_connected && !wowlan->nd_config) { mwifiex_dbg(adapter, ERROR, "Can not configure WOWLAN in disconnected state\n"); - return 0; + ret = 0; + goto done; } - ret = mwifiex_set_mef_filter(priv, wowlan); + ret = mwifiex_set_mef_filter(sta_priv, wowlan); if (ret) { mwifiex_dbg(adapter, ERROR, "Failed to set MEF filter\n"); - return ret; + goto done; } memset(&hs_cfg, 0, sizeof(hs_cfg)); @@ -3314,26 
+3376,25 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, if (wowlan->nd_config) { mwifiex_dbg(adapter, INFO, "Wake on net detect\n"); hs_cfg.conditions |= HS_CFG_COND_MAC_EVENT; - mwifiex_cfg80211_sched_scan_start(wiphy, priv->netdev, + mwifiex_cfg80211_sched_scan_start(wiphy, sta_priv->netdev, wowlan->nd_config); } if (wowlan->disconnect) { hs_cfg.conditions |= HS_CFG_COND_MAC_EVENT; - mwifiex_dbg(priv->adapter, INFO, "Wake on device disconnect\n"); + mwifiex_dbg(sta_priv->adapter, INFO, "Wake on device disconnect\n"); } hs_cfg.is_invoke_hostcmd = false; hs_cfg.gpio = adapter->hs_cfg.gpio; hs_cfg.gap = adapter->hs_cfg.gap; - ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET, + ret = mwifiex_set_hs_params(sta_priv, HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD, &hs_cfg); - if (ret) { - mwifiex_dbg(adapter, ERROR, - "Failed to set HS params\n"); - return ret; - } + if (ret) + mwifiex_dbg(adapter, ERROR, "Failed to set HS params\n"); +done: + sta_priv->scan_aborting = false; return ret; } @@ -3940,12 +4001,14 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { .set_default_key = mwifiex_cfg80211_set_default_key, .set_power_mgmt = mwifiex_cfg80211_set_power_mgmt, .set_tx_power = mwifiex_cfg80211_set_tx_power, + .get_tx_power = mwifiex_cfg80211_get_tx_power, .set_bitrate_mask = mwifiex_cfg80211_set_bitrate_mask, .start_ap = mwifiex_cfg80211_start_ap, .stop_ap = mwifiex_cfg80211_stop_ap, .change_beacon = mwifiex_cfg80211_change_beacon, .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config, .set_antenna = mwifiex_cfg80211_set_antenna, + .get_antenna = mwifiex_cfg80211_get_antenna, .del_station = mwifiex_cfg80211_del_station, .sched_scan_start = mwifiex_cfg80211_sched_scan_start, .sched_scan_stop = mwifiex_cfg80211_sched_scan_stop, diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 6bc2011d8..c29f26d8b 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -1020,8 +1020,6 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_node = NULL, *tmp_node; unsigned long flags, cmd_flags; - struct mwifiex_private *priv; - int i; spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags); /* Cancel current cmd */ @@ -1046,23 +1044,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter) spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags); spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); - mwifiex_cancel_pending_scan_cmd(adapter); - - if (adapter->scan_processing) { - spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags); - adapter->scan_processing = false; - spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); - for (i = 0; i < adapter->priv_num; i++) { - priv = adapter->priv[i]; - if (!priv) - continue; - if (priv->scan_request) { - mwifiex_dbg(adapter, WARN, "info: aborting scan\n"); - cfg80211_scan_done(priv->scan_request, 1); - priv->scan_request = NULL; - } - } - } + mwifiex_cancel_scan(adapter); } /* @@ -1080,8 +1062,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_node = NULL; unsigned long cmd_flags; - struct mwifiex_private *priv; - int i; if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) { @@ -1101,23 +1081,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) mwifiex_recycle_cmd_node(adapter, cmd_node); } - mwifiex_cancel_pending_scan_cmd(adapter); - - if (adapter->scan_processing) { - 
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags); - adapter->scan_processing = false; - spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); - for (i = 0; i < adapter->priv_num; i++) { - priv = adapter->priv[i]; - if (!priv) - continue; - if (priv->scan_request) { - mwifiex_dbg(adapter, WARN, "info: aborting scan\n"); - cfg80211_scan_done(priv->scan_request, 1); - priv->scan_request = NULL; - } - } - } + mwifiex_cancel_scan(adapter); } /* diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 8e4145abd..5596b6be1 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -462,6 +462,9 @@ enum P2P_MODES { #define HostCmd_ACT_SET_RX 0x0001 #define HostCmd_ACT_SET_TX 0x0002 #define HostCmd_ACT_SET_BOTH 0x0003 +#define HostCmd_ACT_GET_RX 0x0004 +#define HostCmd_ACT_GET_TX 0x0008 +#define HostCmd_ACT_GET_BOTH 0x000c #define RF_ANTENNA_AUTO 0xFFFF @@ -1958,8 +1961,8 @@ struct mwifiex_ie_types_btcoex_scan_time { struct mwifiex_ie_types_header header; u8 coex_scan; u8 reserved; - u16 min_scan_time; - u16 max_scan_time; + __le16 min_scan_time; + __le16 max_scan_time; } __packed; struct mwifiex_ie_types_btcoex_aggr_win_size { diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index 78c532f0d..1489c9019 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -60,7 +60,7 @@ static void wakeup_timer_fn(unsigned long data) adapter->hw_status = MWIFIEX_HW_STATUS_RESET; mwifiex_cancel_all_pending_cmd(adapter); - if (adapter->if_ops.card_reset) + if (adapter->if_ops.card_reset && !adapter->hs_activated) adapter->if_ops.card_reset(adapter); } @@ -110,6 +110,8 @@ int mwifiex_init_priv(struct mwifiex_private *priv) priv->tx_power_level = 0; priv->max_tx_power_level = 0; priv->min_tx_power_level = 0; + priv->tx_ant = 0; + priv->rx_ant = 0; priv->tx_rate = 0; priv->rxpd_htinfo = 0; priv->rxpd_rate = 0; @@ -788,3 +790,4 @@ poll_fw: return ret; } +EXPORT_SYMBOL_GPL(mwifiex_dnld_fw); diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h index a5a48c183..70429815f 100644 --- a/drivers/net/wireless/marvell/mwifiex/ioctl.h +++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h @@ -83,6 +83,8 @@ struct wep_key { #define MWIFIEX_AUTH_MODE_AUTO 0xFF #define BAND_CONFIG_BG 0x00 #define BAND_CONFIG_A 0x01 +#define MWIFIEX_SEC_CHAN_BELOW 0x30 +#define MWIFIEX_SEC_CHAN_ABOVE 0x10 #define MWIFIEX_SUPPORTED_RATES 14 #define MWIFIEX_SUPPORTED_RATES_EXT 32 #define MWIFIEX_TDLS_SUPPORTED_RATES 8 @@ -341,16 +343,16 @@ enum { }; struct mwifiex_ds_reg_rw { - __le32 type; - __le32 offset; - __le32 value; + u32 type; + u32 offset; + u32 value; }; #define MAX_EEPROM_DATA 256 struct mwifiex_ds_read_eeprom { - __le16 offset; - __le16 byte_count; + u16 offset; + u16 byte_count; u8 value[MAX_EEPROM_DATA]; }; diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c index 62211fca9..1c7b00630 100644 --- a/drivers/net/wireless/marvell/mwifiex/join.c +++ b/drivers/net/wireless/marvell/mwifiex/join.c @@ -647,6 +647,12 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, const u8 *ie_ptr; struct ieee80211_ht_operation *assoc_resp_ht_oper; + if (!priv->attempted_bss_desc) { + mwifiex_dbg(priv->adapter, ERROR, + "ASSOC_RESP: failed, association terminated by host\n"); + goto done; + } + assoc_rsp = (struct 
ieee_types_assoc_rsp *) &resp->params; cap_info = le16_to_cpu(assoc_rsp->cap_info_bitmap); @@ -1270,6 +1276,12 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, u16 cmd = le16_to_cpu(resp->command); u8 result; + if (!priv->attempted_bss_desc) { + mwifiex_dbg(priv->adapter, ERROR, + "ADHOC_RESP: failed, association terminated by host\n"); + goto done; + } + if (cmd == HostCmd_CMD_802_11_AD_HOC_START) result = start_result->result; else @@ -1281,7 +1293,7 @@ int mwifiex_ret_802_11_ad_hoc(struct mwifiex_private *priv, if (result) { mwifiex_dbg(priv->adapter, ERROR, "ADHOC_RESP: failed\n"); if (priv->media_connected) - mwifiex_reset_connect_state(priv, result); + mwifiex_reset_connect_state(priv, result, true); memset(&priv->curr_bss_params.bss_descriptor, 0x00, sizeof(struct mwifiex_bssdescriptor)); diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 4dde1ae35..1afd6a94c 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -526,10 +526,12 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context) fw.fw_buf = (u8 *) adapter->firmware->data; fw.fw_len = adapter->firmware->size; - if (adapter->if_ops.dnld_fw) + if (adapter->if_ops.dnld_fw) { ret = adapter->if_ops.dnld_fw(adapter, &fw); - else + } else { ret = mwifiex_dnld_fw(adapter, &fw); + } + if (ret == -1) goto err_dnld_fw; @@ -695,9 +697,13 @@ mwifiex_close(struct net_device *dev) struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); if (priv->scan_request) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + mwifiex_dbg(priv->adapter, INFO, "aborting scan on ndo_stop\n"); - cfg80211_scan_done(priv->scan_request, 1); + cfg80211_scan_done(priv->scan_request, &info); priv->scan_request = NULL; priv->scan_aborting = true; } diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 0207af00b..9f6bb400b 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -533,6 +533,8 @@ struct mwifiex_private { u16 tx_power_level; u8 max_tx_power_level; u8 min_tx_power_level; + u32 tx_ant; + u32 rx_ant; u8 tx_rate; u8 tx_htinfo; u8 rxpd_htinfo; @@ -1054,6 +1056,7 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter); void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter); void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter); void mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter); +void mwifiex_cancel_scan(struct mwifiex_adapter *adapter); void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node); @@ -1128,7 +1131,8 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc); int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, struct host_cmd_ds_command *resp); -void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason); +void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason, + bool from_ap); u8 mwifiex_band_to_radio_type(u8 band); int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac); void mwifiex_deauthenticate_all(struct mwifiex_adapter *adapter); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 0c7937eb6..453ab6ad4 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -202,7 +202,6 @@ 
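Two complementary endianness fixes sit in the hunks above: fw.h retypes the btcoex scan-time fields from u16 to __le16 because they arrive little-endian from the firmware, while ioctl.h goes the other way and drops the __le annotations from mwifiex_ds_reg_rw and mwifiex_ds_read_eeprom because those structures are host-internal, with conversion now done at the command boundary. The __le16 annotation changes nothing at runtime; it lets sparse (make C=1 CF=-D__CHECK_ENDIAN__) prove that every reader byte-swaps. A self-contained illustration on a made-up TLV (my_fw_scan_time_tlv is hypothetical, not driver code):

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Fields that arrive little-endian over the bus are declared __le16. */
	struct my_fw_scan_time_tlv {
		__le16 min_scan_time;
		__le16 max_scan_time;
	} __packed;

	static u16 my_min_scan_time(const struct my_fw_scan_time_tlv *tlv)
	{
		/*
		 * Correct on either host endianness; assigning
		 * tlv->min_scan_time straight to a u16, which is what the old
		 * sta_event.c code effectively did, breaks on big-endian.
		 */
		return le16_to_cpu(tlv->min_scan_time);
	}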
static int mwifiex_pcie_probe(struct pci_dev *pdev, if (mwifiex_add_card(card, &add_remove_card_sem, &pcie_ops, MWIFIEX_PCIE)) { pr_err("%s failed\n", __func__); - kfree(card); return -1; } @@ -440,6 +439,11 @@ static int mwifiex_pcie_disable_host_int(struct mwifiex_adapter *adapter) return 0; } +static void mwifiex_pcie_disable_host_int_noerr(struct mwifiex_adapter *adapter) +{ + WARN_ON(mwifiex_pcie_disable_host_int(adapter)); +} + /* * This function enables the host interrupt. * @@ -507,7 +511,7 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter) for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { /* Allocate skb here so that firmware can DMA data from it */ skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE, - GFP_KERNEL | GFP_DMA); + GFP_KERNEL); if (!skb) { mwifiex_dbg(adapter, ERROR, "Unable to allocate skb for RX ring.\n"); @@ -1319,7 +1323,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) } skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE, - GFP_KERNEL | GFP_DMA); + GFP_KERNEL); if (!skb_tmp) { mwifiex_dbg(adapter, ERROR, "Unable to allocate skb.\n"); @@ -1612,6 +1616,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) pkt_len = *((__le16 *)skb->data); rx_len = le16_to_cpu(pkt_len); + skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len); skb_trim(skb, rx_len); skb_pull(skb, INTF_HEADER_LEN); @@ -2086,6 +2091,13 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter, unsigned long flags; struct pcie_service_card *card = adapter->card; + if (card->msi_enable) { + spin_lock_irqsave(&adapter->int_lock, flags); + adapter->int_status = 1; + spin_unlock_irqrestore(&adapter->int_lock, flags); + return; + } + if (!mwifiex_pcie_ok_to_access_hw(adapter)) return; @@ -2187,15 +2199,44 @@ exit: static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) { int ret; - u32 pcie_ireg; + u32 pcie_ireg = 0; unsigned long flags; + struct pcie_service_card *card = adapter->card; spin_lock_irqsave(&adapter->int_lock, flags); - /* Clear out unused interrupts */ - pcie_ireg = adapter->int_status; + if (!card->msi_enable) { + /* Clear out unused interrupts */ + pcie_ireg = adapter->int_status; + } adapter->int_status = 0; spin_unlock_irqrestore(&adapter->int_lock, flags); + if (card->msi_enable) { + if (mwifiex_pcie_ok_to_access_hw(adapter)) { + if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, + &pcie_ireg)) { + mwifiex_dbg(adapter, ERROR, + "Read register failed\n"); + return -1; + } + + if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) { + if (mwifiex_write_reg(adapter, + PCIE_HOST_INT_STATUS, + ~pcie_ireg)) { + mwifiex_dbg(adapter, ERROR, + "Write register failed\n"); + return -1; + } + if (!adapter->pps_uapsd_mode && + adapter->ps_state == PS_STATE_SLEEP) { + adapter->ps_state = PS_STATE_AWAKE; + adapter->pm_wakeup_fw_try = false; + del_timer(&adapter->wakeup_timer); + } + } + } + } while (pcie_ireg & HOST_INTR_MASK) { if (pcie_ireg & HOST_INTR_DNLD_DONE) { pcie_ireg &= ~HOST_INTR_DNLD_DONE; @@ -2235,6 +2276,12 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) return ret; } + if (card->msi_enable) { + spin_lock_irqsave(&adapter->int_lock, flags); + adapter->int_status = 0; + spin_unlock_irqrestore(&adapter->int_lock, flags); + } + if (mwifiex_pcie_ok_to_access_hw(adapter)) { if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) { @@ -2254,11 +2301,17 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) } } + if (!card->msi_enable) { + 
spin_lock_irqsave(&adapter->int_lock, flags); + pcie_ireg |= adapter->int_status; + adapter->int_status = 0; + spin_unlock_irqrestore(&adapter->int_lock, flags); + } } mwifiex_dbg(adapter, INTR, "info: cmd_sent=%d data_sent=%d\n", adapter->cmd_sent, adapter->data_sent); - if (adapter->ps_state != PS_STATE_SLEEP) + if (!card->msi_enable && adapter->ps_state != PS_STATE_SLEEP) mwifiex_pcie_enable_host_int(adapter); return 0; @@ -2796,7 +2849,6 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter) "MRVL_PCIE", &card->share_irq_ctx); if (ret) { pr_err("request_irq failed: ret=%d\n", ret); - adapter->card = NULL; return -1; } @@ -2804,7 +2856,7 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter) } /* - * This function get firmare name for downloading by revision id + * This function gets the firmware name for downloading by revision id * * Read revision id register to get revision id */ @@ -2841,20 +2893,20 @@ static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter) version &= 0x7; switch (revision_id) { case PCIE8997_V2: - if (version == CHIP_VER_PCIEUSB) + if (version == CHIP_VER_PCIEUART) strcpy(adapter->fw_name, - PCIEUSB8997_FW_NAME_V2); + PCIEUART8997_FW_NAME_V2); else strcpy(adapter->fw_name, - PCIEUART8997_FW_NAME_V2); + PCIEUSB8997_FW_NAME_V2); break; case PCIE8997_Z: - if (version == CHIP_VER_PCIEUSB) + if (version == CHIP_VER_PCIEUART) strcpy(adapter->fw_name, - PCIEUSB8997_FW_NAME_Z); + PCIEUART8997_FW_NAME_Z); else strcpy(adapter->fw_name, - PCIEUART8997_FW_NAME_Z); + PCIEUSB8997_FW_NAME_Z); break; default: strcpy(adapter->fw_name, PCIE8997_DEFAULT_FW_NAME); @@ -2901,10 +2953,11 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg; - struct pci_dev *pdev = card->dev; + struct pci_dev *pdev; int i; if (card) { + pdev = card->dev; if (card->msix_enable) { for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) synchronize_irq(card->msix_entries[i].vector); @@ -2945,6 +2998,7 @@ static struct mwifiex_if_ops pcie_ops = { .register_dev = mwifiex_register_dev, .unregister_dev = mwifiex_unregister_dev, .enable_int = mwifiex_pcie_enable_host_int, + .disable_int = mwifiex_pcie_disable_host_int_noerr, .process_int_status = mwifiex_process_int_status, .host_to_card = mwifiex_pcie_host_to_card, .wakeup = mwifiex_pm_wakeup_card, diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index db18ac6f5..609588400 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -48,7 +48,7 @@ #define PCIE8897_B0 0x1200 #define PCIE8997_Z 0x0 #define PCIE8997_V2 0x471 -#define CHIP_VER_PCIEUSB 0x2 +#define CHIP_VER_PCIEUART 0x3 /* Constants for Buffer Descriptor (BD) rings */ #define MWIFIEX_MAX_TXRX_BD 0x20 @@ -258,7 +258,7 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = { .fw_dump_end = 0xcff, .fw_dump_host_ready = 0xcc, .fw_dump_read_done = 0xdd, - .msix_support = 1, + .msix_support = 0, }; static struct memory_type_mapping mem_type_mapping_tbl_w8897[] = { diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index bc5e52ceb..21ec84794 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -1896,7 +1896,8 @@ mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv) u8 id = 0; struct mwifiex_user_scan_cfg *user_scan_cfg; - if 
(adapter->active_scan_triggered || !priv->scan_request) { + if (adapter->active_scan_triggered || !priv->scan_request || + priv->scan_aborting) { adapter->active_scan_triggered = false; return 0; } @@ -1956,10 +1957,15 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) mwifiex_complete_scan(priv); if (priv->scan_request) { + struct cfg80211_scan_info info = { + .aborted = false, + }; + mwifiex_dbg(adapter, INFO, "info: notifying scan done\n"); - cfg80211_scan_done(priv->scan_request, 0); + cfg80211_scan_done(priv->scan_request, &info); priv->scan_request = NULL; + priv->scan_aborting = false; } else { priv->scan_aborting = false; mwifiex_dbg(adapter, INFO, @@ -1977,10 +1983,15 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) if (!adapter->active_scan_triggered) { if (priv->scan_request) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + mwifiex_dbg(adapter, INFO, "info: aborting scan\n"); - cfg80211_scan_done(priv->scan_request, 1); + cfg80211_scan_done(priv->scan_request, &info); priv->scan_request = NULL; + priv->scan_aborting = false; } else { priv->scan_aborting = false; mwifiex_dbg(adapter, INFO, @@ -2001,6 +2012,37 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) return; } +void mwifiex_cancel_scan(struct mwifiex_adapter *adapter) +{ + struct mwifiex_private *priv; + unsigned long cmd_flags; + int i; + + mwifiex_cancel_pending_scan_cmd(adapter); + + if (adapter->scan_processing) { + spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags); + adapter->scan_processing = false; + spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags); + for (i = 0; i < adapter->priv_num; i++) { + priv = adapter->priv[i]; + if (!priv) + continue; + if (priv->scan_request) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + + mwifiex_dbg(adapter, INFO, + "info: aborting scan\n"); + cfg80211_scan_done(priv->scan_request, &info); + priv->scan_request = NULL; + priv->scan_aborting = false; + } + } + } +} + /* * This function handles the command response of scan. 
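The cfg80211_scan_done() calls in the hunks above, including the new mwifiex_cancel_scan() helper, all use the completion signature this series migrates every driver to: the bare bool aborted argument is gone, replaced by a struct cfg80211_scan_info (the same change hits ieee80211_scan_completed() for mac80211 drivers such as hwsim earlier in this diff). Reduced to its core, the new pattern looks like this (sketch only; my_report_scan_done() assumes the driver stashed the request when the scan started):

	#include <net/cfg80211.h>

	static void my_report_scan_done(struct cfg80211_scan_request *scan_req,
					bool aborted)
	{
		struct cfg80211_scan_info info = {
			.aborted = aborted,	/* plays the role of the old bool */
		};

		cfg80211_scan_done(scan_req, &info);
	}

The struct form leaves room to report more completion data later without churning every driver again.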
* diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index e757dfc51..f49bfdb7f 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -102,10 +102,9 @@ static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card) struct mwifiex_plt_wake_cfg *cfg; int ret; - if (!dev->of_node || - !of_match_node(mwifiex_sdio_of_match_table, dev->of_node)) { - dev_err(dev, "sdio platform data not available\n"); - return -1; + if (!of_match_node(mwifiex_sdio_of_match_table, dev->of_node)) { + dev_err(dev, "required compatible string missing\n"); + return -EINVAL; } card->plt_of_node = dev->of_node; @@ -115,7 +114,7 @@ static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card) if (cfg && card->plt_of_node) { cfg->irq_wifi = irq_of_parse_and_map(card->plt_of_node, 0); if (!cfg->irq_wifi) { - dev_err(dev, + dev_dbg(dev, "fail to parse irq_wifi from device tree\n"); } else { ret = devm_request_irq(dev, cfg->irq_wifi, @@ -183,24 +182,35 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) sdio_release_host(func); if (ret) { - pr_err("%s: failed to enable function\n", __func__); - kfree(card); - return -EIO; + dev_err(&func->dev, "failed to enable function\n"); + goto err_free; } /* device tree node parsing and platform specific configuration*/ - mwifiex_sdio_probe_of(&func->dev, card); - - if (mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops, - MWIFIEX_SDIO)) { - pr_err("%s: add card failed\n", __func__); - kfree(card); - sdio_claim_host(func); - ret = sdio_disable_func(func); - sdio_release_host(func); - ret = -1; + if (func->dev.of_node) { + ret = mwifiex_sdio_probe_of(&func->dev, card); + if (ret) { + dev_err(&func->dev, "SDIO dt node parse failed\n"); + goto err_disable; + } + } + + ret = mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops, + MWIFIEX_SDIO); + if (ret) { + dev_err(&func->dev, "add card failed\n"); + goto err_disable; } + return 0; + +err_disable: + sdio_claim_host(func); + sdio_disable_func(func); + sdio_release_host(func); +err_free: + kfree(card); + return ret; } @@ -544,6 +554,19 @@ static int mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter) return mwifiex_write_reg(adapter, CONFIGURATION_REG, 0); } +static int mwifiex_sdio_dnld_fw(struct mwifiex_adapter *adapter, + struct mwifiex_fw_image *fw) +{ + struct sdio_mmc_card *card = adapter->card; + int ret; + + sdio_claim_host(card->func); + ret = mwifiex_dnld_fw(adapter, fw); + sdio_release_host(card->func); + + return ret; +} + /* * This function is used to initialize IO ports for the * chipsets supporting SDIO new mode eg SD8897. 
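The new mwifiex_sdio_dnld_fw() above is a thin bracket: mwifiex_dnld_fw(), now exported from init.c (see the EXPORT_SYMBOL_GPL hunk earlier), is reached through the generic .dnld_fw hook, and on SDIO every transfer it issues must run with the host claimed. The idiom, stated generically (my_locked_sdio_op() and my_do_io are stand-ins, not driver code):

	#include <linux/mmc/sdio_func.h>

	static int my_locked_sdio_op(struct sdio_func *func,
				     int (*my_do_io)(struct sdio_func *func))
	{
		int ret;

		/* Serialize against other users of the same MMC host. */
		sdio_claim_host(func);
		ret = my_do_io(func);
		sdio_release_host(func);

		return ret;
	}

The other bus front ends either supply their own .dnld_fw or can call mwifiex_dnld_fw() directly; only SDIO needs the claim/release bracket, hence the wrapper.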
@@ -1492,7 +1515,7 @@ rx_curr_single: mwifiex_dbg(adapter, INFO, "info: RX: port: %d, rx_len: %d\n", port, rx_len); - skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA); + skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL); if (!skb) { mwifiex_dbg(adapter, ERROR, "single skb allocated fail,\t" @@ -1597,7 +1620,7 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE); mwifiex_dbg(adapter, INFO, "info: rx_len = %d\n", rx_len); - skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA); + skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL); if (!skb) return -1; @@ -2732,6 +2755,7 @@ static struct mwifiex_if_ops sdio_ops = { .cleanup_mpa_buf = mwifiex_cleanup_mpa_buf, .cmdrsp_complete = mwifiex_sdio_cmdrsp_complete, .event_complete = mwifiex_sdio_event_complete, + .dnld_fw = mwifiex_sdio_dnld_fw, .card_reset = mwifiex_sdio_card_reset, .reg_dump = mwifiex_sdio_reg_dump, .device_dump = mwifiex_sdio_device_dump, diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index e436574b1..7897037b0 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -313,23 +313,41 @@ static int mwifiex_cmd_rf_antenna(struct mwifiex_private *priv, cmd->command = cpu_to_le16(HostCmd_CMD_RF_ANTENNA); - if (cmd_action != HostCmd_ACT_GEN_SET) - return 0; - - if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) { - cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_rf_ant_mimo) + - S_DS_GEN); - ant_mimo->action_tx = cpu_to_le16(HostCmd_ACT_SET_TX); - ant_mimo->tx_ant_mode = cpu_to_le16((u16)ant_cfg->tx_ant); - ant_mimo->action_rx = cpu_to_le16(HostCmd_ACT_SET_RX); - ant_mimo->rx_ant_mode = cpu_to_le16((u16)ant_cfg->rx_ant); - } else { - cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_rf_ant_siso) + - S_DS_GEN); - ant_siso->action = cpu_to_le16(HostCmd_ACT_SET_BOTH); - ant_siso->ant_mode = cpu_to_le16((u16)ant_cfg->tx_ant); + switch (cmd_action) { + case HostCmd_ACT_GEN_SET: + if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) { + cmd->size = cpu_to_le16(sizeof(struct + host_cmd_ds_rf_ant_mimo) + + S_DS_GEN); + ant_mimo->action_tx = cpu_to_le16(HostCmd_ACT_SET_TX); + ant_mimo->tx_ant_mode = cpu_to_le16((u16)ant_cfg-> + tx_ant); + ant_mimo->action_rx = cpu_to_le16(HostCmd_ACT_SET_RX); + ant_mimo->rx_ant_mode = cpu_to_le16((u16)ant_cfg-> + rx_ant); + } else { + cmd->size = cpu_to_le16(sizeof(struct + host_cmd_ds_rf_ant_siso) + + S_DS_GEN); + ant_siso->action = cpu_to_le16(HostCmd_ACT_SET_BOTH); + ant_siso->ant_mode = cpu_to_le16((u16)ant_cfg->tx_ant); + } + break; + case HostCmd_ACT_GEN_GET: + if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) { + cmd->size = cpu_to_le16(sizeof(struct + host_cmd_ds_rf_ant_mimo) + + S_DS_GEN); + ant_mimo->action_tx = cpu_to_le16(HostCmd_ACT_GET_TX); + ant_mimo->action_rx = cpu_to_le16(HostCmd_ACT_GET_RX); + } else { + cmd->size = cpu_to_le16(sizeof(struct + host_cmd_ds_rf_ant_siso) + + S_DS_GEN); + ant_siso->action = cpu_to_le16(HostCmd_ACT_GET_BOTH); + } + break; } - return 0; } @@ -1130,9 +1148,8 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd, cmd->size = cpu_to_le16(sizeof(*mac_reg) + S_DS_GEN); mac_reg = &cmd->params.mac_reg; mac_reg->action = cpu_to_le16(cmd_action); - mac_reg->offset = - cpu_to_le16((u16) le32_to_cpu(reg_rw->offset)); - mac_reg->value = reg_rw->value; + mac_reg->offset = cpu_to_le16((u16) reg_rw->offset); + mac_reg->value = 
cpu_to_le32(reg_rw->value); break; } case HostCmd_CMD_BBP_REG_ACCESS: @@ -1142,9 +1159,8 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd, cmd->size = cpu_to_le16(sizeof(*bbp_reg) + S_DS_GEN); bbp_reg = &cmd->params.bbp_reg; bbp_reg->action = cpu_to_le16(cmd_action); - bbp_reg->offset = - cpu_to_le16((u16) le32_to_cpu(reg_rw->offset)); - bbp_reg->value = (u8) le32_to_cpu(reg_rw->value); + bbp_reg->offset = cpu_to_le16((u16) reg_rw->offset); + bbp_reg->value = (u8) reg_rw->value; break; } case HostCmd_CMD_RF_REG_ACCESS: @@ -1154,8 +1170,8 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd, cmd->size = cpu_to_le16(sizeof(*rf_reg) + S_DS_GEN); rf_reg = &cmd->params.rf_reg; rf_reg->action = cpu_to_le16(cmd_action); - rf_reg->offset = cpu_to_le16((u16) le32_to_cpu(reg_rw->offset)); - rf_reg->value = (u8) le32_to_cpu(reg_rw->value); + rf_reg->offset = cpu_to_le16((u16) reg_rw->offset); + rf_reg->value = (u8) reg_rw->value; break; } case HostCmd_CMD_PMIC_REG_ACCESS: @@ -1165,9 +1181,8 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd, cmd->size = cpu_to_le16(sizeof(*pmic_reg) + S_DS_GEN); pmic_reg = &cmd->params.pmic_reg; pmic_reg->action = cpu_to_le16(cmd_action); - pmic_reg->offset = - cpu_to_le16((u16) le32_to_cpu(reg_rw->offset)); - pmic_reg->value = (u8) le32_to_cpu(reg_rw->value); + pmic_reg->offset = cpu_to_le16((u16) reg_rw->offset); + pmic_reg->value = (u8) reg_rw->value; break; } case HostCmd_CMD_CAU_REG_ACCESS: @@ -1177,9 +1192,8 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd, cmd->size = cpu_to_le16(sizeof(*cau_reg) + S_DS_GEN); cau_reg = &cmd->params.rf_reg; cau_reg->action = cpu_to_le16(cmd_action); - cau_reg->offset = - cpu_to_le16((u16) le32_to_cpu(reg_rw->offset)); - cau_reg->value = (u8) le32_to_cpu(reg_rw->value); + cau_reg->offset = cpu_to_le16((u16) reg_rw->offset); + cau_reg->value = (u8) reg_rw->value; break; } case HostCmd_CMD_802_11_EEPROM_ACCESS: @@ -1190,8 +1204,8 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd, cmd->size = cpu_to_le16(sizeof(*cmd_eeprom) + S_DS_GEN); cmd_eeprom->action = cpu_to_le16(cmd_action); - cmd_eeprom->offset = rd_eeprom->offset; - cmd_eeprom->byte_count = rd_eeprom->byte_count; + cmd_eeprom->offset = cpu_to_le16(rd_eeprom->offset); + cmd_eeprom->byte_count = cpu_to_le16(rd_eeprom->byte_count); cmd_eeprom->value = 0; break; } diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index d18c7979d..ccf54932e 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -469,7 +469,9 @@ static int mwifiex_ret_rf_antenna(struct mwifiex_private *priv, struct host_cmd_ds_rf_ant_siso *ant_siso = &resp->params.ant_siso; struct mwifiex_adapter *adapter = priv->adapter; - if (adapter->hw_dev_mcs_support == HT_STREAM_2X2) + if (adapter->hw_dev_mcs_support == HT_STREAM_2X2) { + priv->tx_ant = le16_to_cpu(ant_mimo->tx_ant_mode); + priv->rx_ant = le16_to_cpu(ant_mimo->rx_ant_mode); mwifiex_dbg(adapter, INFO, "RF_ANT_RESP: Tx action = 0x%x, Tx Mode = 0x%04x\t" "Rx action = 0x%x, Rx Mode = 0x%04x\n", @@ -477,12 +479,14 @@ static int mwifiex_ret_rf_antenna(struct mwifiex_private *priv, le16_to_cpu(ant_mimo->tx_ant_mode), le16_to_cpu(ant_mimo->action_rx), le16_to_cpu(ant_mimo->rx_ant_mode)); - else + } else { + priv->tx_ant = le16_to_cpu(ant_siso->ant_mode); + priv->rx_ant = le16_to_cpu(ant_siso->ant_mode); mwifiex_dbg(adapter, INFO, 
"RF_ANT_RESP: action = 0x%x, Mode = 0x%04x\n", le16_to_cpu(ant_siso->action), le16_to_cpu(ant_siso->ant_mode)); - + } return 0; } @@ -553,7 +557,8 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv, if (!memcmp(resp->params.deauth.mac_addr, &priv->curr_bss_params.bss_descriptor.mac_address, sizeof(resp->params.deauth.mac_addr))) - mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING); + mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING, + false); return 0; } @@ -566,7 +571,7 @@ static int mwifiex_ret_802_11_deauthenticate(struct mwifiex_private *priv, static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv, struct host_cmd_ds_command *resp) { - mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING); + mwifiex_reset_connect_state(priv, WLAN_REASON_DEAUTH_LEAVING, false); return 0; } @@ -781,45 +786,44 @@ static int mwifiex_ret_reg_access(u16 type, struct host_cmd_ds_command *resp, switch (type) { case HostCmd_CMD_MAC_REG_ACCESS: r.mac = &resp->params.mac_reg; - reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.mac->offset)); - reg_rw->value = r.mac->value; + reg_rw->offset = (u32) le16_to_cpu(r.mac->offset); + reg_rw->value = le32_to_cpu(r.mac->value); break; case HostCmd_CMD_BBP_REG_ACCESS: r.bbp = &resp->params.bbp_reg; - reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.bbp->offset)); - reg_rw->value = cpu_to_le32((u32) r.bbp->value); + reg_rw->offset = (u32) le16_to_cpu(r.bbp->offset); + reg_rw->value = (u32) r.bbp->value; break; case HostCmd_CMD_RF_REG_ACCESS: r.rf = &resp->params.rf_reg; - reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset)); - reg_rw->value = cpu_to_le32((u32) r.bbp->value); + reg_rw->offset = (u32) le16_to_cpu(r.rf->offset); + reg_rw->value = (u32) r.bbp->value; break; case HostCmd_CMD_PMIC_REG_ACCESS: r.pmic = &resp->params.pmic_reg; - reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.pmic->offset)); - reg_rw->value = cpu_to_le32((u32) r.pmic->value); + reg_rw->offset = (u32) le16_to_cpu(r.pmic->offset); + reg_rw->value = (u32) r.pmic->value; break; case HostCmd_CMD_CAU_REG_ACCESS: r.rf = &resp->params.rf_reg; - reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset)); - reg_rw->value = cpu_to_le32((u32) r.rf->value); + reg_rw->offset = (u32) le16_to_cpu(r.rf->offset); + reg_rw->value = (u32) r.rf->value; break; case HostCmd_CMD_802_11_EEPROM_ACCESS: r.eeprom = &resp->params.eeprom; - pr_debug("info: EEPROM read len=%x\n", r.eeprom->byte_count); - if (le16_to_cpu(eeprom->byte_count) < - le16_to_cpu(r.eeprom->byte_count)) { - eeprom->byte_count = cpu_to_le16(0); + pr_debug("info: EEPROM read len=%x\n", + le16_to_cpu(r.eeprom->byte_count)); + if (eeprom->byte_count < le16_to_cpu(r.eeprom->byte_count)) { + eeprom->byte_count = 0; pr_debug("info: EEPROM read length is too big\n"); return -1; } - eeprom->offset = r.eeprom->offset; - eeprom->byte_count = r.eeprom->byte_count; - if (le16_to_cpu(eeprom->byte_count) > 0) + eeprom->offset = le16_to_cpu(r.eeprom->offset); + eeprom->byte_count = le16_to_cpu(r.eeprom->byte_count); + if (eeprom->byte_count > 0) memcpy(&eeprom->value, &r.eeprom->value, - le16_to_cpu(r.eeprom->byte_count)); - + min((u16)MAX_EEPROM_DATA, eeprom->byte_count)); break; default: return -1; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index 0104108b4..a422f3306 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -40,8 +40,8 @@ * - Erases 
current SSID and BSSID information * - Sends a disconnect event to upper layers/applications. */ -void -mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) +void mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code, + bool from_ap) { struct mwifiex_adapter *adapter = priv->adapter; @@ -140,7 +140,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) if (priv->bss_mode == NL80211_IFTYPE_STATION || priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) { cfg80211_disconnected(priv->netdev, reason_code, NULL, 0, - false, GFP_KERNEL); + !from_ap, GFP_KERNEL); } eth_zero_addr(priv->cfg_bssid); @@ -474,8 +474,8 @@ void mwifiex_bt_coex_wlan_param_update_event(struct mwifiex_private *priv, scantlv = (struct mwifiex_ie_types_btcoex_scan_time *)tlv; adapter->coex_scan = scantlv->coex_scan; - adapter->coex_min_scan_time = scantlv->min_scan_time; - adapter->coex_max_scan_time = scantlv->max_scan_time; + adapter->coex_min_scan_time = le16_to_cpu(scantlv->min_scan_time); + adapter->coex_max_scan_time = le16_to_cpu(scantlv->max_scan_time); break; default: @@ -574,7 +574,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) if (priv->media_connected) { reason_code = le16_to_cpu(*(__le16 *)adapter->event_body); - mwifiex_reset_connect_state(priv, reason_code); + mwifiex_reset_connect_state(priv, reason_code, true); } break; @@ -589,7 +589,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) if (priv->media_connected) { reason_code = le16_to_cpu(*(__le16 *)adapter->event_body); - mwifiex_reset_connect_state(priv, reason_code); + mwifiex_reset_connect_state(priv, reason_code, true); } break; @@ -599,7 +599,7 @@ if (priv->media_connected) { reason_code = le16_to_cpu(*(__le16 *)adapter->event_body); - mwifiex_reset_connect_state(priv, reason_code); + mwifiex_reset_connect_state(priv, reason_code, true); } break; @@ -708,7 +708,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_EXT_SCAN_REPORT: mwifiex_dbg(adapter, EVENT, "event: EXT_SCAN Report\n"); - if (adapter->ext_scan) + if (adapter->ext_scan && !priv->scan_aborting) ret = mwifiex_handle_event_ext_scan_report(priv, adapter->event_skb->data); diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 8e0862657..e06647a32 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -426,6 +426,10 @@ done: if (bss_desc) kfree(bss_desc->beacon_buf); kfree(bss_desc); + + if (ret < 0) + priv->attempted_bss_desc = NULL; + return ret; } @@ -1247,7 +1251,7 @@ static int mwifiex_reg_mem_ioctl_reg_rw(struct mwifiex_private *priv, { u16 cmd_no; - switch (le32_to_cpu(reg_rw->type)) { + switch (reg_rw->type) { case MWIFIEX_REG_MAC: cmd_no = HostCmd_CMD_MAC_REG_ACCESS; break; @@ -1282,9 +1286,9 @@ mwifiex_reg_write(struct mwifiex_private *priv, u32 reg_type, { struct mwifiex_ds_reg_rw reg_rw; - reg_rw.type = cpu_to_le32(reg_type); - reg_rw.offset = cpu_to_le32(reg_offset); - reg_rw.value = cpu_to_le32(reg_value); + reg_rw.type = reg_type; + reg_rw.offset = reg_offset; + reg_rw.value = reg_value; return mwifiex_reg_mem_ioctl_reg_rw(priv, &reg_rw, HostCmd_ACT_GEN_SET); } @@ -1302,14 +1306,14 @@ mwifiex_reg_read(struct mwifiex_private *priv, u32 reg_type, int ret; struct mwifiex_ds_reg_rw reg_rw; - reg_rw.type = cpu_to_le32(reg_type); - reg_rw.offset = cpu_to_le32(reg_offset); + reg_rw.type = 
reg_type; + reg_rw.offset = reg_offset; ret = mwifiex_reg_mem_ioctl_reg_rw(priv, &reg_rw, HostCmd_ACT_GEN_GET); if (ret) goto done; - *value = le32_to_cpu(reg_rw.value); + *value = reg_rw.value; done: return ret; @@ -1328,15 +1332,16 @@ mwifiex_eeprom_read(struct mwifiex_private *priv, u16 offset, u16 bytes, int ret; struct mwifiex_ds_read_eeprom rd_eeprom; - rd_eeprom.offset = cpu_to_le16((u16) offset); - rd_eeprom.byte_count = cpu_to_le16((u16) bytes); + rd_eeprom.offset = offset; + rd_eeprom.byte_count = bytes; /* Send request to firmware */ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_EEPROM_ACCESS, HostCmd_ACT_GEN_GET, 0, &rd_eeprom, true); if (!ret) - memcpy(value, rd_eeprom.value, MAX_EEPROM_DATA); + memcpy(value, rd_eeprom.value, min((u16)MAX_EEPROM_DATA, + rd_eeprom.byte_count)); return ret; } diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c index f79d00d1e..a7e9f544f 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c @@ -19,6 +19,7 @@ #include "main.h" #include "11ac.h" +#include "11n.h" /* This function parses security related parameters from cfg80211_ap_settings * and sets into FW understandable bss_config structure. */ @@ -521,9 +522,9 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size) tlv += sizeof(struct host_cmd_tlv_rates) + i; } if (bss_cfg->channel && - ((bss_cfg->band_cfg == BAND_CONFIG_BG && + (((bss_cfg->band_cfg & BIT(0)) == BAND_CONFIG_BG && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) || - (bss_cfg->band_cfg == BAND_CONFIG_A && + ((bss_cfg->band_cfg & BIT(0)) == BAND_CONFIG_A && bss_cfg->channel <= MAX_CHANNEL_BAND_A))) { chan_band = (struct host_cmd_tlv_channel_band *)tlv; chan_band->header.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST); @@ -833,6 +834,31 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv, config_bands |= BAND_AAC; } + switch (chandef.width) { + case NL80211_CHAN_WIDTH_5: + case NL80211_CHAN_WIDTH_10: + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + break; + case NL80211_CHAN_WIDTH_40: + if (chandef.center_freq1 < chandef.chan->center_freq) + bss_cfg->band_cfg |= MWIFIEX_SEC_CHAN_BELOW; + else + bss_cfg->band_cfg |= MWIFIEX_SEC_CHAN_ABOVE; + break; + case NL80211_CHAN_WIDTH_80: + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + bss_cfg->band_cfg |= + mwifiex_get_sec_chan_offset(bss_cfg->channel) << 4; + break; + default: + mwifiex_dbg(priv->adapter, + WARN, "Unknown channel width: %d\n", + chandef.width); + break; + } + priv->adapter->config_bands = config_bands; if (old_bands != config_bands) { diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c index 666e91af5..bf5660eb2 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c @@ -272,7 +272,7 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv, int mwifiex_uap_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb) { - struct mwifiex_adapter *adapter = adapter; + struct mwifiex_adapter *adapter = priv->adapter; struct mwifiex_sta_node *src_node; struct ethhdr *p_ethhdr; struct sk_buff *skb_uap; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 870c9cd5c..4341d5680 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -135,7 +135,8 @@ struct 
rtl8xxxu_rxdesc16 { u32 seq:12; u32 frag:4; - u32 nextpktlen:14; + u32 pkt_cnt:8; + u32 reserved:6; u32 nextind:1; u32 reserved0:1; @@ -198,7 +199,8 @@ struct rtl8xxxu_rxdesc16 { u32 reserved0:1; u32 nextind:1; - u32 nextpktlen:14; + u32 reserved:6; + u32 pkt_cnt:8; u32 frag:4; u32 seq:12; @@ -1245,6 +1247,7 @@ struct rtl8xxxu_priv { u32 ep_tx_normal_queue:1; u32 ep_tx_low_queue:1; u32 has_xtalk:1; + u32 rx_buf_aggregation:1; u8 xtalk; unsigned int pipe_interrupt; unsigned int pipe_in; @@ -1315,8 +1318,7 @@ struct rtl8xxxu_fileops { void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv); void (*phy_iq_calibrate) (struct rtl8xxxu_priv *priv); void (*config_channel) (struct ieee80211_hw *hw); - int (*parse_rx_desc) (struct rtl8xxxu_priv *priv, struct sk_buff *skb, - struct ieee80211_rx_status *rx_status); + int (*parse_rx_desc) (struct rtl8xxxu_priv *priv, struct sk_buff *skb); void (*init_aggregation) (struct rtl8xxxu_priv *priv); void (*init_statistics) (struct rtl8xxxu_priv *priv); void (*enable_rf) (struct rtl8xxxu_priv *priv); @@ -1329,6 +1331,7 @@ struct rtl8xxxu_fileops { void (*report_connect) (struct rtl8xxxu_priv *priv, u8 macid, bool connect); int writeN_block_size; + int rx_agg_buf_size; char tx_desc_size; char rx_desc_size; char has_s0s1; @@ -1409,13 +1412,12 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv, u8 macid, bool connect); void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv, u8 macid, bool connect); +void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv); void rtl8xxxu_gen1_enable_rf(struct rtl8xxxu_priv *priv); void rtl8xxxu_gen1_disable_rf(struct rtl8xxxu_priv *priv); void rtl8xxxu_gen2_disable_rf(struct rtl8xxxu_priv *priv); -int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb, - struct ieee80211_rx_status *rx_status); -int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb, - struct ieee80211_rx_status *rx_status); +int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb); +int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb); int rtl8xxxu_gen2_channel_to_group(int channel); bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv, int result[][8], int c1, int c2); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c index 08066dec1..1bb6ba015 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c @@ -413,13 +413,8 @@ static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv) dev_info(&priv->udev->dev, "%s: dumping efuse (0x%02zx bytes):\n", __func__, sizeof(struct rtl8192cu_efuse)); - for (i = 0; i < sizeof(struct rtl8192cu_efuse); i += 8) { - dev_info(&priv->udev->dev, "%02x: " - "%02x %02x %02x %02x %02x %02x %02x %02x\n", i, - raw[i], raw[i + 1], raw[i + 2], - raw[i + 3], raw[i + 4], raw[i + 5], - raw[i + 6], raw[i + 7]); - } + for (i = 0; i < sizeof(struct rtl8192cu_efuse); i += 8) + dev_info(&priv->udev->dev, "%02x: %8ph\n", i, &raw[i]); } return 0; } @@ -565,6 +560,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = { .phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate, .config_channel = rtl8xxxu_gen1_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc16, + .init_aggregation = rtl8xxxu_gen1_init_aggregation, .enable_rf = rtl8xxxu_gen1_enable_rf, .disable_rf = rtl8xxxu_gen1_disable_rf, .usb_quirks = rtl8xxxu_gen1_usb_quirks, @@ -572,6 +568,7 @@ struct rtl8xxxu_fileops 
rtl8192cu_fops = { .update_rate_mask = rtl8xxxu_update_rate_mask, .report_connect = rtl8xxxu_gen1_report_connect, .writeN_block_size = 128, + .rx_agg_buf_size = 16000, .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32), .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16), .adda_1t_init = 0x0b1b25a0, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c index 9461ecd31..70e7c42cc 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c @@ -622,13 +622,8 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv) dev_info(&priv->udev->dev, "%s: dumping efuse (0x%02zx bytes):\n", __func__, sizeof(struct rtl8192eu_efuse)); - for (i = 0; i < sizeof(struct rtl8192eu_efuse); i += 8) { - dev_info(&priv->udev->dev, "%02x: " - "%02x %02x %02x %02x %02x %02x %02x %02x\n", i, - raw[i], raw[i + 1], raw[i + 2], - raw[i + 3], raw[i + 4], raw[i + 5], - raw[i + 6], raw[i + 7]); - } + for (i = 0; i < sizeof(struct rtl8192eu_efuse); i += 8) + dev_info(&priv->udev->dev, "%02x: %8ph\n", i, &raw[i]); } return 0; } @@ -1249,11 +1244,9 @@ static void rtl8192eu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) reg_e94 = result[i][0]; reg_e9c = result[i][1]; reg_ea4 = result[i][2]; - reg_eac = result[i][3]; reg_eb4 = result[i][4]; reg_ebc = result[i][5]; reg_ec4 = result[i][6]; - reg_ecc = result[i][7]; } if (candidate >= 0) { diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c index cd6bf209d..52688d5cd 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c @@ -377,6 +377,7 @@ struct rtl8xxxu_fileops rtl8723au_fops = { .phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate, .config_channel = rtl8xxxu_gen1_config_channel, .parse_rx_desc = rtl8xxxu_parse_rxdesc16, + .init_aggregation = rtl8xxxu_gen1_init_aggregation, .enable_rf = rtl8xxxu_gen1_enable_rf, .disable_rf = rtl8xxxu_gen1_disable_rf, .usb_quirks = rtl8xxxu_gen1_usb_quirks, @@ -384,6 +385,7 @@ struct rtl8xxxu_fileops rtl8723au_fops = { .update_rate_mask = rtl8xxxu_update_rate_mask, .report_connect = rtl8xxxu_gen1_report_connect, .writeN_block_size = 1024, + .rx_agg_buf_size = 16000, .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32), .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16), .adda_1t_init = 0x0b1b25a0, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c index 6f42f0a16..1348819f6 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c @@ -466,13 +466,8 @@ static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv) dev_info(&priv->udev->dev, "%s: dumping efuse (0x%02zx bytes):\n", __func__, sizeof(struct rtl8723bu_efuse)); - for (i = 0; i < sizeof(struct rtl8723bu_efuse); i += 8) { - dev_info(&priv->udev->dev, "%02x: " - "%02x %02x %02x %02x %02x %02x %02x %02x\n", i, - raw[i], raw[i + 1], raw[i + 2], - raw[i + 3], raw[i + 4], raw[i + 5], - raw[i + 6], raw[i + 7]); - } + for (i = 0; i < sizeof(struct rtl8723bu_efuse); i += 8) + dev_info(&priv->udev->dev, "%02x: %8ph\n", i, &raw[i]); } return 0; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 05a1ff26f..4b56f3921 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ 
b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -44,6 +44,9 @@ int rtl8xxxu_debug = RTL8XXXU_DEBUG_EFUSE; static bool rtl8xxxu_ht40_2g; +static bool rtl8xxxu_dma_aggregation; +static int rtl8xxxu_dma_agg_timeout = -1; +static int rtl8xxxu_dma_agg_pages = -1; MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>"); MODULE_DESCRIPTION("RTL8XXXu USB mac80211 Wireless LAN Driver"); @@ -54,10 +57,14 @@ module_param_named(debug, rtl8xxxu_debug, int, 0600); MODULE_PARM_DESC(debug, "Set debug mask"); module_param_named(ht40_2g, rtl8xxxu_ht40_2g, bool, 0600); MODULE_PARM_DESC(ht40_2g, "Enable HT40 support on the 2.4GHz band"); +module_param_named(dma_aggregation, rtl8xxxu_dma_aggregation, bool, 0600); +MODULE_PARM_DESC(dma_aggregation, "Enable DMA packet aggregation"); +module_param_named(dma_agg_timeout, rtl8xxxu_dma_agg_timeout, int, 0600); +MODULE_PARM_DESC(dma_agg_timeout, "Set DMA aggregation timeout (range 1-127)"); +module_param_named(dma_agg_pages, rtl8xxxu_dma_agg_pages, int, 0600); +MODULE_PARM_DESC(dma_agg_pages, "Set DMA aggregation pages (range 1-127, 0 to disable)"); #define USB_VENDOR_ID_REALTEK 0x0bda -/* Minimum IEEE80211_MAX_FRAME_LEN */ -#define RTL_RX_BUFFER_SIZE IEEE80211_MAX_FRAME_LEN #define RTL8XXXU_RX_URBS 32 #define RTL8XXXU_RX_URB_PENDING_WATER 8 #define RTL8XXXU_TX_URBS 64 @@ -4399,6 +4406,73 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv, rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt)); } +void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv) +{ + u8 agg_ctrl, usb_spec, page_thresh, timeout; + + usb_spec = rtl8xxxu_read8(priv, REG_USB_SPECIAL_OPTION); + usb_spec &= ~USB_SPEC_USB_AGG_ENABLE; + rtl8xxxu_write8(priv, REG_USB_SPECIAL_OPTION, usb_spec); + + agg_ctrl = rtl8xxxu_read8(priv, REG_TRXDMA_CTRL); + agg_ctrl &= ~TRXDMA_CTRL_RXDMA_AGG_EN; + + if (!rtl8xxxu_dma_aggregation) { + rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl); + return; + } + + agg_ctrl |= TRXDMA_CTRL_RXDMA_AGG_EN; + rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl); + + /* + * The number of packets we can take looks to be buffer size / 512 + * which matches the 512 byte rounding we have to do when de-muxing + * the packets. + * + * Sample numbers from the vendor driver: + * USB High-Speed mode values: + * RxAggBlockCount = 8 : 512 byte unit + * RxAggBlockTimeout = 6 + * RxAggPageCount = 48 : 128 byte unit + * RxAggPageTimeout = 4 or 6 (absolute time 34ms/(2^6)) + */ + + page_thresh = (priv->fops->rx_agg_buf_size / 512); + if (rtl8xxxu_dma_agg_pages >= 0) { + if (rtl8xxxu_dma_agg_pages < 6) + dev_err(&priv->udev->dev, + "%s: dma_agg_pages=%i too small, minimum is 6\n", + __func__, rtl8xxxu_dma_agg_pages); + else if (rtl8xxxu_dma_agg_pages <= page_thresh) + page_thresh = rtl8xxxu_dma_agg_pages; + else + dev_err(&priv->udev->dev, + "%s: dma_agg_pages=%i larger than limit %i\n", + __func__, rtl8xxxu_dma_agg_pages, page_thresh); + } + rtl8xxxu_write8(priv, REG_RXDMA_AGG_PG_TH, page_thresh); + /* + * REG_RXDMA_AGG_PG_TH + 1 seems to be the timeout register on + * gen2 chips and rtl8188eu. The rtl8723au seems unhappy if we + * don't set it, so better set both. 
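+ *
+ * Worked example, assuming the vendor numbers above hold (the
+ * units are not confirmed by any datasheet): with the default
+ * rx_agg_buf_size of 16000 the page threshold works out to
+ * 16000 / 512 = 31 blocks, and one timeout unit appears to be
+ * 34ms / 2^6, i.e. roughly 0.53ms, so the default timeout of 4
+ * flushes a partially filled aggregate after about 2ms.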
+ */ + timeout = 4; + + if (rtl8xxxu_dma_agg_timeout >= 0) { + if (rtl8xxxu_dma_agg_timeout <= 127) + timeout = rtl8xxxu_dma_agg_timeout; + else + dev_err(&priv->udev->dev, + "%s: Invalid dma_agg_timeout: %i\n", + __func__, rtl8xxxu_dma_agg_timeout); + } + + rtl8xxxu_write8(priv, REG_RXDMA_AGG_PG_TH + 1, timeout); + rtl8xxxu_write8(priv, REG_USB_DMA_AGG_TO, timeout); + priv->rx_buf_aggregation = 1; +} + static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg) { u32 val32; @@ -5037,55 +5111,143 @@ static void rtl8xxxu_rx_urb_work(struct work_struct *work) } } -int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb, - struct ieee80211_rx_status *rx_status) +static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv, + struct sk_buff *skb) +{ + struct rtl8723bu_c2h *c2h = (struct rtl8723bu_c2h *)skb->data; + struct device *dev = &priv->udev->dev; + int len; + + len = skb->len - 2; + + dev_dbg(dev, "C2H ID %02x seq %02x, len %02x source %02x\n", + c2h->id, c2h->seq, len, c2h->bt_info.response_source); + + switch(c2h->id) { + case C2H_8723B_BT_INFO: + if (c2h->bt_info.response_source > + BT_INFO_SRC_8723B_BT_ACTIVE_SEND) + dev_dbg(dev, "C2H_BT_INFO WiFi only firmware\n"); + else + dev_dbg(dev, "C2H_BT_INFO BT/WiFi coexist firmware\n"); + + if (c2h->bt_info.bt_has_reset) + dev_dbg(dev, "BT has been reset\n"); + if (c2h->bt_info.tx_rx_mask) + dev_dbg(dev, "BT TRx mask\n"); + + break; + case C2H_8723B_BT_MP_INFO: + dev_dbg(dev, "C2H_MP_INFO ext ID %02x, status %02x\n", + c2h->bt_mp_info.ext_id, c2h->bt_mp_info.status); + break; + case C2H_8723B_RA_REPORT: + dev_dbg(dev, + "C2H RA RPT: rate %02x, unk %i, macid %02x, noise %i\n", + c2h->ra_report.rate, c2h->ra_report.dummy0_0, + c2h->ra_report.macid, c2h->ra_report.noisy_state); + break; + default: + dev_info(dev, "Unhandled C2H event %02x seq %02x\n", + c2h->id, c2h->seq); + print_hex_dump(KERN_INFO, "C2H content: ", DUMP_PREFIX_NONE, + 16, 1, c2h->raw.payload, len, false); + break; + } +} + +int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb) { - struct rtl8xxxu_rxdesc16 *rx_desc = - (struct rtl8xxxu_rxdesc16 *)skb->data; + struct ieee80211_hw *hw = priv->hw; + struct ieee80211_rx_status *rx_status; + struct rtl8xxxu_rxdesc16 *rx_desc; struct rtl8723au_phy_stats *phy_stats; - __le32 *_rx_desc_le = (__le32 *)skb->data; - u32 *_rx_desc = (u32 *)skb->data; + struct sk_buff *next_skb = NULL; + __le32 *_rx_desc_le; + u32 *_rx_desc; int drvinfo_sz, desc_shift; - int i; + int i, pkt_cnt, pkt_len, urb_len, pkt_offset; - for (i = 0; i < (sizeof(struct rtl8xxxu_rxdesc16) / sizeof(u32)); i++) - _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]); + urb_len = skb->len; + pkt_cnt = 0; - skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc16)); + do { + rx_desc = (struct rtl8xxxu_rxdesc16 *)skb->data; + _rx_desc_le = (__le32 *)skb->data; + _rx_desc = (u32 *)skb->data; - phy_stats = (struct rtl8723au_phy_stats *)skb->data; + for (i = 0; + i < (sizeof(struct rtl8xxxu_rxdesc16) / sizeof(u32)); i++) + _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]); - drvinfo_sz = rx_desc->drvinfo_sz * 8; - desc_shift = rx_desc->shift; - skb_pull(skb, drvinfo_sz + desc_shift); + /* + * Only read pkt_cnt from the header if we're parsing the + * first packet + */ + if (!pkt_cnt) + pkt_cnt = rx_desc->pkt_cnt; + pkt_len = rx_desc->pktlen; - if (rx_desc->phy_stats) - rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats, - rx_desc->rxmcs); + drvinfo_sz = rx_desc->drvinfo_sz * 8; + desc_shift = rx_desc->shift; + pkt_offset = roundup(pkt_len + 
drvinfo_sz + desc_shift + + sizeof(struct rtl8xxxu_rxdesc16), 128); - rx_status->mactime = le32_to_cpu(rx_desc->tsfl); - rx_status->flag |= RX_FLAG_MACTIME_START; + if (pkt_cnt > 1) + next_skb = skb_clone(skb, GFP_ATOMIC); - if (!rx_desc->swdec) - rx_status->flag |= RX_FLAG_DECRYPTED; - if (rx_desc->crc32) - rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; - if (rx_desc->bw) - rx_status->flag |= RX_FLAG_40MHZ; + rx_status = IEEE80211_SKB_RXCB(skb); + memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); - if (rx_desc->rxht) { - rx_status->flag |= RX_FLAG_HT; - rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0; - } else { - rx_status->rate_idx = rx_desc->rxmcs; - } + skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc16)); + + phy_stats = (struct rtl8723au_phy_stats *)skb->data; + + skb_pull(skb, drvinfo_sz + desc_shift); + + skb_trim(skb, pkt_len); + + if (rx_desc->phy_stats) + rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats, + rx_desc->rxmcs); + + rx_status->mactime = le32_to_cpu(rx_desc->tsfl); + rx_status->flag |= RX_FLAG_MACTIME_START; + + if (!rx_desc->swdec) + rx_status->flag |= RX_FLAG_DECRYPTED; + if (rx_desc->crc32) + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + if (rx_desc->bw) + rx_status->flag |= RX_FLAG_40MHZ; + + if (rx_desc->rxht) { + rx_status->flag |= RX_FLAG_HT; + rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0; + } else { + rx_status->rate_idx = rx_desc->rxmcs; + } + + rx_status->freq = hw->conf.chandef.chan->center_freq; + rx_status->band = hw->conf.chandef.chan->band; + + ieee80211_rx_irqsafe(hw, skb); + + skb = next_skb; + if (skb) + skb_pull(next_skb, pkt_offset); + + pkt_cnt--; + urb_len -= pkt_offset; + } while (skb && urb_len > 0 && pkt_cnt > 0); return RX_TYPE_DATA_PKT; } -int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb, - struct ieee80211_rx_status *rx_status) +int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb) { + struct ieee80211_hw *hw = priv->hw; + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct rtl8xxxu_rxdesc24 *rx_desc = (struct rtl8xxxu_rxdesc24 *)skb->data; struct rtl8723au_phy_stats *phy_stats; @@ -5097,6 +5259,8 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb, for (i = 0; i < (sizeof(struct rtl8xxxu_rxdesc24) / sizeof(u32)); i++) _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]); + memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); + skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc24)); phy_stats = (struct rtl8723au_phy_stats *)skb->data; @@ -5108,6 +5272,8 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb, if (rx_desc->rpt_sel) { struct device *dev = &priv->udev->dev; dev_dbg(dev, "%s: C2H packet\n", __func__); + rtl8723bu_handle_c2h(priv, skb); + dev_kfree_skb(skb); return RX_TYPE_C2H; } @@ -5132,52 +5298,11 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb, rx_status->rate_idx = rx_desc->rxmcs; } - return RX_TYPE_DATA_PKT; -} - -static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv, - struct sk_buff *skb) -{ - struct rtl8723bu_c2h *c2h = (struct rtl8723bu_c2h *)skb->data; - struct device *dev = &priv->udev->dev; - int len; - - len = skb->len - 2; - - dev_dbg(dev, "C2H ID %02x seq %02x, len %02x source %02x\n", - c2h->id, c2h->seq, len, c2h->bt_info.response_source); - - switch(c2h->id) { - case C2H_8723B_BT_INFO: - if (c2h->bt_info.response_source > - BT_INFO_SRC_8723B_BT_ACTIVE_SEND) - dev_dbg(dev, "C2H_BT_INFO WiFi only firmware\n"); - else - dev_dbg(dev, "C2H_BT_INFO BT/WiFi 
coexist firmware\n"); - - if (c2h->bt_info.bt_has_reset) - dev_dbg(dev, "BT has been reset\n"); - if (c2h->bt_info.tx_rx_mask) - dev_dbg(dev, "BT TRx mask\n"); + rx_status->freq = hw->conf.chandef.chan->center_freq; + rx_status->band = hw->conf.chandef.chan->band; - break; - case C2H_8723B_BT_MP_INFO: - dev_dbg(dev, "C2H_MP_INFO ext ID %02x, status %02x\n", - c2h->bt_mp_info.ext_id, c2h->bt_mp_info.status); - break; - case C2H_8723B_RA_REPORT: - dev_dbg(dev, - "C2H RA RPT: rate %02x, unk %i, macid %02x, noise %i\n", - c2h->ra_report.rate, c2h->ra_report.dummy0_0, - c2h->ra_report.macid, c2h->ra_report.noisy_state); - break; - default: - dev_info(dev, "Unhandled C2H event %02x seq %02x\n", - c2h->id, c2h->seq); - print_hex_dump(KERN_INFO, "C2H content: ", DUMP_PREFIX_NONE, - 16, 1, c2h->raw.payload, len, false); - break; - } + ieee80211_rx_irqsafe(hw, skb); + return RX_TYPE_DATA_PKT; } static void rtl8xxxu_rx_complete(struct urb *urb) @@ -5187,26 +5312,12 @@ static void rtl8xxxu_rx_complete(struct urb *urb) struct ieee80211_hw *hw = rx_urb->hw; struct rtl8xxxu_priv *priv = hw->priv; struct sk_buff *skb = (struct sk_buff *)urb->context; - struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct device *dev = &priv->udev->dev; - int rx_type; skb_put(skb, urb->actual_length); if (urb->status == 0) { - memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); - - rx_type = priv->fops->parse_rx_desc(priv, skb, rx_status); - - rx_status->freq = hw->conf.chandef.chan->center_freq; - rx_status->band = hw->conf.chandef.chan->band; - - if (rx_type == RX_TYPE_DATA_PKT) - ieee80211_rx_irqsafe(hw, skb); - else { - rtl8723bu_handle_c2h(priv, skb); - dev_kfree_skb(skb); - } + priv->fops->parse_rx_desc(priv, skb); skb = NULL; rx_urb->urb.context = NULL; @@ -5226,12 +5337,20 @@ cleanup: static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv, struct rtl8xxxu_rx_urb *rx_urb) { + struct rtl8xxxu_fileops *fops = priv->fops; struct sk_buff *skb; int skb_size; int ret, rx_desc_sz; - rx_desc_sz = priv->fops->rx_desc_size; - skb_size = rx_desc_sz + RTL_RX_BUFFER_SIZE; + rx_desc_sz = fops->rx_desc_size; + + if (priv->rx_buf_aggregation && fops->rx_agg_buf_size) { + skb_size = fops->rx_agg_buf_size; + skb_size += (rx_desc_sz + sizeof(struct rtl8723au_phy_stats)); + } else { + skb_size = IEEE80211_MAX_FRAME_LEN; + } + skb = __netdev_alloc_skb(NULL, skb_size, GFP_KERNEL); if (!skb) return -ENOMEM; @@ -5259,7 +5378,7 @@ static void rtl8xxxu_int_complete(struct urb *urb) if (ret) usb_unanchor_urb(urb); } else { - dev_info(dev, "%s: Error %i\n", __func__, urb->status); + dev_dbg(dev, "%s: Error %i\n", __func__, urb->status); } } diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index b0e0c6423..921c5653f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -405,7 +405,11 @@ #define REG_DWBCN1_CTRL_8723B 0x0228 /* 0x0280 ~ 0x02FF RXDMA Configuration */ -#define REG_RXDMA_AGG_PG_TH 0x0280 +#define REG_RXDMA_AGG_PG_TH 0x0280 /* 0-7 : USB DMA size bits + 8-14: USB DMA timeout + 15 : Aggregation enable + Only seems to be used + on 8723bu/8192eu */ #define RXDMA_USB_AGG_ENABLE BIT(31) #define REG_RXPKT_NUM 0x0284 #define RXPKT_NUM_RXDMA_IDLE BIT(17) @@ -1052,10 +1056,14 @@ #define USB_HIMR_ROK BIT(0) /* Receive DMA OK Interrupt */ #define REG_USB_SPECIAL_OPTION 0xfe55 +#define USB_SPEC_USB_AGG_ENABLE BIT(3) /* Enable USB aggregation */ +#define 
USB_SPEC_INT_BULK_SELECT BIT(4) /* Use interrupt endpoint to + deliver interrupt packet. + 0: Use int, 1: use bulk */ #define REG_USB_HRPWM 0xfe58 #define REG_USB_DMA_AGG_TO 0xfe5b -#define REG_USB_AGG_TO 0xfe5c -#define REG_USB_AGG_TH 0xfe5d +#define REG_USB_AGG_TIMEOUT 0xfe5c +#define REG_USB_AGG_THRESH 0xfe5d #define REG_NORMAL_SIE_VID 0xfe60 /* 0xfe60 - 0xfe61 */ #define REG_NORMAL_SIE_PID 0xfe62 /* 0xfe62 - 0xfe63 */ diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index b660c214d..91cc1397b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -901,7 +901,7 @@ void exhalbtc_stack_update_profile_info(void) { } -void exhalbtc_update_min_bt_rssi(char bt_rssi) +void exhalbtc_update_min_bt_rssi(s8 bt_rssi) { struct btc_coexist *btcoexist = &gl_bt_coexist; diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h index 3cbe34c53..3d308ebbe 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h @@ -433,7 +433,7 @@ struct btc_stack_info { u8 num_of_hid; bool pan_exist; bool unknown_acl_exist; - char min_bt_rssi; + s8 min_bt_rssi; }; struct btc_statistics { @@ -537,7 +537,7 @@ void exhalbtc_dbg_control(struct btc_coexist *btcoexist, u8 code, u8 len, void exhalbtc_stack_update_profile_info(void); void exhalbtc_set_hci_version(u16 hci_version); void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version); -void exhalbtc_update_min_bt_rssi(char bt_rssi); +void exhalbtc_update_min_bt_rssi(s8 bt_rssi); void exhalbtc_set_bt_exist(bool bt_exist); void exhalbtc_set_chip_type(u8 chip_type); void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num); diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index e6e81296c..9698cc583 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -526,7 +526,7 @@ static void _rtl_add_wowlan_patterns(struct ieee80211_hw *hw, /* 3. calculate crc */ rtl_pattern.crc = _calculate_wol_pattern_crc(content, len); RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE, - "CRC_Remainder = 0x%x", rtl_pattern.crc); + "CRC_Remainder = 0x%x\n", rtl_pattern.crc); /* 4. write crc & mask_for_hw to hw */ rtlpriv->cfg->ops->add_wowlan_pattern(hw, &rtl_pattern, i); diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c index fd25abad2..33905bbac 100644 --- a/drivers/net/wireless/realtek/rtlwifi/debug.c +++ b/drivers/net/wireless/realtek/rtlwifi/debug.c @@ -48,3 +48,28 @@ void rtl_dbgp_flag_init(struct ieee80211_hw *hw) /*Init Debug flag enable condition */ } EXPORT_SYMBOL_GPL(rtl_dbgp_flag_init); + +#ifdef CONFIG_RTLWIFI_DEBUG +void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level, + const char *modname, const char *fmt, ...) 
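+/*
+ * Out-of-line body for the RT_TRACE() macro below: the component and
+ * level filter is evaluated once here instead of being expanded at
+ * every call site, and the va_format mechanism (%pV) forwards the
+ * caller's format string and arguments without an intermediate buffer.
+ */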
+{ + if (unlikely((comp & rtlpriv->dbg.global_debugcomponents) && + (level <= rtlpriv->dbg.global_debuglevel))) { + struct va_format vaf; + va_list args; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + printk(KERN_DEBUG "%s:%ps:<%lx-%x> %pV", + modname, __builtin_return_address(0), + in_interrupt(), in_atomic(), + &vaf); + + va_end(args); + } +} +EXPORT_SYMBOL_GPL(_rtl_dbg_trace); +#endif diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h index fc794b3e9..6156a7932 100644 --- a/drivers/net/wireless/realtek/rtlwifi/debug.h +++ b/drivers/net/wireless/realtek/rtlwifi/debug.h @@ -174,15 +174,16 @@ do { \ } \ } while (0) + +struct rtl_priv; + +__printf(5, 6) +void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level, + const char *modname, const char *fmt, ...); + #define RT_TRACE(rtlpriv, comp, level, fmt, ...) \ -do { \ - if (unlikely(((comp) & rtlpriv->dbg.global_debugcomponents) && \ - ((level) <= rtlpriv->dbg.global_debuglevel))) { \ - printk(KERN_DEBUG KBUILD_MODNAME ":%s():<%lx-%x> " fmt, \ - __func__, in_interrupt(), in_atomic(), \ - ##__VA_ARGS__); \ - } \ -} while (0) + _rtl_dbg_trace(rtlpriv, comp, level, \ + KBUILD_MODNAME, fmt, ##__VA_ARGS__) #define RTPRINT(rtlpriv, dbgtype, dbgflag, fmt, ...) \ do { \ diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c index 0b4082c92..7becfef6c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.c +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c @@ -24,6 +24,7 @@ *****************************************************************************/ #include "wifi.h" #include "efuse.h" +#include "pci.h" #include <linux/export.h> static const u8 MAX_PGPKT_SIZE = 9; @@ -1243,3 +1244,80 @@ static u8 efuse_calculate_word_cnts(u8 word_en) return word_cnts; } +int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv, + int max_size, u8 *hwinfo, int *params) +{ + struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); + struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw); + struct device *dev = &rtlpcipriv->dev.pdev->dev; + u16 eeprom_id; + u16 i, usvalue; + + switch (rtlefuse->epromtype) { + case EEPROM_BOOT_EFUSE: + rtl_efuse_shadow_map_update(hw); + break; + + case EEPROM_93C46: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + "RTL8XXX did not boot from eeprom, check it !!\n"); + return 1; + + default: + dev_warn(dev, "no efuse data\n"); + return 1; + } + + memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], max_size); + + RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP", + hwinfo, max_size); + + eeprom_id = *((u16 *)&hwinfo[0]); + if (eeprom_id != params[0]) { + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, + "EEPROM ID(%#x) is invalid!!\n", eeprom_id); + rtlefuse->autoload_failflag = true; + } else { + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); + rtlefuse->autoload_failflag = false; + } + + if (rtlefuse->autoload_failflag) + return 1; + + rtlefuse->eeprom_vid = *(u16 *)&hwinfo[params[1]]; + rtlefuse->eeprom_did = *(u16 *)&hwinfo[params[2]]; + rtlefuse->eeprom_svid = *(u16 *)&hwinfo[params[3]]; + rtlefuse->eeprom_smid = *(u16 *)&hwinfo[params[4]]; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROMId = 0x%4x\n", eeprom_id); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid); + 
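/*
+ * By convention of the callers converted below, params[] holds:
+ * [0] the expected EEPROM ID, [1]-[4] the VID/DID/SVID/SMID
+ * offsets, [5] the MAC address offset, [6] the channel plan
+ * offset, [7] the version offset, [8] the customer ID offset,
+ * and [9] the default channel plan value.
+ */
+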
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid); + + for (i = 0; i < 6; i += 2) { + usvalue = *(u16 *)&hwinfo[params[5] + i]; + *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue; + } + RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr); + + rtlefuse->eeprom_channelplan = hwinfo[params[6]]; + rtlefuse->eeprom_version = *(u16 *)&hwinfo[params[7]]; + rtlefuse->txpwr_fromeprom = true; + rtlefuse->eeprom_oemid = hwinfo[params[8]]; + + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); + + /* set channel plan to world wide 13 */ + rtlefuse->channel_plan = params[9]; + + return 0; +} +EXPORT_SYMBOL_GPL(rtl_get_hwinfo); diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h index be02e7894..51aa1210d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.h +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.h @@ -109,5 +109,7 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw); void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw); void efuse_force_write_vendor_Id(struct ieee80211_hw *hw); void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx); +int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv, + int max_size, u8 *hwinfo, int *params); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c index 93579cac0..9a64f9b70 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.c +++ b/drivers/net/wireless/realtek/rtlwifi/ps.c @@ -76,9 +76,9 @@ bool rtl_ps_disable_nic(struct ieee80211_hw *hw) } EXPORT_SYMBOL(rtl_ps_disable_nic); -bool rtl_ps_set_rf_state(struct ieee80211_hw *hw, - enum rf_pwrstate state_toset, - u32 changesource, bool protect_or_not) +static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw, + enum rf_pwrstate state_toset, + u32 changesource) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); @@ -86,9 +86,6 @@ bool rtl_ps_set_rf_state(struct ieee80211_hw *hw, bool actionallowed = false; u16 rfwait_cnt = 0; - if (protect_or_not) - goto no_protect; - /*Only one thread can change *the RF state at one time, and others *should wait to be executed. 
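 *
 * (Every caller now goes through the rf_ps_lock handshake below;
 * there is no longer an unprotected shortcut that skips the
 * rfchange_inprogress bookkeeping.)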
@@ -119,7 +116,6 @@ bool rtl_ps_set_rf_state(struct ieee80211_hw *hw, } } -no_protect: rtstate = ppsc->rfpwr_state; switch (state_toset) { @@ -162,15 +158,12 @@ no_protect: if (actionallowed) rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset); - if (!protect_or_not) { - spin_lock(&rtlpriv->locks.rf_ps_lock); - ppsc->rfchange_inprogress = false; - spin_unlock(&rtlpriv->locks.rf_ps_lock); - } + spin_lock(&rtlpriv->locks.rf_ps_lock); + ppsc->rfchange_inprogress = false; + spin_unlock(&rtlpriv->locks.rf_ps_lock); return actionallowed; } -EXPORT_SYMBOL(rtl_ps_set_rf_state); static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw) { @@ -191,7 +184,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw) } rtl_ps_set_rf_state(hw, ppsc->inactive_pwrstate, - RF_CHANGE_BY_IPS, false); + RF_CHANGE_BY_IPS); if (ppsc->inactive_pwrstate == ERFOFF && rtlhal->interface == INTF_PCI) { @@ -587,7 +580,7 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw) } spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); - rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS, false); + rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS); spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); } @@ -630,7 +623,7 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw) spin_unlock(&rtlpriv->locks.rf_ps_lock); spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag); - rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS , false); + rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS); spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag); if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM && diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.h b/drivers/net/wireless/realtek/rtlwifi/ps.h index 29dfc5142..0df2b5203 100644 --- a/drivers/net/wireless/realtek/rtlwifi/ps.h +++ b/drivers/net/wireless/realtek/rtlwifi/ps.h @@ -28,9 +28,6 @@ #define MAX_SW_LPS_SLEEP_INTV 5 -bool rtl_ps_set_rf_state(struct ieee80211_hw *hw, - enum rf_pwrstate state_toset, u32 changesource, - bool protect_or_not); bool rtl_ps_enable_nic(struct ieee80211_hw *hw); bool rtl_ps_disable_nic(struct ieee80211_hw *hw); void rtl_ips_nic_off(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c index 1aca77719..ce8621a0f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rc.c @@ -94,7 +94,7 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv, struct ieee80211_sta *sta, struct ieee80211_tx_rate *rate, struct ieee80211_tx_rate_control *txrc, - u8 tries, char rix, int rtsctsenable, + u8 tries, s8 rix, int rtsctsenable, bool not_data) { struct rtl_mac *mac = rtl_mac(rtlpriv); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile index a85419a37..676e7de27 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile @@ -12,4 +12,4 @@ rtl8188ee-objs := \ obj-$(CONFIG_RTL8188EE) += rtl8188ee.o -ccflags-y += -Idrivers/net/wireless/rtlwifi -D__CHECK_ENDIAN__ +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c index db9a7829d..f936a4913 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c @@ -886,7 +886,7 @@ static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw) u8 thermalvalue_avg_count = 0; u32 thermalvalue_avg = 0; long ele_d, temp_cck; - char 
ofdm_index[2], cck_index = 0, + s8 ofdm_index[2], cck_index = 0, ofdm_index_old[2] = {0, 0}, cck_index_old = 0; int i = 0; /*bool is2t = false;*/ @@ -898,7 +898,7 @@ static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw) /*0.1 the following TWO tables decide the *final index of OFDM/CCK swing table */ - char delta_swing_table_idx[2][15] = { + s8 delta_swing_table_idx[2][15] = { {0, 0, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11}, {0, 0, -1, -2, -3, -4, -4, -4, -4, -5, -7, -8, -9, -9, -10} }; @@ -1790,6 +1790,7 @@ void rtl88e_dm_watchdog(struct ieee80211_hw *hw) if (ppsc->p2p_ps_info.p2p_ps_mode) fw_ps_awake = false; + spin_lock(&rtlpriv->locks.rf_ps_lock); if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) && fw_ps_awake) && (!ppsc->rfchange_inprogress)) { @@ -1802,4 +1803,5 @@ void rtl88e_dm_watchdog(struct ieee80211_hw *hw) rtl88e_dm_check_edca_turbo(hw); rtl88e_dm_antenna_diversity(hw); } + spin_unlock(&rtlpriv->locks.rf_ps_lock); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c index 8ee83b093..4ab6201da 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c @@ -1835,74 +1835,24 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u16 i, usvalue; - u8 hwinfo[HWSET_MAX_SIZE]; - u16 eeprom_id; - - if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { - rtl_efuse_shadow_map_update(hw); - - memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], - HWSET_MAX_SIZE); - } else if (rtlefuse->epromtype == EEPROM_93C46) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "RTL819X Not boot from eeprom, check it !!"); - return; - } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "boot from neither eeprom nor efuse, check it !!"); + int params[] = {RTL8188E_EEPROM_ID, EEPROM_VID, EEPROM_DID, + EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR, + EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID, + COUNTRY_CODE_WORLD_WIDE_13}; + u8 *hwinfo; + + hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL); + if (!hwinfo) return; - } - - RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n", - hwinfo, HWSET_MAX_SIZE); - eeprom_id = *((u16 *)&hwinfo[0]); - if (eeprom_id != RTL8188E_EEPROM_ID) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "EEPROM ID(%#x) is invalid!!\n", eeprom_id); - rtlefuse->autoload_failflag = true; - } else { - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); - rtlefuse->autoload_failflag = false; - } + if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params)) + goto exit; - if (rtlefuse->autoload_failflag == true) - return; - /*VID DID SVID SDID*/ - rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID]; - rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID]; - rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID]; - rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID]; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROMId = 0x%4x\n", eeprom_id); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid); - /*customer ID*/ - rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID]; if 
(rtlefuse->eeprom_oemid == 0xFF) - rtlefuse->eeprom_oemid = 0; + rtlefuse->eeprom_oemid = 0; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); - /*EEPROM version*/ - rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; - /*mac address*/ - for (i = 0; i < 6; i += 2) { - usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i]; - *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue; - } - - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "dev_addr: %pM\n", rtlefuse->dev_addr); - /*channel plan */ - rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN]; /* set channel plan from efuse */ rtlefuse->channel_plan = rtlefuse->eeprom_channelplan; /*tx power*/ @@ -1976,6 +1926,8 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw) } } +exit: + kfree(hwinfo); } static void _rtl88ee_hal_customized_behavior(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c index 416a9ba63..7498a1218 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c @@ -373,7 +373,7 @@ static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw) rtstatus = phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); return false; } @@ -383,7 +383,7 @@ static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw) phy_config_bb_with_pghdr(hw, BASEBAND_CONFIG_PHY_REG); } if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); return false; } rtstatus = @@ -1239,7 +1239,7 @@ u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw) if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { rtl88e_phy_sw_chnl_callback(hw); RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, - "sw_chnl_inprogress false schdule workitem current channel %d\n", + "sw_chnl_inprogress false schedule workitem current channel %d\n", rtlphy->current_channel); rtlphy->sw_chnl_inprogress = false; } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c index 40893cef7..26ac4c290 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c @@ -498,7 +498,7 @@ static bool _rtl88e_phy_rf6052_config_parafile(struct ieee80211_hw *hw) if (rtstatus != true) { RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "Radio[%d] Fail!!", rfpath); + "Radio[%d] Fail!!\n", rfpath); return false; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index 11701064b..3e3b88664 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -59,7 +59,7 @@ static void _rtl88ee_query_rxphystatus(struct ieee80211_hw *hw, struct phy_status_rpt *phystrpt = (struct phy_status_rpt *)p_drvinfo; struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); - char rx_pwr_all = 0, rx_pwr[4]; + s8 rx_pwr_all = 0, rx_pwr[4]; u8 rf_rx_num = 0, evm, pwdb_all; u8 i, max_spatial_stream; u32 rssi, total_rssi = 0; @@ -540,7 +540,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, - "DMA mapping error"); + "DMA mapping 
error\n"); return; } CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_88e)); @@ -703,7 +703,7 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw, if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, - "DMA mapping error"); + "DMA mapping error\n"); return; } CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h index 5a24d194a..9a1c2087a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h @@ -593,8 +593,8 @@ struct rx_fwinfo_88e { u8 pwdb_all; u8 cfosho[4]; u8 cfotail[4]; - char rxevm[2]; - char rxsnr[4]; + s8 rxevm[2]; + s8 rxsnr[4]; u8 pdsnr[2]; u8 csi_current[2]; u8 csi_target[2]; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h index 4422e31fe..6a72d0c8a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h @@ -135,7 +135,7 @@ void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw); void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw); void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw); void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal); -void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta); +void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta); void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw); void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool recovery); void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c index 77e61b19b..60ab2ec4f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c @@ -213,7 +213,7 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); return false; } if (rtlphy->rf_type == RF_1T2R) { @@ -226,7 +226,7 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) BASEBAND_CONFIG_PHY_REG); } if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); return false; } rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw, @@ -757,7 +757,7 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw) if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { rtl92c_phy_sw_chnl_callback(hw); RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, - "sw_chnl_inprogress false schdule workitem\n"); + "sw_chnl_inprogress false schedule workitem\n"); rtlphy->sw_chnl_inprogress = false; } else { RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, @@ -1353,7 +1353,7 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, } static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, - char delta, bool is2t) + s8 delta, bool is2t) { } @@ -1518,7 +1518,7 @@ void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw) } EXPORT_SYMBOL(rtl92c_phy_lc_calibrate); -void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta) +void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta) { struct 
rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h index 64bc49f4d..202412577 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h @@ -210,7 +210,7 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw); void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery); void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval); -void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta); +void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta); void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw); void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c index 04eb5c3f8..244607951 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c @@ -1680,58 +1680,18 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u16 i, usvalue; - u8 hwinfo[HWSET_MAX_SIZE]; - u16 eeprom_id; - - if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { - rtl_efuse_shadow_map_update(hw); - - memcpy((void *)hwinfo, - (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], - HWSET_MAX_SIZE); - } else if (rtlefuse->epromtype == EEPROM_93C46) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "RTL819X Not boot from eeprom, check it !!"); - } - - RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP", - hwinfo, HWSET_MAX_SIZE); - - eeprom_id = *((u16 *)&hwinfo[0]); - if (eeprom_id != RTL8190_EEPROM_ID) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "EEPROM ID(%#x) is invalid!!\n", eeprom_id); - rtlefuse->autoload_failflag = true; - } else { - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); - rtlefuse->autoload_failflag = false; - } - - if (rtlefuse->autoload_failflag) + int params[] = {RTL8190_EEPROM_ID, EEPROM_VID, EEPROM_DID, + EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR, + EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID, + COUNTRY_CODE_WORLD_WIDE_13}; + u8 *hwinfo; + + hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL); + if (!hwinfo) return; - rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID]; - rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID]; - rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID]; - rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID]; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROMId = 0x%4x\n", eeprom_id); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid); - - for (i = 0; i < 6; i += 2) { - usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i]; - *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue; - } - - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr); + if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params)) + goto exit; _rtl92ce_read_txpower_info_from_hwpg(hw, 
rtlefuse->autoload_failflag, @@ -1740,18 +1700,6 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw) rtl8192ce_read_bt_coexist_info_from_hwpg(hw, rtlefuse->autoload_failflag, hwinfo); - - rtlefuse->eeprom_channelplan = *&hwinfo[EEPROM_CHANNELPLAN]; - rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; - rtlefuse->txpwr_fromeprom = true; - rtlefuse->eeprom_oemid = *&hwinfo[EEPROM_CUSTOMER_ID]; - - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); - - /* set channel paln to world wide 13 */ - rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13; - if (rtlhal->oem_id == RT_CID_DEFAULT) { switch (rtlefuse->eeprom_oemid) { case EEPROM_CID_DEFAULT: @@ -1775,10 +1723,10 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw) default: rtlhal->oem_id = RT_CID_DEFAULT; break; - } } - +exit: + kfree(hwinfo); } static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h index e5e1353a9..dadc02b5d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h @@ -102,7 +102,7 @@ void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw); u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw); void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery); void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval); -void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta); +void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta); void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw); void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t); void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c index 84ddd4d07..781af1b99 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c @@ -49,7 +49,7 @@ static u8 _rtl92ce_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue) return skb->priority; } -static u8 _rtl92c_query_rxpwrpercentage(char antpower) +static u8 _rtl92c_query_rxpwrpercentage(s8 antpower) { if ((antpower <= -100) || (antpower >= 20)) return 0; @@ -59,9 +59,9 @@ static u8 _rtl92c_query_rxpwrpercentage(char antpower) return 100 + antpower; } -static u8 _rtl92c_evm_db_to_percentage(char value) +static u8 _rtl92c_evm_db_to_percentage(s8 value) { - char ret_val; + s8 ret_val; ret_val = value; if (ret_val >= 0) @@ -449,7 +449,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw, if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, - "DMA mapping error"); + "DMA mapping error\n"); return; } rcu_read_lock(); @@ -615,7 +615,7 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, - "DMA mapping error"); + "DMA mapping error\n"); return; } CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h index 4bec4b07e..607304586 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h @@ -537,8 +537,8 @@ struct rx_fwinfo_92c { u8 pwdb_all; u8 cfosho[4]; u8 cfotail[4]; - char rxevm[2]; 
- char rxsnr[4]; + s8 rxevm[2]; + s8 rxsnr[4]; u8 pdsnr[2]; u8 csi_current[2]; u8 csi_target[2]; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index 34ce06441..8789752f8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -347,50 +347,24 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u16 i, usvalue; - u8 hwinfo[HWSET_MAX_SIZE] = {0}; - u16 eeprom_id; - - if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { - rtl_efuse_shadow_map_update(hw); - memcpy((void *)hwinfo, - (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0], - HWSET_MAX_SIZE); - } else if (rtlefuse->epromtype == EEPROM_93C46) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "RTL819X Not boot from eeprom, check it !!\n"); - } - RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, "MAP", - hwinfo, HWSET_MAX_SIZE); - eeprom_id = le16_to_cpu(*((__le16 *)&hwinfo[0])); - if (eeprom_id != RTL8190_EEPROM_ID) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "EEPROM ID(%#x) is invalid!!\n", eeprom_id); - rtlefuse->autoload_failflag = true; - } else { - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); - rtlefuse->autoload_failflag = false; - } - if (rtlefuse->autoload_failflag) + int params[] = {RTL8190_EEPROM_ID, EEPROM_VID, EEPROM_DID, + EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR, + EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID, + 0}; + u8 *hwinfo; + + hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL); + if (!hwinfo) return; - for (i = 0; i < 6; i += 2) { - usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i]; - *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue; - } - pr_info("MAC address: %pM\n", rtlefuse->dev_addr); + + if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params)) + goto exit; + _rtl92cu_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag, hwinfo); - rtlefuse->eeprom_vid = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VID]); - rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]); - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, " VID = 0x%02x PID = 0x%02x\n", - rtlefuse->eeprom_vid, rtlefuse->eeprom_did); - rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN]; - rtlefuse->eeprom_version = - le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]); + _rtl92cu_read_board_type(hw, hwinfo); + rtlefuse->txpwr_fromeprom = true; - rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID]; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n", - rtlefuse->eeprom_oemid); if (rtlhal->oem_id == RT_CID_DEFAULT) { switch (rtlefuse->eeprom_oemid) { case EEPROM_CID_DEFAULT: @@ -416,7 +390,8 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw) break; } } - _rtl92cu_read_board_type(hw, hwinfo); +exit: + kfree(hwinfo); } static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c index 035713311..68ca73485 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c @@ -596,7 +596,7 @@ void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T) /*==============================================================*/ -static u8 _rtl92c_query_rxpwrpercentage(char antpower) +static u8 _rtl92c_query_rxpwrpercentage(s8 antpower) { if 
((antpower <= -100) || (antpower >= 20)) return 0; @@ -606,9 +606,9 @@ static u8 _rtl92c_query_rxpwrpercentage(char antpower) return 100 + antpower; } -static u8 _rtl92c_evm_db_to_percentage(char value) +static u8 _rtl92c_evm_db_to_percentage(s8 value) { - char ret_val; + s8 ret_val; ret_val = value; if (ret_val >= 0) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h index 553a4bfac..20a49ec84 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h @@ -79,8 +79,8 @@ struct rx_fwinfo_92c { u8 pwdb_all; u8 cfosho[4]; u8 cfotail[4]; - char rxevm[2]; - char rxsnr[4]; + s8 rxevm[2]; + s8 rxsnr[4]; u8 pdsnr[2]; u8 csi_current[2]; u8 csi_target[2]; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c index 5624ade92..ec2ea56f7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c @@ -465,7 +465,7 @@ static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw) } if (!rtstatus) { RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "Radio[%d] Fail!!", rfpath); + "Radio[%d] Fail!!\n", rfpath); goto phy_rf_cfg_fail; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c index f49b60d31..b0f632462 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c @@ -1744,65 +1744,28 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u16 i, usvalue; - u8 hwinfo[HWSET_MAX_SIZE]; - u16 eeprom_id; - unsigned long flags; + int params[] = {RTL8190_EEPROM_ID, EEPROM_VID, EEPROM_DID, + EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR_MAC0_92D, + EEPROM_CHANNEL_PLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID, + COUNTRY_CODE_WORLD_WIDE_13}; + int i; + u16 usvalue; + u8 *hwinfo; - if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { - spin_lock_irqsave(&globalmutex_for_power_and_efuse, flags); - rtl_efuse_shadow_map_update(hw); - _rtl92de_efuse_update_chip_version(hw); - spin_unlock_irqrestore(&globalmutex_for_power_and_efuse, flags); - memcpy((void *)hwinfo, (void *)&rtlefuse->efuse_map - [EFUSE_INIT_MAP][0], - HWSET_MAX_SIZE); - } else if (rtlefuse->epromtype == EEPROM_93C46) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "RTL819X Not boot from eeprom, check it !!\n"); - } - RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP", - hwinfo, HWSET_MAX_SIZE); + hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL); + if (!hwinfo) + return; - eeprom_id = *((u16 *)&hwinfo[0]); - if (eeprom_id != RTL8190_EEPROM_ID) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "EEPROM ID(%#x) is invalid!!\n", eeprom_id); - rtlefuse->autoload_failflag = true; - } else { - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); - rtlefuse->autoload_failflag = false; - } - if (rtlefuse->autoload_failflag) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "RTL819X Not boot from eeprom, check it !!\n"); + if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params)) { + kfree(hwinfo); return; - } + } - rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID]; - _rtl92de_read_macphymode_and_bandtype(hw, hwinfo); - /* VID, DID SE 0xA-D */ - rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID]; - rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID]; - 
rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID]; - rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID]; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROMId = 0x%4x\n", eeprom_id); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid); + _rtl92de_efuse_update_chip_version(hw); + _rtl92de_read_macphymode_and_bandtype(hw, hwinfo); - /* Read Permanent MAC address */ - if (rtlhal->interfaceindex == 0) { - for (i = 0; i < 6; i += 2) { - usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR_MAC0_92D + i]; - *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue; - } - } else { + /* Read Permanent MAC address for 2nd interface */ + if (rtlhal->interfaceindex != 0) { for (i = 0; i < 6; i += 2) { usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR_MAC1_92D + i]; *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue; @@ -1828,10 +1789,8 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw) rtlefuse->channel_plan = COUNTRY_CODE_FCC; break; } - rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; rtlefuse->txpwr_fromeprom = true; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); + kfree(hwinfo); } void rtl92de_read_eeprom_info(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index 7810fe87d..d334d2a5e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -2695,7 +2695,7 @@ void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw) RTPRINT(rtlpriv, FINIT, INIT_IQK, "LCK:Finish!!!\n"); } -void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, char delta) +void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta) { return; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h index 48d5c6835..8115bf4ac 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h @@ -160,7 +160,7 @@ void rtl92d_phy_config_maccoexist_rfpage(struct ieee80211_hw *hw); bool rtl92d_phy_check_poweroff(struct ieee80211_hw *hw); void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw); void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw); -void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, char delta); +void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta); void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw); void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw); void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c index 6a6ac540d..2f479d397 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c @@ -601,7 +601,7 @@ bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw) } if (!rtstatus) { RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "Radio[%d] Fail!!", rfpath); + "Radio[%d] Fail!!\n", rfpath); goto phy_rf_cfg_fail; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c index 1feaa629d..e998e98d7 100644 --- 
a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c @@ -48,7 +48,7 @@ static u8 _rtl92de_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue) return skb->priority; } -static u8 _rtl92d_query_rxpwrpercentage(char antpower) +static u8 _rtl92d_query_rxpwrpercentage(s8 antpower) { if ((antpower <= -100) || (antpower >= 20)) return 0; @@ -58,9 +58,9 @@ static u8 _rtl92d_query_rxpwrpercentage(char antpower) return 100 + antpower; } -static u8 _rtl92d_evm_db_to_percentage(char value) +static u8 _rtl92d_evm_db_to_percentage(s8 value) { - char ret_val = value; + s8 ret_val = value; if (ret_val >= 0) ret_val = 0; @@ -586,7 +586,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, - "DMA mapping error"); + "DMA mapping error\n"); return; } CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92d)); @@ -744,7 +744,7 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, - "DMA mapping error"); + "DMA mapping error\n"); return; } CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h index fb5cf0634..194d99f8b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h @@ -554,8 +554,8 @@ struct rx_fwinfo_92d { u8 pwdb_all; u8 cfosho[4]; u8 cfotail[4]; - char rxevm[2]; - char rxsnr[4]; + s8 rxevm[2]; + s8 rxsnr[4]; u8 pdsnr[2]; u8 csi_current[2]; u8 csi_target[2]; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c index 459f3d0ef..e6b5786c7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c @@ -496,7 +496,7 @@ static void rtl92ee_dm_find_minimum_rssi(struct ieee80211_hw *hw) rtl_dm_dig->min_undec_pwdb_for_dm = rtlpriv->dm.entry_min_undec_sm_pwdb; RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, - "AP Ext Port or disconnet PWDB = 0x%x\n", + "AP Ext Port or disconnect PWDB = 0x%x\n", rtl_dm_dig->min_undec_pwdb_for_dm); } RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, @@ -983,7 +983,7 @@ static bool _rtl92ee_dm_ra_state_check(struct ieee80211_hw *hw, break; default: RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, - "wrong rssi level setting %d !", *ratr_state); + "wrong rssi level setting %d !\n", *ratr_state); break; } @@ -1219,6 +1219,7 @@ void rtl92ee_dm_watchdog(struct ieee80211_hw *hw) if (ppsc->p2p_ps_info.p2p_ps_mode) fw_ps_awake = false; + spin_lock(&rtlpriv->locks.rf_ps_lock); if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) && fw_ps_awake) && (!ppsc->rfchange_inprogress)) { @@ -1233,4 +1234,5 @@ void rtl92ee_dm_watchdog(struct ieee80211_hw *hw) rtl92ee_dm_dynamic_atc_switch(hw); rtl92ee_dm_dynamic_primary_cca_ckeck(hw); } + spin_unlock(&rtlpriv->locks.rf_ps_lock); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index 9fd3f1b6e..b07af8d15 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -2098,73 +2098,24 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct 
rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u16 i, usvalue; - u8 hwinfo[HWSET_MAX_SIZE]; - u16 eeprom_id; - - if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { - rtl_efuse_shadow_map_update(hw); - - memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], - HWSET_MAX_SIZE); - } else if (rtlefuse->epromtype == EEPROM_93C46) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "RTL819X Not boot from eeprom, check it !!"); - return; - } else { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "boot from neither eeprom nor efuse, check it !!"); + int params[] = {RTL8192E_EEPROM_ID, EEPROM_VID, EEPROM_DID, + EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR, + EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID, + COUNTRY_CODE_WORLD_WIDE_13}; + u8 *hwinfo; + + hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL); + if (!hwinfo) return; - } - RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n", - hwinfo, HWSET_MAX_SIZE); + if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params)) + goto exit; - eeprom_id = *((u16 *)&hwinfo[0]); - if (eeprom_id != RTL8192E_EEPROM_ID) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "EEPROM ID(%#x) is invalid!!\n", eeprom_id); - rtlefuse->autoload_failflag = true; - } else { - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); - rtlefuse->autoload_failflag = false; - } - - if (rtlefuse->autoload_failflag) - return; - /*VID DID SVID SDID*/ - rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID]; - rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID]; - rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID]; - rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID]; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROMId = 0x%4x\n", eeprom_id); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid); - /*customer ID*/ - rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; if (rtlefuse->eeprom_oemid == 0xFF) rtlefuse->eeprom_oemid = 0; RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); - /*EEPROM version*/ - rtlefuse->eeprom_version = *(u8 *)&hwinfo[EEPROM_VERSION]; - /*mac address*/ - for (i = 0; i < 6; i += 2) { - usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i]; - *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue; - } - - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "dev_addr: %pM\n", rtlefuse->dev_addr); - /*channel plan */ - rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN]; /* set channel plan from efuse */ rtlefuse->channel_plan = rtlefuse->eeprom_channelplan; /*tx power*/ @@ -2206,6 +2157,8 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw) break; } } +exit: + kfree(hwinfo); } static void _rtl92ee_hal_customized_behavior(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c index 018340aed..beafc9a10 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c @@ -547,7 +547,7 @@ static void _rtl92ee_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw) static void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start, u8 end, u8 base) { - char i = 0; + s8 i = 0; u8 tmp = 0; u32 temp_data = 0; @@ -650,7 +650,7 @@ static bool 
_rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw) rtstatus = phy_config_bb_with_hdr_file(hw, BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); return false; } @@ -662,7 +662,7 @@ static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw) } _rtl92ee_phy_txpower_by_rate_configuration(hw); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); return false; } rtstatus = phy_config_bb_with_hdr_file(hw, BASEBAND_CONFIG_AGC_TAB); @@ -1189,7 +1189,7 @@ static u8 _rtl92ee_get_txpower_by_rate(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; u8 shift = 0, sec, tx_num; - char diff = 0; + s8 diff = 0; sec = _rtl92ee_phy_get_ratesection_intxpower_byrate(rf, rate); tx_num = RF_TX_NUM_NONIMPLEMENT; @@ -1265,14 +1265,14 @@ static u8 _rtl92ee_get_txpower_index(struct ieee80211_hw *hw, "Illegal channel!!\n"); } - if (IS_CCK_RATE(rate)) + if (IS_CCK_RATE((s8)rate)) tx_power = rtlefuse->txpwrlevel_cck[rfpath][index]; else if (DESC92C_RATE6M <= rate) tx_power = rtlefuse->txpwrlevel_ht40_1s[rfpath][index]; /* OFDM-1T*/ if (DESC92C_RATE6M <= rate && rate <= DESC92C_RATE54M && - !IS_CCK_RATE(rate)) + !IS_CCK_RATE((s8)rate)) tx_power += rtlefuse->txpwr_legacyhtdiff[rfpath][TX_1S]; /* BW20-1S, BW20-2S */ @@ -1819,7 +1819,7 @@ u8 rtl92ee_phy_sw_chnl(struct ieee80211_hw *hw) if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { rtl92ee_phy_sw_chnl_callback(hw); RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, - "sw_chnl_inprogress false schdule workitem current channel %d\n", + "sw_chnl_inprogress false schedule workitem current channel %d\n", rtlphy->current_channel); rtlphy->sw_chnl_inprogress = false; } else { @@ -2414,19 +2414,10 @@ static void _rtl92ee_phy_reload_mac_registers(struct ieee80211_hw *hw, static void _rtl92ee_phy_path_adda_on(struct ieee80211_hw *hw, u32 *addareg, bool is_patha_on, bool is2t) { - u32 pathon; u32 i; - pathon = is_patha_on ? 
0x0fc01616 : 0x0fc01616; - if (!is2t) { - pathon = 0x0fc01616; - rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0fc01616); - } else { - rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathon); - } - - for (i = 1; i < IQK_ADDA_REG_NUM; i++) - rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathon); + for (i = 0; i < IQK_ADDA_REG_NUM; i++) + rtl_set_bbreg(hw, addareg[i], MASKDWORD, 0x0fc01616); } static void _rtl92ee_phy_mac_setting_calibration(struct ieee80211_hw *hw, @@ -2978,7 +2969,7 @@ void rtl92ee_phy_lc_calibrate(struct ieee80211_hw *hw) rtlphy->lck_inprogress = false; } -void rtl92ee_phy_ap_calibrate(struct ieee80211_hw *hw, char delta) +void rtl92ee_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta) { } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h index c6e97c8df..49bd0e554 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h @@ -141,7 +141,7 @@ void rtl92ee_phy_set_bw_mode(struct ieee80211_hw *hw, void rtl92ee_phy_sw_chnl_callback(struct ieee80211_hw *hw); u8 rtl92ee_phy_sw_chnl(struct ieee80211_hw *hw); void rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery); -void rtl92ee_phy_ap_calibrate(struct ieee80211_hw *hw, char delta); +void rtl92ee_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta); void rtl92ee_phy_lc_calibrate(struct ieee80211_hw *hw); void rtl92ee_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); bool rtl92ee_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c index c9bc33cd1..73716c07d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c @@ -142,7 +142,7 @@ static bool _rtl92ee_phy_rf6052_config_parafile(struct ieee80211_hw *hw) if (!rtstatus) { RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "Radio[%d] Fail!!", rfpath); + "Radio[%d] Fail!!\n", rfpath); return false; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index 35e6bf7e2..2d48ccd02 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -56,7 +56,7 @@ static void _rtl92ee_query_rxphystatus(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); struct phy_status_rpt *p_phystrpt = (struct phy_status_rpt *)p_drvinfo; - char rx_pwr_all = 0, rx_pwr[4]; + s8 rx_pwr_all = 0, rx_pwr[4]; u8 rf_rx_num = 0, evm, pwdb_all; u8 i, max_spatial_stream; u32 rssi, total_rssi = 0; @@ -703,7 +703,7 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, - "DMA mapping error"); + "DMA mapping error\n"); return; } @@ -867,7 +867,7 @@ void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw, if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, - "DMA mapping error"); + "DMA mapping error\n"); return; } CLEAR_PCI_TX_DESC_CONTENT(pdesc, txdesc_len); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h index a4c383452..8053d1b12 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h @@ -650,8 +650,8 @@ struct rx_fwinfo { u8 pwdb_all; u8 cfosho[4]; u8 cfotail[4]; 
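
A note on the char-to-s8 conversions that run through these rtlwifi hunks (antpower, rxevm, rxsnr, and the other signed dBm/EVM fields): plain C "char" has implementation-defined signedness, and on targets where it defaults to unsigned (ARM, among others, or gcc -funsigned-char) a negative power or EVM byte stops comparing as negative, so clamping logic silently misbehaves. The kernel's s8 pins the type to signed 8-bit on every architecture. A standalone userspace sketch of the failure mode — illustrative values, hypothetical function names, assuming a two's-complement target:

#include <stdio.h>

typedef signed char s8;

/* Old-style prototype: behaviour depends on the platform's "char". */
static unsigned broken_evm(char value)
{
	char ret_val = value;

	if (ret_val >= 0)	/* always true where char is unsigned */
		ret_val = 0;
	if (ret_val <= -33)
		ret_val = -33;
	return -ret_val * 3;
}

/* Fixed prototype: s8 is signed 8-bit everywhere. */
static unsigned fixed_evm(s8 value)
{
	s8 ret_val = value;

	if (ret_val >= 0)
		ret_val = 0;
	if (ret_val <= -33)
		ret_val = -33;
	return -ret_val * 3;
}

int main(void)
{
	/* A raw EVM report byte of 0xF0 means -16 dB. */
	printf("broken: %u%%\n", broken_evm((char)0xF0));	/* 0%% on unsigned-char targets */
	printf("fixed:  %u%%\n", fixed_evm((s8)0xF0));		/* 48%% everywhere */
	return 0;
}
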
- char rxevm[2]; - char rxsnr[4]; + s8 rxevm[2]; + s8 rxsnr[4]; u8 pdsnr[2]; u8 csi_current[2]; u8 csi_target[2]; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c index 12b0978ba..ddfa0aee5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c @@ -1673,23 +1673,31 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct device *dev = &rtl_pcipriv(hw)->dev.pdev->dev; u16 i, usvalue; u16 eeprom_id; u8 tempval; u8 hwinfo[HWSET_MAX_SIZE_92S]; u8 rf_path, index; - if (rtlefuse->epromtype == EEPROM_93C46) { + switch (rtlefuse->epromtype) { + case EEPROM_BOOT_EFUSE: + rtl_efuse_shadow_map_update(hw); + break; + + case EEPROM_93C46: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "RTL819X Not boot from eeprom, check it !!\n"); - } else if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { - rtl_efuse_shadow_map_update(hw); + return; - memcpy((void *)hwinfo, (void *) - &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], - HWSET_MAX_SIZE_92S); + default: + dev_warn(dev, "no efuse data\n"); + return; } + memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], + HWSET_MAX_SIZE_92S); + RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP", hwinfo, HWSET_MAX_SIZE_92S); @@ -1995,7 +2003,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw) rtlefuse->b1ss_support = rtlefuse->b1x1_recvcombine; rtlefuse->eeprom_oemid = *&hwinfo[EEPROM_CUSTOMID]; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x", + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); /* set channel paln to world wide 13 */ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c index 9475aa2a8..34e88a3f6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c @@ -137,7 +137,7 @@ static void _rtl92s_set_antennadiff(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_phy *rtlphy = &(rtlpriv->phy); - char ant_pwr_diff = 0; + s8 ant_pwr_diff = 0; u32 u4reg_val = 0; if (rtlphy->rf_type == RF_2T2R) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c index 125b29bd2..d53bbf6be 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c @@ -360,7 +360,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw, if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, - "DMA mapping error"); + "DMA mapping error\n"); return; } if (mac->opmode == NL80211_IFTYPE_STATION) { @@ -529,7 +529,7 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, - "DMA mapping error"); + "DMA mapping error\n"); return; } /* Clear all status */ diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c index 4c1c96c96..42a6fba90 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c @@ -816,6 +816,7 @@ void rtl8723e_dm_watchdog(struct 
ieee80211_hw *hw) if (ppsc->p2p_ps_info.p2p_ps_mode) fw_ps_awake = false; + spin_lock(&rtlpriv->locks.rf_ps_lock); if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) && fw_ps_awake) && (!ppsc->rfchange_inprogress)) { @@ -829,6 +830,7 @@ void rtl8723e_dm_watchdog(struct ieee80211_hw *hw) rtl8723e_dm_bt_coexist(hw); rtl8723e_dm_check_edca_turbo(hw); } + spin_unlock(&rtlpriv->locks.rf_ps_lock); if (rtlpriv->btcoexist.init_set) rtl_write_byte(rtlpriv, 0x76e, 0xc); } @@ -874,8 +876,8 @@ void rtl8723e_dm_bt_coexist(struct ieee80211_hw *hw) tmp_byte = rtl_read_byte(rtlpriv, 0x40); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, - "[DM][BT], 0x40 is 0x%x", tmp_byte); + "[DM][BT], 0x40 is 0x%x\n", tmp_byte); RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[DM][BT], bt_dm_coexist start"); + "[DM][BT], bt_dm_coexist start\n"); rtl8723e_dm_bt_coexist_8723(hw); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c index 44de695dc..ec9bcf32f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c @@ -185,7 +185,7 @@ static void rtl8723e_dm_bt_set_hw_pta_mode(struct ieee80211_hw *hw, bool b_mode) struct rtl_priv *rtlpriv = rtl_priv(hw); if (BT_PTA_MODE_ON == b_mode) { - RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode on, "); + RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode on\n"); /* Enable GPIO 0/1/2/3/8 pins for bt */ rtl_write_byte(rtlpriv, 0x40, 0x20); rtlpriv->btcoexist.hw_coexist_all_off = false; @@ -1401,7 +1401,7 @@ static void rtl8723e_dm_bt_inq_page_monitor(struct ieee80211_hw *hw) (long)hal_coex_8723.bt_inq_page_start_time) / HZ) >= 10) { RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, - "[BTCoex], BT Inquiry/page >= 10sec!!!"); + "[BTCoex], BT Inquiry/page >= 10sec!!!\n"); hal_coex_8723.bt_inq_page_start_time = 0; rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_BT_INQ_PAGE; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c index a4b7eac68..b88c7ee72 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c @@ -1630,62 +1630,22 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u16 i, usvalue; - u8 hwinfo[HWSET_MAX_SIZE]; - u16 eeprom_id; + int params[] = {RTL8190_EEPROM_ID, EEPROM_VID, EEPROM_DID, + EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR, + EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID, + COUNTRY_CODE_WORLD_WIDE_13}; + u8 *hwinfo; if (b_pseudo_test) { /* need add */ return; } - if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { - rtl_efuse_shadow_map_update(hw); - - memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], - HWSET_MAX_SIZE); - } else if (rtlefuse->epromtype == EEPROM_93C46) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "RTL819X Not boot from eeprom, check it !!"); - } - - RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n", - hwinfo, HWSET_MAX_SIZE); - - eeprom_id = *((u16 *)&hwinfo[0]); - if (eeprom_id != RTL8190_EEPROM_ID) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "EEPROM ID(%#x) is invalid!!\n", eeprom_id); - rtlefuse->autoload_failflag = true; - } else { - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); - rtlefuse->autoload_failflag = false; - } - - if (rtlefuse->autoload_failflag) + hwinfo = 
kzalloc(HWSET_MAX_SIZE, GFP_KERNEL); + if (!hwinfo) return; - rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID]; - rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID]; - rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID]; - rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID]; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROMId = 0x%4x\n", eeprom_id); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid); - - for (i = 0; i < 6; i += 2) { - usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i]; - *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue; - } - - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "dev_addr: %pM\n", rtlefuse->dev_addr); + if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params)) + goto exit; _rtl8723e_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag, hwinfo); @@ -1693,144 +1653,138 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw, rtl8723e_read_bt_coexist_info_from_hwpg(hw, rtlefuse->autoload_failflag, hwinfo); - rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN]; - rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; - rtlefuse->txpwr_fromeprom = true; - rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID]; - - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); - - /* set channel paln to world wide 13 */ - rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13; - - if (rtlhal->oem_id == RT_CID_DEFAULT) { - switch (rtlefuse->eeprom_oemid) { - case EEPROM_CID_DEFAULT: - if (rtlefuse->eeprom_did == 0x8176) { - if (CHK_SVID_SMID(0x10EC, 0x6151) || - CHK_SVID_SMID(0x10EC, 0x6152) || - CHK_SVID_SMID(0x10EC, 0x6154) || - CHK_SVID_SMID(0x10EC, 0x6155) || - CHK_SVID_SMID(0x10EC, 0x6177) || - CHK_SVID_SMID(0x10EC, 0x6178) || - CHK_SVID_SMID(0x10EC, 0x6179) || - CHK_SVID_SMID(0x10EC, 0x6180) || - CHK_SVID_SMID(0x10EC, 0x7151) || - CHK_SVID_SMID(0x10EC, 0x7152) || - CHK_SVID_SMID(0x10EC, 0x7154) || - CHK_SVID_SMID(0x10EC, 0x7155) || - CHK_SVID_SMID(0x10EC, 0x7177) || - CHK_SVID_SMID(0x10EC, 0x7178) || - CHK_SVID_SMID(0x10EC, 0x7179) || - CHK_SVID_SMID(0x10EC, 0x7180) || - CHK_SVID_SMID(0x10EC, 0x8151) || - CHK_SVID_SMID(0x10EC, 0x8152) || - CHK_SVID_SMID(0x10EC, 0x8154) || - CHK_SVID_SMID(0x10EC, 0x8155) || - CHK_SVID_SMID(0x10EC, 0x8181) || - CHK_SVID_SMID(0x10EC, 0x8182) || - CHK_SVID_SMID(0x10EC, 0x8184) || - CHK_SVID_SMID(0x10EC, 0x8185) || - CHK_SVID_SMID(0x10EC, 0x9151) || - CHK_SVID_SMID(0x10EC, 0x9152) || - CHK_SVID_SMID(0x10EC, 0x9154) || - CHK_SVID_SMID(0x10EC, 0x9155) || - CHK_SVID_SMID(0x10EC, 0x9181) || - CHK_SVID_SMID(0x10EC, 0x9182) || - CHK_SVID_SMID(0x10EC, 0x9184) || - CHK_SVID_SMID(0x10EC, 0x9185)) + if (rtlhal->oem_id != RT_CID_DEFAULT) + return; + + switch (rtlefuse->eeprom_oemid) { + case EEPROM_CID_DEFAULT: + switch (rtlefuse->eeprom_did) { + case 0x8176: + switch (rtlefuse->eeprom_svid) { + case 0x10EC: + switch (rtlefuse->eeprom_smid) { + case 0x6151 ... 0x6152: + case 0x6154 ... 0x6155: + case 0x6177 ... 0x6180: + case 0x7151 ... 0x7152: + case 0x7154 ... 0x7155: + case 0x7177 ... 0x7180: + case 0x8151 ... 0x8152: + case 0x8154 ... 0x8155: + case 0x8181 ... 0x8182: + case 0x8184 ... 0x8185: + case 0x9151 ... 0x9152: + case 0x9154 ... 0x9155: + case 0x9181 ... 
0x9182: + case 0x9184 ... 0x9185: rtlhal->oem_id = RT_CID_TOSHIBA; - else if (rtlefuse->eeprom_svid == 0x1025) - rtlhal->oem_id = RT_CID_819X_ACER; - else if (CHK_SVID_SMID(0x10EC, 0x6191) || - CHK_SVID_SMID(0x10EC, 0x6192) || - CHK_SVID_SMID(0x10EC, 0x6193) || - CHK_SVID_SMID(0x10EC, 0x7191) || - CHK_SVID_SMID(0x10EC, 0x7192) || - CHK_SVID_SMID(0x10EC, 0x7193) || - CHK_SVID_SMID(0x10EC, 0x8191) || - CHK_SVID_SMID(0x10EC, 0x8192) || - CHK_SVID_SMID(0x10EC, 0x8193) || - CHK_SVID_SMID(0x10EC, 0x9191) || - CHK_SVID_SMID(0x10EC, 0x9192) || - CHK_SVID_SMID(0x10EC, 0x9193)) + break; + case 0x6191 ... 0x6193: + case 0x7191 ... 0x7193: + case 0x8191 ... 0x8193: + case 0x9191 ... 0x9193: rtlhal->oem_id = RT_CID_819X_SAMSUNG; - else if (CHK_SVID_SMID(0x10EC, 0x8195) || - CHK_SVID_SMID(0x10EC, 0x9195) || - CHK_SVID_SMID(0x10EC, 0x7194) || - CHK_SVID_SMID(0x10EC, 0x8200) || - CHK_SVID_SMID(0x10EC, 0x8201) || - CHK_SVID_SMID(0x10EC, 0x8202) || - CHK_SVID_SMID(0x10EC, 0x9200)) - rtlhal->oem_id = RT_CID_819X_LENOVO; - else if (CHK_SVID_SMID(0x10EC, 0x8197) || - CHK_SVID_SMID(0x10EC, 0x9196)) + break; + case 0x8197: + case 0x9196: rtlhal->oem_id = RT_CID_819X_CLEVO; - else if (CHK_SVID_SMID(0x1028, 0x8194) || - CHK_SVID_SMID(0x1028, 0x8198) || - CHK_SVID_SMID(0x1028, 0x9197) || - CHK_SVID_SMID(0x1028, 0x9198)) + break; + case 0x8203: + rtlhal->oem_id = RT_CID_819X_PRONETS; + break; + case 0x8195: + case 0x9195: + case 0x7194: + case 0x8200 ... 0x8202: + case 0x9200: + rtlhal->oem_id = RT_CID_819X_LENOVO; + break; + } + case 0x1025: + rtlhal->oem_id = RT_CID_819X_ACER; + break; + case 0x1028: + switch (rtlefuse->eeprom_smid) { + case 0x8194: + case 0x8198: + case 0x9197 ... 0x9198: rtlhal->oem_id = RT_CID_819X_DELL; - else if (CHK_SVID_SMID(0x103C, 0x1629)) + break; + } + break; + case 0x103C: + switch (rtlefuse->eeprom_smid) { + case 0x1629: rtlhal->oem_id = RT_CID_819X_HP; - else if (CHK_SVID_SMID(0x1A32, 0x2315)) + } + break; + case 0x1A32: + switch (rtlefuse->eeprom_smid) { + case 0x2315: rtlhal->oem_id = RT_CID_819X_QMI; - else if (CHK_SVID_SMID(0x10EC, 0x8203)) - rtlhal->oem_id = RT_CID_819X_PRONETS; - else if (CHK_SVID_SMID(0x1043, 0x84B5)) - rtlhal->oem_id = - RT_CID_819X_EDIMAX_ASUS; - else - rtlhal->oem_id = RT_CID_DEFAULT; - } else if (rtlefuse->eeprom_did == 0x8178) { - if (CHK_SVID_SMID(0x10EC, 0x6181) || - CHK_SVID_SMID(0x10EC, 0x6182) || - CHK_SVID_SMID(0x10EC, 0x6184) || - CHK_SVID_SMID(0x10EC, 0x6185) || - CHK_SVID_SMID(0x10EC, 0x7181) || - CHK_SVID_SMID(0x10EC, 0x7182) || - CHK_SVID_SMID(0x10EC, 0x7184) || - CHK_SVID_SMID(0x10EC, 0x7185) || - CHK_SVID_SMID(0x10EC, 0x8181) || - CHK_SVID_SMID(0x10EC, 0x8182) || - CHK_SVID_SMID(0x10EC, 0x8184) || - CHK_SVID_SMID(0x10EC, 0x8185) || - CHK_SVID_SMID(0x10EC, 0x9181) || - CHK_SVID_SMID(0x10EC, 0x9182) || - CHK_SVID_SMID(0x10EC, 0x9184) || - CHK_SVID_SMID(0x10EC, 0x9185)) - rtlhal->oem_id = RT_CID_TOSHIBA; - else if (rtlefuse->eeprom_svid == 0x1025) - rtlhal->oem_id = RT_CID_819X_ACER; - else if (CHK_SVID_SMID(0x10EC, 0x8186)) - rtlhal->oem_id = RT_CID_819X_PRONETS; - else if (CHK_SVID_SMID(0x1043, 0x8486)) + break; + } + break; + case 0x1043: + switch (rtlefuse->eeprom_smid) { + case 0x84B5: rtlhal->oem_id = - RT_CID_819X_EDIMAX_ASUS; - else - rtlhal->oem_id = RT_CID_DEFAULT; - } else { - rtlhal->oem_id = RT_CID_DEFAULT; + RT_CID_819X_EDIMAX_ASUS; + } + break; } break; - case EEPROM_CID_TOSHIBA: - rtlhal->oem_id = RT_CID_TOSHIBA; - break; - case EEPROM_CID_CCX: - rtlhal->oem_id = RT_CID_CCX; - break; - case EEPROM_CID_QMI: - rtlhal->oem_id = 
RT_CID_819X_QMI; - break; - case EEPROM_CID_WHQL: + case 0x8178: + switch (rtlefuse->eeprom_svid) { + case 0x10ec: + switch (rtlefuse->eeprom_smid) { + case 0x6181 ... 0x6182: + case 0x6184 ... 0x6185: + case 0x7181 ... 0x7182: + case 0x7184 ... 0x7185: + case 0x8181 ... 0x8182: + case 0x8184 ... 0x8185: + case 0x9181 ... 0x9182: + case 0x9184 ... 0x9185: + rtlhal->oem_id = RT_CID_TOSHIBA; + break; + case 0x8186: + rtlhal->oem_id = + RT_CID_819X_PRONETS; + break; + } break; - default: - rtlhal->oem_id = RT_CID_DEFAULT; + case 0x1025: + rtlhal->oem_id = RT_CID_819X_ACER; + break; + case 0x1043: + switch (rtlefuse->eeprom_smid) { + case 0x8486: + rtlhal->oem_id = + RT_CID_819X_EDIMAX_ASUS; + } + break; + } break; - } + break; + case EEPROM_CID_TOSHIBA: + rtlhal->oem_id = RT_CID_TOSHIBA; + break; + case EEPROM_CID_CCX: + rtlhal->oem_id = RT_CID_CCX; + break; + case EEPROM_CID_QMI: + rtlhal->oem_id = RT_CID_819X_QMI; + break; + case EEPROM_CID_WHQL: + break; + default: + rtlhal->oem_id = RT_CID_DEFAULT; + break; } +exit: + kfree(hwinfo); } static void _rtl8723e_hal_customized_behavior(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c index d367097f4..601b78efe 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c @@ -213,7 +213,7 @@ static bool _rtl8723e_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) rtstatus = _rtl8723e_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); return false; } @@ -227,7 +227,7 @@ static bool _rtl8723e_phy_bb8192c_config_parafile(struct ieee80211_hw *hw) BASEBAND_CONFIG_PHY_REG); } if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); return false; } rtstatus = @@ -893,7 +893,7 @@ u8 rtl8723e_phy_sw_chnl(struct ieee80211_hw *hw) if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { rtl8723e_phy_sw_chnl_callback(hw); RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, - "sw_chnl_inprogress false schdule workitem\n"); + "sw_chnl_inprogress false schedule workitem\n"); rtlphy->sw_chnl_inprogress = false; } else { RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c index 9ebc8281f..422771778 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c @@ -504,7 +504,7 @@ static bool _rtl8723e_phy_rf6052_config_parafile(struct ieee80211_hw *hw) if (rtstatus != true) { RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "Radio[%d] Fail!!", rfpath); + "Radio[%d] Fail!!\n", rfpath); return false; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c index 7b4a9b635..e93125ebe 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c @@ -389,7 +389,7 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw, if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, - "DMA mapping error"); + "DMA mapping error\n"); return; } if (mac->opmode == NL80211_IFTYPE_STATION) { @@ -557,7 +557,7 @@ void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, if 
(pci_dma_mapping_error(rtlpci->pdev, mapping)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, - "DMA mapping error"); + "DMA mapping error\n"); return; } CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h index 32970bf18..43d4c791d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h @@ -522,8 +522,8 @@ struct rx_fwinfo_8723e { u8 pwdb_all; u8 cfosho[4]; u8 cfotail[4]; - char rxevm[2]; - char rxsnr[4]; + s8 rxevm[2]; + s8 rxsnr[4]; u8 pdsnr[2]; u8 csi_current[2]; u8 csi_target[2]; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c index 3a81cdba8..131c0d1d6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c @@ -758,11 +758,11 @@ static void rtl8723be_dm_txpower_tracking_callback_thermalmeter( u8 ofdm_min_index = 6; u8 index_for_channel = 0; - char delta_swing_table_idx_tup_a[TXSCALE_TABLE_SIZE] = { + s8 delta_swing_table_idx_tup_a[TXSCALE_TABLE_SIZE] = { 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13, 14, 15}; - char delta_swing_table_idx_tdown_a[TXSCALE_TABLE_SIZE] = { + s8 delta_swing_table_idx_tdown_a[TXSCALE_TABLE_SIZE] = { 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 12, 13, 14, 15}; @@ -1279,6 +1279,7 @@ void rtl8723be_dm_watchdog(struct ieee80211_hw *hw) if (ppsc->p2p_ps_info.p2p_ps_mode) fw_ps_awake = false; + spin_lock(&rtlpriv->locks.rf_ps_lock); if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) && fw_ps_awake) && (!ppsc->rfchange_inprogress)) { @@ -1294,5 +1295,6 @@ void rtl8723be_dm_watchdog(struct ieee80211_hw *hw) rtl8723be_dm_check_txpower_tracking(hw); rtl8723be_dm_dynamic_txpower(hw); } + spin_unlock(&rtlpriv->locks.rf_ps_lock); rtlpriv->dm.dbginfo.num_qry_beacon_pkt = 0; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 5a3df9198..82e4476ca 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -1474,7 +1474,7 @@ static enum version_8723e _rtl8723be_read_chip_version(struct ieee80211_hw *hw) value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG1); if ((value32 & (CHIP_8723B)) != CHIP_8723B) - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "unkown chip version\n"); + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "unknown chip version\n"); else version = (enum version_8723e)CHIP_8723B; @@ -2026,9 +2026,12 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u16 i, usvalue; - u8 hwinfo[HWSET_MAX_SIZE]; - u16 eeprom_id; + int params[] = {RTL8723BE_EEPROM_ID, EEPROM_VID, EEPROM_DID, + EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR, + EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID, + COUNTRY_CODE_WORLD_WIDE_13}; + u8 *hwinfo; + int i; bool is_toshiba_smid1 = false; bool is_toshiba_smid2 = false; bool is_samsung_smid = false; @@ -2055,52 +2058,13 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw, /* needs to be added */ return; } - if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { - rtl_efuse_shadow_map_update(hw); - memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], - 
HWSET_MAX_SIZE); - } else if (rtlefuse->epromtype == EEPROM_93C46) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "RTL819X Not boot from eeprom, check it !!"); - } - RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, ("MAP\n"), - hwinfo, HWSET_MAX_SIZE); - - eeprom_id = *((u16 *)&hwinfo[0]); - if (eeprom_id != RTL8723BE_EEPROM_ID) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "EEPROM ID(%#x) is invalid!!\n", eeprom_id); - rtlefuse->autoload_failflag = true; - } else { - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); - rtlefuse->autoload_failflag = false; - } - - if (rtlefuse->autoload_failflag) + hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL); + if (!hwinfo) return; - rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID]; - rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID]; - rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID]; - rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID]; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROMId = 0x%4x\n", eeprom_id); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid); - - for (i = 0; i < 6; i += 2) { - usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i]; - *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue; - } - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "dev_addr: %pM\n", - rtlefuse->dev_addr); + if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params)) + goto exit; /*parse xtal*/ rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_8723BE]; @@ -2114,14 +2078,6 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw, rtlefuse->autoload_failflag, hwinfo); - rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN]; - rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION]; - rtlefuse->txpwr_fromeprom = true; - rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID]; - - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); - /* set channel plan from efuse */ rtlefuse->channel_plan = rtlefuse->eeprom_channelplan; @@ -2232,6 +2188,8 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw, break; } } +exit: + kfree(hwinfo); } static void _rtl8723be_hal_customized_behavior(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c index 445f681d0..285818df1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c @@ -379,7 +379,7 @@ static void _rtl8723be_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw) static void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start, u8 end, u8 base_val) { - char i = 0; + s8 i = 0; u8 temp_value = 0; u32 temp_data = 0; @@ -467,7 +467,7 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw) rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); return false; } _rtl8723be_phy_init_tx_power_by_rate(hw); @@ -478,7 +478,7 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw) } phy_txpower_by_rate_config(hw); if (!rtstatus) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg 
Fail!!"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); return false; } rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw, @@ -953,7 +953,7 @@ static u8 _rtl8723be_get_txpower_by_rate(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; u8 shift = 0, rate_section, tx_num; - char tx_pwr_diff = 0; + s8 tx_pwr_diff = 0; rate_section = _rtl8723be_phy_get_ratesection_intxpower_byrate(rfpath, rate); @@ -1019,7 +1019,7 @@ static u8 _rtl8723be_get_txpower_index(struct ieee80211_hw *hw, u8 path, struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 index = (channel - 1); - u8 txpower; + u8 txpower = 0; u8 power_diff_byrate = 0; if (channel > 14 || channel < 1) { @@ -1395,7 +1395,7 @@ u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw) if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { rtl8723be_phy_sw_chnl_callback(hw); RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD, - "sw_chnl_inprogress false schdule workitem current channel %d\n", + "sw_chnl_inprogress false schedule workitem current channel %d\n", rtlphy->current_channel); rtlphy->sw_chnl_inprogress = false; } else { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c index 97f5a0377..78f4f18d8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c @@ -502,7 +502,7 @@ static bool _rtl8723be_phy_rf6052_config_parafile(struct ieee80211_hw *hw) if (!rtstatus) { RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "Radio[%d] Fail!!", rfpath); + "Radio[%d] Fail!!\n", rfpath); return false; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index 60345975f..2175aecbb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -56,7 +56,7 @@ static void _rtl8723be_query_rxphystatus(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); struct phy_status_rpt *p_phystrpt = (struct phy_status_rpt *)p_drvinfo; - char rx_pwr_all = 0, rx_pwr[4]; + s8 rx_pwr_all = 0, rx_pwr[4]; u8 rf_rx_num = 0, evm, pwdb_all, pwdb_all_bt = 0; u8 i, max_spatial_stream; u32 rssi, total_rssi = 0; @@ -464,7 +464,7 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw, mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { - RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error"); + RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error\n"); return; } CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8723be)); @@ -616,7 +616,7 @@ void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, - "DMA mapping error"); + "DMA mapping error\n"); return; } CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h index 40c36607b..8a9fe41ac 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h @@ -385,9 +385,9 @@ struct phy_status_rpt { u8 cck_rpt_b_ofdm_cfosho_b; u8 rsvd_1;/* ch_corr_msb; */ u8 noise_power_db_msb; - char path_cfotail[2]; + s8 path_cfotail[2]; u8 pcts_mask[2]; - char stream_rxevm[2]; + s8 stream_rxevm[2]; u8 path_rxsnr[2]; 
u8 noise_power_db_lsb; u8 rsvd_2[3]; @@ -422,8 +422,8 @@ struct rx_fwinfo_8723be { u8 pwdb_all; u8 cfosho[4]; u8 cfotail[4]; - char rxevm[2]; - char rxsnr[2]; + s8 rxevm[2]; + s8 rxsnr[2]; u8 pcts_msk_rpt[2]; u8 pdsnr[2]; u8 csi_current[2]; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c index 17a681788..bdfd44495 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c @@ -843,7 +843,7 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw) dm_digtable->rssi_val_min + offset; RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, - "dm_digtable->rssi_val_min=0x%x,dm_digtable->rx_gain_max = 0x%x", + "dm_digtable->rssi_val_min=0x%x,dm_digtable->rx_gain_max = 0x%x\n", dm_digtable->rssi_val_min, dm_digtable->rx_gain_max); if (rtlpriv->dm.one_entry_only) { @@ -1355,7 +1355,7 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw, u32 final_swing_idx[2]; u8 pwr_tracking_limit = 26; /*+1.0dB*/ u8 tx_rate = 0xFF; - char final_ofdm_swing_index = 0; + s8 final_ofdm_swing_index = 0; if (rtldm->tx_rate != 0xFF) tx_rate = @@ -2045,7 +2045,7 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw, u32 final_swing_idx[1]; u8 pwr_tracking_limit = 26; /*+1.0dB*/ u8 tx_rate = 0xFF; - char final_ofdm_swing_index = 0; + s8 final_ofdm_swing_index = 0; if (rtldm->tx_rate != 0xFF) tx_rate = rtl8821ae_hw_rate_to_mrate(hw, rtldm->tx_rate); @@ -2682,9 +2682,9 @@ static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw) bool b_edca_turbo_on = false; RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, - "rtl8821ae_dm_check_edca_turbo=====>"); + "rtl8821ae_dm_check_edca_turbo=====>\n"); RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD, - "Orginial BE PARAM: 0x%x\n", + "Original BE PARAM: 0x%x\n", rtl_read_dword(rtlpriv, DM_REG_EDCA_BE_11N)); if (rtlpriv->dm.dbginfo.num_non_be_pkt > 0x100) @@ -2949,6 +2949,7 @@ void rtl8821ae_dm_watchdog(struct ieee80211_hw *hw) if (ppsc->p2p_ps_info.p2p_ps_mode) fw_ps_awake = false; + spin_lock(&rtlpriv->locks.rf_ps_lock); if ((ppsc->rfpwr_state == ERFON) && ((!fw_current_inpsmode) && fw_ps_awake) && (!ppsc->rfchange_inprogress)) { @@ -2967,6 +2968,7 @@ void rtl8821ae_dm_watchdog(struct ieee80211_hw *hw) rtl8821ae_dm_check_txpower_tracking_thermalmeter(hw); rtl8821ae_dm_iq_calibrate(hw); } + spin_unlock(&rtlpriv->locks.rf_ps_lock); rtlpriv->dm.dbginfo.num_qry_beacon_pkt = 0; RT_TRACE(rtlpriv, COMP_DIG, DBG_DMESG, "\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 71e4dd996..0cddf1ad0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -3101,79 +3101,22 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - u16 i, usvalue; - u8 hwinfo[HWSET_MAX_SIZE]; - u16 eeprom_id; + int params[] = {RTL_EEPROM_ID, EEPROM_VID, EEPROM_DID, + EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR, + EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID, + COUNTRY_CODE_WORLD_WIDE_13}; + u8 *hwinfo; if (b_pseudo_test) { ;/* need add */ } - if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) { - rtl_efuse_shadow_map_update(hw); - memcpy(hwinfo, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0], - HWSET_MAX_SIZE); - } else if (rtlefuse->epromtype == EEPROM_93C46) { - 
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "RTL819X Not boot from eeprom, check it !!"); - } - - RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "MAP\n", - hwinfo, HWSET_MAX_SIZE); - - eeprom_id = *((u16 *)&hwinfo[0]); - if (eeprom_id != RTL_EEPROM_ID) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, - "EEPROM ID(%#x) is invalid!!\n", eeprom_id); - rtlefuse->autoload_failflag = true; - } else { - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); - rtlefuse->autoload_failflag = false; - } - - if (rtlefuse->autoload_failflag) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, - "RTL8812AE autoload_failflag, check it !!"); + hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL); + if (!hwinfo) return; - } - - rtlefuse->eeprom_version = *(u8 *)&hwinfo[EEPROM_VERSION]; - if (rtlefuse->eeprom_version == 0xff) - rtlefuse->eeprom_version = 0; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM version: 0x%2x\n", rtlefuse->eeprom_version); - - rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID]; - rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID]; - rtlefuse->eeprom_svid = *(u16 *)&hwinfo[EEPROM_SVID]; - rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID]; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROMId = 0x%4x\n", eeprom_id); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid); - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid); - - /*customer ID*/ - rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID]; - if (rtlefuse->eeprom_oemid == 0xFF) - rtlefuse->eeprom_oemid = 0; - - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid); - - for (i = 0; i < 6; i += 2) { - usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i]; - *((u16 *)(&rtlefuse->dev_addr[i])) = usvalue; - } - - RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, - "dev_addr: %pM\n", rtlefuse->dev_addr); + if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params)) + goto exit; _rtl8821ae_read_txpower_info_from_hwpg(hw, rtlefuse->autoload_failflag, hwinfo); @@ -3273,6 +3216,8 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_ break; } } +exit: + kfree(hwinfo); } /*static void _rtl8821ae_hal_customized_behavior(struct ieee80211_hw *hw) @@ -3829,7 +3774,7 @@ void rtl8821ae_update_hal_rate_tbl(struct ieee80211_hw *hw, rtl8821ae_update_hal_rate_mask(hw, sta, rssi_level); else /*RT_TRACE(rtlpriv, COMP_RATR,DBG_LOUD, - "rtl8821ae_update_hal_rate_tbl() Error! 8821ae FW RA Only");*/ + "rtl8821ae_update_hal_rate_tbl() Error! 
8821ae FW RA Only\n");*/ rtl8821ae_update_hal_rate_table(hw, sta); } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index 0c3b9ce86..a71bfe38e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -366,12 +366,12 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band, struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_dm *rtldm = rtl_dm(rtlpriv); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - char reg_swing_2g = -1;/* 0xff; */ - char reg_swing_5g = -1;/* 0xff; */ - char swing_2g = -1 * reg_swing_2g; - char swing_5g = -1 * reg_swing_5g; + s8 reg_swing_2g = -1;/* 0xff; */ + s8 reg_swing_5g = -1;/* 0xff; */ + s8 swing_2g = -1 * reg_swing_2g; + s8 swing_5g = -1 * reg_swing_5g; u32 out = 0x200; - const char auto_temp = -1; + const s8 auto_temp = -1; RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "===> PHY_GetTxBBSwing_8812A, bbSwing_2G: %d, bbSwing_5G: %d,autoload_failflag=%d.\n", @@ -524,7 +524,7 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band) struct rtl_dm *rtldm = rtl_dm(rtlpriv); u8 current_band = rtlhal->current_bandtype; u32 txpath, rxpath; - char bb_diff_between_band; + s8 bb_diff_between_band; txpath = rtl8821ae_phy_query_bb_reg(hw, RTXPATH, 0xf0); rxpath = rtl8821ae_phy_query_bb_reg(hw, RCCK_RX, 0x0f000000); @@ -581,7 +581,7 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band) count = 0; reg_41a = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY); RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, - "Reg41A value %d", reg_41a); + "Reg41A value %d\n", reg_41a); reg_41a &= 0x30; while ((reg_41a != 0x30) && (count < 50)) { udelay(50); @@ -591,7 +591,7 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band) reg_41a &= 0x30; count++; RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, - "Reg41A value %d", reg_41a); + "Reg41A value %d\n", reg_41a); } if (count != 0) RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, @@ -986,7 +986,7 @@ static void _rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(struct ieee8 struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; u8 regulation, bw, channel, rate_section; - char temp_pwrlmt = 0; + s8 temp_pwrlmt = 0; for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) { for (bw = 0; bw < MAX_5G_BANDWITH_NUM; ++bw) { @@ -1013,7 +1013,7 @@ static void _rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(struct ieee8 rtlphy->txpwr_limit_5g[regulation][bw][3][channel][RF90_PATH_A]; } - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "use other value %d", temp_pwrlmt); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "use other value %d\n", temp_pwrlmt); } } } @@ -1155,7 +1155,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211 u8 regulation, bw, channel, rate_section; u8 base_index2_4G = 0; u8 base_index5G = 0; - char temp_value = 0, temp_pwrlmt = 0; + s8 temp_value = 0, temp_pwrlmt = 0; u8 rf_path = 0; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, @@ -1467,11 +1467,11 @@ static bool _rtl8812ae_eq_n_byte(u8 *str1, u8 *str2, u32 num) return true; } -static char _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw, +static s8 _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw, u8 band, u8 channel) { struct rtl_priv *rtlpriv = rtl_priv(hw); - char channel_index = -1; + s8 channel_index = -1; u8 i = 0; if (band == BAND_ON_2_4G) @@ -1482,12 +1482,12 @@ static char 
_rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw, channel_index = i; } } else - RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid Band %d in %s", + RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid Band %d in %s\n", band, __func__); if (channel_index == -1) RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, - "Invalid Channel %d of Band %d in %s", channel, + "Invalid Channel %d of Band %d in %s\n", channel, band, __func__); return channel_index; @@ -1502,7 +1502,7 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul struct rtl_phy *rtlphy = &rtlpriv->phy; u8 regulation = 0, bandwidth = 0, rate_section = 0, channel; u8 channel_index; - char power_limit = 0, prev_power_limit, ret; + s8 power_limit = 0, prev_power_limit, ret; if (!_rtl8812ae_get_integer_from_string((char *)pchannel, &channel) || !_rtl8812ae_get_integer_from_string((char *)ppower_limit, @@ -1665,7 +1665,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw) rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n"); return false; } _rtl8821ae_phy_init_tx_power_by_rate(hw); @@ -1674,7 +1674,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw) BASEBAND_CONFIG_PHY_REG); } if (rtstatus != true) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!"); + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n"); return false; } @@ -2254,9 +2254,9 @@ static bool _rtl8821ae_phy_get_chnl_index(u8 channel, u8 *chnl_index) return in_24g; } -static char _rtl8821ae_phy_get_ratesection_intxpower_byrate(u8 path, u8 rate) +static s8 _rtl8821ae_phy_get_ratesection_intxpower_byrate(u8 path, u8 rate) { - char rate_section = 0; + s8 rate_section = 0; switch (rate) { case DESC_RATE1M: case DESC_RATE2M: @@ -2338,9 +2338,9 @@ static char _rtl8821ae_phy_get_ratesection_intxpower_byrate(u8 path, u8 rate) return rate_section; } -static char _rtl8812ae_phy_get_world_wide_limit(char *limit_table) +static s8 _rtl8812ae_phy_get_world_wide_limit(s8 *limit_table) { - char min = limit_table[0]; + s8 min = limit_table[0]; u8 i = 0; for (i = 0; i < MAX_REGULATION_NUM; ++i) { @@ -2350,7 +2350,7 @@ static char _rtl8812ae_phy_get_world_wide_limit(char *limit_table) return min; } -static char _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw, +static s8 _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw, u8 band, enum ht_channel_width bandwidth, enum radio_path rf_path, @@ -2362,7 +2362,7 @@ static char _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw, short band_temp = -1, regulation = -1, bandwidth_temp = -1, rate_section = -1, channel_temp = -1; u16 bd, regu, bdwidth, sec, chnl; - char power_limit = MAX_POWER_INDEX; + s8 power_limit = MAX_POWER_INDEX; if (rtlefuse->eeprom_regulatory == 2) return MAX_POWER_INDEX; @@ -2489,7 +2489,7 @@ static char _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw, chnl = channel_temp; if (band == BAND_ON_2_4G) { - char limits[10] = {0}; + s8 limits[10] = {0}; u8 i; for (i = 0; i < 4; ++i) @@ -2501,7 +2501,7 @@ static char _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw, rtlphy->txpwr_limit_2_4g[regu][bdwidth] [sec][chnl][rf_path]; } else if (band == BAND_ON_5G) { - char limits[10] = {0}; + s8 limits[10] = {0}; u8 i; for (i = 0; i < MAX_REGULATION_NUM; ++i) @@ -2519,14 +2519,14 @@ static char 
_rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw, return power_limit; } -static char _rtl8821ae_phy_get_txpower_by_rate(struct ieee80211_hw *hw, +static s8 _rtl8821ae_phy_get_txpower_by_rate(struct ieee80211_hw *hw, u8 band, u8 path, u8 rate) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; u8 shift = 0, rate_section, tx_num; - char tx_pwr_diff = 0; - char limit = 0; + s8 tx_pwr_diff = 0; + s8 limit = 0; rate_section = _rtl8821ae_phy_get_ratesection_intxpower_byrate(path, rate); tx_num = RF_TX_NUM_NONIMPLEMENT; @@ -2639,7 +2639,7 @@ static u8 _rtl8821ae_get_txpower_index(struct ieee80211_hw *hw, u8 path, u8 index = (channel - 1); u8 txpower = 0; bool in_24g = false; - char powerdiff_byrate = 0; + s8 powerdiff_byrate = 0; if (((rtlhal->current_bandtype == BAND_ON_2_4G) && (channel > 14 || channel < 1)) || @@ -4637,7 +4637,7 @@ void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw) { } -void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, char delta) +void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta) { } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h index c411f0a95..1285e1adf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h @@ -236,7 +236,7 @@ void rtl8821ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery); void rtl8812ae_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery); -void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, char delta); +void rtl8821ae_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta); void rtl8821ae_phy_lc_calibrate(struct ieee80211_hw *hw); void rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain); bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c index 292253816..c6ab95702 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c @@ -454,7 +454,7 @@ static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw) if (!rtstatus) { RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "Radio[%d] Fail!!", rfpath); + "Radio[%d] Fail!!\n", rfpath); return false; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c index 41efaa148..27727186b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c @@ -48,7 +48,7 @@ static u8 _rtl8821ae_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue) return skb->priority; } -static u16 odm_cfo(char value) +static u16 odm_cfo(s8 value) { int ret_val; @@ -64,9 +64,9 @@ static u16 odm_cfo(char value) return ret_val; } -static u8 _rtl8821ae_evm_dbm_jaguar(char value) +static u8 _rtl8821ae_evm_dbm_jaguar(s8 value) { - char ret_val = value; + s8 ret_val = value; /* -33dB~0dB to 33dB ~ 0dB*/ if (ret_val == -128) @@ -88,7 +88,7 @@ static void query_rxphystatus(struct ieee80211_hw *hw, struct phy_status_rpt *p_phystrpt = (struct phy_status_rpt *)p_drvinfo; struct rtl_dm *rtldm = rtl_dm(rtl_priv(hw)); struct rtl_phy *rtlphy = &rtlpriv->phy; - char rx_pwr_all = 0, rx_pwr[4]; + s8 rx_pwr_all = 0, rx_pwr[4]; u8 rf_rx_num = 0, evm, evmdbm, pwdb_all; u8 i, max_spatial_stream; u32 rssi, total_rssi = 0; @@ -170,7 +170,7 @@ static void query_rxphystatus(struct 
ieee80211_hw *hw, pwdb_all = 100; } } else { /* 8821 */ - char pout = -6; + s8 pout = -6; switch (lan_idx) { case 5: @@ -275,7 +275,7 @@ static void query_rxphystatus(struct ieee80211_hw *hw, if (bpacket_match_bssid) { for (i = RF90_PATH_A; i <= RF90_PATH_B; i++) rtl_priv(hw)->dm.cfo_tail[i] = - (char)p_phystrpt->cfotail[i]; + (s8)p_phystrpt->cfotail[i]; rtl_priv(hw)->dm.packet_count++; } @@ -716,7 +716,7 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, - "DMA mapping error"); + "DMA mapping error\n"); return; } CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_8821ae)); @@ -857,7 +857,7 @@ void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, - "DMA mapping error"); + "DMA mapping error\n"); return; } CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h index ad565bebf..b6f3c564b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h @@ -390,11 +390,11 @@ struct phy_status_rpt { u8 cfosho[4]; /* DW 1 byte 1 DW 2 byte 0 */ /* DWORD 2 */ - char cfotail[4]; /* DW 2 byte 1 DW 3 byte 0 */ + s8 cfotail[4]; /* DW 2 byte 1 DW 3 byte 0 */ /* DWORD 3 */ - char rxevm[2]; /* DW 3 byte 1 DW 3 byte 2 */ - char rxsnr[2]; /* DW 3 byte 3 DW 4 byte 0 */ + s8 rxevm[2]; /* DW 3 byte 1 DW 3 byte 2 */ + s8 rxsnr[2]; /* DW 3 byte 3 DW 4 byte 0 */ /* DWORD 4 */ u8 pcts_msk_rpt[2]; @@ -418,8 +418,8 @@ struct rx_fwinfo_8821ae { u8 pwdb_all; u8 cfosho[4]; u8 cfotail[4]; - char rxevm[2]; - char rxsnr[4]; + s8 rxevm[2]; + s8 rxsnr[4]; u8 pdsnr[2]; u8 csi_current[2]; u8 csi_target[2]; diff --git a/drivers/net/wireless/realtek/rtlwifi/stats.c b/drivers/net/wireless/realtek/rtlwifi/stats.c index d8b30690b..61700fa05 100644 --- a/drivers/net/wireless/realtek/rtlwifi/stats.c +++ b/drivers/net/wireless/realtek/rtlwifi/stats.c @@ -26,7 +26,7 @@ #include "stats.h" #include <linux/export.h> -u8 rtl_query_rxpwrpercentage(char antpower) +u8 rtl_query_rxpwrpercentage(s8 antpower) { if ((antpower <= -100) || (antpower >= 20)) return 0; @@ -37,9 +37,9 @@ u8 rtl_query_rxpwrpercentage(char antpower) } EXPORT_SYMBOL(rtl_query_rxpwrpercentage); -u8 rtl_evm_db_to_percentage(char value) +u8 rtl_evm_db_to_percentage(s8 value) { - char ret_val = clamp(-value, 0, 33) * 3; + s8 ret_val = clamp(-value, 0, 33) * 3; if (ret_val == 99) ret_val = 100; diff --git a/drivers/net/wireless/realtek/rtlwifi/stats.h b/drivers/net/wireless/realtek/rtlwifi/stats.h index 2b57dffef..bd0108f93 100644 --- a/drivers/net/wireless/realtek/rtlwifi/stats.h +++ b/drivers/net/wireless/realtek/rtlwifi/stats.h @@ -33,8 +33,8 @@ /* Rx smooth factor */ #define RX_SMOOTH_FACTOR 20 -u8 rtl_query_rxpwrpercentage(char antpower); -u8 rtl_evm_db_to_percentage(char value); +u8 rtl_query_rxpwrpercentage(s8 antpower); +u8 rtl_evm_db_to_percentage(s8 value); long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig); void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer, struct rtl_stats *pstatus); diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 4e0ab4d42..c5086c222 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -1089,7 +1089,7 @@ struct 
dynamic_primary_cca { }; struct rtl_regulatory { - char alpha2[2]; + s8 alpha2[2]; u16 country_code; u16 max_power_level; u32 tp_scale; @@ -1256,16 +1256,16 @@ struct rtl_phy { u8 cur_bw20_txpwridx; u8 cur_bw40_txpwridx; - char txpwr_limit_2_4g[MAX_REGULATION_NUM] - [MAX_2_4G_BANDWITH_NUM] - [MAX_RATE_SECTION_NUM] - [CHANNEL_MAX_NUMBER_2G] - [MAX_RF_PATH_NUM]; - char txpwr_limit_5g[MAX_REGULATION_NUM] - [MAX_5G_BANDWITH_NUM] + s8 txpwr_limit_2_4g[MAX_REGULATION_NUM] + [MAX_2_4G_BANDWITH_NUM] [MAX_RATE_SECTION_NUM] - [CHANNEL_MAX_NUMBER_5G] + [CHANNEL_MAX_NUMBER_2G] [MAX_RF_PATH_NUM]; + s8 txpwr_limit_5g[MAX_REGULATION_NUM] + [MAX_5G_BANDWITH_NUM] + [MAX_RATE_SECTION_NUM] + [CHANNEL_MAX_NUMBER_5G] + [MAX_RF_PATH_NUM]; u32 rfreg_chnlval[2]; bool apk_done; @@ -1639,7 +1639,7 @@ struct fast_ant_training { }; struct dm_phy_dbg_info { - char rx_snrdb[4]; + s8 rx_snrdb[4]; u64 num_qry_phy_status; u64 num_qry_phy_status_cck; u64 num_qry_phy_status_ofdm; @@ -1688,16 +1688,16 @@ struct rtl_dm { u8 txpower_track_control; bool interrupt_migration; bool disable_tx_int; - char ofdm_index[MAX_RF_PATH]; + s8 ofdm_index[MAX_RF_PATH]; u8 default_ofdm_index; u8 default_cck_index; - char cck_index; - char delta_power_index[MAX_RF_PATH]; - char delta_power_index_last[MAX_RF_PATH]; - char power_index_offset[MAX_RF_PATH]; - char absolute_ofdm_swing_idx[MAX_RF_PATH]; - char remnant_ofdm_swing_idx[MAX_RF_PATH]; - char remnant_cck_idx; + s8 cck_index; + s8 delta_power_index[MAX_RF_PATH]; + s8 delta_power_index_last[MAX_RF_PATH]; + s8 power_index_offset[MAX_RF_PATH]; + s8 absolute_ofdm_swing_idx[MAX_RF_PATH]; + s8 remnant_ofdm_swing_idx[MAX_RF_PATH]; + s8 remnant_cck_idx; bool modify_txagc_flag_path_a; bool modify_txagc_flag_path_b; @@ -1726,8 +1726,8 @@ struct rtl_dm { u8 swing_idx_cck_base; bool swing_flag_cck; - char swing_diff_2g; - char swing_diff_5g; + s8 swing_diff_2g; + s8 swing_diff_5g; u8 delta_swing_table_idx_24gccka_p[DEL_SW_IDX_SZ]; u8 delta_swing_table_idx_24gccka_n[DEL_SW_IDX_SZ]; @@ -1838,17 +1838,17 @@ struct rtl_efuse { * * Sizes of these arrays are decided by the larger ones. 
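A note on the char-to-s8 conversions running through this rtlwifi update: plain `char` has implementation-defined signedness in C. It is signed on x86 but unsigned on ARM, s390 and PowerPC, so negative dBm, EVM and power-offset readings stored in `char` silently become large positive numbers on those architectures; `s8` pins the type to signed 8-bit everywhere. A standalone userspace sketch of the failure mode, with `int8_t` standing in for the kernel's `s8`, a local `clamp()` standing in for the kernel macro, and the EVM conversion mirroring the stats.c hunk above:

    #include <stdio.h>
    #include <stdint.h>

    typedef int8_t s8;                /* userspace stand-in for the kernel's s8 */
    #define clamp(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))

    /* Mirrors rtl_evm_db_to_percentage(): maps an EVM reading of
     * -33..0 dB onto 0..100 percent. */
    static unsigned int evm_db_to_percentage(s8 value)
    {
            s8 ret_val = clamp(-value, 0, 33) * 3;

            if (ret_val == 99)
                    ret_val = 100;
            return ret_val;
    }

    int main(void)
    {
            /* On a platform where plain char is unsigned, a -20 dBm
             * reading stored in char becomes 236 and every
             * "antpower <= -100" style range check misfires. */
            char raw = (char)-20;

            printf("plain char sees %d\n", raw);   /* -20 or 236 */
            printf("s8 sees %d\n", (s8)-20);       /* always -20 */
            printf("EVM -30 dB -> %u%%\n", evm_db_to_percentage(-30));
            return 0;
    }

Built with `gcc -funsigned-char` (which emulates the ARM/PowerPC default), the first line prints 236 instead of -20 while the s8 path is unaffected.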
*/ - char txpwr_cckdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER]; - char txpwr_ht20diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER]; - char txpwr_ht40diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER]; - char txpwr_legacyhtdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER]; + s8 txpwr_cckdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER]; + s8 txpwr_ht20diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER]; + s8 txpwr_ht40diff[MAX_RF_PATH][CHANNEL_MAX_NUMBER]; + s8 txpwr_legacyhtdiff[MAX_RF_PATH][CHANNEL_MAX_NUMBER]; u8 txpwr_5g_bw40base[MAX_RF_PATH][CHANNEL_MAX_NUMBER]; u8 txpwr_5g_bw80base[MAX_RF_PATH][CHANNEL_MAX_NUMBER_5G_80M]; - char txpwr_5g_ofdmdiff[MAX_RF_PATH][MAX_TX_COUNT]; - char txpwr_5g_bw20diff[MAX_RF_PATH][MAX_TX_COUNT]; - char txpwr_5g_bw40diff[MAX_RF_PATH][MAX_TX_COUNT]; - char txpwr_5g_bw80diff[MAX_RF_PATH][MAX_TX_COUNT]; + s8 txpwr_5g_ofdmdiff[MAX_RF_PATH][MAX_TX_COUNT]; + s8 txpwr_5g_bw20diff[MAX_RF_PATH][MAX_TX_COUNT]; + s8 txpwr_5g_bw40diff[MAX_RF_PATH][MAX_TX_COUNT]; + s8 txpwr_5g_bw80diff[MAX_RF_PATH][MAX_TX_COUNT]; u8 txpwr_safetyflag; /* Band edge enable flag */ u16 eeprom_txpowerdiff; @@ -2006,7 +2006,7 @@ struct rtl_stats { bool is_ht; bool packet_toself; bool packet_beacon; /*for rssi */ - char cck_adc_pwdb[4]; /*for rx path selection */ + s8 cck_adc_pwdb[4]; /*for rx path selection */ bool is_vht; bool is_short_gi; @@ -2413,9 +2413,9 @@ struct dig_t { u8 presta_cstate; u8 curmultista_cstate; u8 stop_dig; - char back_val; - char back_range_max; - char back_range_min; + s8 back_val; + s8 back_range_max; + s8 back_range_min; u8 rx_gain_max; u8 rx_gain_min; u8 min_undec_pwdb_for_dm; @@ -2441,8 +2441,8 @@ struct dig_t { u8 cur_cs_ratiostate; u8 pre_cs_ratiostate; u8 backoff_enable_flag; - char backoffval_range_max; - char backoffval_range_min; + s8 backoffval_range_max; + s8 backoffval_range_min; u8 dig_min_0; u8 dig_min_1; u8 bt30_cur_igi; diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 569918c48..603c90470 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -2134,6 +2134,7 @@ static void rndis_get_scan_results(struct work_struct *work) struct rndis_wlan_private *priv = container_of(work, struct rndis_wlan_private, scan_work.work); struct usbnet *usbdev = priv->usbdev; + struct cfg80211_scan_info info = {}; int ret; netdev_dbg(usbdev->net, "get_scan_results\n"); @@ -2143,7 +2144,8 @@ static void rndis_get_scan_results(struct work_struct *work) ret = rndis_check_bssid_list(usbdev, NULL, NULL); - cfg80211_scan_done(priv->scan_request, ret < 0); + info.aborted = ret < 0; + cfg80211_scan_done(priv->scan_request, &info); priv->scan_request = NULL; } @@ -3574,7 +3576,11 @@ static int rndis_wlan_stop(struct usbnet *usbdev) flush_workqueue(priv->workqueue); if (priv->scan_request) { - cfg80211_scan_done(priv->scan_request, true); + struct cfg80211_scan_info info = { + .aborted = true, + }; + + cfg80211_scan_done(priv->scan_request, &info); priv->scan_request = NULL; } diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 40658b62d..35c14cc3f 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -398,7 +398,7 @@ static int rsi_mgmt_pkt_to_core(struct rsi_common *common, return -ENOLINK; msg_len -= pad_bytes; - if ((msg_len <= 0) || (!msg)) { + if (msg_len <= 0) { rsi_dbg(MGMT_RX_ZONE, "%s: Invalid rx msg of len = %d\n", __func__, msg_len); diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c index 983788156..0a0ff7e31 100644 --- 
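The rndis_wlan hunks above are part of a wider API change visible throughout this series: cfg80211_scan_done() and ieee80211_scan_completed() now take a `struct cfg80211_scan_info` instead of a bare `bool aborted`, so drivers can report additional scan metadata without further signature churn. The cw1200, wl1251 and wlcore hunks that follow apply the same conversion. The driver-side pattern, condensed:

    /* Before: only the abort status could be conveyed. */
    cfg80211_scan_done(priv->scan_request, ret < 0);

    /* After: a zero-initialized info struct on the stack; any fields
     * this driver does not fill stay in a well-defined default state. */
    struct cfg80211_scan_info info = {
            .aborted = ret < 0,
    };

    cfg80211_scan_done(priv->scan_request, &info);
    priv->scan_request = NULL;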
a/drivers/net/wireless/st/cw1200/scan.c +++ b/drivers/net/wireless/st/cw1200/scan.c @@ -167,6 +167,10 @@ void cw1200_scan_work(struct work_struct *work) } if (!priv->scan.req || (priv->scan.curr == priv->scan.end)) { + struct cfg80211_scan_info info = { + .aborted = priv->scan.status ? 1 : 0, + }; + if (priv->scan.output_power != priv->output_power) wsm_set_output_power(priv, priv->output_power * 10); if (priv->join_status == CW1200_JOIN_STATUS_STA && @@ -188,7 +192,7 @@ void cw1200_scan_work(struct work_struct *work) cw1200_scan_restart_delayed(priv); wsm_unlock_tx(priv); mutex_unlock(&priv->conf_mutex); - ieee80211_scan_completed(priv->hw, priv->scan.status ? 1 : 0); + ieee80211_scan_completed(priv->hw, &info); up(&priv->scan.lock); return; } else { diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c index c98630394..d0593bc1f 100644 --- a/drivers/net/wireless/ti/wl1251/event.c +++ b/drivers/net/wireless/ti/wl1251/event.c @@ -36,7 +36,11 @@ static int wl1251_event_scan_complete(struct wl1251 *wl, mbox->scheduled_scan_channels); if (wl->scanning) { - ieee80211_scan_completed(wl->hw, false); + struct cfg80211_scan_info info = { + .aborted = false, + }; + + ieee80211_scan_completed(wl->hw, &info); wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan completed"); wl->scanning = false; if (wl->hw->conf.flags & IEEE80211_CONF_IDLE) diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index e74d60dad..ea81be2c6 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -448,7 +448,11 @@ static void wl1251_op_stop(struct ieee80211_hw *hw) WARN_ON(wl->state != WL1251_STATE_ON); if (wl->scanning) { - ieee80211_scan_completed(wl->hw, true); + struct cfg80211_scan_info info = { + .aborted = true, + }; + + ieee80211_scan_completed(wl->hw, &info); wl->scanning = false; } diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c index ef811848d..2c5df43b8 100644 --- a/drivers/net/wireless/ti/wl18xx/event.c +++ b/drivers/net/wireless/ti/wl18xx/event.c @@ -112,12 +112,18 @@ static int wlcore_smart_config_decode_event(struct wl1271 *wl, return 0; } -static void wlcore_event_time_sync(struct wl1271 *wl, u16 tsf_msb, u16 tsf_lsb) +static void wlcore_event_time_sync(struct wl1271 *wl, + u16 tsf_high_msb, u16 tsf_high_lsb, + u16 tsf_low_msb, u16 tsf_low_lsb) { - u32 clock; - /* convert the MSB+LSB to a u32 TSF value */ - clock = (tsf_msb << 16) | tsf_lsb; - wl1271_info("TIME_SYNC_EVENT_ID: clock %u", clock); + u32 clock_low; + u32 clock_high; + + clock_high = (tsf_high_msb << 16) | tsf_high_lsb; + clock_low = (tsf_low_msb << 16) | tsf_low_lsb; + + wl1271_info("TIME_SYNC_EVENT_ID: clock_high %u, clock low %u", + clock_high, clock_low); } int wl18xx_process_mailbox_events(struct wl1271 *wl) @@ -138,8 +144,10 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl) if (vector & TIME_SYNC_EVENT_ID) wlcore_event_time_sync(wl, - mbox->time_sync_tsf_msb, - mbox->time_sync_tsf_lsb); + mbox->time_sync_tsf_high_msb, + mbox->time_sync_tsf_high_lsb, + mbox->time_sync_tsf_low_msb, + mbox->time_sync_tsf_low_lsb); if (vector & RADAR_DETECTED_EVENT_ID) { wl1271_info("radar event: channel %d type %s", @@ -187,11 +195,11 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl) */ if (vector & MAX_TX_FAILURE_EVENT_ID) wlcore_event_max_tx_failure(wl, - le32_to_cpu(mbox->tx_retry_exceeded_bitmap)); + le16_to_cpu(mbox->tx_retry_exceeded_bitmap)); if (vector & INACTIVE_STA_EVENT_ID) 
wlcore_event_inactive_sta(wl, - le32_to_cpu(mbox->inactive_sta_bitmap)); + le16_to_cpu(mbox->inactive_sta_bitmap)); if (vector & REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID) wlcore_event_roc_complete(wl); diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h index 070de1274..ce8ea9c04 100644 --- a/drivers/net/wireless/ti/wl18xx/event.h +++ b/drivers/net/wireless/ti/wl18xx/event.h @@ -74,10 +74,16 @@ struct wl18xx_event_mailbox { __le16 bss_loss_bitmap; /* bitmap of stations (by HLID) which exceeded max tx retries */ - __le32 tx_retry_exceeded_bitmap; + __le16 tx_retry_exceeded_bitmap; + + /* time sync high msb*/ + __le16 time_sync_tsf_high_msb; /* bitmap of inactive stations (by HLID) */ - __le32 inactive_sta_bitmap; + __le16 inactive_sta_bitmap; + + /* time sync high lsb*/ + __le16 time_sync_tsf_high_lsb; /* rx BA win size indicated by RX_BA_WIN_SIZE_CHANGE_EVENT_ID */ u8 rx_ba_role_id; @@ -98,14 +104,15 @@ struct wl18xx_event_mailbox { u8 sc_sync_channel; u8 sc_sync_band; - /* time sync msb*/ - u16 time_sync_tsf_msb; + /* time sync low msb*/ + __le16 time_sync_tsf_low_msb; + /* radar detect */ u8 radar_channel; u8 radar_type; - /* time sync lsb*/ - u16 time_sync_tsf_lsb; + /* time sync low lsb*/ + __le16 time_sync_tsf_low_lsb; } __packed; diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c index fc02c27bc..3f451467f 100644 --- a/drivers/net/wireless/ti/wl18xx/main.c +++ b/drivers/net/wireless/ti/wl18xx/main.c @@ -1214,6 +1214,10 @@ static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status, int_fw_status->counters.tx_voice_released_blks; fw_status->counters.tx_last_rate = int_fw_status->counters.tx_last_rate; + fw_status->counters.tx_last_rate_mbps = + int_fw_status->counters.tx_last_rate_mbps; + fw_status->counters.hlid = + int_fw_status->counters.hlid; fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr); @@ -1821,9 +1825,12 @@ static const struct ieee80211_iface_limit wl18xx_iface_limits[] = { }, { .max = 1, - .types = BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_P2P_CLIENT), + .types = BIT(NL80211_IFTYPE_AP) + | BIT(NL80211_IFTYPE_P2P_GO) + | BIT(NL80211_IFTYPE_P2P_CLIENT) +#ifdef CONFIG_MAC80211_MESH + | BIT(NL80211_IFTYPE_MESH_POINT) +#endif }, { .max = 1, @@ -1836,6 +1843,12 @@ static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = { .max = 2, .types = BIT(NL80211_IFTYPE_AP), }, +#ifdef CONFIG_MAC80211_MESH + { + .max = 1, + .types = BIT(NL80211_IFTYPE_MESH_POINT), + }, +#endif { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE), diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c index ebaf66ef3..876aef10f 100644 --- a/drivers/net/wireless/ti/wl18xx/tx.c +++ b/drivers/net/wireless/ti/wl18xx/tx.c @@ -30,9 +30,9 @@ static void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif, - u8 band, struct ieee80211_tx_rate *rate) + u8 band, struct ieee80211_tx_rate *rate, u8 hlid) { - u8 fw_rate = wl->fw_status->counters.tx_last_rate; + u8 fw_rate = wl->links[hlid].fw_rate_idx; if (fw_rate > CONF_HW_RATE_INDEX_MAX) { wl1271_error("last Tx rate invalid: %d", fw_rate); @@ -79,6 +79,7 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte) struct sk_buff *skb; int id = tx_stat_byte & WL18XX_TX_STATUS_DESC_ID_MASK; bool tx_success; + struct wl1271_tx_hw_descr *tx_desc; /* check for id legality */ if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) { @@ -91,6 
+92,7 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte) skb = wl->tx_frames[id]; info = IEEE80211_SKB_CB(skb); + tx_desc = (struct wl1271_tx_hw_descr *)skb->data; if (wl12xx_is_dummy_packet(wl, skb)) { wl1271_free_tx_id(wl, id); @@ -105,7 +107,9 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte) * the info->status structures */ wl18xx_get_last_tx_rate(wl, info->control.vif, - info->band, &info->status.rates[0]); + info->band, + &info->status.rates[0], + tx_desc->hlid); info->status.rates[0].count = 1; /* no data about retries */ info->status.ack_signal = -1; @@ -144,12 +148,22 @@ void wl18xx_tx_immediate_complete(struct wl1271 *wl) struct wl18xx_fw_status_priv *status_priv = (struct wl18xx_fw_status_priv *)wl->fw_status->priv; struct wl18xx_priv *priv = wl->priv; - u8 i; + u8 i, hlid; /* nothing to do here */ if (priv->last_fw_rls_idx == status_priv->fw_release_idx) return; + /* update rates per link */ + hlid = wl->fw_status->counters.hlid; + + if (hlid < WLCORE_MAX_LINKS) { + wl->links[hlid].fw_rate_idx = + wl->fw_status->counters.tx_last_rate; + wl->links[hlid].fw_rate_mbps = + wl->fw_status->counters.tx_last_rate_mbps; + } + /* freed Tx descriptors */ wl1271_debug(DEBUG_TX, "last released desc = %d, current idx = %d", priv->last_fw_rls_idx, status_priv->fw_release_idx); diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h index 71e9e382c..5371cbdd5 100644 --- a/drivers/net/wireless/ti/wl18xx/wl18xx.h +++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h @@ -29,7 +29,7 @@ #define WL18XX_IFTYPE_VER 9 #define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE #define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE -#define WL18XX_MINOR_VER 11 +#define WL18XX_MINOR_VER 58 #define WL18XX_CMD_MAX_SIZE 740 @@ -125,7 +125,11 @@ struct wl18xx_fw_packet_counters { /* Tx rate of the last transmitted packet */ u8 tx_last_rate; - u8 padding[2]; + /* Tx rate or Tx rate estimate pre-calculated by fw in mbps units */ + u8 tx_last_rate_mbps; + + /* hlid for which the rates were reported */ + u8 hlid; } __packed; /* FW status registers */ diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h index 0d61fae88..6321ed472 100644 --- a/drivers/net/wireless/ti/wlcore/acx.h +++ b/drivers/net/wireless/ti/wlcore/acx.h @@ -105,6 +105,7 @@ enum wl12xx_role { WL1271_ROLE_DEVICE, WL1271_ROLE_P2P_CL, WL1271_ROLE_P2P_GO, + WL1271_ROLE_MESH_POINT, WL12XX_INVALID_ROLE_TYPE = 0xff }; diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c index 19b7ec7b6..f75d30444 100644 --- a/drivers/net/wireless/ti/wlcore/boot.c +++ b/drivers/net/wireless/ti/wlcore/boot.c @@ -130,7 +130,7 @@ fail: wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n" "Please use at least FW %s\n" "You can get the latest firmwares at:\n" - "git://github.com/TI-OpenLink/firmwares.git", + "git://git.ti.com/wilink8-wlan/wl18xx_fw.git", fw_ver[FW_VER_CHIP], fw_ver[FW_VER_IF_TYPE], fw_ver[FW_VER_MAJOR], fw_ver[FW_VER_SUBTYPE], fw_ver[FW_VER_MINOR], min_fw_str); diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 33153565a..7f4da727b 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -629,11 +629,14 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif) wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id); - /* trying to use hidden SSID with an old hostapd version */ - if (wlvif->ssid_len == 0 
&& !bss_conf->hidden_ssid) { - wl1271_error("got a null SSID from beacon/bss"); - ret = -EINVAL; - goto out; + /* If MESH --> ssid_len is always 0 */ + if (!ieee80211_vif_is_mesh(vif)) { + /* trying to use hidden SSID with an old hostapd version */ + if (wlvif->ssid_len == 0 && !bss_conf->hidden_ssid) { + wl1271_error("got a null SSID from beacon/bss"); + ret = -EINVAL; + goto out; + } } cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); @@ -1566,6 +1569,13 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif, cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates, wlvif->band)); + if (!cmd->supported_rates) { + wl1271_debug(DEBUG_CMD, + "peer has no supported rates yet, configuring basic rates: 0x%x", + wlvif->basic_rate_set); + cmd->supported_rates = cpu_to_le32(wlvif->basic_rate_set); + } + wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x", cmd->supported_rates, sta->uapsd_queues); diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 2509c0050..04e907660 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -221,6 +221,7 @@ static void wlcore_rc_update_work(struct work_struct *work) struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, rc_update_work); struct wl1271 *wl = wlvif->wl; + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); mutex_lock(&wl->mutex); @@ -231,8 +232,16 @@ static void wlcore_rc_update_work(struct work_struct *work) if (ret < 0) goto out; - wlcore_hw_sta_rc_update(wl, wlvif); + if (ieee80211_vif_is_mesh(vif)) { + ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap, + true, wlvif->sta.hlid); + if (ret < 0) + goto out_sleep; + } else { + wlcore_hw_sta_rc_update(wl, wlvif); + } +out_sleep: wl1271_ps_elp_sleep(wl); out: mutex_unlock(&wl->mutex); @@ -2153,10 +2162,14 @@ static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx) static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif) { + struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); + switch (wlvif->bss_type) { case BSS_TYPE_AP_BSS: if (wlvif->p2p) return WL1271_ROLE_P2P_GO; + else if (ieee80211_vif_is_mesh(vif)) + return WL1271_ROLE_MESH_POINT; else return WL1271_ROLE_AP; @@ -2198,6 +2211,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif) wlvif->p2p = 1; /* fall-through */ case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_MESH_POINT: wlvif->bss_type = BSS_TYPE_AP_BSS; break; default: @@ -2615,6 +2629,10 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, if (wl->scan.state != WL1271_SCAN_STATE_IDLE && wl->scan_wlvif == wlvif) { + struct cfg80211_scan_info info = { + .aborted = true, + }; + /* * Rearm the tx watchdog just before idling scan. 
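Two details in the wlcore mesh hunks above are worth noting. First, mesh interfaces reuse the AP role plumbing (BSS_TYPE_AP_BSS, WL1271_ROLE_MESH_POINT) but legitimately carry an empty SSID, so the hidden-SSID sanity check is gated on ieee80211_vif_is_mesh(); a later hunk skips the probe-response template the same way. Second, wl12xx_cmd_add_peer() can now run before rate negotiation has produced any rates, and rather than programming the firmware with an empty rate mask it falls back to the vif's basic rate set. That fallback, condensed from the hunk above:

    cmd->supported_rates =
            cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
                                                    wlvif->band));

    /* A peer added before rates are negotiated would otherwise get an
     * empty rate mask; the basic rates keep it reachable until
     * add_peer is re-issued with real rates (see the
     * wl12xx_update_sta_state() hunk below). */
    if (!cmd->supported_rates)
            cmd->supported_rates = cpu_to_le32(wlvif->basic_rate_set);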
This * prevents just-finished scans from triggering the watchdog @@ -2625,7 +2643,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl, memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); wl->scan_wlvif = NULL; wl->scan.req = NULL; - ieee80211_scan_completed(wl->hw, true); + ieee80211_scan_completed(wl->hw, &info); } if (wl->sched_vif == wlvif) @@ -3649,6 +3667,9 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, { struct wl1271 *wl = hw->priv; struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); + struct cfg80211_scan_info info = { + .aborted = true, + }; int ret; wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan"); @@ -3681,7 +3702,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); wl->scan_wlvif = NULL; wl->scan.req = NULL; - ieee80211_scan_completed(wl->hw, true); + ieee80211_scan_completed(wl->hw, &info); out_sleep: wl1271_ps_elp_sleep(wl); @@ -4124,9 +4145,14 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl, if (ret < 0) goto out; - ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif); - if (ret < 0) - goto out; + /* No need to set probe resp template for mesh */ + if (!ieee80211_vif_is_mesh(vif)) { + ret = wl1271_ap_set_probe_resp_tmpl(wl, + wlvif->basic_rate, + vif); + if (ret < 0) + goto out; + } ret = wlcore_set_beacon_template(wl, vif, true); if (ret < 0) @@ -4960,6 +4986,7 @@ static int wl12xx_sta_add(struct wl1271 *wl, return ret; wl_sta = (struct wl1271_station *)sta->drv_priv; + wl_sta->wl = wl; hlid = wl_sta->hlid; ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid); @@ -5091,6 +5118,11 @@ static int wl12xx_update_sta_state(struct wl1271 *wl, if (ret < 0) return ret; + /* reconfigure rates */ + ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid); + if (ret < 0) + return ret; + ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true, wl_sta->hlid); if (ret) @@ -5629,6 +5661,7 @@ static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw, /* this callback is atomic, so schedule a new work */ wlvif->rc_update_bw = sta->bandwidth; + memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap)); ieee80211_queue_work(hw, &wlvif->rc_update_work); } @@ -5667,6 +5700,17 @@ out: mutex_unlock(&wl->mutex); } +static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw, + struct ieee80211_sta *sta) +{ + struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv; + struct wl1271 *wl = hw->priv; + u8 hlid = wl_sta->hlid; + + /* return in units of Kbps */ + return (wl->links[hlid].fw_rate_mbps * 1000); +} + static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw) { struct wl1271 *wl = hw->priv; @@ -5867,6 +5911,7 @@ static const struct ieee80211_ops wl1271_ops = { .switch_vif_chanctx = wlcore_op_switch_vif_chanctx, .sta_rc_update = wlcore_op_sta_rc_update, .sta_statistics = wlcore_op_sta_statistics, + .get_expected_throughput = wlcore_op_get_expected_throughput, CFG80211_TESTMODE_CMD(wl1271_tm_cmd) }; @@ -6050,7 +6095,11 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_DEVICE) | BIT(NL80211_IFTYPE_P2P_CLIENT) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif BIT(NL80211_IFTYPE_P2P_GO); + wl->hw->wiphy->max_scan_ssids = 1; wl->hw->wiphy->max_sched_scan_ssids = 16; wl->hw->wiphy->max_match_sets = 16; diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c index c9bd294a0..b9e140451 100644 --- 
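The new get_expected_throughput callback above is what the per-link rate bookkeeping feeds: the firmware now reports which link (hlid) the last rate belongs to and a pre-computed estimate in Mbps, and both the Tx-complete path (wl18xx/tx.c above) and the Rx path (wlcore/rx.c below) cache it in wl->links[]. mac80211 expects the callback to return Kbps, hence the factor of 1000. The callback, condensed:

    static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
                                                 struct ieee80211_sta *sta)
    {
            struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
            struct wl1271 *wl = hw->priv;

            /* fw_rate_mbps is kept current by the Tx-complete and Rx
             * paths whenever the firmware reports counters for this
             * link; mac80211 wants Kbps, the firmware reports Mbps. */
            return wl->links[wl_sta->hlid].fw_rate_mbps * 1000;
    }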
a/drivers/net/wireless/ti/wlcore/rx.c +++ b/drivers/net/wireless/ti/wlcore/rx.c @@ -222,6 +222,13 @@ int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status) enum wl_rx_buf_align rx_align; int ret = 0; + /* update rates per link */ + hlid = status->counters.hlid; + + if (hlid < WLCORE_MAX_LINKS) + wl->links[hlid].fw_rate_mbps = + status->counters.tx_last_rate_mbps; + while (drv_rx_counter != fw_rx_counter) { buf_size = 0; rx_counter = drv_rx_counter; diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c index 233436432..5612f5916 100644 --- a/drivers/net/wireless/ti/wlcore/scan.c +++ b/drivers/net/wireless/ti/wlcore/scan.c @@ -36,6 +36,9 @@ void wl1271_scan_complete_work(struct work_struct *work) struct delayed_work *dwork; struct wl1271 *wl; struct wl12xx_vif *wlvif; + struct cfg80211_scan_info info = { + .aborted = false, + }; int ret; dwork = to_delayed_work(work); @@ -82,7 +85,7 @@ void wl1271_scan_complete_work(struct work_struct *work) wlcore_cmd_regdomain_config_locked(wl); - ieee80211_scan_completed(wl->hw, false); + ieee80211_scan_completed(wl->hw, &info); out: mutex_unlock(&wl->mutex); diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index c172da56b..5839acbbc 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -241,7 +241,6 @@ static int wlcore_probe_of(struct device *dev, int *irq, *irq = irq_of_parse_and_map(np, 0); if (!*irq) { dev_err(dev, "No irq in platform data\n"); - kfree(pdev_data); return -EINVAL; } diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index cea9443c2..6d2404088 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -70,16 +70,30 @@ #define WSPI_MAX_CHUNK_SIZE 4092 /* - * only support SPI for 12xx - this code should be reworked when 18xx - * support is introduced + * wl18xx driver aggregation buffer size is (13 * PAGE_SIZE) compared to + * (4 * PAGE_SIZE) for wl12xx, so use the larger buffer needed for wl18xx */ -#define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE) +#define SPI_AGGR_BUFFER_SIZE (13 * PAGE_SIZE) /* Maximum number of SPI write chunks */ #define WSPI_MAX_NUM_OF_CHUNKS \ ((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1) +struct wilink_familiy_data { + char name[8]; +}; + +const struct wilink_familiy_data *wilink_data; + +static const struct wilink_familiy_data wl18xx_data = { + .name = "wl18xx", +}; + +static const struct wilink_familiy_data wl12xx_data = { + .name = "wl12xx", +}; + struct wl12xx_spi_glue { struct device *dev; struct platform_device *core; @@ -119,6 +133,7 @@ static void wl12xx_spi_init(struct device *child) struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); struct spi_transfer t; struct spi_message m; + struct spi_device *spi = to_spi_device(glue->dev); u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL); if (!cmd) { @@ -151,6 +166,7 @@ static void wl12xx_spi_init(struct device *child) cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY; cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END; + /* * The above is the logical order; it must actually be stored * in the buffer byte-swapped. @@ -163,6 +179,28 @@ static void wl12xx_spi_init(struct device *child) spi_message_add_tail(&t, &m); spi_sync(to_spi_device(glue->dev), &m); + + /* Send extra clocks with inverted CS (high). this is required + * by the wilink family in order to successfully enter WSPI mode. 
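On the buffer sizing above: wl18xx aggregates up to 13 pages where wl12xx used 4, and since a WSPI chunk carries at most 4092 bytes, the chunk count is the buffer size divided by 4092, rounded up, i.e. 14 chunks with 4 KiB pages. A standalone check of that arithmetic (PAGE_SIZE assumed to be 4096 here):

    #include <stdio.h>

    #define PAGE_SIZE            4096u   /* assumption: 4 KiB pages */
    #define WSPI_MAX_CHUNK_SIZE  4092u
    #define SPI_AGGR_BUFFER_SIZE (13u * PAGE_SIZE)
    #define WSPI_MAX_NUM_OF_CHUNKS \
            ((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1)

    int main(void)
    {
            unsigned int len = SPI_AGGR_BUFFER_SIZE, chunks = 0;

            /* Same splitting the driver performs: full 4092-byte
             * chunks plus one trailing partial chunk. */
            while (len) {
                    unsigned int c = len > WSPI_MAX_CHUNK_SIZE ?
                                     WSPI_MAX_CHUNK_SIZE : len;
                    len -= c;
                    chunks++;
            }
            printf("%u bytes -> %u chunks (macro allows %u)\n",
                   SPI_AGGR_BUFFER_SIZE, chunks,
                   (unsigned int)WSPI_MAX_NUM_OF_CHUNKS);
            return 0;
    }

That growth in chunk count is also why the write path below stops keeping its spi_transfer array on the stack.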
+ */ + spi->mode ^= SPI_CS_HIGH; + memset(&m, 0, sizeof(m)); + spi_message_init(&m); + + cmd[0] = 0xff; + cmd[1] = 0xff; + cmd[2] = 0xff; + cmd[3] = 0xff; + __swab32s((u32 *)cmd); + + t.tx_buf = cmd; + t.len = 4; + spi_message_add_tail(&t, &m); + + spi_sync(to_spi_device(glue->dev), &m); + + /* Restore chip select configration to normal */ + spi->mode ^= SPI_CS_HIGH; kfree(cmd); } @@ -270,22 +308,25 @@ static int __must_check wl12xx_spi_raw_read(struct device *child, int addr, return 0; } -static int __must_check wl12xx_spi_raw_write(struct device *child, int addr, - void *buf, size_t len, bool fixed) +static int __wl12xx_spi_raw_write(struct device *child, int addr, + void *buf, size_t len, bool fixed) { struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); - /* SPI write buffers - 2 for each chunk */ - struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS]; + struct spi_transfer *t; struct spi_message m; u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; /* 1 command per chunk */ u32 *cmd; u32 chunk_len; int i; + /* SPI write buffers - 2 for each chunk */ + t = kzalloc(sizeof(*t) * 2 * WSPI_MAX_NUM_OF_CHUNKS, GFP_KERNEL); + if (!t) + return -ENOMEM; + WARN_ON(len > SPI_AGGR_BUFFER_SIZE); spi_message_init(&m); - memset(t, 0, sizeof(t)); cmd = &commands[0]; i = 0; @@ -318,9 +359,26 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr, spi_sync(to_spi_device(glue->dev), &m); + kfree(t); return 0; } +static int __must_check wl12xx_spi_raw_write(struct device *child, int addr, + void *buf, size_t len, bool fixed) +{ + int ret; + + /* The ELP wakeup write may fail the first time due to internal + * hardware latency. It is safer to send the wakeup command twice to + * avoid unexpected failures. + */ + if (addr == HW_ACCESS_ELP_CTRL_REG) + ret = __wl12xx_spi_raw_write(child, addr, buf, len, fixed); + ret = __wl12xx_spi_raw_write(child, addr, buf, len, fixed); + + return ret; +} + /** * wl12xx_spi_set_power - power on/off the wl12xx unit * @child: wl12xx device handle. @@ -349,17 +407,38 @@ static int wl12xx_spi_set_power(struct device *child, bool enable) return ret; } +/** + * wl12xx_spi_set_block_size + * + * This function is not needed for spi mode, but need to be present. + * Without it defined the wlcore fallback to use the wrong packet + * allignment on tx. 
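Two patterns in the raw-write hunks above: the spi_transfer array (two transfers per chunk, up to 28 entries) is now kzalloc'd per call because it grew too large for the kernel stack, and the ELP wakeup register is written twice because, per the comment in the diff, the first write may be lost to internal hardware latency. A sketch of the allocation pattern; do_chunked_transfers() is a hypothetical stand-in for the chunking loop inside __wl12xx_spi_raw_write():

    static int raw_write(struct device *child, int addr,
                         void *buf, size_t len, bool fixed)
    {
            struct spi_transfer *t;
            int ret;

            /* 2 transfers (command + payload) per chunk; with 14
             * possible chunks this no longer fits comfortably on the
             * kernel stack, so allocate per call. */
            t = kzalloc(sizeof(*t) * 2 * WSPI_MAX_NUM_OF_CHUNKS, GFP_KERNEL);
            if (!t)
                    return -ENOMEM;

            ret = do_chunked_transfers(child, addr, buf, len, fixed, t);

            kfree(t);
            return ret;
    }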
+ */ +static void wl12xx_spi_set_block_size(struct device *child, + unsigned int blksz) +{ +} + static struct wl1271_if_operations spi_ops = { .read = wl12xx_spi_raw_read, .write = wl12xx_spi_raw_write, .reset = wl12xx_spi_reset, .init = wl12xx_spi_init, .power = wl12xx_spi_set_power, - .set_block_size = NULL, + .set_block_size = wl12xx_spi_set_block_size, }; static const struct of_device_id wlcore_spi_of_match_table[] = { - { .compatible = "ti,wl1271" }, + { .compatible = "ti,wl1271", .data = &wl12xx_data}, + { .compatible = "ti,wl1273", .data = &wl12xx_data}, + { .compatible = "ti,wl1281", .data = &wl12xx_data}, + { .compatible = "ti,wl1283", .data = &wl12xx_data}, + { .compatible = "ti,wl1801", .data = &wl18xx_data}, + { .compatible = "ti,wl1805", .data = &wl18xx_data}, + { .compatible = "ti,wl1807", .data = &wl18xx_data}, + { .compatible = "ti,wl1831", .data = &wl18xx_data}, + { .compatible = "ti,wl1835", .data = &wl18xx_data}, + { .compatible = "ti,wl1837", .data = &wl18xx_data}, { } }; MODULE_DEVICE_TABLE(of, wlcore_spi_of_match_table); @@ -375,18 +454,24 @@ static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue, struct wlcore_platdev_data *pdev_data) { struct device_node *dt_node = spi->dev.of_node; - int ret; + const struct of_device_id *of_id; + + of_id = of_match_node(wlcore_spi_of_match_table, dt_node); + if (!of_id) + return -ENODEV; + + wilink_data = of_id->data; + dev_info(&spi->dev, "selected chip familiy is %s\n", + wilink_data->name); if (of_find_property(dt_node, "clock-xtal", NULL)) pdev_data->ref_clock_xtal = true; - ret = of_property_read_u32(dt_node, "ref-clock-frequency", - &pdev_data->ref_clock_freq); - if (ret) { - dev_err(glue->dev, - "can't get reference clock frequency (%d)\n", ret); - return ret; - } + /* optional clock frequency params */ + of_property_read_u32(dt_node, "ref-clock-frequency", + &pdev_data->ref_clock_freq); + of_property_read_u32(dt_node, "tcxo-clock-frequency", + &pdev_data->tcxo_clock_freq); return 0; } @@ -437,7 +522,8 @@ static int wl1271_probe(struct spi_device *spi) return ret; } - glue->core = platform_device_alloc("wl12xx", PLATFORM_DEVID_AUTO); + glue->core = platform_device_alloc(wilink_data->name, + PLATFORM_DEVID_AUTO); if (!glue->core) { dev_err(glue->dev, "can't allocate platform_device\n"); return -ENOMEM; diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h index 7d43b8057..d274b0b13 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore_i.h +++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h @@ -171,6 +171,12 @@ struct wl_fw_status { /* Tx rate of the last transmitted packet */ u8 tx_last_rate; + + /* Tx rate or Tx rate estimate pre calculated by fw in mbps */ + u8 tx_last_rate_mbps; + + /* hlid for which the rates were reported */ + u8 hlid; } counters; u32 log_start_addr; @@ -273,6 +279,12 @@ struct wl1271_link { /* bitmap of TIDs where RX BA sessions are active for this link */ u8 ba_bitmap; + /* the last fw rate index we used for this link */ + u8 fw_rate_idx; + + /* the last fw rate [Mbps] we used for this link */ + u8 fw_rate_mbps; + /* The wlvif this link belongs to. Might be null for global links */ struct wl12xx_vif *wlvif; @@ -335,6 +347,7 @@ struct wl1271_station { * Used in both AP and STA mode. 
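The probe rework above uses the standard of_device_id .data idiom: each compatible string carries a pointer to per-family data, and of_match_node() returns the entry that matched, so one SPI glue driver can serve both chip generations and register the correctly named platform device. The clock properties also become optional reads rather than hard requirements, with tcxo-clock-frequency picked up as well. The idiom in isolation, with a hypothetical my_family struct:

    struct my_family {                  /* hypothetical per-family data */
            const char *name;
    };

    static const struct my_family fam_a = { .name = "fam_a" };
    static const struct my_family fam_b = { .name = "fam_b" };

    static const struct of_device_id my_of_match[] = {
            { .compatible = "vendor,chip-a", .data = &fam_a },
            { .compatible = "vendor,chip-b", .data = &fam_b },
            { } /* sentinel */
    };

    static int my_probe(struct spi_device *spi)
    {
            const struct of_device_id *of_id;
            const struct my_family *fam;

            /* Recover the family data attached to whichever
             * compatible string matched this device node. */
            of_id = of_match_node(my_of_match, spi->dev.of_node);
            if (!of_id)
                    return -ENODEV;

            fam = of_id->data;
            dev_info(&spi->dev, "family: %s\n", fam->name);
            return 0;
    }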
*/ u64 total_freed_pkts; + struct wl1271 *wl; }; struct wl12xx_vif { @@ -472,6 +485,7 @@ struct wl12xx_vif { /* update rate conrol */ enum ieee80211_sta_rx_bandwidth rc_update_bw; + struct ieee80211_sta_ht_cap rc_ht_cap; struct work_struct rc_update_work; /* diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index 13fd734b6..82d94f83b 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -378,8 +378,7 @@ static int wl3501_esbq_exec(struct wl3501_card *this, void *sig, int sig_size) return rc; } -static int wl3501_get_mib_value(struct wl3501_card *this, u8 index, - void *bf, int size) +static int wl3501_request_mib(struct wl3501_card *this, u8 index, void *bf) { struct wl3501_get_req sig = { .sig_id = WL3501_SIG_GET_REQ, @@ -395,20 +394,32 @@ static int wl3501_get_mib_value(struct wl3501_card *this, u8 index, wl3501_set_to_wla(this, ptr, &sig, sizeof(sig)); wl3501_esbq_req(this, &ptr); this->sig_get_confirm.mib_status = 255; - spin_unlock_irqrestore(&this->lock, flags); - rc = wait_event_interruptible(this->wait, - this->sig_get_confirm.mib_status != 255); - if (!rc) - memcpy(bf, this->sig_get_confirm.mib_value, - size); - goto out; + rc = 0; } } spin_unlock_irqrestore(&this->lock, flags); -out: + return rc; } +static int wl3501_get_mib_value(struct wl3501_card *this, u8 index, + void *bf, int size) +{ + int rc; + + rc = wl3501_request_mib(this, index, bf); + if (rc) + return rc; + + rc = wait_event_interruptible(this->wait, + this->sig_get_confirm.mib_status != 255); + if (rc) + return rc; + + memcpy(bf, this->sig_get_confirm.mib_value, size); + return 0; +} + static int wl3501_pwr_mgmt(struct wl3501_card *this, int suspend) { struct wl3501_pwr_mgmt_req sig = { diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 6a31f2610..daf4c7867 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -271,6 +271,11 @@ static int netback_probe(struct xenbus_device *dev, be->dev = dev; dev_set_drvdata(&dev->dev, be); + be->state = XenbusStateInitialising; + err = xenbus_switch_state(dev, XenbusStateInitialising); + if (err) + goto fail; + sg = 1; do { @@ -383,11 +388,6 @@ static int netback_probe(struct xenbus_device *dev, be->hotplug_script = script; - err = xenbus_switch_state(dev, XenbusStateInitWait); - if (err) - goto fail; - - be->state = XenbusStateInitWait; /* This kicks hotplug scripts, so do it immediately. */ err = backend_create_xenvif(be); @@ -492,20 +492,20 @@ static inline void backend_switch_state(struct backend_info *be, /* Handle backend state transitions: * - * The backend state starts in InitWait and the following transitions are + * The backend state starts in Initialising and the following transitions are * allowed. * - * InitWait -> Connected - * - * ^ \ | - * | \ | - * | \ | - * | \ | - * | \ | - * | \ | - * | V V + * Initialising -> InitWait -> Connected + * \ + * \ ^ \ | + * \ | \ | + * \ | \ | + * \ | \ | + * \ | \ | + * \ | \ | + * V | V V * - * Closed <-> Closing + * Closed <-> Closing * * The state argument specifies the eventual state of the backend and the * function transitions to that state via the shortest path. 
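The xen-netback probe change above records and publishes XenbusStateInitialising as the first thing netback_probe() does, and drops the late switch to InitWait; from then on every state the toolstack can observe, InitWait included, flows through set_backend_state()/backend_switch_state(), so be->state and the xenstore-visible state can no longer drift apart during probe. The new probe prologue, condensed from the hunk above:

    be->dev = dev;
    dev_set_drvdata(&dev->dev, be);

    /* Record and publish the starting state before doing anything
     * else; all later transitions go through set_backend_state(),
     * shown in the hunk below. */
    be->state = XenbusStateInitialising;
    err = xenbus_switch_state(dev, XenbusStateInitialising);
    if (err)
            goto fail;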
@@ -515,6 +515,20 @@ static void set_backend_state(struct backend_info *be, { while (be->state != state) { switch (be->state) { + case XenbusStateInitialising: + switch (state) { + case XenbusStateInitWait: + case XenbusStateConnected: + case XenbusStateClosing: + backend_switch_state(be, XenbusStateInitWait); + break; + case XenbusStateClosed: + backend_switch_state(be, XenbusStateClosed); + break; + default: + BUG(); + } + break; case XenbusStateClosed: switch (state) { case XenbusStateInitWait:
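For intuition on the "shortest path" stepping that set_backend_state() implements, here is a self-contained userspace model of the transition diagram above. The enum, hop table and loop are simplified stand-ins for the driver's nested switches, not the driver code itself; the driver is never asked to reach Initialising, so that target is excluded:

    #include <stdio.h>

    enum s { INITIALISING, INITWAIT, CONNECTED, CLOSING, CLOSED };
    static const char * const names[] =
            { "Initialising", "InitWait", "Connected", "Closing", "Closed" };

    /* One hop toward the target, mirroring the switch-in-a-switch
     * shape of set_backend_state(): from Initialising everything but
     * Closed routes through InitWait, and Closed <-> Closing. */
    static enum s hop(enum s cur, enum s target)
    {
            switch (cur) {
            case INITIALISING:
                    return target == CLOSED ? CLOSED : INITWAIT;
            case INITWAIT:
                    return target == CONNECTED ? CONNECTED : CLOSING;
            case CONNECTED:
                    return CLOSING;
            case CLOSING:
                    return CLOSED;
            case CLOSED:
                    return target == CLOSING ? CLOSING : INITWAIT;
            }
            return target;
    }

    int main(void)
    {
            enum s cur = INITIALISING, target = CONNECTED;

            printf("%s", names[cur]);
            while (cur != target) {
                    cur = hop(cur, target); /* one xenbus_switch_state() each */
                    printf(" -> %s", names[cur]);
            }
            printf("\n");
            return 0;
    }

With target CONNECTED this prints "Initialising -> InitWait -> Connected", the path the new XenbusStateInitialising case makes possible.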