From b4b7ff4b08e691656c9d77c758fc355833128ac0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Fabian=20Silva=20Delgado?= Date: Wed, 20 Jan 2016 14:01:31 -0300 Subject: Linux-libre 4.4-gnu --- drivers/net/ethernet/mellanox/mlxsw/Kconfig | 11 + drivers/net/ethernet/mellanox/mlxsw/Makefile | 3 + drivers/net/ethernet/mellanox/mlxsw/cmd.h | 51 +- drivers/net/ethernet/mellanox/mlxsw/core.c | 7 +- drivers/net/ethernet/mellanox/mlxsw/core.h | 5 + drivers/net/ethernet/mellanox/mlxsw/item.h | 50 +- drivers/net/ethernet/mellanox/mlxsw/pci.c | 63 +- drivers/net/ethernet/mellanox/mlxsw/pci.h | 5 +- drivers/net/ethernet/mellanox/mlxsw/reg.h | 1263 ++++++++++++- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1949 ++++++++++++++++++++ drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 122 ++ .../net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 422 +++++ .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 903 +++++++++ drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 41 +- drivers/net/ethernet/mellanox/mlxsw/txheader.h | 1 + 15 files changed, 4745 insertions(+), 151 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum.c create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum.h create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 2941d9c5a..e36e12219 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -30,3 +30,14 @@ config MLXSW_SWITCHX2 To compile this driver as a module, choose M here: the module will be called mlxsw_switchx2. + +config MLXSW_SPECTRUM + tristate "Mellanox Technologies Spectrum support" + depends on MLXSW_CORE && NET_SWITCHDEV + default m + ---help--- + This driver supports Mellanox Technologies Spectrum Ethernet + Switch ASICs. + + To compile this driver as a module, choose M here: the + module will be called mlxsw_spectrum. diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 0a05f65ee..af015818f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -4,3 +4,6 @@ obj-$(CONFIG_MLXSW_PCI) += mlxsw_pci.o mlxsw_pci-objs := pci.o obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o mlxsw_switchx2-objs := switchx2.o +obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o +mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ + spectrum_switchdev.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h index 770db17eb..cd63b8263 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h +++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h @@ -464,6 +464,8 @@ MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8); * passed in this command must be pinned. */ +#define MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX 32 + static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core, char *in_mbox, u32 vpm_entries_count) { @@ -568,7 +570,7 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1); */ MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1); -/* cmd_mbox_config_profile_set_fid_based +/* cmd_mbox_config_profile_set_flood_mode * Capability bit. Setting a bit to 1 configures the profile * according to the mailbox contents.
*/ @@ -649,12 +651,8 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12); MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16); /* cmd_mbox_config_profile_max_flood_tables - * Maximum number of Flooding Tables. Flooding Tables are associated to - * the different packet types for the different switch partitions. - * Note that the table size depends on the fid_based mode. - * In SwitchX silicon, tables are split equally between the switch - * partitions. e.g. for 2 swids and 8 tables, the first 4 are associated - * with swid-1 and the last 4 are associated with swid-2. + * Maximum number of single-entry flooding tables. Different flooding tables + * can be associated with different packet types. */ MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4); @@ -665,15 +663,42 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4); */ MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4); -/* cmd_mbox_config_profile_fid_based - * FID Based Flood Mode - * 00 Do not use FID to offset the index into the Port Group Table/Multicast ID - * 01 Use FID to offset the index to the Port Group Table (pgi) - * 10 Use FID to offset the index to the Port Group Table (pgi) and - * the Multicast ID +/* cmd_mbox_config_profile_flood_mode + * Flooding mode to use. + * 0-2 - Backward compatible modes for SwitchX devices. + * 3 - Mixed mode, where: + * max_flood_tables indicates the number of single-entry tables. + * max_vid_flood_tables indicates the number of per-VID tables. + * max_fid_offset_flood_tables indicates the number of FID-offset tables. + * max_fid_flood_tables indicates the number of per-FID tables. */ MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2); +/* cmd_mbox_config_profile_max_fid_offset_flood_tables + * Maximum number of FID-offset flooding tables. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, + max_fid_offset_flood_tables, 0x34, 24, 4); + +/* cmd_mbox_config_profile_fid_offset_flood_table_size + * The size (number of entries) of each FID-offset flood table. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, + fid_offset_flood_table_size, 0x34, 0, 16); + +/* cmd_mbox_config_profile_max_fid_flood_tables + * Maximum number of per-FID flooding tables. + * + * Note: These flooding tables cover special FIDs only (vFIDs), starting at + * FID value 4K and higher. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, max_fid_flood_tables, 0x38, 24, 4); + +/* cmd_mbox_config_profile_fid_flood_table_size + * The size (number of entries) of each per-FID table. + */ +MLXSW_ITEM32(cmd_mbox, config_profile, fid_flood_table_size, 0x38, 0, 16); + /* cmd_mbox_config_profile_max_ib_mc * Maximum number of multicast FDB records for InfiniBand * FDB (in 512 chunks) per InfiniBand switch partition.
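The mixed flood mode (3) described above is what the new Spectrum driver selects. A minimal sketch of a config profile exercising these fields, assuming the used_flood_tables/used_flood_mode capability bits that struct mlxsw_config_profile declares alongside them in core.h; the table counts here are illustrative, not the values the driver ships with:

static const struct mlxsw_config_profile example_profile = {
	.used_flood_tables		= 1,
	.used_flood_mode		= 1,
	.flood_mode			= 3,	/* mixed mode */
	.max_flood_tables		= 2,	/* single-entry tables */
	.max_vid_flood_tables		= 0,	/* no per-VID tables */
	.max_fid_offset_flood_tables	= 3,	/* FID-offset tables */
	.fid_offset_flood_table_size	= 4096,	/* entries per table */
	.max_fid_flood_tables		= 3,	/* per-FID (vFID) tables */
	.fid_flood_table_size		= 6144,	/* entries per table */
};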
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 28c19cc1a..97f0d93ca 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -287,7 +287,7 @@ static void mlxsw_emad_pack_op_tlv(char *op_tlv, mlxsw_emad_op_tlv_status_set(op_tlv, 0); mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id); mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST); - if (MLXSW_CORE_REG_ACCESS_TYPE_QUERY == type) + if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY) mlxsw_emad_op_tlv_method_set(op_tlv, MLXSW_EMAD_OP_TLV_METHOD_QUERY); else @@ -362,7 +362,7 @@ static bool mlxsw_emad_is_resp(const struct sk_buff *skb) char *op_tlv; op_tlv = mlxsw_emad_op_tlv(skb); - return (MLXSW_EMAD_OP_TLV_RESPONSE == mlxsw_emad_op_tlv_r_get(op_tlv)); + return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE); } #define MLXSW_EMAD_TIMEOUT_MS 200 @@ -511,7 +511,6 @@ static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core) return err; mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, - MLXSW_REG_HTGT_TRAP_GROUP_EMAD, MLXSW_TRAP_ID_ETHEMAD); return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); } @@ -556,8 +555,8 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core) { char hpkt_pl[MLXSW_REG_HPKT_LEN]; + mlxsw_core->emad.use_emad = false; mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD, - MLXSW_REG_HTGT_TRAP_GROUP_EMAD, MLXSW_TRAP_ID_ETHEMAD); mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 165808471..807827350 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -54,6 +54,7 @@ MODULE_ALIAS(MLXSW_MODULE_ALIAS_PREFIX kind) #define MLXSW_DEVICE_KIND_SWITCHX2 "switchx2" +#define MLXSW_DEVICE_KIND_SPECTRUM "spectrum" struct mlxsw_core; struct mlxsw_driver; @@ -153,6 +154,10 @@ struct mlxsw_config_profile { u8 max_flood_tables; u8 max_vid_flood_tables; u8 flood_mode; + u8 max_fid_offset_flood_tables; + u16 fid_offset_flood_table_size; + u8 max_fid_flood_tables; + u16 fid_flood_table_size; u16 max_ib_mc; u16 max_pkey; u8 ar_sec; diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index 36fb1cec5..a94dbda65 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -171,15 +171,21 @@ static inline void __mlxsw_item_set64(char *buf, struct mlxsw_item *item, } static inline void __mlxsw_item_memcpy_from(char *buf, char *dst, - struct mlxsw_item *item) + struct mlxsw_item *item, + unsigned short index) { - memcpy(dst, &buf[item->offset], item->size.bytes); + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char)); + + memcpy(dst, &buf[offset], item->size.bytes); } -static inline void __mlxsw_item_memcpy_to(char *buf, char *src, - struct mlxsw_item *item) +static inline void __mlxsw_item_memcpy_to(char *buf, const char *src, + struct mlxsw_item *item, + unsigned short index) { - memcpy(&buf[item->offset], src, item->size.bytes); + unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char)); + + memcpy(&buf[offset], src, item->size.bytes); } static inline u16 @@ -373,12 +379,40 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ static inline void \ mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst) \ { \ - __mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, 
_cname, _iname));\ + __mlxsw_item_memcpy_from(buf, dst, \ + &__ITEM_NAME(_type, _cname, _iname), 0); \ +} \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \ +{ \ + __mlxsw_item_memcpy_to(buf, src, \ + &__ITEM_NAME(_type, _cname, _iname), 0); \ +} + +#define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes, \ + _step, _instepoffset) \ +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \ + .offset = _offset, \ + .step = _step, \ + .in_step_offset = _instepoffset, \ + .size = {.bytes = _sizebytes,}, \ + .name = #_type "_" #_cname "_" #_iname, \ +}; \ +static inline void \ +mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, \ + unsigned short index, \ + char *dst) \ +{ \ + __mlxsw_item_memcpy_from(buf, dst, \ + &__ITEM_NAME(_type, _cname, _iname), index); \ } \ static inline void \ -mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src) \ +mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \ + unsigned short index, \ + const char *src) \ { \ - __mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname)); \ + __mlxsw_item_memcpy_to(buf, src, \ + &__ITEM_NAME(_type, _cname, _iname), index); \ } #define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes, \ diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index cef866c37..de69e719d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -57,6 +57,7 @@ static const char mlxsw_pci_driver_name[] = "mlxsw_pci"; static const struct pci_device_id mlxsw_pci_id_table[] = { {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0}, + {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, {0, } }; @@ -67,6 +68,8 @@ static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id) switch (id->device) { case PCI_DEVICE_ID_MELLANOX_SWITCHX2: return MLXSW_DEVICE_KIND_SWITCHX2; + case PCI_DEVICE_ID_MELLANOX_SPECTRUM: + return MLXSW_DEVICE_KIND_SPECTRUM; default: BUG(); } @@ -171,8 +174,8 @@ struct mlxsw_pci { struct msix_entry msix_entry; struct mlxsw_core *core; struct { - u16 num_pages; struct mlxsw_pci_mem_item *items; + unsigned int count; } fw_area; struct { struct mlxsw_pci_mem_item out_mbox; @@ -431,8 +434,7 @@ static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe, mapaddr = pci_map_single(pdev, frag_data, frag_len, direction); if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) { - if (net_ratelimit()) - dev_err(&pdev->dev, "failed to dma map tx frag\n"); + dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n"); return -EIO; } mlxsw_pci_wqe_address_set(wqe, index, mapaddr); @@ -497,6 +499,7 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { struct mlxsw_pci_queue_elem_info *elem_info; + u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci); int i; int err; @@ -504,9 +507,9 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, q->consumer_counter = 0; /* Set CQ of same number of this RDQ with base - * above MLXSW_PCI_SDQS_MAX as the lower ones are assigned to SDQs. + * above SDQ count as the lower ones are assigned to SDQs. 
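The new MLXSW_ITEM_BUF_INDEXED macro in item.h above generates indexed memcpy accessors, just as MLXSW_ITEM32_INDEXED does for dwords. As a sketch only, a hypothetical register "foo" declaring a 6-byte MAC at in-record offset 0x02 inside 0x10-byte records would yield helpers used like this:

/* MLXSW_ITEM_BUF_INDEXED(reg, foo, rec_mac, 0x10, 6, 0x10, 0x02)
 * generates mlxsw_reg_foo_rec_mac_memcpy_to()/_from() taking a record
 * index; record N's MAC then lives at 0x10 + N * 0x10 + 0x02.
 */
static void example_set_rec_mac(char *payload, int rec_index, const u8 *mac)
{
	mlxsw_reg_foo_rec_mac_memcpy_to(payload, rec_index,
					(const char *) mac);
}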
*/ - mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT); + mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num); mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); @@ -699,8 +702,8 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, put_new_skb: memset(wqe, 0, q->elem_size); err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info); - if (err && net_ratelimit()) - dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n"); + if (err) + dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n"); /* Everything is set up, ring doorbell to pass elem to HW */ q->producer_counter++; mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); @@ -830,7 +833,8 @@ static void mlxsw_pci_eq_tasklet(unsigned long data) { struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data; struct mlxsw_pci *mlxsw_pci = q->pci; - unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)]; + u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci); + unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)]; char *eqe; u8 cqn; bool cq_handle = false; @@ -866,7 +870,7 @@ static void mlxsw_pci_eq_tasklet(unsigned long data) if (!cq_handle) return; - for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) { + for_each_set_bit(cqn, active_cqns, cq_count) { q = mlxsw_pci_cq_get(mlxsw_pci, cqn); mlxsw_pci_queue_tasklet_schedule(q); } @@ -1067,10 +1071,8 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox); eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox); - if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) || - (num_rdqs != MLXSW_PCI_RDQS_COUNT) || - (num_cqs != MLXSW_PCI_CQS_COUNT) || - (num_eqs != MLXSW_PCI_EQS_COUNT)) { + if (num_sdqs + num_rdqs > num_cqs || + num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) { dev_err(&pdev->dev, "Unsupported number of queues\n"); return -EINVAL; } @@ -1215,6 +1217,14 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox, mbox, profile->max_flood_tables); mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set( mbox, profile->max_vid_flood_tables); + mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set( + mbox, profile->max_fid_offset_flood_tables); + mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set( + mbox, profile->fid_offset_flood_table_size); + mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set( + mbox, profile->max_fid_flood_tables); + mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set( + mbox, profile->fid_flood_table_size); } if (profile->used_flood_mode) { mlxsw_cmd_mbox_config_profile_set_flood_mode_set( @@ -1272,6 +1282,7 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox, u16 num_pages) { struct mlxsw_pci_mem_item *mem_item; + int nent = 0; int i; int err; @@ -1279,7 +1290,7 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox, GFP_KERNEL); if (!mlxsw_pci->fw_area.items) return -ENOMEM; - mlxsw_pci->fw_area.num_pages = num_pages; + mlxsw_pci->fw_area.count = num_pages; mlxsw_cmd_mbox_zero(mbox); for (i = 0; i < num_pages; i++) { @@ -1293,13 +1304,22 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox, err = -ENOMEM; goto err_alloc; } - mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr); - mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */ + mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr); + 
mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */ + if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) { + err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent); + if (err) + goto err_cmd_map_fa; + nent = 0; + mlxsw_cmd_mbox_zero(mbox); + } } - err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages); - if (err) - goto err_cmd_map_fa; + if (nent) { + err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent); + if (err) + goto err_cmd_map_fa; + } return 0; @@ -1322,7 +1342,7 @@ static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci) mlxsw_cmd_unmap_fa(mlxsw_pci->core); - for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) { + for (i = 0; i < mlxsw_pci->fw_area.count; i++) { mem_item = &mlxsw_pci->fw_area.items[i]; pci_free_consistent(mlxsw_pci->pdev, mem_item->size, @@ -1642,8 +1662,9 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, CIR_OUT_PARAM_LO)); memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp)); } - } else if (!err && out_mbox) + } else if (!err && out_mbox) { memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size); + } mutex_unlock(&mlxsw_pci->cmd.lock); diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h index 1ef9664b4..142f33d97 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h @@ -40,6 +40,7 @@ #include "item.h" #define PCI_DEVICE_ID_MELLANOX_SWITCHX2 0xc738 +#define PCI_DEVICE_ID_MELLANOX_SPECTRUM 0xcb84 #define MLXSW_PCI_BAR0_SIZE (1024 * 1024) /* 1MB */ #define MLXSW_PCI_PAGE_SIZE 4096 @@ -71,9 +72,7 @@ #define MLXSW_PCI_DOORBELL(offset, type_offset, num) \ ((offset) + (type_offset) + (num) * 4) -#define MLXSW_PCI_RDQS_COUNT 24 -#define MLXSW_PCI_SDQS_COUNT 24 -#define MLXSW_PCI_CQS_COUNT (MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT) +#define MLXSW_PCI_CQS_MAX 96 #define MLXSW_PCI_EQS_COUNT 2 #define MLXSW_PCI_EQ_ASYNC_NUM 0 #define MLXSW_PCI_EQ_COMP_NUM 1 diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 096e1c121..236fb5d2a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -99,57 +99,6 @@ static const struct mlxsw_reg_info mlxsw_reg_spad = { */ MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6); -/* SMID - Switch Multicast ID - * -------------------------- - * In multi-chip configuration, each device should maintain mapping between - * Multicast ID (MID) into a list of local ports. This mapping is used in all - * the devices other than the ingress device, and is implemented as part of the - * FDB. The MID record maps from a MID, which is a unique identi- fier of the - * multicast group within the stacking domain, into a list of local ports into - * which the packet is replicated. - */ -#define MLXSW_REG_SMID_ID 0x2007 -#define MLXSW_REG_SMID_LEN 0x420 - -static const struct mlxsw_reg_info mlxsw_reg_smid = { - .id = MLXSW_REG_SMID_ID, - .len = MLXSW_REG_SMID_LEN, -}; - -/* reg_smid_swid - * Switch partition ID. - * Access: Index - */ -MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8); - -/* reg_smid_mid - * Multicast identifier - global identifier that represents the multicast group - * across all devices - * Access: Index - */ -MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16); - -/* reg_smid_port - * Local port memebership (1 bit per port). - * Access: RW - */ -MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1); - -/* reg_smid_port_mask - * Local port mask (1 bit per port). 
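The fw_area rework above stops assuming every page fits into a single MAP_FA mailbox: entries are flushed every MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX (32), so mapping e.g. 100 single-page entries now issues four MAP_FA commands (three full batches of 32 plus a final batch of 4). The same chunking pattern in isolation, with a hypothetical issue_cmd() callback standing in for mlxsw_cmd_map_fa():

static int example_map_in_batches(int total, int (*issue_cmd)(int nent))
{
	int nent = 0;
	int i, err;

	for (i = 0; i < total; i++) {
		/* ...fill mailbox slot 'nent' for entry i... */
		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
			err = issue_cmd(nent);	/* flush a full batch */
			if (err)
				return err;
			nent = 0;	/* callback re-zeroes the mailbox */
		}
	}
	return nent ? issue_cmd(nent) : 0;	/* flush the remainder */
}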
- * Access: W - */ -MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1); - -static inline void mlxsw_reg_smid_pack(char *payload, u16 mid) -{ - MLXSW_REG_ZERO(smid, payload); - mlxsw_reg_smid_swid_set(payload, 0); - mlxsw_reg_smid_mid_set(payload, mid); - mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1); - mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1); -} - /* SSPR - Switch System Port Record Register * ----------------------------------------- * Configures the system port to local port mapping. @@ -208,11 +157,359 @@ static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port) mlxsw_reg_sspr_system_port_set(payload, local_port); } +/* SFDAT - Switch Filtering Database Aging Time + * -------------------------------------------- + * Controls the Switch aging time. Aging time is able to be set per Switch + * Partition. + */ +#define MLXSW_REG_SFDAT_ID 0x2009 +#define MLXSW_REG_SFDAT_LEN 0x8 + +static const struct mlxsw_reg_info mlxsw_reg_sfdat = { + .id = MLXSW_REG_SFDAT_ID, + .len = MLXSW_REG_SFDAT_LEN, +}; + +/* reg_sfdat_swid + * Switch partition ID. + * Access: Index + */ +MLXSW_ITEM32(reg, sfdat, swid, 0x00, 24, 8); + +/* reg_sfdat_age_time + * Aging time in seconds + * Min - 10 seconds + * Max - 1,000,000 seconds + * Default is 300 seconds. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdat, age_time, 0x04, 0, 20); + +static inline void mlxsw_reg_sfdat_pack(char *payload, u32 age_time) +{ + MLXSW_REG_ZERO(sfdat, payload); + mlxsw_reg_sfdat_swid_set(payload, 0); + mlxsw_reg_sfdat_age_time_set(payload, age_time); +} + +/* SFD - Switch Filtering Database + * ------------------------------- + * The following register defines the access to the filtering database. + * The register supports querying, adding, removing and modifying the database. + * The access is optimized for bulk updates in which case more than one + * FDB record is present in the same command. + */ +#define MLXSW_REG_SFD_ID 0x200A +#define MLXSW_REG_SFD_BASE_LEN 0x10 /* base length, without records */ +#define MLXSW_REG_SFD_REC_LEN 0x10 /* record length */ +#define MLXSW_REG_SFD_REC_MAX_COUNT 64 +#define MLXSW_REG_SFD_LEN (MLXSW_REG_SFD_BASE_LEN + \ + MLXSW_REG_SFD_REC_LEN * MLXSW_REG_SFD_REC_MAX_COUNT) + +static const struct mlxsw_reg_info mlxsw_reg_sfd = { + .id = MLXSW_REG_SFD_ID, + .len = MLXSW_REG_SFD_LEN, +}; + +/* reg_sfd_swid + * Switch partition ID for queries. Reserved on Write. + * Access: Index + */ +MLXSW_ITEM32(reg, sfd, swid, 0x00, 24, 8); + +enum mlxsw_reg_sfd_op { + /* Dump entire FDB a (process according to record_locator) */ + MLXSW_REG_SFD_OP_QUERY_DUMP = 0, + /* Query records by {MAC, VID/FID} value */ + MLXSW_REG_SFD_OP_QUERY_QUERY = 1, + /* Query and clear activity. Query records by {MAC, VID/FID} value */ + MLXSW_REG_SFD_OP_QUERY_QUERY_AND_CLEAR_ACTIVITY = 2, + /* Test. Response indicates if each of the records could be + * added to the FDB. + */ + MLXSW_REG_SFD_OP_WRITE_TEST = 0, + /* Add/modify. Aged-out records cannot be added. This command removes + * the learning notification of the {MAC, VID/FID}. Response includes + * the entries that were added to the FDB. + */ + MLXSW_REG_SFD_OP_WRITE_EDIT = 1, + /* Remove record by {MAC, VID/FID}. This command also removes + * the learning notification and aged-out notifications + * of the {MAC, VID/FID}. The response provides current (pre-removal) + * entries as non-aged-out. + */ + MLXSW_REG_SFD_OP_WRITE_REMOVE = 2, + /* Remove learned notification by {MAC, VID/FID}. 
The response provides + * the removed learning notification. + */ + MLXSW_REG_SFD_OP_WRITE_REMOVE_NOTIFICATION = 2, +}; + +/* reg_sfd_op + * Operation. + * Access: OP + */ +MLXSW_ITEM32(reg, sfd, op, 0x04, 30, 2); + +/* reg_sfd_record_locator + * Used for querying the FDB. Use record_locator=0 to initiate the + * query. When a record is returned, a new record_locator is + * returned to be used in the subsequent query. + * Reserved for database update. + * Access: Index + */ +MLXSW_ITEM32(reg, sfd, record_locator, 0x04, 0, 30); + +/* reg_sfd_num_rec + * Request: Number of records to read/add/modify/remove + * Response: Number of records read/added/replaced/removed + * See above description for more details. + * Ranges 0..64 + * Access: RW + */ +MLXSW_ITEM32(reg, sfd, num_rec, 0x08, 0, 8); + +static inline void mlxsw_reg_sfd_pack(char *payload, enum mlxsw_reg_sfd_op op, + u32 record_locator) +{ + MLXSW_REG_ZERO(sfd, payload); + mlxsw_reg_sfd_op_set(payload, op); + mlxsw_reg_sfd_record_locator_set(payload, record_locator); +} + +/* reg_sfd_rec_swid + * Switch partition ID. + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, sfd, rec_swid, MLXSW_REG_SFD_BASE_LEN, 24, 8, + MLXSW_REG_SFD_REC_LEN, 0x00, false); + +enum mlxsw_reg_sfd_rec_type { + MLXSW_REG_SFD_REC_TYPE_UNICAST = 0x0, +}; + +/* reg_sfd_rec_type + * FDB record type. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, rec_type, MLXSW_REG_SFD_BASE_LEN, 20, 4, + MLXSW_REG_SFD_REC_LEN, 0x00, false); + +enum mlxsw_reg_sfd_rec_policy { + /* Replacement disabled, aging disabled. */ + MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY = 0, + /* (mlag remote): Replacement enabled, aging disabled, + * learning notification enabled on this port. + */ + MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG = 1, + /* (ingress device): Replacement enabled, aging enabled. */ + MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS = 3, +}; + +/* reg_sfd_rec_policy + * Policy. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, rec_policy, MLXSW_REG_SFD_BASE_LEN, 18, 2, + MLXSW_REG_SFD_REC_LEN, 0x00, false); + +/* reg_sfd_rec_a + * Activity. Set for new static entries. Set for static entries if a frame SMAC + * lookup hits on the entry. + * To clear the a bit, use "query and clear activity" op. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfd, rec_a, MLXSW_REG_SFD_BASE_LEN, 16, 1, + MLXSW_REG_SFD_REC_LEN, 0x00, false); + +/* reg_sfd_rec_mac + * MAC address. + * Access: Index + */ +MLXSW_ITEM_BUF_INDEXED(reg, sfd, rec_mac, MLXSW_REG_SFD_BASE_LEN, 6, + MLXSW_REG_SFD_REC_LEN, 0x02); + +enum mlxsw_reg_sfd_rec_action { + /* forward */ + MLXSW_REG_SFD_REC_ACTION_NOP = 0, + /* forward and trap, trap_id is FDB_TRAP */ + MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1, + /* trap and do not forward, trap_id is FDB_TRAP */ + MLXSW_REG_SFD_REC_ACTION_TRAP = 3, + MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15, +}; + +/* reg_sfd_rec_action + * Action to apply on the packet. + * Note: Dynamic entries can only be configured with NOP action. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, rec_action, MLXSW_REG_SFD_BASE_LEN, 28, 4, + MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +/* reg_sfd_uc_sub_port + * VEPA channel on local port. + * Valid only if local port is a non-stacking port. Must be 0 if multichannel + * VEPA is not enabled. 
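Taken together with mlxsw_reg_sfd_uc_pack() defined further down in this register block, programming one static unicast FDB record reduces to a sketch like the following; mlxsw_reg_write() is the existing core helper, and the payload is heap-allocated because MLXSW_REG_SFD_LEN is too large for the stack:

static int example_fdb_add(struct mlxsw_core *core, const char *mac,
			   u16 fid, u8 local_port)
{
	char *sfd_pl;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;
	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_WRITE_EDIT, 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY,
			      mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      local_port);
	err = mlxsw_reg_write(core, MLXSW_REG(sfd), sfd_pl);
	kfree(sfd_pl);
	return err;
}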
+ * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_sub_port, MLXSW_REG_SFD_BASE_LEN, 16, 8, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_uc_fid_vid + * Filtering ID or VLAN ID + * For SwitchX and SwitchX-2: + * - Dynamic entries (policy 2,3) use FID + * - Static entries (policy 0) use VID + * - When independent learning is configured, VID=FID + * For Spectrum: use FID for both Dynamic and Static entries. + * VID should not be used. + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_fid_vid, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x08, false); + +/* reg_sfd_uc_system_port + * Unique port identifier for the final destination of the packet. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, sfd, uc_system_port, MLXSW_REG_SFD_BASE_LEN, 0, 16, + MLXSW_REG_SFD_REC_LEN, 0x0C, false); + +static inline void mlxsw_reg_sfd_uc_pack(char *payload, int rec_index, + enum mlxsw_reg_sfd_rec_policy policy, + const char *mac, u16 vid, + enum mlxsw_reg_sfd_rec_action action, + u8 local_port) +{ + u8 num_rec = mlxsw_reg_sfd_num_rec_get(payload); + + if (rec_index >= num_rec) + mlxsw_reg_sfd_num_rec_set(payload, rec_index + 1); + mlxsw_reg_sfd_rec_swid_set(payload, rec_index, 0); + mlxsw_reg_sfd_rec_type_set(payload, rec_index, + MLXSW_REG_SFD_REC_TYPE_UNICAST); + mlxsw_reg_sfd_rec_policy_set(payload, rec_index, policy); + mlxsw_reg_sfd_rec_mac_memcpy_to(payload, rec_index, mac); + mlxsw_reg_sfd_uc_sub_port_set(payload, rec_index, 0); + mlxsw_reg_sfd_uc_fid_vid_set(payload, rec_index, vid); + mlxsw_reg_sfd_rec_action_set(payload, rec_index, action); + mlxsw_reg_sfd_uc_system_port_set(payload, rec_index, local_port); +} + +static inline void mlxsw_reg_sfd_uc_unpack(char *payload, int rec_index, + char *mac, u16 *p_vid, + u8 *p_local_port) +{ + mlxsw_reg_sfd_rec_mac_memcpy_from(payload, rec_index, mac); + *p_vid = mlxsw_reg_sfd_uc_fid_vid_get(payload, rec_index); + *p_local_port = mlxsw_reg_sfd_uc_system_port_get(payload, rec_index); +} + +/* SFN - Switch FDB Notification Register + * ------------------------------------------- + * The switch provides notifications on newly learned FDB entries and + * aged out entries. The notifications can be polled by software. + */ +#define MLXSW_REG_SFN_ID 0x200B +#define MLXSW_REG_SFN_BASE_LEN 0x10 /* base length, without records */ +#define MLXSW_REG_SFN_REC_LEN 0x10 /* record length */ +#define MLXSW_REG_SFN_REC_MAX_COUNT 64 +#define MLXSW_REG_SFN_LEN (MLXSW_REG_SFN_BASE_LEN + \ + MLXSW_REG_SFN_REC_LEN * MLXSW_REG_SFN_REC_MAX_COUNT) + +static const struct mlxsw_reg_info mlxsw_reg_sfn = { + .id = MLXSW_REG_SFN_ID, + .len = MLXSW_REG_SFN_LEN, +}; + +/* reg_sfn_swid + * Switch partition ID. + * Access: Index + */ +MLXSW_ITEM32(reg, sfn, swid, 0x00, 24, 8); + +/* reg_sfn_num_rec + * Request: Number of learned notifications and aged-out notification + * records requested. + * Response: Number of notification records returned (must be smaller + * than or equal to the value requested) + * Ranges 0..64 + * Access: OP + */ +MLXSW_ITEM32(reg, sfn, num_rec, 0x04, 0, 8); + +static inline void mlxsw_reg_sfn_pack(char *payload) +{ + MLXSW_REG_ZERO(sfn, payload); + mlxsw_reg_sfn_swid_set(payload, 0); + mlxsw_reg_sfn_num_rec_set(payload, MLXSW_REG_SFN_REC_MAX_COUNT); +} + +/* reg_sfn_rec_swid + * Switch partition ID. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfn, rec_swid, MLXSW_REG_SFN_BASE_LEN, 24, 8, + MLXSW_REG_SFN_REC_LEN, 0x00, false); + +enum mlxsw_reg_sfn_rec_type { + /* MAC addresses learned on a regular port. 
*/ + MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC = 0x5, + /* Aged-out MAC address on a regular port */ + MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7, +}; + +/* reg_sfn_rec_type + * Notification record type. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfn, rec_type, MLXSW_REG_SFN_BASE_LEN, 20, 4, + MLXSW_REG_SFN_REC_LEN, 0x00, false); + +/* reg_sfn_rec_mac + * MAC address. + * Access: RO + */ +MLXSW_ITEM_BUF_INDEXED(reg, sfn, rec_mac, MLXSW_REG_SFN_BASE_LEN, 6, + MLXSW_REG_SFN_REC_LEN, 0x02); + +/* reg_sfn_mac_sub_port + * VEPA channel on the local port. + * 0 if multichannel VEPA is not enabled. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfn, mac_sub_port, MLXSW_REG_SFN_BASE_LEN, 16, 8, + MLXSW_REG_SFN_REC_LEN, 0x08, false); + +/* reg_sfn_mac_fid + * Filtering identifier. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfn, mac_fid, MLXSW_REG_SFN_BASE_LEN, 0, 16, + MLXSW_REG_SFN_REC_LEN, 0x08, false); + +/* reg_sfn_mac_system_port + * Unique port identifier for the final destination of the packet. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfn, mac_system_port, MLXSW_REG_SFN_BASE_LEN, 0, 16, + MLXSW_REG_SFN_REC_LEN, 0x0C, false); + +static inline void mlxsw_reg_sfn_mac_unpack(char *payload, int rec_index, + char *mac, u16 *p_vid, + u8 *p_local_port) +{ + mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac); + *p_vid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index); + *p_local_port = mlxsw_reg_sfn_mac_system_port_get(payload, rec_index); +} + /* SPMS - Switch Port MSTP/RSTP State Register * ------------------------------------------- * Configures the spanning tree state of a physical port. */ -#define MLXSW_REG_SPMS_ID 0x200d +#define MLXSW_REG_SPMS_ID 0x200D #define MLXSW_REG_SPMS_LEN 0x404 static const struct mlxsw_reg_info mlxsw_reg_spms = { @@ -243,20 +540,166 @@ enum mlxsw_reg_spms_state { */ MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2); -static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid, - enum mlxsw_reg_spms_state state) +static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port) { MLXSW_REG_ZERO(spms, payload); mlxsw_reg_spms_local_port_set(payload, local_port); +} + +static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid, + enum mlxsw_reg_spms_state state) +{ mlxsw_reg_spms_state_set(payload, vid, state); } +/* SPVID - Switch Port VID + * ----------------------- + * The switch port VID configures the default VID for a port. + */ +#define MLXSW_REG_SPVID_ID 0x200E +#define MLXSW_REG_SPVID_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_spvid = { + .id = MLXSW_REG_SPVID_ID, + .len = MLXSW_REG_SPVID_LEN, +}; + +/* reg_spvid_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, spvid, local_port, 0x00, 16, 8); + +/* reg_spvid_sub_port + * Virtual port within the physical port. + * Should be set to 0 when virtual ports are not enabled on the port. + * Access: Index + */ +MLXSW_ITEM32(reg, spvid, sub_port, 0x00, 8, 8); + +/* reg_spvid_pvid + * Port default VID + * Access: RW + */ +MLXSW_ITEM32(reg, spvid, pvid, 0x04, 0, 12); + +static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid) +{ + MLXSW_REG_ZERO(spvid, payload); + mlxsw_reg_spvid_local_port_set(payload, local_port); + mlxsw_reg_spvid_pvid_set(payload, pvid); +} + +/* SPVM - Switch Port VLAN Membership + * ---------------------------------- + * The Switch Port VLAN Membership register configures the VLAN membership + * of a port in a VLAN denoted by VID. VLAN membership is managed per + * virtual port. 
The register can be used to add and remove VID(s) from a port. + */ +#define MLXSW_REG_SPVM_ID 0x200F +#define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */ +#define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */ +#define MLXSW_REG_SPVM_REC_MAX_COUNT 256 +#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \ + MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT) + +static const struct mlxsw_reg_info mlxsw_reg_spvm = { + .id = MLXSW_REG_SPVM_ID, + .len = MLXSW_REG_SPVM_LEN, +}; + +/* reg_spvm_pt + * Priority tagged. If this bit is set, packets forwarded to the port with + * untagged VLAN membership (u bit is set) will be tagged with priority tag + * (VID=0) + * Access: RW + */ +MLXSW_ITEM32(reg, spvm, pt, 0x00, 31, 1); + +/* reg_spvm_pte + * Priority Tagged Update Enable. On Write operations, if this bit is cleared, + * the pt bit will NOT be updated. To update the pt bit, pte must be set. + * Access: WO + */ +MLXSW_ITEM32(reg, spvm, pte, 0x00, 30, 1); + +/* reg_spvm_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, spvm, local_port, 0x00, 16, 8); + +/* reg_spvm_sub_port + * Virtual port within the physical port. + * Should be set to 0 when virtual ports are not enabled on the port. + * Access: Index + */ +MLXSW_ITEM32(reg, spvm, sub_port, 0x00, 8, 8); + +/* reg_spvm_num_rec + * Number of records to update. Each record contains: i, e, u, vid. + * Access: OP + */ +MLXSW_ITEM32(reg, spvm, num_rec, 0x00, 0, 8); + +/* reg_spvm_rec_i + * Ingress membership in VLAN ID. + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, spvm, rec_i, + MLXSW_REG_SPVM_BASE_LEN, 14, 1, + MLXSW_REG_SPVM_REC_LEN, 0, false); + +/* reg_spvm_rec_e + * Egress membership in VLAN ID. + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, spvm, rec_e, + MLXSW_REG_SPVM_BASE_LEN, 13, 1, + MLXSW_REG_SPVM_REC_LEN, 0, false); + +/* reg_spvm_rec_u + * Untagged - port is an untagged member - egress transmission uses untagged + * frames on VID + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, spvm, rec_u, + MLXSW_REG_SPVM_BASE_LEN, 12, 1, + MLXSW_REG_SPVM_REC_LEN, 0, false); + +/* reg_spvm_rec_vid + * Egress membership in VLAN ID. + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, spvm, rec_vid, + MLXSW_REG_SPVM_BASE_LEN, 0, 12, + MLXSW_REG_SPVM_REC_LEN, 0, false); + +static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port, + u16 vid_begin, u16 vid_end, + bool is_member, bool untagged) +{ + int size = vid_end - vid_begin + 1; + int i; + + MLXSW_REG_ZERO(spvm, payload); + mlxsw_reg_spvm_local_port_set(payload, local_port); + mlxsw_reg_spvm_num_rec_set(payload, size); + + for (i = 0; i < size; i++) { + mlxsw_reg_spvm_rec_i_set(payload, i, is_member); + mlxsw_reg_spvm_rec_e_set(payload, i, is_member); + mlxsw_reg_spvm_rec_u_set(payload, i, untagged); + mlxsw_reg_spvm_rec_vid_set(payload, i, vid_begin + i); + } +} + /* SFGC - Switch Flooding Group Configuration * ------------------------------------------ * The following register controls the association of flooding tables and MIDs * to packet types used for flooding. 
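Since mlxsw_reg_spvm_pack() above emits one record per VID, a port can join a whole VLAN range with a single register write; a sketch, again heap-allocating the payload given the size of MLXSW_REG_SPVM_LEN:

static int example_vlans_add(struct mlxsw_core *core, u8 local_port)
{
	char *spvm_pl;
	int err;

	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;
	/* Tagged member of VIDs 100..199, for both ingress and egress. */
	mlxsw_reg_spvm_pack(spvm_pl, local_port, 100, 199, true, false);
	err = mlxsw_reg_write(core, MLXSW_REG(spvm), spvm_pl);
	kfree(spvm_pl);
	return err;
}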
*/ -#define MLXSW_REG_SFGC_ID 0x2011 +#define MLXSW_REG_SFGC_ID 0x2011 #define MLXSW_REG_SFGC_LEN 0x10 static const struct mlxsw_reg_info mlxsw_reg_sfgc = { @@ -265,13 +708,15 @@ static const struct mlxsw_reg_info mlxsw_reg_sfgc = { }; enum mlxsw_reg_sfgc_type { - MLXSW_REG_SFGC_TYPE_BROADCAST = 0, - MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1, - MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2, - MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3, - MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5, - MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6, - MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7, + MLXSW_REG_SFGC_TYPE_BROADCAST, + MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST, + MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4, + MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6, + MLXSW_REG_SFGC_TYPE_RESERVED, + MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP, + MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL, + MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST, + MLXSW_REG_SFGC_TYPE_MAX, }; /* reg_sfgc_type @@ -408,7 +853,7 @@ static inline void mlxsw_reg_sftr_pack(char *payload, unsigned int flood_table, unsigned int index, enum mlxsw_flood_table_type table_type, - unsigned int range) + unsigned int range, u8 port, bool set) { MLXSW_REG_ZERO(sftr, payload); mlxsw_reg_sftr_swid_set(payload, 0); @@ -416,8 +861,8 @@ static inline void mlxsw_reg_sftr_pack(char *payload, mlxsw_reg_sftr_index_set(payload, index); mlxsw_reg_sftr_table_type_set(payload, table_type); mlxsw_reg_sftr_range_set(payload, range); - mlxsw_reg_sftr_port_set(payload, MLXSW_PORT_CPU_PORT, 1); - mlxsw_reg_sftr_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1); + mlxsw_reg_sftr_port_set(payload, port, set); + mlxsw_reg_sftr_port_mask_set(payload, port, 1); } /* SPMLR - Switch Port MAC Learning Register @@ -473,6 +918,285 @@ static inline void mlxsw_reg_spmlr_pack(char *payload, u8 local_port, mlxsw_reg_spmlr_learn_mode_set(payload, mode); } +/* SVFA - Switch VID to FID Allocation Register + * -------------------------------------------- + * Controls the VID to FID mapping and {Port, VID} to FID mapping for + * virtualized ports. + */ +#define MLXSW_REG_SVFA_ID 0x201C +#define MLXSW_REG_SVFA_LEN 0x10 + +static const struct mlxsw_reg_info mlxsw_reg_svfa = { + .id = MLXSW_REG_SVFA_ID, + .len = MLXSW_REG_SVFA_LEN, +}; + +/* reg_svfa_swid + * Switch partition ID. + * Access: Index + */ +MLXSW_ITEM32(reg, svfa, swid, 0x00, 24, 8); + +/* reg_svfa_local_port + * Local port number. + * Access: Index + * + * Note: Reserved for 802.1Q FIDs. + */ +MLXSW_ITEM32(reg, svfa, local_port, 0x00, 16, 8); + +enum mlxsw_reg_svfa_mt { + MLXSW_REG_SVFA_MT_VID_TO_FID, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, +}; + +/* reg_svfa_mapping_table + * Mapping table: + * 0 - VID to FID + * 1 - {Port, VID} to FID + * Access: Index + * + * Note: Reserved for SwitchX-2. + */ +MLXSW_ITEM32(reg, svfa, mapping_table, 0x00, 8, 3); + +/* reg_svfa_v + * Valid. + * Valid if set. + * Access: RW + * + * Note: Reserved for SwitchX-2. + */ +MLXSW_ITEM32(reg, svfa, v, 0x00, 0, 1); + +/* reg_svfa_fid + * Filtering ID. + * Access: RW + */ +MLXSW_ITEM32(reg, svfa, fid, 0x04, 16, 16); + +/* reg_svfa_vid + * VLAN ID. + * Access: Index + */ +MLXSW_ITEM32(reg, svfa, vid, 0x04, 0, 12); + +/* reg_svfa_counter_set_type + * Counter set type for flow counters. + * Access: RW + * + * Note: Reserved for SwitchX-2. + */ +MLXSW_ITEM32(reg, svfa, counter_set_type, 0x08, 24, 8); + +/* reg_svfa_counter_index + * Counter index for flow counters. + * Access: RW + * + * Note: Reserved for SwitchX-2. 
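Renumbering mlxsw_reg_sfgc_type above into a dense enum with an explicit RESERVED slot and a MAX terminator lets callers iterate the packet types instead of enumerating them by hand; roughly as follows, where example_sfgc_write() is a hypothetical per-type helper:

static int example_flood_tables_init(struct mlxsw_core *core)
{
	int type, err;

	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
			continue;
		err = example_sfgc_write(core, type);
		if (err)
			return err;
	}
	return 0;
}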
+ */ +MLXSW_ITEM32(reg, svfa, counter_index, 0x08, 0, 24); + +static inline void mlxsw_reg_svfa_pack(char *payload, u8 local_port, + enum mlxsw_reg_svfa_mt mt, bool valid, + u16 fid, u16 vid) +{ + MLXSW_REG_ZERO(svfa, payload); + local_port = mt == MLXSW_REG_SVFA_MT_VID_TO_FID ? 0 : local_port; + mlxsw_reg_svfa_swid_set(payload, 0); + mlxsw_reg_svfa_local_port_set(payload, local_port); + mlxsw_reg_svfa_mapping_table_set(payload, mt); + mlxsw_reg_svfa_v_set(payload, valid); + mlxsw_reg_svfa_fid_set(payload, fid); + mlxsw_reg_svfa_vid_set(payload, vid); +} + +/* SVPE - Switch Virtual-Port Enabling Register + * -------------------------------------------- + * Enables port virtualization. + */ +#define MLXSW_REG_SVPE_ID 0x201E +#define MLXSW_REG_SVPE_LEN 0x4 + +static const struct mlxsw_reg_info mlxsw_reg_svpe = { + .id = MLXSW_REG_SVPE_ID, + .len = MLXSW_REG_SVPE_LEN, +}; + +/* reg_svpe_local_port + * Local port number + * Access: Index + * + * Note: CPU port is not supported (uses VLAN mode only). + */ +MLXSW_ITEM32(reg, svpe, local_port, 0x00, 16, 8); + +/* reg_svpe_vp_en + * Virtual port enable. + * 0 - Disable, VLAN mode (VID to FID). + * 1 - Enable, Virtual port mode ({Port, VID} to FID). + * Access: RW + */ +MLXSW_ITEM32(reg, svpe, vp_en, 0x00, 8, 1); + +static inline void mlxsw_reg_svpe_pack(char *payload, u8 local_port, + bool enable) +{ + MLXSW_REG_ZERO(svpe, payload); + mlxsw_reg_svpe_local_port_set(payload, local_port); + mlxsw_reg_svpe_vp_en_set(payload, enable); +} + +/* SFMR - Switch FID Management Register + * ------------------------------------- + * Creates and configures FIDs. + */ +#define MLXSW_REG_SFMR_ID 0x201F +#define MLXSW_REG_SFMR_LEN 0x18 + +static const struct mlxsw_reg_info mlxsw_reg_sfmr = { + .id = MLXSW_REG_SFMR_ID, + .len = MLXSW_REG_SFMR_LEN, +}; + +enum mlxsw_reg_sfmr_op { + MLXSW_REG_SFMR_OP_CREATE_FID, + MLXSW_REG_SFMR_OP_DESTROY_FID, +}; + +/* reg_sfmr_op + * Operation. + * 0 - Create or edit FID. + * 1 - Destroy FID. + * Access: WO + */ +MLXSW_ITEM32(reg, sfmr, op, 0x00, 24, 4); + +/* reg_sfmr_fid + * Filtering ID. + * Access: Index + */ +MLXSW_ITEM32(reg, sfmr, fid, 0x00, 0, 16); + +/* reg_sfmr_fid_offset + * FID offset. + * Used to point into the flooding table selected by SFGC register if + * the table is of type FID-Offset. Otherwise, this field is reserved. + * Access: RW + */ +MLXSW_ITEM32(reg, sfmr, fid_offset, 0x08, 0, 16); + +/* reg_sfmr_vtfp + * Valid Tunnel Flood Pointer. + * If not set, then nve_tunnel_flood_ptr is reserved and considered NULL. + * Access: RW + * + * Note: Reserved for 802.1Q FIDs. + */ +MLXSW_ITEM32(reg, sfmr, vtfp, 0x0C, 31, 1); + +/* reg_sfmr_nve_tunnel_flood_ptr + * Underlay Flooding and BC Pointer. + * Used as a pointer to the first entry of the group based link lists of + * flooding or BC entries (for NVE tunnels). + * Access: RW + */ +MLXSW_ITEM32(reg, sfmr, nve_tunnel_flood_ptr, 0x0C, 0, 24); + +/* reg_sfmr_vv + * VNI Valid. + * If not set, then vni is reserved. + * Access: RW + * + * Note: Reserved for 802.1Q FIDs. + */ +MLXSW_ITEM32(reg, sfmr, vv, 0x10, 31, 1); + +/* reg_sfmr_vni + * Virtual Network Identifier. + * Access: RW + * + * Note: A given VNI can only be assigned to one FID. 
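Enabling virtual-port mode and installing a {Port, VID} to FID mapping chains the two registers above; a sketch under the same mlxsw_reg_write() assumption:

static int example_port_vid_to_fid(struct mlxsw_core *core, u8 local_port,
				   u16 vid, u16 fid)
{
	char svpe_pl[MLXSW_REG_SVPE_LEN];
	char svfa_pl[MLXSW_REG_SVFA_LEN];
	int err;

	/* Switch the port from VLAN mode to virtual-port mode. */
	mlxsw_reg_svpe_pack(svpe_pl, local_port, true);
	err = mlxsw_reg_write(core, MLXSW_REG(svpe), svpe_pl);
	if (err)
		return err;
	/* Map {local_port, vid} to fid. */
	mlxsw_reg_svfa_pack(svfa_pl, local_port,
			    MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, true,
			    fid, vid);
	return mlxsw_reg_write(core, MLXSW_REG(svfa), svfa_pl);
}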
+ */ +MLXSW_ITEM32(reg, sfmr, vni, 0x10, 0, 24); + +static inline void mlxsw_reg_sfmr_pack(char *payload, + enum mlxsw_reg_sfmr_op op, u16 fid, + u16 fid_offset) +{ + MLXSW_REG_ZERO(sfmr, payload); + mlxsw_reg_sfmr_op_set(payload, op); + mlxsw_reg_sfmr_fid_set(payload, fid); + mlxsw_reg_sfmr_fid_offset_set(payload, fid_offset); + mlxsw_reg_sfmr_vtfp_set(payload, false); + mlxsw_reg_sfmr_vv_set(payload, false); +} + +/* SPVMLR - Switch Port VLAN MAC Learning Register + * ----------------------------------------------- + * Controls the switch MAC learning policy per {Port, VID}. + */ +#define MLXSW_REG_SPVMLR_ID 0x2020 +#define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */ +#define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */ +#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256 +#define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \ + MLXSW_REG_SPVMLR_REC_LEN * \ + MLXSW_REG_SPVMLR_REC_MAX_COUNT) + +static const struct mlxsw_reg_info mlxsw_reg_spvmlr = { + .id = MLXSW_REG_SPVMLR_ID, + .len = MLXSW_REG_SPVMLR_LEN, +}; + +/* reg_spvmlr_local_port + * Local ingress port. + * Access: Index + * + * Note: CPU port is not supported. + */ +MLXSW_ITEM32(reg, spvmlr, local_port, 0x00, 16, 8); + +/* reg_spvmlr_num_rec + * Number of records to update. + * Access: OP + */ +MLXSW_ITEM32(reg, spvmlr, num_rec, 0x00, 0, 8); + +/* reg_spvmlr_rec_learn_enable + * 0 - Disable learning for {Port, VID}. + * 1 - Enable learning for {Port, VID}. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_learn_enable, MLXSW_REG_SPVMLR_BASE_LEN, + 31, 1, MLXSW_REG_SPVMLR_REC_LEN, 0x00, false); + +/* reg_spvmlr_rec_vid + * VLAN ID to be added/removed from port or for querying. + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, spvmlr, rec_vid, MLXSW_REG_SPVMLR_BASE_LEN, 0, 12, + MLXSW_REG_SPVMLR_REC_LEN, 0x00, false); + +static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port, + u16 vid_begin, u16 vid_end, + bool learn_enable) +{ + int num_rec = vid_end - vid_begin + 1; + int i; + + WARN_ON(num_rec < 1 || num_rec > MLXSW_REG_SPVMLR_REC_MAX_COUNT); + + MLXSW_REG_ZERO(spvmlr, payload); + mlxsw_reg_spvmlr_local_port_set(payload, local_port); + mlxsw_reg_spvmlr_num_rec_set(payload, num_rec); + + for (i = 0; i < num_rec; i++) { + mlxsw_reg_spvmlr_rec_learn_enable_set(payload, i, learn_enable); + mlxsw_reg_spvmlr_rec_vid_set(payload, i, vid_begin + i); + } +} + /* PMLP - Ports Module to Local Port Register * ------------------------------------------ * Configures the assignment of modules to local ports. @@ -1008,12 +1732,88 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port) mlxsw_reg_ppcnt_prio_tc_set(payload, 0); } +/* PBMC - Port Buffer Management Control Register + * ---------------------------------------------- + * The PBMC register configures and retrieves the port packet buffer + * allocation for different Prios, and the Pause threshold management. + */ +#define MLXSW_REG_PBMC_ID 0x500C +#define MLXSW_REG_PBMC_LEN 0x68 + +static const struct mlxsw_reg_info mlxsw_reg_pbmc = { + .id = MLXSW_REG_PBMC_ID, + .len = MLXSW_REG_PBMC_LEN, +}; + +/* reg_pbmc_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, pbmc, local_port, 0x00, 16, 8); + +/* reg_pbmc_xoff_timer_value + * When device generates a pause frame, it uses this value as the pause + * timer (time for the peer port to pause in quota-512 bit time). 
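Going back to SFMR above: creating a vFID that indexes into a FID-offset flood table is a single pack-and-write; a sketch:

static int example_vfid_create(struct mlxsw_core *core, u16 fid,
			       u16 fid_offset)
{
	char sfmr_pl[MLXSW_REG_SFMR_LEN];

	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid,
			    fid_offset);
	return mlxsw_reg_write(core, MLXSW_REG(sfmr), sfmr_pl);
}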
+ * Access: RW + */ +MLXSW_ITEM32(reg, pbmc, xoff_timer_value, 0x04, 16, 16); + +/* reg_pbmc_xoff_refresh + * The time before a new pause frame should be sent to refresh the pause RW + * state. Using the same units as xoff_timer_value above (in quota-512 bit + * time). + * Access: RW + */ +MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16); + +/* reg_pbmc_buf_lossy + * The field indicates if the buffer is lossy. + * 0 - Lossless + * 1 - Lossy + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, pbmc, buf_lossy, 0x0C, 25, 1, 0x08, 0x00, false); + +/* reg_pbmc_buf_epsb + * Eligible for Port Shared buffer. + * If epsb is set, packets assigned to buffer are allowed to insert the port + * shared buffer. + * When buf_lossy is MLXSW_REG_PBMC_LOSSY_LOSSY this field is reserved. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, pbmc, buf_epsb, 0x0C, 24, 1, 0x08, 0x00, false); + +/* reg_pbmc_buf_size + * The part of the packet buffer array is allocated for the specific buffer. + * Units are represented in cells. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false); + +static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port, + u16 xoff_timer_value, u16 xoff_refresh) +{ + MLXSW_REG_ZERO(pbmc, payload); + mlxsw_reg_pbmc_local_port_set(payload, local_port); + mlxsw_reg_pbmc_xoff_timer_value_set(payload, xoff_timer_value); + mlxsw_reg_pbmc_xoff_refresh_set(payload, xoff_refresh); +} + +static inline void mlxsw_reg_pbmc_lossy_buffer_pack(char *payload, + int buf_index, + u16 size) +{ + mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 1); + mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0); + mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size); +} + /* PSPA - Port Switch Partition Allocation * --------------------------------------- * Controls the association of a port with a switch partition and enables * configuring ports as stacking ports. */ -#define MLXSW_REG_PSPA_ID 0x500d +#define MLXSW_REG_PSPA_ID 0x500D #define MLXSW_REG_PSPA_LEN 0x8 static const struct mlxsw_reg_info mlxsw_reg_pspa = { @@ -1074,8 +1874,11 @@ MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8); */ MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4); -#define MLXSW_REG_HTGT_TRAP_GROUP_EMAD 0x0 -#define MLXSW_REG_HTGT_TRAP_GROUP_RX 0x1 +enum mlxsw_reg_htgt_trap_group { + MLXSW_REG_HTGT_TRAP_GROUP_EMAD, + MLXSW_REG_HTGT_TRAP_GROUP_RX, + MLXSW_REG_HTGT_TRAP_GROUP_CTRL, +}; /* reg_htgt_trap_group * Trap group number. User defined number specifying which trap groups @@ -1142,6 +1945,7 @@ MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6); #define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD 0x15 #define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX 0x14 +#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL 0x13 /* reg_htgt_local_path_rdq * Receive descriptor queue (RDQ) to use for the trap group. 
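The PBMC helpers earlier in this hunk combine into per-port headroom setup: pack the xoff timer/refresh once, then size each buffer. A sketch with illustrative timer values, buffer count and cell counts (the values actually used live in spectrum_buffers.c):

static int example_headroom_set(struct mlxsw_core *core, u8 local_port)
{
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i;

	mlxsw_reg_pbmc_pack(pbmc_pl, local_port, 0xffff, 0xffff / 2);
	for (i = 0; i < 10; i++) {	/* buffer count assumed */
		u16 size = i == 0 ? 1000 : 0;	/* cells; illustrative */

		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
	}
	return mlxsw_reg_write(core, MLXSW_REG(pbmc), pbmc_pl);
}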
@@ -1149,21 +1953,29 @@ MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6); */ MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6); -static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group) +static inline void mlxsw_reg_htgt_pack(char *payload, + enum mlxsw_reg_htgt_trap_group group) { u8 swid, rdq; MLXSW_REG_ZERO(htgt, payload); - if (MLXSW_REG_HTGT_TRAP_GROUP_EMAD == trap_group) { + switch (group) { + case MLXSW_REG_HTGT_TRAP_GROUP_EMAD: swid = MLXSW_PORT_SWID_ALL_SWIDS; rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD; - } else { + break; + case MLXSW_REG_HTGT_TRAP_GROUP_RX: swid = 0; rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX; + break; + case MLXSW_REG_HTGT_TRAP_GROUP_CTRL: + swid = 0; + rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL; + break; } mlxsw_reg_htgt_swid_set(payload, swid); mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL); - mlxsw_reg_htgt_trap_group_set(payload, trap_group); + mlxsw_reg_htgt_trap_group_set(payload, group); mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE); mlxsw_reg_htgt_pid_set(payload, 0); mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU); @@ -1254,17 +2066,290 @@ enum { */ MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2); -static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, - u8 trap_group, u16 trap_id) +static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id) { + enum mlxsw_reg_htgt_trap_group trap_group; + MLXSW_REG_ZERO(hpkt, payload); mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED); mlxsw_reg_hpkt_action_set(payload, action); + switch (trap_id) { + case MLXSW_TRAP_ID_ETHEMAD: + case MLXSW_TRAP_ID_PUDE: + trap_group = MLXSW_REG_HTGT_TRAP_GROUP_EMAD; + break; + default: + trap_group = MLXSW_REG_HTGT_TRAP_GROUP_RX; + break; + } mlxsw_reg_hpkt_trap_group_set(payload, trap_group); mlxsw_reg_hpkt_trap_id_set(payload, trap_id); mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT); } +/* SBPR - Shared Buffer Pools Register + * ----------------------------------- + * The SBPR configures and retrieves the shared buffer pools and configuration. + */ +#define MLXSW_REG_SBPR_ID 0xB001 +#define MLXSW_REG_SBPR_LEN 0x14 + +static const struct mlxsw_reg_info mlxsw_reg_sbpr = { + .id = MLXSW_REG_SBPR_ID, + .len = MLXSW_REG_SBPR_LEN, +}; + +enum mlxsw_reg_sbpr_dir { + MLXSW_REG_SBPR_DIR_INGRESS, + MLXSW_REG_SBPR_DIR_EGRESS, +}; + +/* reg_sbpr_dir + * Direction. + * Access: Index + */ +MLXSW_ITEM32(reg, sbpr, dir, 0x00, 24, 2); + +/* reg_sbpr_pool + * Pool index. + * Access: Index + */ +MLXSW_ITEM32(reg, sbpr, pool, 0x00, 0, 4); + +/* reg_sbpr_size + * Pool size in buffer cells. + * Access: RW + */ +MLXSW_ITEM32(reg, sbpr, size, 0x04, 0, 24); + +enum mlxsw_reg_sbpr_mode { + MLXSW_REG_SBPR_MODE_STATIC, + MLXSW_REG_SBPR_MODE_DYNAMIC, +}; + +/* reg_sbpr_mode + * Pool quota calculation mode. 
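Because mlxsw_reg_hpkt_pack() above now derives the trap group from the trap ID, callers drop the group argument entirely (as the core.c hunk earlier in this patch shows); registering a trap-to-CPU policy becomes:

static int example_trap_set(struct mlxsw_core *core, u16 trap_id)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    trap_id);
	return mlxsw_reg_write(core, MLXSW_REG(hpkt), hpkt_pl);
}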
+ * Access: RW + */ +MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4); + +static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool, + enum mlxsw_reg_sbpr_dir dir, + enum mlxsw_reg_sbpr_mode mode, u32 size) +{ + MLXSW_REG_ZERO(sbpr, payload); + mlxsw_reg_sbpr_pool_set(payload, pool); + mlxsw_reg_sbpr_dir_set(payload, dir); + mlxsw_reg_sbpr_mode_set(payload, mode); + mlxsw_reg_sbpr_size_set(payload, size); +} + +/* SBCM - Shared Buffer Class Management Register + * ---------------------------------------------- + * The SBCM register configures and retrieves the shared buffer allocation + * and configuration according to Port-PG, including the binding to pool + * and definition of the associated quota. + */ +#define MLXSW_REG_SBCM_ID 0xB002 +#define MLXSW_REG_SBCM_LEN 0x28 + +static const struct mlxsw_reg_info mlxsw_reg_sbcm = { + .id = MLXSW_REG_SBCM_ID, + .len = MLXSW_REG_SBCM_LEN, +}; + +/* reg_sbcm_local_port + * Local port number. + * For Ingress: excludes CPU port and Router port + * For Egress: excludes IP Router + * Access: Index + */ +MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8); + +/* reg_sbcm_pg_buff + * PG buffer - Port PG (dir=ingress) / traffic class (dir=egress) + * For PG buffer: range is 0..cap_max_pg_buffers - 1 + * For traffic class: range is 0..cap_max_tclass - 1 + * Note that when traffic class is in MC aware mode then the traffic + * classes which are MC aware cannot be configured. + * Access: Index + */ +MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6); + +enum mlxsw_reg_sbcm_dir { + MLXSW_REG_SBCM_DIR_INGRESS, + MLXSW_REG_SBCM_DIR_EGRESS, +}; + +/* reg_sbcm_dir + * Direction. + * Access: Index + */ +MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2); + +/* reg_sbcm_min_buff + * Minimum buffer size for the limiter, in cells. + * Access: RW + */ +MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24); + +/* reg_sbcm_max_buff + * When the pool associated to the port-pg/tclass is configured to + * static, Maximum buffer size for the limiter configured in cells. + * When the pool associated to the port-pg/tclass is configured to + * dynamic, the max_buff holds the "alpha" parameter, supporting + * the following values: + * 0: 0 + * i: (1/128)*2^(i-1), for i=1..14 + * 0xFF: Infinity + * Access: RW + */ +MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24); + +/* reg_sbcm_pool + * Association of the port-priority to a pool. + * Access: RW + */ +MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4); + +static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff, + enum mlxsw_reg_sbcm_dir dir, + u32 min_buff, u32 max_buff, u8 pool) +{ + MLXSW_REG_ZERO(sbcm, payload); + mlxsw_reg_sbcm_local_port_set(payload, local_port); + mlxsw_reg_sbcm_pg_buff_set(payload, pg_buff); + mlxsw_reg_sbcm_dir_set(payload, dir); + mlxsw_reg_sbcm_min_buff_set(payload, min_buff); + mlxsw_reg_sbcm_max_buff_set(payload, max_buff); + mlxsw_reg_sbcm_pool_set(payload, pool); +} + +/* SBPM - Shared Buffer Port Management Register + * --------------------------------------------- + * The SBPM register configures and retrieves the shared buffer allocation + * and configuration according to Port-Pool, including the definition + * of the associated quota. + */ +#define MLXSW_REG_SBPM_ID 0xB003 +#define MLXSW_REG_SBPM_LEN 0x28 + +static const struct mlxsw_reg_info mlxsw_reg_sbpm = { + .id = MLXSW_REG_SBPM_ID, + .len = MLXSW_REG_SBPM_LEN, +}; + +/* reg_sbpm_local_port + * Local port number.
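The dynamic max_buff encoding shared by SBCM, SBPM and SBMM maps i = 1..14 to alpha = 2^(i-1)/128, so i = 1 is 1/128, i = 8 is exactly 1, i = 14 is 64, and 0xFF means no cap. A small decoder, as a sketch of the arithmetic only:

/* Decode the dynamic-threshold "alpha" from a max_buff register value
 * into a num/den fraction.
 */
static void example_alpha_decode(u32 max_buff, u32 *num, u32 *den)
{
	if (max_buff == 0) {
		*num = 0;		/* alpha of zero */
		*den = 1;
	} else if (max_buff <= 14) {
		*num = 1 << (max_buff - 1);	/* 2^(i-1) */
		*den = 128;
	} else {
		*num = U32_MAX;		/* 0xFF: effectively infinite */
		*den = 1;
	}
}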
+ * For Ingress: excludes CPU port and Router port + * For Egress: excludes IP Router + * Access: Index + */ +MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8); + +/* reg_sbpm_pool + * The pool associated to quota counting on the local_port. + * Access: Index + */ +MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4); + +enum mlxsw_reg_sbpm_dir { + MLXSW_REG_SBPM_DIR_INGRESS, + MLXSW_REG_SBPM_DIR_EGRESS, +}; + +/* reg_sbpm_dir + * Direction. + * Access: Index + */ +MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2); + +/* reg_sbpm_min_buff + * Minimum buffer size for the limiter, in cells. + * Access: RW + */ +MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24); + +/* reg_sbpm_max_buff + * When the pool associated to the port-pg/tclass is configured to + * static, Maximum buffer size for the limiter configured in cells. + * When the pool associated to the port-pg/tclass is configured to + * dynamic, the max_buff holds the "alpha" parameter, supporting + * the following values: + * 0: 0 + * i: (1/128)*2^(i-1), for i=1..14 + * 0xFF: Infinity + * Access: RW + */ +MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24); + +static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool, + enum mlxsw_reg_sbpm_dir dir, + u32 min_buff, u32 max_buff) +{ + MLXSW_REG_ZERO(sbpm, payload); + mlxsw_reg_sbpm_local_port_set(payload, local_port); + mlxsw_reg_sbpm_pool_set(payload, pool); + mlxsw_reg_sbpm_dir_set(payload, dir); + mlxsw_reg_sbpm_min_buff_set(payload, min_buff); + mlxsw_reg_sbpm_max_buff_set(payload, max_buff); +} + +/* SBMM - Shared Buffer Multicast Management Register + * -------------------------------------------------- + * The SBMM register configures and retrieves the shared buffer allocation + * and configuration for MC packets according to Switch-Priority, including + * the binding to pool and definition of the associated quota. + */ +#define MLXSW_REG_SBMM_ID 0xB004 +#define MLXSW_REG_SBMM_LEN 0x28 + +static const struct mlxsw_reg_info mlxsw_reg_sbmm = { + .id = MLXSW_REG_SBMM_ID, + .len = MLXSW_REG_SBMM_LEN, +}; + +/* reg_sbmm_prio + * Switch Priority. + * Access: Index + */ +MLXSW_ITEM32(reg, sbmm, prio, 0x00, 8, 4); + +/* reg_sbmm_min_buff + * Minimum buffer size for the limiter, in cells. + * Access: RW + */ +MLXSW_ITEM32(reg, sbmm, min_buff, 0x18, 0, 24); + +/* reg_sbmm_max_buff + * When the pool associated to the port-pg/tclass is configured to + * static, Maximum buffer size for the limiter configured in cells. + * When the pool associated to the port-pg/tclass is configured to + * dynamic, the max_buff holds the "alpha" parameter, supporting + * the following values: + * 0: 0 + * i: (1/128)*2^(i-1), for i=1..14 + * 0xFF: Infinity + * Access: RW + */ +MLXSW_ITEM32(reg, sbmm, max_buff, 0x1C, 0, 24); + +/* reg_sbmm_pool + * Association of the port-priority to a pool. 
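Multicast quota setup typically walks the switch priorities and issues one SBMM write each, packed with mlxsw_reg_sbmm_pack() defined just below; a sketch with illustrative zero quotas and an assumed count of 8 priorities:

static int example_sb_mms_init(struct mlxsw_core *core)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int prio, err;

	for (prio = 0; prio < 8; prio++) {	/* priority count assumed */
		mlxsw_reg_sbmm_pack(sbmm_pl, prio, 0, 0, 0);
		err = mlxsw_reg_write(core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}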
+ * Access: RW + */ +MLXSW_ITEM32(reg, sbmm, pool, 0x24, 0, 4); + +static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff, + u32 max_buff, u8 pool) +{ + MLXSW_REG_ZERO(sbmm, payload); + mlxsw_reg_sbmm_prio_set(payload, prio); + mlxsw_reg_sbmm_min_buff_set(payload, min_buff); + mlxsw_reg_sbmm_max_buff_set(payload, max_buff); + mlxsw_reg_sbmm_pool_set(payload, pool); +} + static inline const char *mlxsw_reg_id_str(u16 reg_id) { switch (reg_id) { @@ -1272,18 +2357,34 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SGCR"; case MLXSW_REG_SPAD_ID: return "SPAD"; - case MLXSW_REG_SMID_ID: - return "SMID"; case MLXSW_REG_SSPR_ID: return "SSPR"; + case MLXSW_REG_SFDAT_ID: + return "SFDAT"; + case MLXSW_REG_SFD_ID: + return "SFD"; + case MLXSW_REG_SFN_ID: + return "SFN"; case MLXSW_REG_SPMS_ID: return "SPMS"; + case MLXSW_REG_SPVID_ID: + return "SPVID"; + case MLXSW_REG_SPVM_ID: + return "SPVM"; case MLXSW_REG_SFGC_ID: return "SFGC"; case MLXSW_REG_SFTR_ID: return "SFTR"; case MLXSW_REG_SPMLR_ID: return "SPMLR"; + case MLXSW_REG_SVFA_ID: + return "SVFA"; + case MLXSW_REG_SVPE_ID: + return "SVPE"; + case MLXSW_REG_SFMR_ID: + return "SFMR"; + case MLXSW_REG_SPVMLR_ID: + return "SPVMLR"; case MLXSW_REG_PMLP_ID: return "PMLP"; case MLXSW_REG_PMTU_ID: @@ -1296,12 +2397,22 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "PAOS"; case MLXSW_REG_PPCNT_ID: return "PPCNT"; + case MLXSW_REG_PBMC_ID: + return "PBMC"; case MLXSW_REG_PSPA_ID: return "PSPA"; case MLXSW_REG_HTGT_ID: return "HTGT"; case MLXSW_REG_HPKT_ID: return "HPKT"; + case MLXSW_REG_SBPR_ID: + return "SBPR"; + case MLXSW_REG_SBCM_ID: + return "SBCM"; + case MLXSW_REG_SBPM_ID: + return "SBPM"; + case MLXSW_REG_SBMM_ID: + return "SBMM"; default: return "*UNKNOWN*"; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c new file mode 100644 index 000000000..3be4a2355 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -0,0 +1,1949 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum.c + * Copyright (c) 2015 Mellanox Technologies. All rights reserved. + * Copyright (c) 2015 Jiri Pirko + * Copyright (c) 2015 Ido Schimmel + * Copyright (c) 2015 Elad Raz + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/bitops.h>
+#include <net/switchdev.h>
+#include <generated/utsrelease.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+#include "port.h"
+#include "trap.h"
+#include "txheader.h"
+
+static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
+static const char mlxsw_sp_driver_version[] = "1.0";
+
+/* tx_hdr_version
+ * Tx header version.
+ * Must be set to 1.
+ */
+MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
+
+/* tx_hdr_ctl
+ * Packet control type.
+ * 0 - Ethernet control (e.g. EMADs, LACP)
+ * 1 - Ethernet data
+ */
+MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
+
+/* tx_hdr_proto
+ * Packet protocol type. Must be set to 1 (Ethernet).
+ */
+MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
+
+/* tx_hdr_rx_is_router
+ * Packet is sent from the router. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
+
+/* tx_hdr_fid_valid
+ * Indicates if the 'fid' field is valid and should be used for
+ * forwarding lookup. Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
+
+/* tx_hdr_swid
+ * Switch partition ID. Must be set to 0.
+ */
+MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
+
+/* tx_hdr_control_tclass
+ * Indicates if the packet should use the control TClass and not one
+ * of the data TClasses.
+ */
+MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
+
+/* tx_hdr_etclass
+ * Egress TClass to be used on the egress device on the egress port.
+ */
+MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
+
+/* tx_hdr_port_mid
+ * Destination local port for unicast packets.
+ * Destination multicast ID for multicast packets.
+ *
+ * Control packets are directed to a specific egress port, while data
+ * packets are transmitted through the CPU port (0) into the switch partition,
+ * where forwarding rules are applied.
+ */
+MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
+
+/* tx_hdr_fid
+ * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
+ * set, otherwise calculated based on the packet's VID using VID to FID mapping.
+ * Valid for data packets only.
+ */
+MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
+
+/* tx_hdr_type
+ * 0 - Data packets
+ * 6 - Control packets
+ */
+MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
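The MLXSW_ITEM32() macros above generate getters and setters that merge a value into a bit range of a 32-bit big-endian word at a fixed byte offset. A standalone sketch of the equivalent packing, with semantics assumed from the field comments rather than from the driver's actual macro expansion, follows:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl/ntohl */

/* Read the 32-bit big-endian word at byte offset 'off', merge 'val'
 * into bits [shift, shift+width), and store it back. Illustrative
 * stand-in for an MLXSW_ITEM32-style setter.
 */
static void tx_hdr_field_set(uint8_t *hdr, unsigned int off,
			     unsigned int shift, unsigned int width,
			     uint32_t val)
{
	uint32_t mask = (width < 32 ? (1u << width) - 1 : ~0u) << shift;
	uint32_t word;

	memcpy(&word, hdr + off, sizeof(word));
	word = ntohl(word);
	word = (word & ~mask) | ((val << shift) & mask);
	word = htonl(word);
	memcpy(hdr + off, &word, sizeof(word));
}

int main(void)
{
	uint8_t hdr[16] = {0};

	tx_hdr_field_set(hdr, 0x00, 28, 4, 1);	/* tx_hdr_version = 1 */
	tx_hdr_field_set(hdr, 0x00, 26, 2, 0);	/* tx_hdr_ctl = Ethernet control */
	tx_hdr_field_set(hdr, 0x0C, 0, 4, 6);	/* tx_hdr_type = control */
	printf("first word: 0x%02x%02x%02x%02x\n",
	       hdr[0], hdr[1], hdr[2], hdr[3]);	/* prints 0x10000000 */
	return 0;
}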
+
+static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
+				     const struct mlxsw_tx_info *tx_info)
+{
+	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
+
+	memset(txhdr, 0, MLXSW_TXHDR_LEN);
+
+	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
+	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
+	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
+	mlxsw_tx_hdr_swid_set(txhdr, 0);
+	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
+	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
+	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
+}
+
+static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
+{
+	char spad_pl[MLXSW_REG_SPAD_LEN];
+	int err;
+
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
+	if (err)
+		return err;
+	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
+	return 0;
+}
+
+static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
+					  bool is_up)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char paos_pl[MLXSW_REG_PAOS_LEN];
+
+	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
+			    is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
+			    MLXSW_PORT_ADMIN_STATUS_DOWN);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
+}
+
+static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
+					 bool *p_is_up)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char paos_pl[MLXSW_REG_PAOS_LEN];
+	u8 oper_status;
+	int err;
+
+	mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
+	if (err)
+		return err;
+	oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
+	*p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
+	return 0;
+}
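The helpers above follow the driver's uniform register convention: pack a payload buffer, then issue mlxsw_reg_query() or mlxsw_reg_write(). A minimal illustrative sketch of a caller that flaps a port using these helpers (not a function in this patch; error handling abbreviated):

/* Illustrative only: toggle a port and verify it came back up, using
 * the helpers defined above.
 */
static int mlxsw_sp_port_flap_check(struct mlxsw_sp_port *mlxsw_sp_port)
{
	bool is_up;
	int err;

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		return err;
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		return err;
	/* Oper status typically lags admin status; real callers wait for
	 * the PUDE event (see mlxsw_sp_pude_event_func below) instead of
	 * reading it back immediately.
	 */
	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
	if (err)
		return err;
	return is_up ? 0 : -EAGAIN;
}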
+
+static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
+{
+	char sfmr_pl[MLXSW_REG_SFMR_LEN];
+	int err;
+
+	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
+			    MLXSW_SP_VFID_BASE + vfid, 0);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+	if (err)
+		return err;
+
+	set_bit(vfid, mlxsw_sp->active_vfids);
+	return 0;
+}
+
+static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
+{
+	char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+	clear_bit(vfid, mlxsw_sp->active_vfids);
+
+	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
+			    MLXSW_SP_VFID_BASE + vfid, 0);
+	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				      unsigned char *addr)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char ppad_pl[MLXSW_REG_PPAD_LEN];
+
+	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
+	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
+}
+
+static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
+
+	ether_addr_copy(addr, mlxsw_sp->base_mac);
+	addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
+	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
+}
+
+static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				       u16 vid, enum mlxsw_reg_spms_state state)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char *spms_pl;
+	int err;
+
+	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+	if (!spms_pl)
+		return -ENOMEM;
+	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+	mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+	kfree(spms_pl);
+	return err;
+}
+
+static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char pmtu_pl[MLXSW_REG_PMTU_LEN];
+	int max_mtu;
+	int err;
+
+	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
+	if (err)
+		return err;
+	max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+
+	if (mtu > max_mtu)
+		return -EINVAL;
+
+	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
+}
+
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char pspa_pl[MLXSW_REG_PSPA_LEN];
+
+	mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
+}
+
+static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				     bool enable)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char svpe_pl[MLXSW_REG_SVPE_LEN];
+
+	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
+}
+
+int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
+				 u16 vid)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+	mlxsw_reg_svfa_pack(svfa_pl, mlxsw_sp_port->local_port, mt, valid,
+			    fid, vid);
+	return
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl); +} + +static int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid, bool learn_enable) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char *spvmlr_pl; + int err; + + spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL); + if (!spvmlr_pl) + return -ENOMEM; + mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid, + learn_enable); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl); + kfree(spvmlr_pl); + return err; +} + +static int +mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sspr_pl[MLXSW_REG_SSPR_LEN]; + + mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); +} + +static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port, + bool *p_usable) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char pmlp_pl[MLXSW_REG_PMLP_LEN]; + int err; + + mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); + if (err) + return err; + *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ? true : false; + return 0; +} + +static int mlxsw_sp_port_open(struct net_device *dev) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err; + + err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); + if (err) + return err; + netif_start_queue(dev); + return 0; +} + +static int mlxsw_sp_port_stop(struct net_device *dev) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + + netif_stop_queue(dev); + return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); +} + +static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_port_pcpu_stats *pcpu_stats; + const struct mlxsw_tx_info tx_info = { + .local_port = mlxsw_sp_port->local_port, + .is_emad = false, + }; + u64 len; + int err; + + if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info)) + return NETDEV_TX_BUSY; + + if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { + struct sk_buff *skb_orig = skb; + + skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN); + if (!skb) { + this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb_orig); + return NETDEV_TX_OK; + } + } + + if (eth_skb_pad(skb)) { + this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); + return NETDEV_TX_OK; + } + + mlxsw_sp_txhdr_construct(skb, &tx_info); + len = skb->len; + /* Due to a race we might fail here because of a full queue. In that + * unlikely case we simply drop the packet. 
+ */ + err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info); + + if (!err) { + pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->tx_packets++; + pcpu_stats->tx_bytes += len; + u64_stats_update_end(&pcpu_stats->syncp); + } else { + this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped); + dev_kfree_skb_any(skb); + } + return NETDEV_TX_OK; +} + +static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct sockaddr *addr = p; + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data); + if (err) + return err; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + return 0; +} + +static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err; + + err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); + if (err) + return err; + dev->mtu = mtu; + return 0; +} + +static struct rtnl_link_stats64 * +mlxsw_sp_port_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp_port_pcpu_stats *p; + u64 rx_packets, rx_bytes, tx_packets, tx_bytes; + u32 tx_dropped = 0; + unsigned int start; + int i; + + for_each_possible_cpu(i) { + p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); + do { + start = u64_stats_fetch_begin_irq(&p->syncp); + rx_packets = p->rx_packets; + rx_bytes = p->rx_bytes; + tx_packets = p->tx_packets; + tx_bytes = p->tx_bytes; + } while (u64_stats_fetch_retry_irq(&p->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + /* tx_dropped is u32, updated without syncp protection. 
*/ + tx_dropped += p->tx_dropped; + } + stats->tx_dropped = tx_dropped; + return stats; +} + +int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, + u16 vid_end, bool is_member, bool untagged) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char *spvm_pl; + int err; + + spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL); + if (!spvm_pl) + return -ENOMEM; + + mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin, + vid_end, is_member, untagged); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl); + kfree(spvm_pl); + return err; +} + +static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) +{ + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; + u16 vid, last_visited_vid; + int err; + + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, vid, + vid); + if (err) { + last_visited_vid = vid; + goto err_port_vid_to_fid_set; + } + } + + err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); + if (err) { + last_visited_vid = VLAN_N_VID; + goto err_port_vid_to_fid_set; + } + + return 0; + +err_port_vid_to_fid_set: + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid) + mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, vid, + vid); + return err; +} + +static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) +{ + enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; + u16 vid; + int err; + + err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); + if (err) + return err; + + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, + vid, vid); + if (err) + return err; + } + + return 0; +} + +int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, + u16 vid) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char *sftr_pl; + int err; + + /* VLAN 0 is added to HW filter when device goes up, but it is + * reserved in our case, so simply return. + */ + if (!vid) + return 0; + + if (test_bit(vid, mlxsw_sp_port->active_vfids)) { + netdev_warn(dev, "VID=%d already configured\n", vid); + return 0; + } + + if (!test_bit(vid, mlxsw_sp->active_vfids)) { + err = mlxsw_sp_vfid_create(mlxsw_sp, vid); + if (err) { + netdev_err(dev, "Failed to create vFID=%d\n", + MLXSW_SP_VFID_BASE + vid); + return err; + } + + sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); + if (!sftr_pl) { + err = -ENOMEM; + goto err_flood_table_alloc; + } + mlxsw_reg_sftr_pack(sftr_pl, 0, vid, + MLXSW_REG_SFGC_TABLE_TYPE_FID, 0, + MLXSW_PORT_CPU_PORT, true); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); + kfree(sftr_pl); + if (err) { + netdev_err(dev, "Failed to configure flood table\n"); + goto err_flood_table_config; + } + } + + /* In case we fail in the following steps, we intentionally do not + * destroy the associated vFID. + */ + + /* When adding the first VLAN interface on a bridged port we need to + * transition all the active 802.1Q bridge VLANs to use explicit + * {Port, VID} to FID mappings and set the port's mode to Virtual mode. 
+ */ + if (!mlxsw_sp_port->nr_vfids) { + err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port); + if (err) { + netdev_err(dev, "Failed to set to Virtual mode\n"); + return err; + } + } + + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, + true, MLXSW_SP_VFID_BASE + vid, vid); + if (err) { + netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n", + vid, MLXSW_SP_VFID_BASE + vid); + goto err_port_vid_to_fid_set; + } + + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false); + if (err) { + netdev_err(dev, "Failed to disable learning for VID=%d\n", vid); + goto err_port_vid_learning_set; + } + + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false); + if (err) { + netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", + vid); + goto err_port_add_vid; + } + + err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid, + MLXSW_REG_SPMS_STATE_FORWARDING); + if (err) { + netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); + goto err_port_stp_state_set; + } + + mlxsw_sp_port->nr_vfids++; + set_bit(vid, mlxsw_sp_port->active_vfids); + + return 0; + +err_flood_table_config: +err_flood_table_alloc: + mlxsw_sp_vfid_destroy(mlxsw_sp, vid); + return err; + +err_port_stp_state_set: + mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); +err_port_add_vid: + mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); +err_port_vid_learning_set: + mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false, + MLXSW_SP_VFID_BASE + vid, vid); +err_port_vid_to_fid_set: + mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); + return err; +} + +int mlxsw_sp_port_kill_vid(struct net_device *dev, + __be16 __always_unused proto, u16 vid) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err; + + /* VLAN 0 is removed from HW filter when device goes down, but + * it is reserved in our case, so simply return. + */ + if (!vid) + return 0; + + if (!test_bit(vid, mlxsw_sp_port->active_vfids)) { + netdev_warn(dev, "VID=%d does not exist\n", vid); + return 0; + } + + err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid, + MLXSW_REG_SPMS_STATE_DISCARDING); + if (err) { + netdev_err(dev, "Failed to set STP state for VID=%d\n", vid); + return err; + } + + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); + if (err) { + netdev_err(dev, "Failed to set VLAN membership for VID=%d\n", + vid); + return err; + } + + err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); + if (err) { + netdev_err(dev, "Failed to enable learning for VID=%d\n", vid); + return err; + } + + err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, + MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, + false, MLXSW_SP_VFID_BASE + vid, + vid); + if (err) { + netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n", + vid, MLXSW_SP_VFID_BASE + vid); + return err; + } + + /* When removing the last VLAN interface on a bridged port we need to + * transition all active 802.1Q bridge VLANs to use VID to FID + * mappings and set port's mode to VLAN mode. 
+ */ + if (mlxsw_sp_port->nr_vfids == 1) { + err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port); + if (err) { + netdev_err(dev, "Failed to set to VLAN mode\n"); + return err; + } + } + + mlxsw_sp_port->nr_vfids--; + clear_bit(vid, mlxsw_sp_port->active_vfids); + + return 0; +} + +static const struct net_device_ops mlxsw_sp_port_netdev_ops = { + .ndo_open = mlxsw_sp_port_open, + .ndo_stop = mlxsw_sp_port_stop, + .ndo_start_xmit = mlxsw_sp_port_xmit, + .ndo_set_mac_address = mlxsw_sp_port_set_mac_address, + .ndo_change_mtu = mlxsw_sp_port_change_mtu, + .ndo_get_stats64 = mlxsw_sp_port_get_stats64, + .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid, + .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, + .ndo_fdb_add = switchdev_port_fdb_add, + .ndo_fdb_del = switchdev_port_fdb_del, + .ndo_fdb_dump = switchdev_port_fdb_dump, + .ndo_bridge_setlink = switchdev_port_bridge_setlink, + .ndo_bridge_getlink = switchdev_port_bridge_getlink, + .ndo_bridge_dellink = switchdev_port_bridge_dellink, +}; + +static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + + strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, mlxsw_sp_driver_version, + sizeof(drvinfo->version)); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d.%d", + mlxsw_sp->bus_info->fw_rev.major, + mlxsw_sp->bus_info->fw_rev.minor, + mlxsw_sp->bus_info->fw_rev.subminor); + strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name, + sizeof(drvinfo->bus_info)); +} + +struct mlxsw_sp_port_hw_stats { + char str[ETH_GSTRING_LEN]; + u64 (*getter)(char *payload); +}; + +static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { + { + .str = "a_frames_transmitted_ok", + .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get, + }, + { + .str = "a_frames_received_ok", + .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get, + }, + { + .str = "a_frame_check_sequence_errors", + .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get, + }, + { + .str = "a_alignment_errors", + .getter = mlxsw_reg_ppcnt_a_alignment_errors_get, + }, + { + .str = "a_octets_transmitted_ok", + .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get, + }, + { + .str = "a_octets_received_ok", + .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get, + }, + { + .str = "a_multicast_frames_xmitted_ok", + .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get, + }, + { + .str = "a_broadcast_frames_xmitted_ok", + .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get, + }, + { + .str = "a_multicast_frames_received_ok", + .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get, + }, + { + .str = "a_broadcast_frames_received_ok", + .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get, + }, + { + .str = "a_in_range_length_errors", + .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get, + }, + { + .str = "a_out_of_range_length_field", + .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get, + }, + { + .str = "a_frame_too_long_errors", + .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get, + }, + { + .str = "a_symbol_error_during_carrier", + .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get, + }, + { + .str = "a_mac_control_frames_transmitted", + .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get, + }, + { + .str = "a_mac_control_frames_received", + .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get, + }, + 
{ + .str = "a_unsupported_opcodes_received", + .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get, + }, + { + .str = "a_pause_mac_ctrl_frames_received", + .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get, + }, + { + .str = "a_pause_mac_ctrl_frames_xmitted", + .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get, + }, +}; + +#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) + +static void mlxsw_sp_port_get_strings(struct net_device *dev, + u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) { + memcpy(p, mlxsw_sp_port_hw_stats[i].str, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static void mlxsw_sp_port_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; + int i; + int err; + + mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); + for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) + data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0; +} + +static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return MLXSW_SP_PORT_HW_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +struct mlxsw_sp_port_link_mode { + u32 mask; + u32 supported; + u32 advertised; + u32 speed; +}; + +static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = { + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T, + .supported = SUPPORTED_100baseT_Full, + .advertised = ADVERTISED_100baseT_Full, + .speed = 100, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX, + .speed = 100, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII | + MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX, + .supported = SUPPORTED_1000baseKX_Full, + .advertised = ADVERTISED_1000baseKX_Full, + .speed = 1000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T, + .supported = SUPPORTED_10000baseT_Full, + .advertised = ADVERTISED_10000baseT_Full, + .speed = 10000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4, + .supported = SUPPORTED_10000baseKX4_Full, + .advertised = ADVERTISED_10000baseKX4_Full, + .speed = 10000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR, + .supported = SUPPORTED_10000baseKR_Full, + .advertised = ADVERTISED_10000baseKR_Full, + .speed = 10000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2, + .supported = SUPPORTED_20000baseKR2_Full, + .advertised = ADVERTISED_20000baseKR2_Full, + .speed = 20000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4, + .supported = SUPPORTED_40000baseCR4_Full, + .advertised = ADVERTISED_40000baseCR4_Full, + .speed = 40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4, + .supported = SUPPORTED_40000baseKR4_Full, + .advertised = ADVERTISED_40000baseKR4_Full, + .speed = 40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4, + .supported = SUPPORTED_40000baseSR4_Full, + .advertised = ADVERTISED_40000baseSR4_Full, + .speed = 40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4, + .supported = SUPPORTED_40000baseLR4_Full, + .advertised = ADVERTISED_40000baseLR4_Full, + .speed = 
40000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR | + MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR | + MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR, + .speed = 25000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2 | + MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2, + .speed = 50000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4, + .supported = SUPPORTED_56000baseKR4_Full, + .advertised = ADVERTISED_56000baseKR4_Full, + .speed = 56000, + }, + { + .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4, + .speed = 100000, + }, +}; + +#define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode) + +static u32 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto) +{ + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_SGMII)) + return SUPPORTED_FIBRE; + + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX)) + return SUPPORTED_Backplane; + return 0; +} + +static u32 mlxsw_sp_from_ptys_supported_link(u32 ptys_eth_proto) +{ + u32 modes = 0; + int i; + + for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { + if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) + modes |= mlxsw_sp_port_link_mode[i].supported; + } + return modes; +} + +static u32 mlxsw_sp_from_ptys_advert_link(u32 ptys_eth_proto) +{ + u32 modes = 0; + int i; + + for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { + if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) + modes |= mlxsw_sp_port_link_mode[i].advertised; + } + return modes; +} + +static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto, + struct ethtool_cmd *cmd) +{ + u32 speed = SPEED_UNKNOWN; + u8 duplex = DUPLEX_UNKNOWN; + int i; + + if (!carrier_ok) + goto out; + + for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { + if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) { + speed = mlxsw_sp_port_link_mode[i].speed; + duplex = DUPLEX_FULL; + break; + } + } +out: + ethtool_cmd_speed_set(cmd, speed); + cmd->duplex = duplex; +} + +static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto) +{ + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 | + MLXSW_REG_PTYS_ETH_SPEED_SGMII)) + return PORT_FIBRE; + + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4)) + return PORT_DA; + + if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR | + MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 | + MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 | + MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4)) + return PORT_NONE; + + return PORT_OTHER; +} + +static int mlxsw_sp_port_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char ptys_pl[MLXSW_REG_PTYS_LEN]; + u32 eth_proto_cap; + u32 eth_proto_admin; + u32 eth_proto_oper; + int err; + + mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0); + err = mlxsw_reg_query(mlxsw_sp->core, 
MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to get proto");
+		return err;
+	}
+	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap,
+			      &eth_proto_admin, &eth_proto_oper);
+
+	cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
+			 mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
+			 SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+	cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
+	mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
+					eth_proto_oper, cmd);
+
+	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+	cmd->port = mlxsw_sp_port_connector_port(eth_proto_oper);
+	cmd->lp_advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_oper);
+
+	cmd->transceiver = XCVR_INTERNAL;
+	return 0;
+}
+
+static u32 mlxsw_sp_to_ptys_advert_link(u32 advertising)
+{
+	u32 ptys_proto = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+		if (advertising & mlxsw_sp_port_link_mode[i].advertised)
+			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
+	}
+	return ptys_proto;
+}
+
+static u32 mlxsw_sp_to_ptys_speed(u32 speed)
+{
+	u32 ptys_proto = 0;
+	int i;
+
+	for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
+		if (speed == mlxsw_sp_port_link_mode[i].speed)
+			ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
+	}
+	return ptys_proto;
+}
+
+static int mlxsw_sp_port_set_settings(struct net_device *dev,
+				      struct ethtool_cmd *cmd)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char ptys_pl[MLXSW_REG_PTYS_LEN];
+	u32 speed;
+	u32 eth_proto_new;
+	u32 eth_proto_cap;
+	u32 eth_proto_admin;
+	bool is_up;
+	int err;
+
+	speed = ethtool_cmd_speed(cmd);
+
+	eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
+		mlxsw_sp_to_ptys_advert_link(cmd->advertising) :
+		mlxsw_sp_to_ptys_speed(speed);
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
+	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to get proto");
+		return err;
+	}
+	mlxsw_reg_ptys_unpack(ptys_pl, &eth_proto_cap, &eth_proto_admin, NULL);
+
+	eth_proto_new = eth_proto_new & eth_proto_cap;
+	if (!eth_proto_new) {
+		netdev_err(dev, "Not supported proto admin requested");
+		return -EINVAL;
+	}
+	if (eth_proto_new == eth_proto_admin)
+		return 0;
+
+	mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, eth_proto_new);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+	if (err) {
+		netdev_err(dev, "Failed to set proto admin");
+		return err;
+	}
+
+	err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
+	if (err) {
+		netdev_err(dev, "Failed to get oper status");
+		return err;
+	}
+	if (!is_up)
+		return 0;
+
+	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+	if (err) {
+		netdev_err(dev, "Failed to set admin status");
+		return err;
+	}
+
+	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+	if (err) {
+		netdev_err(dev, "Failed to set admin status");
+		return err;
+	}
+
+	return 0;
+}
+
+static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
+	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+	.get_strings		= mlxsw_sp_port_get_strings,
+	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
+	.get_sset_count		= mlxsw_sp_port_get_sset_count,
+	.get_settings		= mlxsw_sp_port_get_settings,
+	.set_settings		= mlxsw_sp_port_set_settings,
+};
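The ethtool glue above is entirely table-driven: mlxsw_sp_port_link_mode maps PTYS capability bits to ethtool masks and speeds, and every translation is a loop over that table. A self-contained sketch of the same idea, with bit values invented for the example (the driver uses MLXSW_REG_PTYS_ETH_SPEED_* and the ethtool SUPPORTED_*, ADVERTISED_* masks):

#include <stdint.h>
#include <stdio.h>

struct link_mode {
	uint32_t hw_mask;	/* device proto bit(s), invented here */
	uint32_t ethtool_mask;	/* ethtool bit, invented here */
	uint32_t speed;		/* Mb/s */
};

static const struct link_mode link_modes[] = {
	{ .hw_mask = 1u << 0, .ethtool_mask = 1u << 3,  .speed = 1000 },
	{ .hw_mask = 1u << 4, .ethtool_mask = 1u << 12, .speed = 10000 },
	{ .hw_mask = 1u << 7, .ethtool_mask = 1u << 23, .speed = 40000 },
};

/* Same shape as mlxsw_sp_from_ptys_advert_link(): OR together the
 * ethtool bits of every table entry whose hardware bit is set.
 */
static uint32_t from_hw(uint32_t proto)
{
	uint32_t modes = 0;
	size_t i;

	for (i = 0; i < sizeof(link_modes) / sizeof(link_modes[0]); i++)
		if (proto & link_modes[i].hw_mask)
			modes |= link_modes[i].ethtool_mask;
	return modes;
}

int main(void)
{
	printf("0x%x\n", from_hw((1u << 0) | (1u << 7)));	/* 0x800008 */
	return 0;
}

The reverse direction (mlxsw_sp_to_ptys_advert_link, mlxsw_sp_to_ptys_speed) is the same loop with the masks swapped, which is why adding a new link mode in the driver only means adding one table entry.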
+
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port;
+	struct net_device *dev;
+	bool usable;
+	int err;
+
+	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
+	if (!dev)
+		return -ENOMEM;
+	mlxsw_sp_port = netdev_priv(dev);
+	mlxsw_sp_port->dev = dev;
+	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
+	mlxsw_sp_port->local_port = local_port;
+	mlxsw_sp_port->learning = 1;
+	mlxsw_sp_port->learning_sync = 1;
+	mlxsw_sp_port->uc_flood = 1;
+	mlxsw_sp_port->pvid = 1;
+
+	mlxsw_sp_port->pcpu_stats =
+		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
+	if (!mlxsw_sp_port->pcpu_stats) {
+		err = -ENOMEM;
+		goto err_alloc_stats;
+	}
+
+	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
+	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
+
+	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
+			mlxsw_sp_port->local_port);
+		goto err_dev_addr_init;
+	}
+
+	netif_carrier_off(dev);
+
+	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
+			 NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	/* Each packet needs to have a Tx header (metadata) on top of all
+	 * other headers.
+	 */
+	dev->hard_header_len += MLXSW_TXHDR_LEN;
+
+	err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_module_check;
+	}
+
+	if (!usable) {
+		dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n",
+			mlxsw_sp_port->local_port);
+		goto port_not_usable;
+	}
+
+	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_system_port_mapping_set;
+	}
+
+	err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_swid_set;
+	}
+
+	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_mtu_set;
+	}
+
+	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+	if (err)
+		goto err_port_admin_status_set;
+
+	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
+			mlxsw_sp_port->local_port);
+		goto err_port_buffers_init;
+	}
+
+	mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
+	err = register_netdev(dev);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
+			mlxsw_sp_port->local_port);
+		goto err_register_netdev;
+	}
+
+	err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
+	if (err)
+		goto err_port_vlan_init;
+
+	mlxsw_sp->ports[local_port] = mlxsw_sp_port;
+	return 0;
+
+err_port_vlan_init:
+	unregister_netdev(dev);
+err_register_netdev:
+err_port_buffers_init:
+err_port_admin_status_set:
+err_port_mtu_set:
+err_port_swid_set:
+err_port_system_port_mapping_set:
+port_not_usable:
+err_port_module_check:
+err_dev_addr_init:
+	free_percpu(mlxsw_sp_port->pcpu_stats);
+err_alloc_stats:
+	free_netdev(dev);
+	return err;
+}
+
+static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp)
+{
+	u16 vfid;
+
+	for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID)
+		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
+}
+
+static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+
+	if (!mlxsw_sp_port)
+		return;
+
mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1); + unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ + mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); + free_percpu(mlxsw_sp_port->pcpu_stats); + free_netdev(mlxsw_sp_port->dev); +} + +static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) +{ + int i; + + for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) + mlxsw_sp_port_remove(mlxsw_sp, i); + kfree(mlxsw_sp->ports); +} + +static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) +{ + size_t alloc_size; + int i; + int err; + + alloc_size = sizeof(struct mlxsw_sp_port *) * MLXSW_PORT_MAX_PORTS; + mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL); + if (!mlxsw_sp->ports) + return -ENOMEM; + + for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { + err = mlxsw_sp_port_create(mlxsw_sp, i); + if (err) + goto err_port_create; + } + return 0; + +err_port_create: + for (i--; i >= 1; i--) + mlxsw_sp_port_remove(mlxsw_sp, i); + kfree(mlxsw_sp->ports); + return err; +} + +static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, + char *pude_pl, void *priv) +{ + struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp_port *mlxsw_sp_port; + enum mlxsw_reg_pude_oper_status status; + u8 local_port; + + local_port = mlxsw_reg_pude_local_port_get(pude_pl); + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + if (!mlxsw_sp_port) { + dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n", + local_port); + return; + } + + status = mlxsw_reg_pude_oper_status_get(pude_pl); + if (status == MLXSW_PORT_OPER_STATUS_UP) { + netdev_info(mlxsw_sp_port->dev, "link up\n"); + netif_carrier_on(mlxsw_sp_port->dev); + } else { + netdev_info(mlxsw_sp_port->dev, "link down\n"); + netif_carrier_off(mlxsw_sp_port->dev); + } +} + +static struct mlxsw_event_listener mlxsw_sp_pude_event = { + .func = mlxsw_sp_pude_event_func, + .trap_id = MLXSW_TRAP_ID_PUDE, +}; + +static int mlxsw_sp_event_register(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_event_trap_id trap_id) +{ + struct mlxsw_event_listener *el; + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + int err; + + switch (trap_id) { + case MLXSW_TRAP_ID_PUDE: + el = &mlxsw_sp_pude_event; + break; + } + err = mlxsw_core_event_listener_register(mlxsw_sp->core, el, mlxsw_sp); + if (err) + return err; + + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); + if (err) + goto err_event_trap_set; + + return 0; + +err_event_trap_set: + mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp); + return err; +} + +static void mlxsw_sp_event_unregister(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_event_trap_id trap_id) +{ + struct mlxsw_event_listener *el; + + switch (trap_id) { + case MLXSW_TRAP_ID_PUDE: + el = &mlxsw_sp_pude_event; + break; + } + mlxsw_core_event_listener_unregister(mlxsw_sp->core, el, mlxsw_sp); +} + +static void mlxsw_sp_rx_listener_func(struct sk_buff *skb, u8 local_port, + void *priv) +{ + struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; + struct mlxsw_sp_port_pcpu_stats *pcpu_stats; + + if (unlikely(!mlxsw_sp_port)) { + dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n", + local_port); + return; + } + + skb->dev = mlxsw_sp_port->dev; + + pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->rx_packets++; + pcpu_stats->rx_bytes += skb->len; + u64_stats_update_end(&pcpu_stats->syncp); + + skb->protocol 
= eth_type_trans(skb, skb->dev); + netif_receive_skb(skb); +} + +static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = { + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_FDB_MC, + }, + /* Traps for specific L2 packet types, not trapped as FDB MC */ + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_STP, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_LACP, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_EAPOL, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_LLDP, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_MMRP, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_MVRP, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_RPVST, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_DHCP, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_QUERY, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_V1_REPORT, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_V2_REPORT, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_V2_LEAVE, + }, + { + .func = mlxsw_sp_rx_listener_func, + .local_port = MLXSW_PORT_DONT_CARE, + .trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT, + }, +}; + +static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp) +{ + char htgt_pl[MLXSW_REG_HTGT_LEN]; + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + int i; + int err; + + mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_RX); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl); + if (err) + return err; + + mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(htgt), htgt_pl); + if (err) + return err; + + for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { + err = mlxsw_core_rx_listener_register(mlxsw_sp->core, + &mlxsw_sp_rx_listener[i], + mlxsw_sp); + if (err) + goto err_rx_listener_register; + + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, + mlxsw_sp_rx_listener[i].trap_id); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); + if (err) + goto err_rx_trap_set; + } + return 0; + +err_rx_trap_set: + mlxsw_core_rx_listener_unregister(mlxsw_sp->core, + &mlxsw_sp_rx_listener[i], + mlxsw_sp); +err_rx_listener_register: + for (i--; i >= 0; i--) { + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, + mlxsw_sp_rx_listener[i].trap_id); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); + + mlxsw_core_rx_listener_unregister(mlxsw_sp->core, + &mlxsw_sp_rx_listener[i], + mlxsw_sp); + } + return err; +} + +static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp) +{ + char hpkt_pl[MLXSW_REG_HPKT_LEN]; + int i; + + for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) { + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, + mlxsw_sp_rx_listener[i].trap_id); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl); + 
+ mlxsw_core_rx_listener_unregister(mlxsw_sp->core, + &mlxsw_sp_rx_listener[i], + mlxsw_sp); + } +} + +static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core, + enum mlxsw_reg_sfgc_type type, + enum mlxsw_reg_sfgc_bridge_type bridge_type) +{ + enum mlxsw_flood_table_type table_type; + enum mlxsw_sp_flood_table flood_table; + char sfgc_pl[MLXSW_REG_SFGC_LEN]; + + if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) { + table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID; + flood_table = 0; + } else { + table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST; + if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST) + flood_table = MLXSW_SP_FLOOD_TABLE_UC; + else + flood_table = MLXSW_SP_FLOOD_TABLE_BM; + } + + mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type, + flood_table); + return mlxsw_reg_write(mlxsw_core, MLXSW_REG(sfgc), sfgc_pl); +} + +static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp) +{ + int type, err; + + /* For non-offloaded netdevs, flood all traffic types to CPU + * port. + */ + for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) { + if (type == MLXSW_REG_SFGC_TYPE_RESERVED) + continue; + + err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, + MLXSW_REG_SFGC_BRIDGE_TYPE_VFID); + if (err) + return err; + } + + /* For bridged ports, use one flooding table for unknown unicast + * traffic and a second table for unregistered multicast and + * broadcast. + */ + for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) { + if (type == MLXSW_REG_SFGC_TYPE_RESERVED) + continue; + + err = __mlxsw_sp_flood_init(mlxsw_sp->core, type, + MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID); + if (err) + return err; + } + + return 0; +} + +static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, + const struct mlxsw_bus_info *mlxsw_bus_info) +{ + struct mlxsw_sp *mlxsw_sp = priv; + int err; + + mlxsw_sp->core = mlxsw_core; + mlxsw_sp->bus_info = mlxsw_bus_info; + + err = mlxsw_sp_base_mac_get(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n"); + return err; + } + + err = mlxsw_sp_ports_create(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n"); + goto err_ports_create; + } + + err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n"); + goto err_event_register; + } + + err = mlxsw_sp_traps_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps for RX\n"); + goto err_rx_listener_register; + } + + err = mlxsw_sp_flood_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize flood tables\n"); + goto err_flood_init; + } + + err = mlxsw_sp_buffers_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n"); + goto err_buffers_init; + } + + err = mlxsw_sp_switchdev_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n"); + goto err_switchdev_init; + } + + return 0; + +err_switchdev_init: +err_buffers_init: +err_flood_init: + mlxsw_sp_traps_fini(mlxsw_sp); +err_rx_listener_register: + mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); +err_event_register: + mlxsw_sp_ports_remove(mlxsw_sp); +err_ports_create: + mlxsw_sp_vfids_fini(mlxsw_sp); + return err; +} + +static void mlxsw_sp_fini(void *priv) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + mlxsw_sp_switchdev_fini(mlxsw_sp); + mlxsw_sp_traps_fini(mlxsw_sp); + mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); + mlxsw_sp_ports_remove(mlxsw_sp); + 
mlxsw_sp_vfids_fini(mlxsw_sp); +} + +static struct mlxsw_config_profile mlxsw_sp_config_profile = { + .used_max_vepa_channels = 1, + .max_vepa_channels = 0, + .used_max_lag = 1, + .max_lag = 64, + .used_max_port_per_lag = 1, + .max_port_per_lag = 16, + .used_max_mid = 1, + .max_mid = 7000, + .used_max_pgt = 1, + .max_pgt = 0, + .used_max_system_port = 1, + .max_system_port = 64, + .used_max_vlan_groups = 1, + .max_vlan_groups = 127, + .used_max_regions = 1, + .max_regions = 400, + .used_flood_tables = 1, + .used_flood_mode = 1, + .flood_mode = 3, + .max_fid_offset_flood_tables = 2, + .fid_offset_flood_table_size = VLAN_N_VID - 1, + .max_fid_flood_tables = 1, + .fid_flood_table_size = VLAN_N_VID, + .used_max_ib_mc = 1, + .max_ib_mc = 0, + .used_max_pkey = 1, + .max_pkey = 0, + .swid_config = { + { + .used_type = 1, + .type = MLXSW_PORT_SWID_TYPE_ETH, + } + }, +}; + +static struct mlxsw_driver mlxsw_sp_driver = { + .kind = MLXSW_DEVICE_KIND_SPECTRUM, + .owner = THIS_MODULE, + .priv_size = sizeof(struct mlxsw_sp), + .init = mlxsw_sp_init, + .fini = mlxsw_sp_fini, + .txhdr_construct = mlxsw_sp_txhdr_construct, + .txhdr_len = MLXSW_TXHDR_LEN, + .profile = &mlxsw_sp_config_profile, +}; + +static bool mlxsw_sp_port_dev_check(const struct net_device *dev) +{ + return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; +} + +static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct net_device *dev = mlxsw_sp_port->dev; + int err; + + /* When port is not bridged untagged packets are tagged with + * PVID=VID=1, thereby creating an implicit VLAN interface in + * the device. Remove it and let bridge code take care of its + * own VLANs. + */ + err = mlxsw_sp_port_kill_vid(dev, 0, 1); + if (err) + netdev_err(dev, "Failed to remove VID 1\n"); + + return err; +} + +static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct net_device *dev = mlxsw_sp_port->dev; + int err; + + /* Add implicit VLAN interface in the device, so that untagged + * packets will be classified to the default vFID. + */ + err = mlxsw_sp_port_add_vid(dev, 0, 1); + if (err) + netdev_err(dev, "Failed to add VID 1\n"); + + return err; +} + +static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) +{ + return !mlxsw_sp->master_bridge.dev || + mlxsw_sp->master_bridge.dev == br_dev; +} + +static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) +{ + mlxsw_sp->master_bridge.dev = br_dev; + mlxsw_sp->master_bridge.ref_count++; +} + +static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp, + struct net_device *br_dev) +{ + if (--mlxsw_sp->master_bridge.ref_count == 0) + mlxsw_sp->master_bridge.dev = NULL; +} + +static int mlxsw_sp_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info; + struct mlxsw_sp_port *mlxsw_sp_port; + struct net_device *upper_dev; + struct mlxsw_sp *mlxsw_sp; + int err; + + if (!mlxsw_sp_port_dev_check(dev)) + return NOTIFY_DONE; + + mlxsw_sp_port = netdev_priv(dev); + mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + info = ptr; + + switch (event) { + case NETDEV_PRECHANGEUPPER: + upper_dev = info->upper_dev; + /* HW limitation forbids to put ports to multiple bridges. 
*/
+		if (info->master && info->linking &&
+		    netif_is_bridge_master(upper_dev) &&
+		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
+			return NOTIFY_BAD;
+		break;
+	case NETDEV_CHANGEUPPER:
+		upper_dev = info->upper_dev;
+		if (info->master &&
+		    netif_is_bridge_master(upper_dev)) {
+			if (info->linking) {
+				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
+				if (err)
+					netdev_err(dev, "Failed to join bridge\n");
+				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
+				mlxsw_sp_port->bridged = 1;
+			} else {
+				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
+				if (err)
+					netdev_err(dev, "Failed to leave bridge\n");
+				mlxsw_sp_port->bridged = 0;
+				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
+			}
+		}
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
+	.notifier_call = mlxsw_sp_netdevice_event,
+};
+
+static int __init mlxsw_sp_module_init(void)
+{
+	int err;
+
+	register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+	err = mlxsw_core_driver_register(&mlxsw_sp_driver);
+	if (err)
+		goto err_core_driver_register;
+	return 0;
+
+err_core_driver_register:
+	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+	return err;
+}
+
+static void __exit mlxsw_sp_module_exit(void)
+{
+	mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+	unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+}
+
+module_init(mlxsw_sp_module_init);
+module_exit(mlxsw_sp_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox Spectrum driver");
+MODULE_MLXSW_DRIVER_ALIAS(MLXSW_DEVICE_KIND_SPECTRUM);
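The PRECHANGEUPPER/CHANGEUPPER split above is the standard way a switch driver vetoes and then reacts to bridge enslavement: the PRE event is the only point where returning NOTIFY_BAD makes the stack abort the operation cleanly. A minimal sketch of the same pattern for a hypothetical driver "foo"; foo_port_dev_check() and foo_can_join() are invented stand-ins for checks such as mlxsw_sp_port_dev_check() and mlxsw_sp_master_bridge_check():

#include <linux/netdevice.h>
#include <linux/notifier.h>

static bool foo_port_dev_check(const struct net_device *dev)
{
	return true;	/* stand-in: "is this one of our ports?" */
}

static bool foo_can_join(const struct net_device *dev,
			 const struct net_device *upper_dev)
{
	return true;	/* stand-in: "can we offload this master?" */
}

static int foo_netdevice_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info = ptr;

	if (!foo_port_dev_check(dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		/* Last chance to refuse: NOTIFY_BAD here fails the
		 * enslavement before any state has changed.
		 */
		if (info->master && info->linking &&
		    !foo_can_join(dev, info->upper_dev))
			return NOTIFY_BAD;
		break;
	case NETDEV_CHANGEUPPER:
		/* The change is already committed; only react here. */
		break;
	}
	return NOTIFY_DONE;
}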
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_H
+#define _MLXSW_SPECTRUM_H
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/list.h>
+
+#include "core.h"
+
+#define MLXSW_SP_VFID_BASE VLAN_N_VID
+
+struct mlxsw_sp_port;
+
+struct mlxsw_sp {
+	unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)];
+	unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)];
+	struct mlxsw_sp_port **ports;
+	struct mlxsw_core *core;
+	const struct mlxsw_bus_info *bus_info;
+	unsigned char base_mac[ETH_ALEN];
+	struct {
+		struct delayed_work dw;
+#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
+		unsigned int interval; /* ms */
+	} fdb_notify;
+#define MLXSW_SP_DEFAULT_AGEING_TIME 300
+	u32 ageing_time;
+	struct {
+		struct net_device *dev;
+		unsigned int ref_count;
+	} master_bridge;
+};
+
+struct mlxsw_sp_port_pcpu_stats {
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 tx_packets;
+	u64 tx_bytes;
+	struct u64_stats_sync syncp;
+	u32 tx_dropped;
+};
+
+struct mlxsw_sp_port {
+	struct net_device *dev;
+	struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
+	struct mlxsw_sp *mlxsw_sp;
+	u8 local_port;
+	u8 stp_state;
+	u8 learning:1,
+	   learning_sync:1,
+	   uc_flood:1,
+	   bridged:1;
+	u16 pvid;
+	/* 802.1Q bridge VLANs */
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	/* VLAN interfaces */
+	unsigned long active_vfids[BITS_TO_LONGS(VLAN_N_VID)];
+	u16 nr_vfids;
+};
+
+enum mlxsw_sp_flood_table {
+	MLXSW_SP_FLOOD_TABLE_UC,
+	MLXSW_SP_FLOOD_TABLE_BM,
+};
+
+int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
+
+int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				 enum mlxsw_reg_svfa_mt mt, bool valid, u16 fid,
+				 u16 vid);
+int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
+			   u16 vid_end, bool is_member, bool untagged);
+int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
+			  u16 vid);
+int mlxsw_sp_port_kill_vid(struct net_device *dev,
+			   __be16 __always_unused proto, u16 vid);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
new file mode 100644
index 000000000..d59195e3f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -0,0 +1,422 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "port.h"
+#include "reg.h"
+
+struct mlxsw_sp_pb {
+	u8 index;
+	u16 size;
+};
+
+#define MLXSW_SP_PB(_index, _size)	\
+	{				\
+		.index = _index,	\
+		.size = _size,		\
+	}
+
+static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = {
+	MLXSW_SP_PB(0, 208),
+	MLXSW_SP_PB(1, 208),
+	MLXSW_SP_PB(2, 208),
+	MLXSW_SP_PB(3, 208),
+	MLXSW_SP_PB(4, 208),
+	MLXSW_SP_PB(5, 208),
+	MLXSW_SP_PB(6, 208),
+	MLXSW_SP_PB(7, 208),
+	MLXSW_SP_PB(9, 208),
+};
+
+#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
+
+static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	char pbmc_pl[MLXSW_REG_PBMC_LEN];
+	int i;
+
+	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
+			    0xffff, 0xffff / 2);
+	for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
+		const struct mlxsw_sp_pb *pb;
+
+		pb = &mlxsw_sp_pbs[i];
+		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size);
+	}
+	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
+			       MLXSW_REG(pbmc), pbmc_pl);
+}
+
+#define MLXSW_SP_SB_BYTES_PER_CELL 96
+
+struct mlxsw_sp_sb_pool {
+	u8 pool;
+	enum mlxsw_reg_sbpr_dir dir;
+	enum mlxsw_reg_sbpr_mode mode;
+	u32 size;
+};
+
+#define MLXSW_SP_SB_POOL_INGRESS_SIZE				\
+	((15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) /	\
+	 MLXSW_SP_SB_BYTES_PER_CELL)
+#define MLXSW_SP_SB_POOL_EGRESS_SIZE				\
+	((14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) /	\
+	 MLXSW_SP_SB_BYTES_PER_CELL)
+
+#define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size)	\
+	{						\
+		.pool = _pool,				\
+		.dir = _dir,				\
+		.mode = _mode,				\
+		.size = _size,				\
+	}
+
+#define MLXSW_SP_SB_POOL_INGRESS(_pool, _size)			\
+	MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_INGRESS,	\
+			 MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
+
+#define MLXSW_SP_SB_POOL_EGRESS(_pool, _size)			\
+	MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_EGRESS,	\
+			 MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
+
+static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = {
+	MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_SB_POOL_INGRESS_SIZE),
+
MLXSW_SP_SB_POOL_INGRESS(1, 0), + MLXSW_SP_SB_POOL_INGRESS(2, 0), + MLXSW_SP_SB_POOL_INGRESS(3, 0), + MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_SB_POOL_EGRESS_SIZE), + MLXSW_SP_SB_POOL_EGRESS(1, 0), + MLXSW_SP_SB_POOL_EGRESS(2, 0), + MLXSW_SP_SB_POOL_EGRESS(2, MLXSW_SP_SB_POOL_EGRESS_SIZE), +}; + +#define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools) + +static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp) +{ + char sbpr_pl[MLXSW_REG_SBPR_LEN]; + int i; + int err; + + for (i = 0; i < MLXSW_SP_SB_POOLS_LEN; i++) { + const struct mlxsw_sp_sb_pool *pool; + + pool = &mlxsw_sp_sb_pools[i]; + mlxsw_reg_sbpr_pack(sbpr_pl, pool->pool, pool->dir, + pool->mode, pool->size); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl); + if (err) + return err; + } + return 0; +} + +struct mlxsw_sp_sb_cm { + union { + u8 pg; + u8 tc; + } u; + enum mlxsw_reg_sbcm_dir dir; + u32 min_buff; + u32 max_buff; + u8 pool; +}; + +#define MLXSW_SP_SB_CM(_pg_tc, _dir, _min_buff, _max_buff, _pool) \ + { \ + .u.pg = _pg_tc, \ + .dir = _dir, \ + .min_buff = _min_buff, \ + .max_buff = _max_buff, \ + .pool = _pool, \ + } + +#define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff) \ + MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBCM_DIR_INGRESS, \ + _min_buff, _max_buff, 0) + +#define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff) \ + MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, \ + _min_buff, _max_buff, 0) + +#define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc) \ + MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, 104, 2, 3) + +static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = { + MLXSW_SP_SB_CM_INGRESS(0, 10000 / MLXSW_SP_SB_BYTES_PER_CELL, 8), + MLXSW_SP_SB_CM_INGRESS(1, 0, 0), + MLXSW_SP_SB_CM_INGRESS(2, 0, 0), + MLXSW_SP_SB_CM_INGRESS(3, 0, 0), + MLXSW_SP_SB_CM_INGRESS(4, 0, 0), + MLXSW_SP_SB_CM_INGRESS(5, 0, 0), + MLXSW_SP_SB_CM_INGRESS(6, 0, 0), + MLXSW_SP_SB_CM_INGRESS(7, 0, 0), + MLXSW_SP_SB_CM_INGRESS(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff), + MLXSW_SP_SB_CM_EGRESS(0, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), + MLXSW_SP_SB_CM_EGRESS(1, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), + MLXSW_SP_SB_CM_EGRESS(2, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), + MLXSW_SP_SB_CM_EGRESS(3, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), + MLXSW_SP_SB_CM_EGRESS(4, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), + MLXSW_SP_SB_CM_EGRESS(5, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), + MLXSW_SP_SB_CM_EGRESS(6, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), + MLXSW_SP_SB_CM_EGRESS(7, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), + MLXSW_SP_SB_CM_EGRESS(8, 0, 0), + MLXSW_SP_SB_CM_EGRESS(9, 0, 0), + MLXSW_SP_SB_CM_EGRESS(10, 0, 0), + MLXSW_SP_SB_CM_EGRESS(11, 0, 0), + MLXSW_SP_SB_CM_EGRESS(12, 0, 0), + MLXSW_SP_SB_CM_EGRESS(13, 0, 0), + MLXSW_SP_SB_CM_EGRESS(14, 0, 0), + MLXSW_SP_SB_CM_EGRESS(15, 0, 0), + MLXSW_SP_SB_CM_EGRESS(16, 1, 0xff), +}; + +#define MLXSW_SP_SB_CMS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms) + +static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(0), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(1), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(2), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(3), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(4), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(5), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(6), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(7), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(8), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(9), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(10), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(11), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(12), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(13), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(14), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(15), + 
MLXSW_SP_CPU_PORT_SB_CM_EGRESS(16), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(17), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(18), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(19), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(20), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(21), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(22), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(23), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(24), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(25), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(26), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(27), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(28), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(29), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(30), + MLXSW_SP_CPU_PORT_SB_CM_EGRESS(31), +}; + +#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \ + ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms) + +static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, + const struct mlxsw_sp_sb_cm *cms, + size_t cms_len) +{ + char sbcm_pl[MLXSW_REG_SBCM_LEN]; + int i; + int err; + + for (i = 0; i < cms_len; i++) { + const struct mlxsw_sp_sb_cm *cm; + + cm = &cms[i]; + mlxsw_reg_sbcm_pack(sbcm_pl, local_port, cm->u.pg, cm->dir, + cm->min_buff, cm->max_buff, cm->pool); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl); + if (err) + return err; + } + return 0; +} + +static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + return mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, mlxsw_sp_sb_cms, + MLXSW_SP_SB_CMS_LEN); +} + +static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp) +{ + return mlxsw_sp_sb_cms_init(mlxsw_sp, 0, mlxsw_sp_cpu_port_sb_cms, + MLXSW_SP_CPU_PORT_SB_MCS_LEN); +} + +struct mlxsw_sp_sb_pm { + u8 pool; + enum mlxsw_reg_sbpm_dir dir; + u32 min_buff; + u32 max_buff; +}; + +#define MLXSW_SP_SB_PM(_pool, _dir, _min_buff, _max_buff) \ + { \ + .pool = _pool, \ + .dir = _dir, \ + .min_buff = _min_buff, \ + .max_buff = _max_buff, \ + } + +#define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff) \ + MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_INGRESS, \ + _min_buff, _max_buff) + +#define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff) \ + MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_EGRESS, \ + _min_buff, _max_buff) + +static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = { + MLXSW_SP_SB_PM_INGRESS(0, 0, 0xff), + MLXSW_SP_SB_PM_INGRESS(1, 0, 0), + MLXSW_SP_SB_PM_INGRESS(2, 0, 0), + MLXSW_SP_SB_PM_INGRESS(3, 0, 0), + MLXSW_SP_SB_PM_EGRESS(0, 0, 7), + MLXSW_SP_SB_PM_EGRESS(1, 0, 0), + MLXSW_SP_SB_PM_EGRESS(2, 0, 0), + MLXSW_SP_SB_PM_EGRESS(3, 0, 0), +}; + +#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms) + +static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + char sbpm_pl[MLXSW_REG_SBPM_LEN]; + int i; + int err; + + for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) { + const struct mlxsw_sp_sb_pm *pm; + + pm = &mlxsw_sp_sb_pms[i]; + mlxsw_reg_sbpm_pack(sbpm_pl, mlxsw_sp_port->local_port, + pm->pool, pm->dir, + pm->min_buff, pm->max_buff); + err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, + MLXSW_REG(sbpm), sbpm_pl); + if (err) + return err; + } + return 0; +} + +struct mlxsw_sp_sb_mm { + u8 prio; + u32 min_buff; + u32 max_buff; + u8 pool; +}; + +#define MLXSW_SP_SB_MM(_prio, _min_buff, _max_buff, _pool) \ + { \ + .prio = _prio, \ + .min_buff = _min_buff, \ + .max_buff = _max_buff, \ + .pool = _pool, \ + } + +static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = { + MLXSW_SP_SB_MM(0, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(1, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(2, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(3, 20000 / 
MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(4, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(5, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(6, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(7, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(8, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(10, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(11, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(12, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(13, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+	MLXSW_SP_SB_MM(14, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+};
+
+#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
+
+static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
+{
+	char sbmm_pl[MLXSW_REG_SBMM_LEN];
+	int i;
+	int err;
+
+	for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
+		const struct mlxsw_sp_sb_mm *mc;
+
+		mc = &mlxsw_sp_sb_mms[i];
+		mlxsw_reg_sbmm_pack(sbmm_pl, mc->prio, mc->min_buff,
+				    mc->max_buff, mc->pool);
+		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
+{
+	int err;
+
+	err = mlxsw_sp_sb_pools_init(mlxsw_sp);
+	if (err)
+		return err;
+	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
+	if (err)
+		return err;
+	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
+
+	return err;
+}
+
+int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	int err;
+
+	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+	if (err)
+		return err;
+	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
+	if (err)
+		return err;
+	err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
+
+	return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
new file mode 100644
index 000000000..617fb22b5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -0,0 +1,903 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+ * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <net/switchdev.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+
+static int mlxsw_sp_port_attr_get(struct net_device *dev,
+				  struct switchdev_attr *attr)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+	switch (attr->id) {
+	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+		attr->u.ppid.id_len = sizeof(mlxsw_sp->base_mac);
+		memcpy(&attr->u.ppid.id, &mlxsw_sp->base_mac,
+		       attr->u.ppid.id_len);
+		break;
+	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+		attr->u.brport_flags =
+			(mlxsw_sp_port->learning ? BR_LEARNING : 0) |
+			(mlxsw_sp_port->learning_sync ? BR_LEARNING_SYNC : 0) |
+			(mlxsw_sp_port->uc_flood ? BR_FLOOD : 0);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				       u8 state)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	enum mlxsw_reg_spms_state spms_state;
+	char *spms_pl;
+	u16 vid;
+	int err;
+
+	switch (state) {
+	case BR_STATE_DISABLED: /* fall-through */
+	case BR_STATE_FORWARDING:
+		spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
+		break;
+	case BR_STATE_LISTENING: /* fall-through */
+	case BR_STATE_LEARNING:
+		spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
+		break;
+	case BR_STATE_BLOCKING:
+		spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
+		break;
+	default:
+		BUG();
+	}
+
+	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
+	if (!spms_pl)
+		return -ENOMEM;
+	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
+	for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
+		mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
+
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
+	kfree(spms_pl);
+	return err;
+}
+
+static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
+					    struct switchdev_trans *trans,
+					    u8 state)
+{
+	if (switchdev_trans_ph_prepare(trans))
+		return 0;
+
+	mlxsw_sp_port->stp_state = state;
+	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
+}
+
+static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+				     u16 fid_begin, u16 fid_end, bool set,
+				     bool only_uc)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u16 range = fid_end - fid_begin + 1;
+	char *sftr_pl;
+	int err;
+
+	sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+	if (!sftr_pl)
+		return -ENOMEM;
+
+	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, fid_begin,
+			    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range,
+			    mlxsw_sp_port->local_port, set);
+	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+	if (err)
+		goto buffer_out;
+
+	/* Flooding control allows one to decide whether a given port will
+	 * flood unicast traffic for which there is no FDB entry.
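+	 * As a hedged illustration (hypothetical FID range, not part of
+	 * this patch): enabling only unknown-unicast flooding for FIDs
+	 * 1-10 on this port boils down to the single SFTR write packed
+	 * above, after which only_uc makes us skip the BM table:
+	 *
+	 *	mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, 1,
+	 *			    MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST,
+	 *			    10, local_port, true);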
+ */ + if (only_uc) + goto buffer_out; + + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, fid_begin, + MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST, range, + mlxsw_sp_port->local_port, set); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); + +buffer_out: + kfree(sftr_pl); + return err; +} + +static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, + bool set) +{ + struct net_device *dev = mlxsw_sp_port->dev; + u16 vid, last_visited_vid; + int err; + + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { + err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set, + true); + if (err) { + last_visited_vid = vid; + goto err_port_flood_set; + } + } + + return 0; + +err_port_flood_set: + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid) + __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true); + netdev_err(dev, "Failed to configure unicast flooding\n"); + return err; +} + +static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + unsigned long brport_flags) +{ + unsigned long uc_flood = mlxsw_sp_port->uc_flood ? BR_FLOOD : 0; + bool set; + int err; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + if ((uc_flood ^ brport_flags) & BR_FLOOD) { + set = mlxsw_sp_port->uc_flood ? false : true; + err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port, set); + if (err) + return err; + } + + mlxsw_sp_port->uc_flood = brport_flags & BR_FLOOD ? 1 : 0; + mlxsw_sp_port->learning = brport_flags & BR_LEARNING ? 1 : 0; + mlxsw_sp_port->learning_sync = brport_flags & BR_LEARNING_SYNC ? 1 : 0; + + return 0; +} + +static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time) +{ + char sfdat_pl[MLXSW_REG_SFDAT_LEN]; + int err; + + mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl); + if (err) + return err; + mlxsw_sp->ageing_time = ageing_time; + return 0; +} + +static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + unsigned long ageing_clock_t) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); + u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time); +} + +static int mlxsw_sp_port_attr_set(struct net_device *dev, + const struct switchdev_attr *attr, + struct switchdev_trans *trans) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err = 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans, + attr->u.stp_state); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans, + attr->u.brport_flags); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: + err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans, + attr->u.ageing_time); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char spvid_pl[MLXSW_REG_SPVID_LEN]; + + mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); +} + +static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid) +{ + char 
sfmr_pl[MLXSW_REG_SFMR_LEN]; + int err; + + mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); + + if (err) + return err; + + set_bit(fid, mlxsw_sp->active_fids); + return 0; +} + +static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid) +{ + char sfmr_pl[MLXSW_REG_SFMR_LEN]; + + clear_bit(fid, mlxsw_sp->active_fids); + + mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, + fid, fid); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl); +} + +static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) +{ + enum mlxsw_reg_svfa_mt mt; + + if (mlxsw_sp_port->nr_vfids) + mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; + else + mt = MLXSW_REG_SVFA_MT_VID_TO_FID; + + return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid); +} + +static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid) +{ + enum mlxsw_reg_svfa_mt mt; + + if (!mlxsw_sp_port->nr_vfids) + return 0; + + mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID; + return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid); +} + +static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin, + u16 vid_end) +{ + u16 vid; + int err; + + for (vid = vid_begin; vid <= vid_end; vid++) { + err = mlxsw_sp_port_add_vid(dev, 0, vid); + if (err) + goto err_port_add_vid; + } + return 0; + +err_port_add_vid: + for (vid--; vid >= vid_begin; vid--) + mlxsw_sp_port_kill_vid(dev, 0, vid); + return err; +} + +static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid_begin, u16 vid_end, + bool flag_untagged, bool flag_pvid) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct net_device *dev = mlxsw_sp_port->dev; + enum mlxsw_reg_svfa_mt mt; + u16 vid, vid_e; + int err; + + /* In case this is invoked with BRIDGE_FLAGS_SELF and port is + * not bridged, then packets ingressing through the port with + * the specified VIDs will be directed to CPU. + */ + if (!mlxsw_sp_port->bridged) + return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end); + + for (vid = vid_begin; vid <= vid_end; vid++) { + if (!test_bit(vid, mlxsw_sp->active_fids)) { + err = mlxsw_sp_fid_create(mlxsw_sp, vid); + if (err) { + netdev_err(dev, "Failed to create FID=%d\n", + vid); + return err; + } + + /* When creating a FID, we set a VID to FID mapping + * regardless of the port's mode. 
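+			 * For example (hypothetical VID, same helper as
+			 * below): adding VID 10 creates FID 10 and then
+			 * installs the 1:1 mapping:
+			 *
+			 *	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+			 *		MLXSW_REG_SVFA_MT_VID_TO_FID,
+			 *		true, 10, 10);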
+			 */
+			mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+			err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
+							   true, vid, vid);
+			if (err) {
+				netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
+					   vid);
+				return err;
+			}
+		}
+
+		/* Set FID mapping according to port's mode */
+		err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
+		if (err) {
+			netdev_err(dev, "Failed to map FID=%d\n", vid);
+			return err;
+		}
+	}
+
+	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
+					true, false);
+	if (err) {
+		netdev_err(dev, "Failed to configure flooding\n");
+		return err;
+	}
+
+	for (vid = vid_begin; vid <= vid_end;
+	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
+		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
+			    vid_end);
+
+		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, true,
+					     flag_untagged);
+		if (err) {
+			netdev_err(mlxsw_sp_port->dev, "Unable to add VIDs %d-%d\n",
+				   vid, vid_e);
+			return err;
+		}
+	}
+
+	vid = vid_begin;
+	if (flag_pvid && mlxsw_sp_port->pvid != vid) {
+		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
+		if (err) {
+			netdev_err(mlxsw_sp_port->dev, "Unable to add PVID %d\n",
+				   vid);
+			return err;
+		}
+		mlxsw_sp_port->pvid = vid;
+	}
+
+	/* Change the activity bits only if the HW operation succeeded */
+	for (vid = vid_begin; vid <= vid_end; vid++)
+		set_bit(vid, mlxsw_sp_port->active_vlans);
+
+	return mlxsw_sp_port_stp_state_set(mlxsw_sp_port,
+					   mlxsw_sp_port->stp_state);
+}
+
+static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
+				   const struct switchdev_obj_port_vlan *vlan,
+				   struct switchdev_trans *trans)
+{
+	bool untagged_flag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+	bool pvid_flag = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+
+	if (switchdev_trans_ph_prepare(trans))
+		return 0;
+
+	return __mlxsw_sp_port_vlans_add(mlxsw_sp_port,
+					 vlan->vid_begin, vlan->vid_end,
+					 untagged_flag, pvid_flag);
+}
+
+static int mlxsw_sp_port_fdb_op(struct mlxsw_sp_port *mlxsw_sp_port,
+				const char *mac, u16 vid, bool adding,
+				bool dynamic)
+{
+	enum mlxsw_reg_sfd_rec_policy policy;
+	enum mlxsw_reg_sfd_op op;
+	char *sfd_pl;
+	int err;
+
+	if (!vid)
+		vid = mlxsw_sp_port->pvid;
+
+	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+	if (!sfd_pl)
+		return -ENOMEM;
+
+	policy = dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
+			   MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
+	op = adding ?
MLXSW_REG_SFD_OP_WRITE_EDIT : + MLXSW_REG_SFD_OP_WRITE_REMOVE; + mlxsw_reg_sfd_pack(sfd_pl, op, 0); + mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, + mac, vid, MLXSW_REG_SFD_REC_ACTION_NOP, + mlxsw_sp_port->local_port); + err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sfd), + sfd_pl); + kfree(sfd_pl); + + return err; +} + +static int +mlxsw_sp_port_fdb_static_add(struct mlxsw_sp_port *mlxsw_sp_port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + if (switchdev_trans_ph_prepare(trans)) + return 0; + + return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid, + true, false); +} + +static int mlxsw_sp_port_obj_add(struct net_device *dev, + const struct switchdev_obj *obj, + struct switchdev_trans *trans) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, + SWITCHDEV_OBJ_PORT_VLAN(obj), + trans); + break; + case SWITCHDEV_OBJ_ID_PORT_FDB: + err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port, + SWITCHDEV_OBJ_PORT_FDB(obj), + trans); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin, + u16 vid_end) +{ + u16 vid; + int err; + + for (vid = vid_begin; vid <= vid_end; vid++) { + err = mlxsw_sp_port_kill_vid(dev, 0, vid); + if (err) + return err; + } + + return 0; +} + +static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, + u16 vid_begin, u16 vid_end, bool init) +{ + struct net_device *dev = mlxsw_sp_port->dev; + u16 vid, vid_e; + int err; + + /* In case this is invoked with BRIDGE_FLAGS_SELF and port is + * not bridged, then prevent packets ingressing through the + * port with the specified VIDs from being trapped to CPU. 
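+	 * E.g. (hypothetical range): deleting VIDs 10-20 from an
+	 * unbridged port reduces to undoing the earlier additions:
+	 *
+	 *	return mlxsw_sp_port_kill_vids(dev, 10, 20);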
+	 */
+	if (!init && !mlxsw_sp_port->bridged)
+		return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);
+
+	for (vid = vid_begin; vid <= vid_end;
+	     vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
+		vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
+			    vid_end);
+		err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e, false,
+					     false);
+		if (err) {
+			netdev_err(mlxsw_sp_port->dev, "Unable to del VIDs %d-%d\n",
+				   vid, vid_e);
+			return err;
+		}
+	}
+
+	if ((mlxsw_sp_port->pvid >= vid_begin) &&
+	    (mlxsw_sp_port->pvid <= vid_end)) {
+		/* Default VLAN is always 1 */
+		mlxsw_sp_port->pvid = 1;
+		err = mlxsw_sp_port_pvid_set(mlxsw_sp_port,
+					     mlxsw_sp_port->pvid);
+		if (err) {
+			netdev_err(mlxsw_sp_port->dev, "Unable to del PVID %d\n",
+				   vid);
+			return err;
+		}
+	}
+
+	if (init)
+		goto out;
+
+	err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
+					false, false);
+	if (err) {
+		netdev_err(dev, "Failed to clear flooding\n");
+		return err;
+	}
+
+	for (vid = vid_begin; vid <= vid_end; vid++) {
+		/* Remove FID mapping in case of Virtual mode */
+		err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
+		if (err) {
+			netdev_err(dev, "Failed to unmap FID=%d\n", vid);
+			return err;
+		}
+	}
+
+out:
+	/* Change the activity bits only if the HW operation succeeded */
+	for (vid = vid_begin; vid <= vid_end; vid++)
+		clear_bit(vid, mlxsw_sp_port->active_vlans);
+
+	return 0;
+}
+
+static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
+				   const struct switchdev_obj_port_vlan *vlan)
+{
+	return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
+					 vlan->vid_begin, vlan->vid_end, false);
+}
+
+static int
+mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
+			     const struct switchdev_obj_port_fdb *fdb)
+{
+	return mlxsw_sp_port_fdb_op(mlxsw_sp_port, fdb->addr, fdb->vid,
+				    false, false);
+}
+
+static int mlxsw_sp_port_obj_del(struct net_device *dev,
+				 const struct switchdev_obj *obj)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	int err = 0;
+
+	switch (obj->id) {
+	case SWITCHDEV_OBJ_ID_PORT_VLAN:
+		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
+					      SWITCHDEV_OBJ_PORT_VLAN(obj));
+		break;
+	case SWITCHDEV_OBJ_ID_PORT_FDB:
+		err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
+						   SWITCHDEV_OBJ_PORT_FDB(obj));
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
+				  struct switchdev_obj_port_fdb *fdb,
+				  switchdev_obj_dump_cb_t *cb)
+{
+	char *sfd_pl;
+	char mac[ETH_ALEN];
+	u16 vid;
+	u8 local_port;
+	u8 num_rec;
+	int stored_err = 0;
+	int i;
+	int err;
+
+	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
+	if (!sfd_pl)
+		return -ENOMEM;
+
+	mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
+	do {
+		mlxsw_reg_sfd_num_rec_set(sfd_pl, MLXSW_REG_SFD_REC_MAX_COUNT);
+		err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core,
+				      MLXSW_REG(sfd), sfd_pl);
+		if (err)
+			goto out;
+
+		num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
+
+		/* Even in case of error, we have to run the dump to the end
+		 * so the session in firmware is finished.
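+		 * Sketch of the resulting loop shape (names as in this
+		 * function):
+		 *
+		 *	do {
+		 *		query SFD; unless stored_err is set, replay
+		 *		the returned records to cb, latching the
+		 *		first callback error in stored_err;
+		 *	} while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);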
+ */ + if (stored_err) + continue; + + for (i = 0; i < num_rec; i++) { + switch (mlxsw_reg_sfd_rec_type_get(sfd_pl, i)) { + case MLXSW_REG_SFD_REC_TYPE_UNICAST: + mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &vid, + &local_port); + if (local_port == mlxsw_sp_port->local_port) { + ether_addr_copy(fdb->addr, mac); + fdb->ndm_state = NUD_REACHABLE; + fdb->vid = vid; + err = cb(&fdb->obj); + if (err) + stored_err = err; + } + } + } + } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT); + +out: + kfree(sfd_pl); + return stored_err ? stored_err : err; +} + +static int mlxsw_sp_port_vlan_dump(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_obj_port_vlan *vlan, + switchdev_obj_dump_cb_t *cb) +{ + u16 vid; + int err = 0; + + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) { + vlan->flags = 0; + if (vid == mlxsw_sp_port->pvid) + vlan->flags |= BRIDGE_VLAN_INFO_PVID; + vlan->vid_begin = vid; + vlan->vid_end = vid; + err = cb(&vlan->obj); + if (err) + break; + } + return err; +} + +static int mlxsw_sp_port_obj_dump(struct net_device *dev, + struct switchdev_obj *obj, + switchdev_obj_dump_cb_t *cb) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = mlxsw_sp_port_vlan_dump(mlxsw_sp_port, + SWITCHDEV_OBJ_PORT_VLAN(obj), cb); + break; + case SWITCHDEV_OBJ_ID_PORT_FDB: + err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port, + SWITCHDEV_OBJ_PORT_FDB(obj), cb); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = { + .switchdev_port_attr_get = mlxsw_sp_port_attr_get, + .switchdev_port_attr_set = mlxsw_sp_port_attr_set, + .switchdev_port_obj_add = mlxsw_sp_port_obj_add, + .switchdev_port_obj_del = mlxsw_sp_port_obj_del, + .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump, +}; + +static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, + char *sfn_pl, int rec_index, + bool adding) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + char mac[ETH_ALEN]; + u8 local_port; + u16 vid; + int err; + + mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &vid, &local_port); + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + if (!mlxsw_sp_port) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n"); + return; + } + + err = mlxsw_sp_port_fdb_op(mlxsw_sp_port, mac, vid, + adding && mlxsw_sp_port->learning, true); + if (err) { + if (net_ratelimit()) + netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n"); + return; + } + + if (mlxsw_sp_port->learning && mlxsw_sp_port->learning_sync) { + struct switchdev_notifier_fdb_info info; + unsigned long notifier_type; + + info.addr = mac; + info.vid = vid; + notifier_type = adding ? 
SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL; + call_switchdev_notifiers(notifier_type, mlxsw_sp_port->dev, + &info.info); + } +} + +static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, + char *sfn_pl, int rec_index) +{ + switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) { + case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC: + mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl, + rec_index, true); + break; + case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC: + mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl, + rec_index, false); + break; + } +} + +static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp) +{ + schedule_delayed_work(&mlxsw_sp->fdb_notify.dw, + msecs_to_jiffies(mlxsw_sp->fdb_notify.interval)); +} + +static void mlxsw_sp_fdb_notify_work(struct work_struct *work) +{ + struct mlxsw_sp *mlxsw_sp; + char *sfn_pl; + u8 num_rec; + int i; + int err; + + sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL); + if (!sfn_pl) + return; + + mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work); + + do { + mlxsw_reg_sfn_pack(sfn_pl); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); + if (err) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n"); + break; + } + num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl); + for (i = 0; i < num_rec; i++) + mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); + + } while (num_rec); + + kfree(sfn_pl); + mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); +} + +static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) +{ + int err; + + err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n"); + return err; + } + INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work); + mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL; + mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); + return 0; +} + +static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) +{ + cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw); +} + +static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp) +{ + u16 fid; + + for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID) + mlxsw_sp_fid_destroy(mlxsw_sp, fid); +} + +int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) +{ + return mlxsw_sp_fdb_init(mlxsw_sp); +} + +void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp_fdb_fini(mlxsw_sp); + mlxsw_sp_fids_fini(mlxsw_sp); +} + +int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct net_device *dev = mlxsw_sp_port->dev; + int err; + + /* Allow only untagged packets to ingress and tag them internally + * with VID 1. + */ + mlxsw_sp_port->pvid = 1; + err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID, true); + if (err) { + netdev_err(dev, "Unable to init VLANs\n"); + return err; + } + + /* Add implicit VLAN interface in the device, so that untagged + * packets will be classified to the default vFID. 
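+	 * Conceptually (relying on the vFID helpers earlier in this
+	 * patch): untagged frames get PVID 1, and VID 1 is mapped to
+	 * FID MLXSW_SP_VFID_BASE + vfid, i.e. the first entry above
+	 * the 4K 802.1Q FID space.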
+ */ + err = mlxsw_sp_port_add_vid(dev, 0, 1); + if (err) + netdev_err(dev, "Failed to configure default vFID\n"); + + return err; +} + +void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops; +} + +void mlxsw_sp_port_switchdev_fini(struct mlxsw_sp_port *mlxsw_sp_port) +{ +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 62cbbd1ad..d85960cfb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -57,13 +57,11 @@ static const char mlxsw_sx_driver_version[] = "1.0"; struct mlxsw_sx_port; -#define MLXSW_SW_HW_ID_LEN 6 - struct mlxsw_sx { struct mlxsw_sx_port **ports; struct mlxsw_core *core; const struct mlxsw_bus_info *bus_info; - u8 hw_id[MLXSW_SW_HW_ID_LEN]; + u8 hw_id[ETH_ALEN]; }; struct mlxsw_sx_port_pcpu_stats { @@ -868,7 +866,7 @@ static int mlxsw_sx_port_attr_get(struct net_device *dev, struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx; switch (attr->id) { - case SWITCHDEV_ATTR_PORT_PARENT_ID: + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id); memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len); break; @@ -925,7 +923,8 @@ static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port, spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL); if (!spms_pl) return -ENOMEM; - mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state); + mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port); + mlxsw_reg_spms_vid_pack(spms_pl, vid, state); err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl); kfree(spms_pl); return err; @@ -1148,7 +1147,7 @@ static void mlxsw_sx_pude_event_func(const struct mlxsw_reg_info *reg, } status = mlxsw_reg_pude_oper_status_get(pude_pl); - if (MLXSW_PORT_OPER_STATUS_UP == status) { + if (status == MLXSW_PORT_OPER_STATUS_UP) { netdev_info(mlxsw_sx_port->dev, "link up\n"); netif_carrier_on(mlxsw_sx_port->dev); } else { @@ -1178,8 +1177,7 @@ static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx, if (err) return err; - mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, - MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id); + mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id); err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); if (err) goto err_event_trap_set; @@ -1212,9 +1210,8 @@ static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port, struct mlxsw_sx_port_pcpu_stats *pcpu_stats; if (unlikely(!mlxsw_sx_port)) { - if (net_ratelimit()) - dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n", - local_port); + dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n", + local_port); return; } @@ -1316,6 +1313,11 @@ static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx) if (err) return err; + mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL); + err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl); + if (err) + return err; + for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) { err = mlxsw_core_rx_listener_register(mlxsw_sx->core, &mlxsw_sx_rx_listener[i], @@ -1324,7 +1326,6 @@ static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx) goto err_rx_listener_register; mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU, - MLXSW_REG_HTGT_TRAP_GROUP_RX, mlxsw_sx_rx_listener[i].trap_id); err = mlxsw_reg_write(mlxsw_sx->core, 
MLXSW_REG(hpkt), hpkt_pl); if (err) @@ -1339,7 +1340,6 @@ err_rx_trap_set: err_rx_listener_register: for (i--; i >= 0; i--) { mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, - MLXSW_REG_HTGT_TRAP_GROUP_RX, mlxsw_sx_rx_listener[i].trap_id); mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); @@ -1357,7 +1357,6 @@ static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx) for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) { mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, - MLXSW_REG_HTGT_TRAP_GROUP_RX, mlxsw_sx_rx_listener[i].trap_id); mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl); @@ -1371,25 +1370,15 @@ static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx) { char sfgc_pl[MLXSW_REG_SFGC_LEN]; char sgcr_pl[MLXSW_REG_SGCR_LEN]; - char *smid_pl; char *sftr_pl; int err; - /* Due to FW bug, we must configure SMID. */ - smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); - if (!smid_pl) - return -ENOMEM; - mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID); - err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl); - kfree(smid_pl); - if (err) - return err; - /* Configure a flooding table, which includes only CPU port. */ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL); if (!sftr_pl) return -ENOMEM; - mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0); + mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0, + MLXSW_PORT_CPU_PORT, true); err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl); kfree(sftr_pl); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxsw/txheader.h b/drivers/net/ethernet/mellanox/mlxsw/txheader.h index 06fc46c78..fdf94720c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/txheader.h +++ b/drivers/net/ethernet/mellanox/mlxsw/txheader.h @@ -38,6 +38,7 @@ #define MLXSW_TXHDR_LEN 0x10 #define MLXSW_TXHDR_VERSION_0 0 +#define MLXSW_TXHDR_VERSION_1 1 enum { MLXSW_TXHDR_ETH_CTL, -- cgit v1.2.3