Diffstat (limited to 'drivers/net/ethernet/hisilicon')
19 files changed, 1328 insertions, 388 deletions
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 74beb1867..4ccc03263 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -25,6 +25,7 @@ config HIX5HD2_GMAC config HIP04_ETH tristate "HISILICON P04 Ethernet support" + depends on HAS_IOMEM # For MFD_SYSCON select MARVELL_PHY select MFD_SYSCON select HNS_MDIO diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 253f8ed05..0c4afe95e 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -500,8 +500,10 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) while (cnt && !last) { buf = priv->rx_buf[priv->rx_head]; skb = build_skb(buf, priv->rx_buf_size); - if (unlikely(!skb)) + if (unlikely(!skb)) { net_dbg_ratelimited("build_skb failed\n"); + goto refill; + } dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head], RX_BUF_SIZE, DMA_FROM_DEVICE); @@ -528,6 +530,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget) rx++; } +refill: buf = netdev_alloc_frag(priv->rx_buf_size); if (!buf) goto done; diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index b36452974..3bfe36f94 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -95,21 +95,17 @@ static struct hnae_buf_ops hnae_bops = { static int __ae_match(struct device *dev, const void *data) { struct hnae_ae_dev *hdev = cls_to_ae_dev(dev); - const char *ae_id = data; - if (!strncmp(ae_id, hdev->name, AE_NAME_SIZE)) - return 1; - - return 0; + return hdev->dev->of_node == data; } -static struct hnae_ae_dev *find_ae(const char *ae_id) +static struct hnae_ae_dev *find_ae(const struct device_node *ae_node) { struct device *dev; - WARN_ON(!ae_id); + WARN_ON(!ae_node); - dev = class_find_device(hnae_class, NULL, ae_id, __ae_match); + dev = class_find_device(hnae_class, NULL, ae_node, __ae_match); return dev ? cls_to_ae_dev(dev) : NULL; } @@ -316,7 +312,8 @@ EXPORT_SYMBOL(hnae_reinit_handle); * return handle ptr or ERR_PTR */ struct hnae_handle *hnae_get_handle(struct device *owner_dev, - const char *ae_id, u32 port_id, + const struct device_node *ae_node, + u32 port_id, struct hnae_buf_ops *bops) { struct hnae_ae_dev *dev; @@ -324,7 +321,7 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev, int i, j; int ret; - dev = find_ae(ae_id); + dev = find_ae(ae_node); if (!dev) return ERR_PTR(-ENODEV); diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index cec95ac86..1cbcb9fa3 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -35,7 +35,7 @@ #include <linux/phy.h> #include <linux/types.h> -#define HNAE_DRIVER_VERSION "1.3.0" +#define HNAE_DRIVER_VERSION "2.0" #define HNAE_DRIVER_NAME "hns" #define HNAE_COPYRIGHT "Copyright(c) 2015 Huawei Corporation." #define HNAE_DRIVER_STRING "Hisilicon Network Subsystem Driver" @@ -63,6 +63,7 @@ do { \ #define AE_VERSION_1 ('6' << 16 | '6' << 8 | '0') #define AE_VERSION_2 ('1' << 24 | '6' << 16 | '1' << 8 | '0') +#define AE_IS_VER1(ver) ((ver) == AE_VERSION_1) #define AE_NAME_SIZE 16 /* some said the RX and TX RCB format should not be the same in the future. 
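Editor's note, not part of the patch: find_ae()/__ae_match() above now key the class_find_device() lookup on the accelerator's device_node instead of a name string, and hnae_get_handle() grows a matching device_node parameter. A minimal consumer-side sketch follows; the "ae-handle" phandle property name and the NULL buffer-ops argument (assumed to fall back to the default hnae_bops) are illustrative assumptions, not taken from this hunk.

#include <linux/err.h>
#include <linux/of.h>
#include "hnae.h"

static struct hnae_handle *example_get_handle(struct device *dev, u32 port_id)
{
	struct device_node *ae_node;
	struct hnae_handle *h;

	/* resolve the DT node of the acceleration engine via a phandle */
	ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
	if (!ae_node)
		return ERR_PTR(-ENODEV);

	/* __ae_match() compares hdev->dev->of_node against this pointer,
	 * so no "dsa_name"/"ae-name" string is needed any more
	 */
	h = hnae_get_handle(dev, ae_node, port_id, NULL);

	of_node_put(ae_node);
	return h;
}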
But @@ -144,23 +145,61 @@ enum hnae_led_state { #define HNS_RXD_ASID_S 24 #define HNS_RXD_ASID_M (0xff << HNS_RXD_ASID_S) +#define HNSV2_TXD_BUFNUM_S 0 +#define HNSV2_TXD_BUFNUM_M (0x7 << HNSV2_TXD_BUFNUM_S) +#define HNSV2_TXD_RI_B 1 +#define HNSV2_TXD_L4CS_B 2 +#define HNSV2_TXD_L3CS_B 3 +#define HNSV2_TXD_FE_B 4 +#define HNSV2_TXD_VLD_B 5 + +#define HNSV2_TXD_TSE_B 0 +#define HNSV2_TXD_VLAN_EN_B 1 +#define HNSV2_TXD_SNAP_B 2 +#define HNSV2_TXD_IPV6_B 3 +#define HNSV2_TXD_SCTP_B 4 + /* hardware spec ring buffer format */ struct __packed hnae_desc { __le64 addr; union { struct { - __le16 asid_bufnum_pid; + union { + __le16 asid_bufnum_pid; + __le16 asid; + }; __le16 send_size; - __le32 flag_ipoffset; - __le32 reserved_3[4]; + union { + __le32 flag_ipoffset; + struct { + __u8 bn_pid; + __u8 ra_ri_cs_fe_vld; + __u8 ip_offset; + __u8 tse_vlan_snap_v6_sctp_nth; + }; + }; + __le16 mss; + __u8 l4_len; + __u8 reserved1; + __le16 paylen; + __u8 vmid; + __u8 qid; + __le32 reserved2[2]; } tx; struct { __le32 ipoff_bnum_pid_flag; __le16 pkt_len; __le16 size; - __le32 vlan_pri_asid; - __le32 reserved_2[3]; + union { + __le32 vlan_pri_asid; + struct { + __le16 asid; + __le16 vlan_cfi_pri; + }; + }; + __le32 rss_hash; + __le32 reserved_1[2]; } rx; }; }; @@ -302,7 +341,8 @@ struct hnae_queue { void __iomem *io_base; phys_addr_t phy_base; struct hnae_ae_dev *dev; /* the device who use this queue */ - struct hnae_ring rx_ring, tx_ring; + struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp; + struct hnae_ring tx_ring ____cacheline_internodealigned_in_smp; struct hnae_handle *handle; }; @@ -435,6 +475,7 @@ struct hnae_ae_ops { int (*set_mac_addr)(struct hnae_handle *handle, void *p); int (*set_mc_addr)(struct hnae_handle *handle, void *addr); int (*set_mtu)(struct hnae_handle *handle, int new_mtu); + void (*set_tso_stats)(struct hnae_handle *handle, int enable); void (*update_stats)(struct hnae_handle *handle, struct net_device_stats *net_stats); void (*get_stats)(struct hnae_handle *handle, u64 *data); @@ -446,6 +487,12 @@ struct hnae_ae_ops { enum hnae_led_state status); void (*get_regs)(struct hnae_handle *handle, void *data); int (*get_regs_len)(struct hnae_handle *handle); + u32 (*get_rss_key_size)(struct hnae_handle *handle); + u32 (*get_rss_indir_size)(struct hnae_handle *handle); + int (*get_rss)(struct hnae_handle *handle, u32 *indir, u8 *key, + u8 *hfunc); + int (*set_rss)(struct hnae_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc); }; struct hnae_ae_dev { @@ -477,8 +524,11 @@ struct hnae_handle { #define ring_to_dev(ring) ((ring)->q->dev->dev) -struct hnae_handle *hnae_get_handle(struct device *owner_dev, const char *ae_id, - u32 port_id, struct hnae_buf_ops *bops); +struct hnae_handle *hnae_get_handle(struct device *owner_dev, + const struct device_node *ae_node, + u32 port_id, + struct hnae_buf_ops *bops); + void hnae_put_handle(struct hnae_handle *handle); int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner); void hnae_ae_unregister(struct hnae_ae_dev *dev); @@ -551,11 +601,9 @@ static inline void hnae_replace_buffer(struct hnae_ring *ring, int i, struct hnae_desc_cb *res_cb) { struct hnae_buf_ops *bops = ring->q->handle->bops; - struct hnae_desc_cb tmp_cb = ring->desc_cb[i]; bops->unmap_buffer(ring, &ring->desc_cb[i]); ring->desc_cb[i] = *res_cb; - *res_cb = tmp_cb; ring->desc[i].addr = (__le64)ring->desc_cb[i].dma; ring->desc[i].rx.ipoff_bnum_pid_flag = 0; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c 
b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 1a16c0307..d4f92ed32 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -252,7 +252,7 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr) if (mac_cb->mac_type != HNAE_PORT_SERVICE) return 0; - ret = hns_mac_set_multi(mac_cb, mac_cb->mac_id, mac_addr, ENABLE); + ret = hns_mac_set_multi(mac_cb, mac_cb->mac_id, mac_addr, true); if (ret) { dev_err(handle->owner_dev, "mac add mul_mac:%pM port%d fail, ret = %#x!\n", @@ -261,7 +261,7 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr) } ret = hns_mac_set_multi(mac_cb, DSAF_BASE_INNER_PORT_NUM, - mac_addr, ENABLE); + mac_addr, true); if (ret) dev_err(handle->owner_dev, "mac add mul_mac:%pM port%d fail, ret = %#x!\n", @@ -277,12 +277,19 @@ static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu) return hns_mac_set_mtu(mac_cb, new_mtu); } +static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable) +{ + struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle); + + hns_ppe_set_tso_enable(ppe_cb, enable); +} + static int hns_ae_start(struct hnae_handle *handle) { int ret; struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); - ret = hns_mac_vm_config_bc_en(mac_cb, 0, ENABLE); + ret = hns_mac_vm_config_bc_en(mac_cb, 0, true); if (ret) return ret; @@ -309,7 +316,7 @@ void hns_ae_stop(struct hnae_handle *handle) hns_ae_ring_enable_all(handle, 0); - (void)hns_mac_vm_config_bc_en(mac_cb, 0, DISABLE); + (void)hns_mac_vm_config_bc_en(mac_cb, 0, false); } static void hns_ae_reset(struct hnae_handle *handle) @@ -334,12 +341,30 @@ void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask) else flag = RCB_INT_FLAG_RX; - hns_rcb_int_clr_hw(ring->q, flag); hns_rcb_int_ctrl_hw(ring->q, flag, mask); } +static void hns_aev2_toggle_ring_irq(struct hnae_ring *ring, u32 mask) +{ + u32 flag; + + if (is_tx_ring(ring)) + flag = RCB_INT_FLAG_TX; + else + flag = RCB_INT_FLAG_RX; + + hns_rcbv2_int_ctrl_hw(ring->q, flag, mask); +} + static void hns_ae_toggle_queue_status(struct hnae_queue *queue, u32 val) { + struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(queue->dev); + + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) + hns_rcb_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX); + else + hns_rcbv2_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX); + hns_rcb_start(queue, val); } @@ -650,8 +675,12 @@ static int hns_ae_config_loopback(struct hnae_handle *handle, { int ret; struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); + struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); switch (loop) { + case MAC_INTERNALLOOP_PHY: + ret = 0; + break; case MAC_INTERNALLOOP_SERDES: ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en); break; @@ -661,6 +690,10 @@ static int hns_ae_config_loopback(struct hnae_handle *handle, default: ret = -EINVAL; } + + if (!ret) + hns_dsaf_set_inner_lb(mac_cb->dsaf_dev, mac_cb->mac_id, en); + return ret; } @@ -730,6 +763,53 @@ int hns_ae_get_regs_len(struct hnae_handle *handle) return total_num; } +static u32 hns_ae_get_rss_key_size(struct hnae_handle *handle) +{ + return HNS_PPEV2_RSS_KEY_SIZE; +} + +static u32 hns_ae_get_rss_indir_size(struct hnae_handle *handle) +{ + return HNS_PPEV2_RSS_IND_TBL_SIZE; +} + +static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle); + + /* currently we support only one type of hash function i.e. 
Toep hash */ + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + /* get the RSS Key required by the user */ + if (key) + memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE); + + /* update the current hash->queue mappings from the shadow RSS table */ + memcpy(indir, ppe_cb->rss_indir_table, HNS_PPEV2_RSS_IND_TBL_SIZE); + + return 0; +} + +static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle); + + /* set the RSS Hash Key if specififed by the user */ + if (key) + hns_ppe_set_rss_key(ppe_cb, (int *)key); + + /* update the shadow RSS table with user specified qids */ + memcpy(ppe_cb->rss_indir_table, indir, HNS_PPEV2_RSS_IND_TBL_SIZE); + + /* now update the hardware */ + hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table); + + return 0; +} + static struct hnae_ae_ops hns_dsaf_ops = { .get_handle = hns_ae_get_handle, .put_handle = hns_ae_put_handle, @@ -758,19 +838,38 @@ static struct hnae_ae_ops hns_dsaf_ops = { .set_mc_addr = hns_ae_set_multicast_one, .set_mtu = hns_ae_set_mtu, .update_stats = hns_ae_update_stats, + .set_tso_stats = hns_ae_set_tso_stats, .get_stats = hns_ae_get_stats, .get_strings = hns_ae_get_strings, .get_sset_count = hns_ae_get_sset_count, .update_led_status = hns_ae_update_led_status, .set_led_id = hns_ae_cpld_set_led_id, .get_regs = hns_ae_get_regs, - .get_regs_len = hns_ae_get_regs_len + .get_regs_len = hns_ae_get_regs_len, + .get_rss_key_size = hns_ae_get_rss_key_size, + .get_rss_indir_size = hns_ae_get_rss_indir_size, + .get_rss = hns_ae_get_rss, + .set_rss = hns_ae_set_rss }; int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev) { struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev; + static atomic_t id = ATOMIC_INIT(-1); + + switch (dsaf_dev->dsaf_ver) { + case AE_VERSION_1: + hns_dsaf_ops.toggle_ring_irq = hns_ae_toggle_ring_irq; + break; + case AE_VERSION_2: + hns_dsaf_ops.toggle_ring_irq = hns_aev2_toggle_ring_irq; + break; + default: + break; + } + snprintf(ae_dev->name, AE_NAME_SIZE, "%s%d", DSAF_DEVICE_NAME, + (int)atomic_inc_return(&id)); ae_dev->ops = &hns_dsaf_ops; ae_dev->dev = dsaf_dev->dev; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 026b38676..5ef0e96e9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -283,7 +283,7 @@ int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, } int hns_mac_set_multi(struct hns_mac_cb *mac_cb, - u32 port_num, char *addr, u8 en) + u32 port_num, char *addr, bool enable) { int ret; struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; @@ -295,7 +295,7 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb, mac_entry.in_port_num = mac_cb->mac_id; mac_entry.port_num = port_num; - if (en == DISABLE) + if (!enable) ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry); else ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry); @@ -368,7 +368,7 @@ static void hns_mac_param_get(struct mac_params *param, *retuen 0 - success , negative --fail */ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb, - u32 port_num, u16 vlan_id, u8 en) + u32 port_num, u16 vlan_id, bool enable) { int ret; struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; @@ -386,7 +386,7 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb, mac_entry.in_port_num = mac_cb->mac_id; mac_entry.port_num = port_num; - if (en == DISABLE) + if (!enable) ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry); else ret = 
hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry); @@ -403,7 +403,7 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb, *@en:enable *retuen 0 - success , negative --fail */ -int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, u8 en) +int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable) { int ret; struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; @@ -427,7 +427,7 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, u8 en) return ret; mac_entry.port_num = port_num; - if (en == DISABLE) + if (!enable) ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry); else ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry); @@ -648,7 +648,7 @@ static int hns_mac_init_ex(struct hns_mac_cb *mac_cb) hns_mac_adjust_link(mac_cb, mac_cb->speed, !mac_cb->half_duplex); - ret = hns_mac_port_config_bc_en(mac_cb, mac_cb->mac_id, 0, ENABLE); + ret = hns_mac_port_config_bc_en(mac_cb, mac_cb->mac_id, 0, true); if (ret) goto free_mac_drv; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index 7da95a758..0b052191d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -425,8 +425,8 @@ void mac_adjust_link(struct net_device *net_dev); void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status); int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr); int hns_mac_set_multi(struct hns_mac_cb *mac_cb, - u32 port_num, char *addr, u8 en); -int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, u8 en); + u32 port_num, char *addr, bool enable); +int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, bool enable); void hns_mac_start(struct hns_mac_cb *mac_cb); void hns_mac_stop(struct hns_mac_cb *mac_cb); int hns_mac_del_mac(struct hns_mac_cb *mac_cb, u32 vfn, char *mac); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index b674414a4..38fc5be38 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -35,21 +35,13 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) int ret, i; u32 desc_num; u32 buf_size; - const char *name, *mode_str; + const char *mode_str; struct device_node *np = dsaf_dev->dev->of_node; - if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v2")) - dsaf_dev->dsaf_ver = AE_VERSION_2; - else + if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1")) dsaf_dev->dsaf_ver = AE_VERSION_1; - - ret = of_property_read_string(np, "dsa_name", &name); - if (ret) { - dev_err(dsaf_dev->dev, "get dsaf name fail, ret=%d!\n", ret); - return ret; - } - strncpy(dsaf_dev->ae_dev.name, name, AE_NAME_SIZE); - dsaf_dev->ae_dev.name[AE_NAME_SIZE - 1] = '\0'; + else + dsaf_dev->dsaf_ver = AE_VERSION_2; ret = of_property_read_string(np, "mode", &mode_str); if (ret) { @@ -238,6 +230,30 @@ static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev) } } +static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev) +{ + u16 max_q_per_vf, max_vfn; + u32 q_id, q_num_per_port; + u32 mac_id; + + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) + return; + + hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, + HNS_DSAF_COMM_SERVICE_NW_IDX, + &max_vfn, &max_q_per_vf); + q_num_per_port = max_vfn * max_q_per_vf; + + for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) { + dsaf_set_dev_field(dsaf_dev, + DSAFV2_SERDES_LBK_0_REG + 4 * mac_id, + 
DSAFV2_SERDES_LBK_QID_M, + DSAFV2_SERDES_LBK_QID_S, + q_id); + q_id += q_num_per_port; + } +} + /** * hns_dsaf_sw_port_type_cfg - cfg sw type * @dsaf_id: dsa fabric id @@ -274,6 +290,8 @@ static void hns_dsaf_stp_port_type_cfg(struct dsaf_device *dsaf_dev, } } +#define HNS_DSAF_SBM_NUM(dev) \ + (AE_IS_VER1((dev)->dsaf_ver) ? DSAF_SBM_NUM : DSAFV2_SBM_NUM) /** * hns_dsaf_sbm_cfg - config sbm * @dsaf_id: dsa fabric id @@ -283,7 +301,7 @@ static void hns_dsaf_sbm_cfg(struct dsaf_device *dsaf_dev) u32 o_sbm_cfg; u32 i; - for (i = 0; i < DSAF_SBM_NUM; i++) { + for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) { o_sbm_cfg = dsaf_read_dev(dsaf_dev, DSAF_SBM_CFG_REG_0_REG + 0x80 * i); dsaf_set_bit(o_sbm_cfg, DSAF_SBM_CFG_EN_S, 1); @@ -304,13 +322,19 @@ static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev) u32 reg; u32 read_cnt; - for (i = 0; i < DSAF_SBM_NUM; i++) { + /* validate configure by setting SBM_CFG_MIB_EN bit from 0 to 1. */ + for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) { + reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i; + dsaf_set_dev_bit(dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S, 0); + } + + for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) { reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i; dsaf_set_dev_bit(dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S, 1); } /* waitint for all sbm enable finished */ - for (i = 0; i < DSAF_SBM_NUM; i++) { + for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) { read_cnt = 0; reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i; do { @@ -338,83 +362,156 @@ static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev) */ static void hns_dsaf_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev) { - u32 o_sbm_bp_cfg0; - u32 o_sbm_bp_cfg1; - u32 o_sbm_bp_cfg2; - u32 o_sbm_bp_cfg3; + u32 o_sbm_bp_cfg; u32 reg; u32 i; /* XGE */ for (i = 0; i < DSAF_XGE_NUM; i++) { reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg0 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_S, 512); - dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0); - dsaf_set_field(o_sbm_bp_cfg0, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg0); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg1 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg1, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0); - dsaf_set_field(o_sbm_bp_cfg1, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg1); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M, DSAF_SBM_CFG2_SET_BUF_NUM_S, 104); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M, DSAF_SBM_CFG2_RESET_BUF_NUM_S, 
128); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg3 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg3, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110); - dsaf_set_field(o_sbm_bp_cfg3, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg3); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); /* for no enable pfc mode */ reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg3 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg3, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 128); - dsaf_set_field(o_sbm_bp_cfg3, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 192); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg3); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } /* PPE */ for (i = 0; i < DSAF_COMM_CHN; i++) { reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M, DSAF_SBM_CFG2_SET_BUF_NUM_S, 10); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M, DSAF_SBM_CFG2_RESET_BUF_NUM_S, 12); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } /* RoCEE */ for (i = 0; i < DSAF_COMM_CHN; i++) { reg = DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i; - o_sbm_bp_cfg2 = dsaf_read_dev(dsaf_dev, reg); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_SET_BUF_NUM_M, + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M, DSAF_SBM_CFG2_SET_BUF_NUM_S, 2); - dsaf_set_field(o_sbm_bp_cfg2, DSAF_SBM_CFG2_RESET_BUF_NUM_M, + dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M, DSAF_SBM_CFG2_RESET_BUF_NUM_S, 4); - dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg2); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + } +} + +static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev) +{ + u32 o_sbm_bp_cfg; + u32 reg; + u32 i; + + /* XGE */ + for (i = 0; i < DSAFV2_SBM_XGE_CHN; i++) { + reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_M, + DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_S, 256); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_M, + DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_M, + DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + + reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_M, + DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_M, + DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + + reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, 
DSAFV2_SBM_CFG2_SET_BUF_NUM_M, + DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 104); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M, + DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 128); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + + reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M, + DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M, + DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + + /* for no enable pfc mode */ + reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M, + DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128); + dsaf_set_field(o_sbm_bp_cfg, + DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M, + DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 192); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + } + + /* PPE */ + reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M, + DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 10); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M, + DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 12); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); + /* RoCEE */ + for (i = 0; i < DASFV2_ROCEE_CRD_NUM; i++) { + reg = DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i; + o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M, + DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 2); + dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M, + DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 4); + dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg); } } @@ -618,6 +715,16 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en) dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en); } +void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en) +{ + if (AE_IS_VER1(dsaf_dev->dsaf_ver) || + dsaf_dev->mac_cb[mac_id].mac_type == HNAE_PORT_DEBUG) + return; + + dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id, + DSAFV2_SERDES_LBK_EN_B, !!en); +} + /** * hns_dsaf_tbl_stat_en - tbl * @dsaf_id: dsa fabric id @@ -949,6 +1056,9 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev) /* set promisc def queue id */ hns_dsaf_mix_def_qid_cfg(dsaf_dev); + /* set inner loopback queue id */ + hns_dsaf_inner_qid_cfg(dsaf_dev); + /* in non switch mode, set all port to access mode */ hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN); @@ -985,11 +1095,38 @@ static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev) else tc_cfg = HNS_DSAF_I8TC_CFG; + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { + for (i = 0; i < DSAF_INODE_NUM; i++) { + reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i; + dsaf_set_dev_field(dsaf_dev, reg, + DSAF_INODE_IN_PORT_NUM_M, + DSAF_INODE_IN_PORT_NUM_S, + i % DSAF_XGE_NUM); + } + } else { + for (i = 0; i < DSAF_PORT_TYPE_NUM; i++) { + reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i; + dsaf_set_dev_field(dsaf_dev, reg, + DSAF_INODE_IN_PORT_NUM_M, + DSAF_INODE_IN_PORT_NUM_S, 0); + dsaf_set_dev_field(dsaf_dev, reg, + DSAFV2_INODE_IN_PORT1_NUM_M, + DSAFV2_INODE_IN_PORT1_NUM_S, 1); + dsaf_set_dev_field(dsaf_dev, reg, + DSAFV2_INODE_IN_PORT2_NUM_M, + DSAFV2_INODE_IN_PORT2_NUM_S, 2); + dsaf_set_dev_field(dsaf_dev, reg, + DSAFV2_INODE_IN_PORT3_NUM_M, + DSAFV2_INODE_IN_PORT3_NUM_S, 3); + 
dsaf_set_dev_field(dsaf_dev, reg, + DSAFV2_INODE_IN_PORT4_NUM_M, + DSAFV2_INODE_IN_PORT4_NUM_S, 4); + dsaf_set_dev_field(dsaf_dev, reg, + DSAFV2_INODE_IN_PORT5_NUM_M, + DSAFV2_INODE_IN_PORT5_NUM_S, 5); + } + } for (i = 0; i < DSAF_INODE_NUM; i++) { - reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i; - dsaf_set_dev_field(dsaf_dev, reg, DSAF_INODE_IN_PORT_NUM_M, - DSAF_INODE_IN_PORT_NUM_S, i % DSAF_XGE_NUM); - reg = DSAF_INODE_PRI_TC_CFG_0_REG + 0x80 * i; dsaf_write_dev(dsaf_dev, reg, tc_cfg); } @@ -1002,10 +1139,17 @@ static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev) static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev) { u32 flag; + u32 finish_msk; u32 cnt = 0; int ret; - hns_dsaf_sbm_bp_wl_cfg(dsaf_dev); + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { + hns_dsaf_sbm_bp_wl_cfg(dsaf_dev); + finish_msk = DSAF_SRAM_INIT_OVER_M; + } else { + hns_dsafv2_sbm_bp_wl_cfg(dsaf_dev); + finish_msk = DSAFV2_SRAM_INIT_OVER_M; + } /* enable sbm chanel, disable sbm chanel shcut function*/ hns_dsaf_sbm_cfg(dsaf_dev); @@ -1024,11 +1168,13 @@ static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev) do { usleep_range(200, 210);/*udelay(200);*/ - flag = dsaf_read_dev(dsaf_dev, DSAF_SRAM_INIT_OVER_0_REG); + flag = dsaf_get_dev_field(dsaf_dev, DSAF_SRAM_INIT_OVER_0_REG, + finish_msk, DSAF_SRAM_INIT_OVER_S); cnt++; - } while (flag != DSAF_SRAM_INIT_FINISH_FLAG && cnt < DSAF_CFG_READ_CNT); + } while (flag != (finish_msk >> DSAF_SRAM_INIT_OVER_S) && + cnt < DSAF_CFG_READ_CNT); - if (flag != DSAF_SRAM_INIT_FINISH_FLAG) { + if (flag != (finish_msk >> DSAF_SRAM_INIT_OVER_S)) { dev_err(dsaf_dev->dev, "hns_dsaf_sbm_init fail %s, flag=%d, cnt=%d\n", dsaf_dev->ae_dev.name, flag, cnt); @@ -2011,7 +2157,7 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4); /* dsaf inode registers */ - for (i = 0; i < DSAF_SBM_NUM / DSAF_COMM_CHN; i++) { + for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) { j = i * DSAF_COMM_CHN + port; p[232 + i] = dsaf_read_dev(ddev, DSAF_SBM_CFG_REG_0_REG + j * 0x80); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index b2b934849..5fea226ef 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -18,25 +18,22 @@ struct hns_mac_cb; #define DSAF_DRV_NAME "hns_dsaf" #define DSAF_MOD_VERSION "v1.0" +#define DSAF_DEVICE_NAME "dsaf" -#define ENABLE (0x1) -#define DISABLE (0x0) +#define HNS_DSAF_DEBUG_NW_REG_OFFSET 0x100000 -#define HNS_DSAF_DEBUG_NW_REG_OFFSET (0x100000) +#define DSAF_BASE_INNER_PORT_NUM 127/* mac tbl qid*/ -#define DSAF_BASE_INNER_PORT_NUM (127) /* mac tbl qid*/ +#define DSAF_MAX_CHIP_NUM 2 /*max 2 chips */ -#define DSAF_MAX_CHIP_NUM (2) /*max 2 chips */ +#define DSAF_DEFAUTL_QUEUE_NUM_PER_PPE 22 -#define DSAF_DEFAUTL_QUEUE_NUM_PER_PPE (22) +#define HNS_DSAF_MAX_DESC_CNT 1024 +#define HNS_DSAF_MIN_DESC_CNT 16 -#define HNS_DSAF_MAX_DESC_CNT (1024) -#define HNS_DSAF_MIN_DESC_CNT (16) +#define DSAF_INVALID_ENTRY_IDX 0xffff -#define DSAF_INVALID_ENTRY_IDX (0xffff) - -#define DSAF_CFG_READ_CNT (30) -#define DSAF_SRAM_INIT_FINISH_FLAG (0xff) +#define DSAF_CFG_READ_CNT 30 #define MAC_NUM_OCTETS_PER_ADDR 6 @@ -274,10 +271,6 @@ struct dsaf_device { struct device *dev; struct hnae_ae_dev ae_dev; - void *priv; - - int virq[DSAF_IRQ_NUM]; - u8 __iomem *sc_base; u8 __iomem *sds_base; u8 __iomem *ppe_base; @@ -424,5 +417,6 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port); 
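Editor's note, not part of the patch: hns_dsaf_sbm_init() above now polls a version-dependent completion field instead of comparing the whole register against the removed DSAF_SRAM_INIT_FINISH_FLAG. Below is a standalone model of just the completion test, using the DSAF_SRAM_INIT_OVER_* masks added to hns_dsaf_reg.h (0xff on V1, 0x3ff on V2, shift 0); "done" means every bit of the field is set.

#include <stdint.h>
#include <stdio.h>

#define DSAF_SRAM_INIT_OVER_S   0
#define DSAF_SRAM_INIT_OVER_M   0xff	/* V1: 8-bit field */
#define DSAFV2_SRAM_INIT_OVER_M 0x3ff	/* V2: 10-bit field */

/* model of dsaf_get_dev_field(): extract (reg & mask) >> shift */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

static int sram_init_done(uint32_t reg, int is_ver1)
{
	uint32_t finish_msk = is_ver1 ? DSAF_SRAM_INIT_OVER_M :
					DSAFV2_SRAM_INIT_OVER_M;
	uint32_t flag = get_field(reg, finish_msk, DSAF_SRAM_INIT_OVER_S);

	/* the loop in hns_dsaf_sbm_init() keeps polling until this holds */
	return flag == (finish_msk >> DSAF_SRAM_INIT_OVER_S);
}

int main(void)
{
	printf("%d\n", sram_init_done(0x0ff, 1));	/* V1: done -> 1 */
	printf("%d\n", sram_init_done(0x0ff, 0));	/* V2: two bits short -> 0 */
	printf("%d\n", sram_init_done(0x3ff, 0));	/* V2: done -> 1 */
	return 0;
}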
void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data); int hns_dsaf_get_regs_count(void); void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en); +void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en); #endif /* __HNS_DSAF_MAIN_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 523e9b83d..607c3be42 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -149,7 +149,11 @@ void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) if (port < DSAF_SERVICE_NW_NUM) { reg_val_1 = 0x1 << port; - reg_val_2 = 0x1041041 << port; + /* there is difference between V1 and V2 in register.*/ + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) + reg_val_2 = 0x1041041 << port; + else + reg_val_2 = 0x2082082 << port; if (val == 0) { dsaf_write_reg(dsaf_dev->sc_base, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 67f33f185..f302ef907 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -19,6 +19,48 @@ #include "hns_dsaf_ppe.h" +void hns_ppe_set_tso_enable(struct hns_ppe_cb *ppe_cb, u32 value) +{ + dsaf_set_dev_bit(ppe_cb, PPEV2_CFG_TSO_EN_REG, 0, !!value); +} + +void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb, + const u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]) +{ + int key_item = 0; + + for (key_item = 0; key_item < HNS_PPEV2_RSS_KEY_NUM; key_item++) + dsaf_write_dev(ppe_cb, PPEV2_RSS_KEY_REG + key_item * 0x4, + rss_key[key_item]); +} + +void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb, + const u32 rss_tab[HNS_PPEV2_RSS_IND_TBL_SIZE]) +{ + int i; + int reg_value; + + for (i = 0; i < (HNS_PPEV2_RSS_IND_TBL_SIZE / 4); i++) { + reg_value = dsaf_read_dev(ppe_cb, + PPEV2_INDRECTION_TBL_REG + i * 0x4); + + dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N0_M, + PPEV2_CFG_RSS_TBL_4N0_S, + rss_tab[i * 4 + 0] & 0x1F); + dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N1_M, + PPEV2_CFG_RSS_TBL_4N1_S, + rss_tab[i * 4 + 1] & 0x1F); + dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N2_M, + PPEV2_CFG_RSS_TBL_4N2_S, + rss_tab[i * 4 + 2] & 0x1F); + dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N3_M, + PPEV2_CFG_RSS_TBL_4N3_S, + rss_tab[i * 4 + 3] & 0x1F); + dsaf_write_dev( + ppe_cb, PPEV2_INDRECTION_TBL_REG + i * 0x4, reg_value); + } +} + static void __iomem *hns_ppe_common_get_ioaddr( struct ppe_common_cb *ppe_common) { @@ -134,6 +176,11 @@ static void hns_ppe_cnt_clr_ce(struct hns_ppe_cb *ppe_cb) PPE_CNT_CLR_CE_B, 1); } +static void hns_ppe_set_vlan_strip(struct hns_ppe_cb *ppe_cb, int en) +{ + dsaf_write_dev(ppe_cb, PPEV2_VLAN_STRIP_EN_REG, en); +} + /** * hns_ppe_checksum_hw - set ppe checksum caculate * @ppe_device: ppe device @@ -266,13 +313,17 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en) /** * ppe_init_hw - init ppe - * @ppe_device: ppe device + * @ppe_cb: ppe device */ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb) { struct ppe_common_cb *ppe_common_cb = ppe_cb->ppe_common_cb; u32 port = ppe_cb->port; struct dsaf_device *dsaf_dev = ppe_common_cb->dsaf_dev; + int i; + + /* get default RSS key */ + netdev_rss_key_fill(ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE); hns_ppe_srst_by_port(dsaf_dev, port, 0); mdelay(10); @@ -285,8 +336,21 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb) hns_ppe_set_port_mode(ppe_cb, PPE_MODE_GE); else 
hns_ppe_set_port_mode(ppe_cb, PPE_MODE_XGE); + hns_ppe_checksum_hw(ppe_cb, 0xffffffff); hns_ppe_cnt_clr_ce(ppe_cb); + + if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) { + hns_ppe_set_vlan_strip(ppe_cb, 0); + + /* set default RSS key in h/w */ + hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key); + + /* Set default indrection table in h/w */ + for (i = 0; i < HNS_PPEV2_RSS_IND_TBL_SIZE; i++) + ppe_cb->rss_indir_table[i] = i; + hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table); + } } /** @@ -341,13 +405,13 @@ void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index) if (ret) return; + for (i = 0; i < ppe_common->ppe_num; i++) + hns_ppe_init_hw(&ppe_common->ppe_cb[i]); + ret = hns_rcb_common_init_hw(dsaf_dev->rcb_common[ppe_common_index]); if (ret) return; - for (i = 0; i < ppe_common->ppe_num; i++) - hns_ppe_init_hw(&ppe_common->ppe_cb[i]); - hns_rcb_common_init_commit_hw(dsaf_dev->rcb_common[ppe_common_index]); } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index 4894f9a0d..0f5cb6962 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h @@ -25,15 +25,24 @@ #define ETH_PPE_DUMP_NUM 576 #define ETH_PPE_STATIC_NUM 12 + +#define HNS_PPEV2_RSS_IND_TBL_SIZE 256 +#define HNS_PPEV2_RSS_KEY_SIZE 40 /* in bytes or 320 bits */ +#define HNS_PPEV2_RSS_KEY_NUM (HNS_PPEV2_RSS_KEY_SIZE / sizeof(u32)) + enum ppe_qid_mode { - PPE_QID_MODE0 = 0, /* fixed queue id mode */ - PPE_QID_MODE1, /* switch:128VM non switch:6Port/4VM/4TC */ - PPE_QID_MODE2, /* switch:32VM/4TC non switch:6Port/16VM */ - PPE_QID_MODE3, /* switch:4TC/8TAG non switch:2Port/64VM */ - PPE_QID_MODE4, /* switch:8VM/16TAG non switch:2Port/16VM/4TC */ - PPE_QID_MODE5, /* non switch:6Port/16TAG */ - PPE_QID_MODE6, /* non switch:6Port/2VM/8TC */ - PPE_QID_MODE7, /* non switch:2Port/8VM/8TC */ + PPE_QID_MODE0 = 0, /* fixed queue id mode */ + PPE_QID_MODE1, /* switch:128VM non switch:6Port/4VM/4TC */ + PPE_QID_MODE2, /* switch:32VM/4TC non switch:6Port/16VM */ + PPE_QID_MODE3, /* switch:4TC/8RSS non switch:2Port/64VM */ + PPE_QID_MODE4, /* switch:8VM/16RSS non switch:2Port/16VM/4TC */ + PPE_QID_MODE5, /* switch:16VM/8TC non switch:6Port/16RSS */ + PPE_QID_MODE6, /* switch:32VM/4RSS non switch:6Port/2VM/8TC */ + PPE_QID_MODE7, /* switch:32RSS non switch:2Port/8VM/8TC */ + PPE_QID_MODE8, /* switch:6VM/4TC/4RSS non switch:2Port/16VM/4RSS */ + PPE_QID_MODE9, /* non switch:2Port/32VM/2RSS */ + PPE_QID_MODE10, /* non switch:2Port/32RSS */ + PPE_QID_MODE11, /* non switch:2Port/4TC/16RSS */ }; enum ppe_port_mode { @@ -72,6 +81,8 @@ struct hns_ppe_cb { u8 port; /* port id in dsaf */ void __iomem *io_base; int virq; + u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */ + u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */ }; struct ppe_common_cb { @@ -102,4 +113,9 @@ void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data); void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data); void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data); +void hns_ppe_set_tso_enable(struct hns_ppe_cb *ppe_cb, u32 value); +void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb, + const u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]); +void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb, + const u32 rss_tab[HNS_PPEV2_RSS_IND_TBL_SIZE]); #endif /* _HNS_DSAF_PPE_H */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 
4db32c62f..121888074 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -136,19 +136,37 @@ void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask) void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag) { - u32 clr = 1; - if (flag & RCB_INT_FLAG_TX) { - dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, clr); - dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, clr); + dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1); + dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1); } if (flag & RCB_INT_FLAG_RX) { - dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, clr); - dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, clr); + dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1); + dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1); } } +void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask) +{ + u32 int_mask_en = !!mask; + + if (flag & RCB_INT_FLAG_TX) + dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en); + + if (flag & RCB_INT_FLAG_RX) + dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en); +} + +void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag) +{ + if (flag & RCB_INT_FLAG_TX) + dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1); + + if (flag & RCB_INT_FLAG_RX) + dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1); +} + /** *hns_rcb_ring_enable_hw - enable ring *@ring: rcb ring @@ -193,6 +211,7 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type) (u32)dma); dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG, (u32)((dma >> 31) >> 1)); + dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG, bd_size_type); dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG, @@ -204,6 +223,7 @@ static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type) (u32)dma); dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG, (u32)((dma >> 31) >> 1)); + dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG, bd_size_type); dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG, @@ -232,9 +252,6 @@ void hns_rcb_init_hw(struct ring_pair_cb *ring) static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common, u32 port_idx, u32 desc_cnt) { - if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM) - port_idx = 0; - dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4, desc_cnt); } @@ -249,8 +266,6 @@ static int hns_rcb_set_port_coalesced_frames(struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames) { - if (port_idx >= HNS_RCB_SERVICE_NW_ENGINE_NUM) - port_idx = 0; if (coalesced_frames >= rcb_common->desc_num || coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES) return -EINVAL; @@ -354,6 +369,18 @@ int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common) dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG, HNS_RCB_COMMON_ENDIAN); + if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) { + dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0); + dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1); + } else { + dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG, + RCB_COM_CFG_FNA_B, false); + dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG, + RCB_COM_CFG_FA_B, true); + dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_TSO_MODE_REG, + RCB_COM_TSO_MODE_B, HNS_TSO_MODE_8BD_32K); + } + return 0; } @@ -387,19 +414,23 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type) struct rcb_common_cb *rcb_common; struct ring_pair_cb *ring_pair_cb; u32 buf_size; - u16 desc_num; - int irq_idx; + u16 desc_num, mdnum_ppkt; + bool irq_idx, is_ver1; ring_pair_cb = container_of(q, struct ring_pair_cb, q); + 
is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver); if (ring_type == RX_RING) { ring = &q->rx_ring; ring->io_base = ring_pair_cb->q.io_base; irq_idx = HNS_RCB_IRQ_IDX_RX; + mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT; } else { ring = &q->tx_ring; ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base + HNS_RCB_TX_REG_OFFSET; irq_idx = HNS_RCB_IRQ_IDX_TX; + mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT : + HNS_RCBV2_RING_MAX_TXBD_PER_PKT; } rcb_common = ring_pair_cb->rcb_common; @@ -414,7 +445,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type) ring->buf_size = buf_size; ring->desc_num = desc_num; - ring->max_desc_num_per_pkt = HNS_RCB_RING_MAX_BD_PER_PKT; + ring->max_desc_num_per_pkt = mdnum_ppkt; ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE; ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE; ring->next_to_use = 0; @@ -445,14 +476,22 @@ static int hns_rcb_get_port(struct rcb_common_cb *rcb_common, int ring_idx) return port; } +#define SERVICE_RING_IRQ_IDX(v1) \ + ((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX) +#define DEBUG_RING_IRQ_IDX(v1) \ + ((v1) ? HNS_DEBUG_RING_IRQ_IDX : HNSV2_DEBUG_RING_IRQ_IDX) +#define DEBUG_RING_IRQ_OFFSET(v1) \ + ((v1) ? HNS_DEBUG_RING_IRQ_OFFSET : HNSV2_DEBUG_RING_IRQ_OFFSET) static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common) { int comm_index = rcb_common->comm_index; + bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver); if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) - return HNS_SERVICE_RING_IRQ_IDX; + return SERVICE_RING_IRQ_IDX(is_ver1); else - return HNS_DEBUG_RING_IRQ_IDX + (comm_index - 1) * 2; + return DEBUG_RING_IRQ_IDX(is_ver1) + + (comm_index - 1) * DEBUG_RING_IRQ_OFFSET(is_ver1); } #define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\ @@ -468,6 +507,9 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common) u32 ring_num = rcb_common->ring_num; int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common); struct device_node *np = rcb_common->dsaf_dev->dev->of_node; + struct platform_device *pdev = + to_platform_device(rcb_common->dsaf_dev->dev); + bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver); for (i = 0; i < ring_num; i++) { ring_pair_cb = &rcb_common->ring_pair_cb[i]; @@ -477,10 +519,12 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common) ring_pair_cb->q.io_base = RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i); ring_pair_cb->port_id_in_dsa = hns_rcb_get_port(rcb_common, i); - ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] - = irq_of_parse_and_map(np, base_irq_idx + i * 2); - ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] - = irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1); + ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] = + is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) : + platform_get_irq(pdev, base_irq_idx + i * 3 + 1); + ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] = + is_ver1 ? 
irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1) : + platform_get_irq(pdev, base_irq_idx + i * 3); ring_pair_cb->q.phy_base = RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i); hns_rcb_ring_pair_get_cfg(ring_pair_cb); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index 3a2afe2dd..81fe9f849 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h @@ -26,6 +26,8 @@ struct rcb_common_cb; #define HNS_RCB_SERVICE_NW_ENGINE_NUM DSAF_COMM_CHN #define HNS_RCB_DEBUG_NW_ENGINE_NUM 1 #define HNS_RCB_RING_MAX_BD_PER_PKT 3 +#define HNS_RCB_RING_MAX_TXBD_PER_PKT 3 +#define HNS_RCBV2_RING_MAX_TXBD_PER_PKT 8 #define HNS_RCB_MAX_PKT_SIZE MAC_MAX_MTU #define HNS_RCB_RING_MAX_PENDING_BD 1024 @@ -52,6 +54,9 @@ struct rcb_common_cb; #define HNS_DUMP_REG_NUM 500 #define HNS_STATIC_REG_NUM 12 +#define HNS_TSO_MODE_8BD_32K 1 +#define HNS_TSO_MDOE_4BD_16K 0 + enum rcb_int_flag { RCB_INT_FLAG_TX = 0x1, RCB_INT_FLAG_RX = (0x1 << 1), @@ -106,13 +111,17 @@ void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index); int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common); void hns_rcb_start(struct hnae_queue *q, u32 val); void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common); -void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common); void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index, u16 *max_vfn, u16 *max_q_per_vf); +void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common); + void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val); void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag); void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 enable); +void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask); +void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag); + void hns_rcb_init_hw(struct ring_pair_cb *ring); void hns_rcb_reset_ring_hw(struct hnae_queue *q); void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index bdbd80423..60d695daa 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -10,21 +10,12 @@ #ifndef _DSAF_REG_H_ #define _DSAF_REG_H_ -#define HNS_GE_FIFO_ERR_INTNUM 8 -#define HNS_XGE_ERR_INTNUM 6 -#define HNS_RCB_COMM_ERR_INTNUM 12 -#define HNS_PPE_TNL_ERR_INTNUM 8 -#define HNS_DSAF_EVENT_INTNUM 21 -#define HNS_DEBUG_RING_INTNUM 4 -#define HNS_SERVICE_RING_INTNUM 256 - -#define HNS_DEBUG_RING_IRQ_IDX (HNS_GE_FIFO_ERR_INTNUM + HNS_XGE_ERR_INTNUM +\ - HNS_RCB_COMM_ERR_INTNUM + HNS_PPE_TNL_ERR_INTNUM +\ - HNS_DSAF_EVENT_INTNUM) -#define HNS_SERVICE_RING_IRQ_IDX (HNS_DEBUG_RING_IRQ_IDX +\ - HNS_DEBUG_RING_INTNUM) - -#define DSAF_IRQ_NUM 18 +#define HNS_DEBUG_RING_IRQ_IDX 55 +#define HNS_SERVICE_RING_IRQ_IDX 59 +#define HNS_DEBUG_RING_IRQ_OFFSET 2 +#define HNSV2_DEBUG_RING_IRQ_IDX 409 +#define HNSV2_SERVICE_RING_IRQ_IDX 25 +#define HNSV2_DEBUG_RING_IRQ_OFFSET 9 #define DSAF_MAX_PORT_NUM_PER_CHIP 8 #define DSAF_SERVICE_PORT_NUM_PER_DSAF 6 @@ -39,9 +30,15 @@ #define DSAF_GE_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM)) #define DSAF_PORT_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM)) #define DSAF_XGE_NUM DSAF_SERVICE_NW_NUM +#define DSAF_PORT_TYPE_NUM 3 #define DSAF_NODE_NUM 18 #define DSAF_XOD_BIG_NUM DSAF_NODE_NUM #define DSAF_SBM_NUM DSAF_NODE_NUM +#define DSAFV2_SBM_NUM 8 
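Editor's note, not part of the patch: hns_rcb_get_base_irq_idx()/hns_rcb_get_cfg() above change both where ring interrupts come from (the DT interrupt list via irq_of_parse_and_map() on V1, platform_get_irq() on V2) and how the per-ring index is computed (stride 2 with TX first on V1, stride 3 with RX first on V2). The standalone model below reproduces only the index arithmetic, using the constants added to hns_dsaf_reg.h; HNS_DSAF_COMM_SERVICE_NW_IDX == 0 is an assumption for illustration.

#include <stdbool.h>
#include <stdio.h>

#define HNS_DEBUG_RING_IRQ_IDX      55
#define HNS_SERVICE_RING_IRQ_IDX    59
#define HNS_DEBUG_RING_IRQ_OFFSET   2
#define HNSV2_DEBUG_RING_IRQ_IDX    409
#define HNSV2_SERVICE_RING_IRQ_IDX  25
#define HNSV2_DEBUG_RING_IRQ_OFFSET 9

#define HNS_DSAF_COMM_SERVICE_NW_IDX 0	/* assumed value of the service index */

static int base_irq_idx(int comm_index, bool is_ver1)
{
	if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
		return is_ver1 ? HNS_SERVICE_RING_IRQ_IDX :
				 HNSV2_SERVICE_RING_IRQ_IDX;

	/* debug engines sit behind the service engine, one block per index */
	return (is_ver1 ? HNS_DEBUG_RING_IRQ_IDX : HNSV2_DEBUG_RING_IRQ_IDX) +
	       (comm_index - 1) * (is_ver1 ? HNS_DEBUG_RING_IRQ_OFFSET :
					     HNSV2_DEBUG_RING_IRQ_OFFSET);
}

int main(void)
{
	int i, base;

	for (i = 0; i < 2; i++) {	/* first two ring pairs, service engine */
		base = base_irq_idx(HNS_DSAF_COMM_SERVICE_NW_IDX, true);
		printf("v1 ring %d: tx idx %d, rx idx %d\n",
		       i, base + i * 2, base + i * 2 + 1);

		base = base_irq_idx(HNS_DSAF_COMM_SERVICE_NW_IDX, false);
		printf("v2 ring %d: tx idx %d, rx idx %d\n",
		       i, base + i * 3 + 1, base + i * 3);
	}
	return 0;
}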
+#define DSAFV2_SBM_XGE_CHN 6 +#define DSAFV2_SBM_PPE_CHN 1 +#define DASFV2_ROCEE_CRD_NUM 8 + #define DSAF_VOQ_NUM DSAF_NODE_NUM #define DSAF_INODE_NUM DSAF_NODE_NUM #define DSAF_XOD_NUM 8 @@ -52,56 +49,56 @@ #define DSAF_TCAM_SUM 512 #define DSAF_LINE_SUM (2048 * 14) -#define DSAF_SUB_SC_NT_SRAM_CLK_SEL_REG 0x100 -#define DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG 0x180 -#define DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG 0x184 -#define DSAF_SUB_SC_HILINK3_CRG_CTRL2_REG 0x188 -#define DSAF_SUB_SC_HILINK3_CRG_CTRL3_REG 0x18C -#define DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG 0x190 -#define DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG 0x194 -#define DSAF_SUB_SC_DSAF_CLK_EN_REG 0x300 -#define DSAF_SUB_SC_DSAF_CLK_DIS_REG 0x304 -#define DSAF_SUB_SC_NT_CLK_EN_REG 0x308 -#define DSAF_SUB_SC_NT_CLK_DIS_REG 0x30C -#define DSAF_SUB_SC_XGE_CLK_EN_REG 0x310 -#define DSAF_SUB_SC_XGE_CLK_DIS_REG 0x314 -#define DSAF_SUB_SC_GE_CLK_EN_REG 0x318 -#define DSAF_SUB_SC_GE_CLK_DIS_REG 0x31C -#define DSAF_SUB_SC_PPE_CLK_EN_REG 0x320 -#define DSAF_SUB_SC_PPE_CLK_DIS_REG 0x324 -#define DSAF_SUB_SC_RCB_PPE_COM_CLK_EN_REG 0x350 -#define DSAF_SUB_SC_RCB_PPE_COM_CLK_DIS_REG 0x354 -#define DSAF_SUB_SC_XBAR_RESET_REQ_REG 0xA00 -#define DSAF_SUB_SC_XBAR_RESET_DREQ_REG 0xA04 -#define DSAF_SUB_SC_NT_RESET_REQ_REG 0xA08 -#define DSAF_SUB_SC_NT_RESET_DREQ_REG 0xA0C -#define DSAF_SUB_SC_XGE_RESET_REQ_REG 0xA10 -#define DSAF_SUB_SC_XGE_RESET_DREQ_REG 0xA14 -#define DSAF_SUB_SC_GE_RESET_REQ0_REG 0xA18 -#define DSAF_SUB_SC_GE_RESET_DREQ0_REG 0xA1C -#define DSAF_SUB_SC_GE_RESET_REQ1_REG 0xA20 -#define DSAF_SUB_SC_GE_RESET_DREQ1_REG 0xA24 -#define DSAF_SUB_SC_PPE_RESET_REQ_REG 0xA48 -#define DSAF_SUB_SC_PPE_RESET_DREQ_REG 0xA4C -#define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG 0xA88 -#define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG 0xA8C -#define DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG 0x2060 -#define DSAF_SUB_SC_TCAM_MBIST_EN_REG 0x2300 -#define DSAF_SUB_SC_DSAF_CLK_ST_REG 0x5300 -#define DSAF_SUB_SC_NT_CLK_ST_REG 0x5304 -#define DSAF_SUB_SC_XGE_CLK_ST_REG 0x5308 -#define DSAF_SUB_SC_GE_CLK_ST_REG 0x530C -#define DSAF_SUB_SC_PPE_CLK_ST_REG 0x5310 -#define DSAF_SUB_SC_ROCEE_CLK_ST_REG 0x5314 -#define DSAF_SUB_SC_CPU_CLK_ST_REG 0x5318 -#define DSAF_SUB_SC_RCB_PPE_COM_CLK_ST_REG 0x5328 -#define DSAF_SUB_SC_XBAR_RESET_ST_REG 0x5A00 -#define DSAF_SUB_SC_NT_RESET_ST_REG 0x5A04 -#define DSAF_SUB_SC_XGE_RESET_ST_REG 0x5A08 -#define DSAF_SUB_SC_GE_RESET_ST0_REG 0x5A0C -#define DSAF_SUB_SC_GE_RESET_ST1_REG 0x5A10 -#define DSAF_SUB_SC_PPE_RESET_ST_REG 0x5A24 -#define DSAF_SUB_SC_RCB_PPE_COM_RESET_ST_REG 0x5A44 +#define DSAF_SUB_SC_NT_SRAM_CLK_SEL_REG 0x100 +#define DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG 0x180 +#define DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG 0x184 +#define DSAF_SUB_SC_HILINK3_CRG_CTRL2_REG 0x188 +#define DSAF_SUB_SC_HILINK3_CRG_CTRL3_REG 0x18C +#define DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG 0x190 +#define DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG 0x194 +#define DSAF_SUB_SC_DSAF_CLK_EN_REG 0x300 +#define DSAF_SUB_SC_DSAF_CLK_DIS_REG 0x304 +#define DSAF_SUB_SC_NT_CLK_EN_REG 0x308 +#define DSAF_SUB_SC_NT_CLK_DIS_REG 0x30C +#define DSAF_SUB_SC_XGE_CLK_EN_REG 0x310 +#define DSAF_SUB_SC_XGE_CLK_DIS_REG 0x314 +#define DSAF_SUB_SC_GE_CLK_EN_REG 0x318 +#define DSAF_SUB_SC_GE_CLK_DIS_REG 0x31C +#define DSAF_SUB_SC_PPE_CLK_EN_REG 0x320 +#define DSAF_SUB_SC_PPE_CLK_DIS_REG 0x324 +#define DSAF_SUB_SC_RCB_PPE_COM_CLK_EN_REG 0x350 +#define DSAF_SUB_SC_RCB_PPE_COM_CLK_DIS_REG 0x354 +#define DSAF_SUB_SC_XBAR_RESET_REQ_REG 0xA00 +#define DSAF_SUB_SC_XBAR_RESET_DREQ_REG 0xA04 +#define 
DSAF_SUB_SC_NT_RESET_REQ_REG 0xA08 +#define DSAF_SUB_SC_NT_RESET_DREQ_REG 0xA0C +#define DSAF_SUB_SC_XGE_RESET_REQ_REG 0xA10 +#define DSAF_SUB_SC_XGE_RESET_DREQ_REG 0xA14 +#define DSAF_SUB_SC_GE_RESET_REQ0_REG 0xA18 +#define DSAF_SUB_SC_GE_RESET_DREQ0_REG 0xA1C +#define DSAF_SUB_SC_GE_RESET_REQ1_REG 0xA20 +#define DSAF_SUB_SC_GE_RESET_DREQ1_REG 0xA24 +#define DSAF_SUB_SC_PPE_RESET_REQ_REG 0xA48 +#define DSAF_SUB_SC_PPE_RESET_DREQ_REG 0xA4C +#define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG 0xA88 +#define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG 0xA8C +#define DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG 0x2060 +#define DSAF_SUB_SC_TCAM_MBIST_EN_REG 0x2300 +#define DSAF_SUB_SC_DSAF_CLK_ST_REG 0x5300 +#define DSAF_SUB_SC_NT_CLK_ST_REG 0x5304 +#define DSAF_SUB_SC_XGE_CLK_ST_REG 0x5308 +#define DSAF_SUB_SC_GE_CLK_ST_REG 0x530C +#define DSAF_SUB_SC_PPE_CLK_ST_REG 0x5310 +#define DSAF_SUB_SC_ROCEE_CLK_ST_REG 0x5314 +#define DSAF_SUB_SC_CPU_CLK_ST_REG 0x5318 +#define DSAF_SUB_SC_RCB_PPE_COM_CLK_ST_REG 0x5328 +#define DSAF_SUB_SC_XBAR_RESET_ST_REG 0x5A00 +#define DSAF_SUB_SC_NT_RESET_ST_REG 0x5A04 +#define DSAF_SUB_SC_XGE_RESET_ST_REG 0x5A08 +#define DSAF_SUB_SC_GE_RESET_ST0_REG 0x5A0C +#define DSAF_SUB_SC_GE_RESET_ST1_REG 0x5A10 +#define DSAF_SUB_SC_PPE_RESET_ST_REG 0x5A24 +#define DSAF_SUB_SC_RCB_PPE_COM_RESET_ST_REG 0x5A44 /*serdes offset**/ #define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG @@ -137,6 +134,7 @@ #define DSAF_XGE_INT_STS_0_REG 0x1C0 #define DSAF_PPE_INT_STS_0_REG 0x1E0 #define DSAF_ROCEE_INT_STS_0_REG 0x200 +#define DSAFV2_SERDES_LBK_0_REG 0x220 #define DSAF_PPE_QID_CFG_0_REG 0x300 #define DSAF_SW_PORT_TYPE_0_REG 0x320 #define DSAF_STP_PORT_TYPE_0_REG 0x340 @@ -178,6 +176,7 @@ #define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C #define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C #define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C +#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C #define DSAF_SBM_FREE_CNT_0_0_REG 0x2010 #define DSAF_SBM_FREE_CNT_1_0_REG 0x2014 #define DSAF_SBM_BP_CNT_0_0_REG 0x2018 @@ -319,6 +318,8 @@ #define PPE_CFG_TAG_GEN_REG 0x90 #define PPE_CFG_PARSE_TAG_REG 0x94 #define PPE_CFG_PRO_CHECK_EN_REG 0x98 +#define PPEV2_CFG_TSO_EN_REG 0xA0 +#define PPEV2_VLAN_STRIP_EN_REG 0xAC #define PPE_INTEN_REG 0x100 #define PPE_RINT_REG 0x104 #define PPE_INTSTS_REG 0x108 @@ -351,6 +352,8 @@ #define PPE_ECO0_REG 0x32C #define PPE_ECO1_REG 0x330 #define PPE_ECO2_REG 0x334 +#define PPEV2_INDRECTION_TBL_REG 0x800 +#define PPEV2_RSS_KEY_REG 0x900 #define RCB_COM_CFG_ENDIAN_REG 0x0 #define RCB_COM_CFG_SYS_FSH_REG 0xC @@ -361,6 +364,8 @@ #define RCB_COM_CFG_FA_REG 0x3C #define RCB_COM_CFG_PKT_TC_BP_REG 0x40 #define RCB_COM_CFG_PPE_TNL_CLKEN_REG 0x44 +#define RCBV2_COM_CFG_USER_REG 0x30 +#define RCBV2_COM_CFG_TSO_MODE_REG 0x50 #define RCB_COM_INTMSK_TX_PKT_REG 0x3A0 #define RCB_COM_RINT_TX_PKT_REG 0x3A8 @@ -431,8 +436,10 @@ #define RCB_RING_INTMSK_RXWL_REG 0x000A0 #define RCB_RING_INTSTS_RX_RING_REG 0x000A4 +#define RCBV2_RX_RING_INT_STS_REG 0x000A8 #define RCB_RING_INTMSK_TXWL_REG 0x000AC #define RCB_RING_INTSTS_TX_RING_REG 0x000B0 +#define RCBV2_TX_RING_INT_STS_REG 0x000B4 #define RCB_RING_INTMSK_RX_OVERTIME_REG 0x000B8 #define RCB_RING_INTSTS_RX_OVERTIME_REG 0x000BC #define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4 @@ -678,6 +685,10 @@ #define XGMAC_TRX_CORE_SRST_M 0x2080 +#define DSAF_SRAM_INIT_OVER_M 0xff +#define DSAFV2_SRAM_INIT_OVER_M 0x3ff +#define DSAF_SRAM_INIT_OVER_S 0 + #define DSAF_CFG_EN_S 0 #define DSAF_CFG_TC_MODE_S 1 #define DSAF_CFG_CRC_EN_S 2 @@ -685,6 +696,7 @@ #define 
DSAF_CFG_MIX_MODE_S 4 #define DSAF_CFG_STP_MODE_S 5 #define DSAF_CFG_LOCA_ADDR_EN_S 6 +#define DSAFV2_CFG_VLAN_TAG_MODE_S 17 #define DSAF_CNT_CLR_CE_S 0 #define DSAF_SNAP_EN_S 1 @@ -707,6 +719,16 @@ #define DSAF_INODE_IN_PORT_NUM_M 7 #define DSAF_INODE_IN_PORT_NUM_S 0 +#define DSAFV2_INODE_IN_PORT1_NUM_M (7ULL << 3) +#define DSAFV2_INODE_IN_PORT1_NUM_S 3 +#define DSAFV2_INODE_IN_PORT2_NUM_M (7ULL << 6) +#define DSAFV2_INODE_IN_PORT2_NUM_S 6 +#define DSAFV2_INODE_IN_PORT3_NUM_M (7ULL << 9) +#define DSAFV2_INODE_IN_PORT3_NUM_S 9 +#define DSAFV2_INODE_IN_PORT4_NUM_M (7ULL << 12) +#define DSAFV2_INODE_IN_PORT4_NUM_S 12 +#define DSAFV2_INODE_IN_PORT5_NUM_M (7ULL << 15) +#define DSAFV2_INODE_IN_PORT5_NUM_S 15 #define HNS_DSAF_I4TC_CFG 0x18688688 #define HNS_DSAF_I8TC_CFG 0x18FAC688 @@ -738,6 +760,33 @@ #define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S 10 #define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M (((1ULL << 10) - 1) << 10) +#define DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_S 0 +#define DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 0) +#define DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_S 9 +#define DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 9) +#define DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_S 18 +#define DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 18) + +#define DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_S 0 +#define DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 0) +#define DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_S 9 +#define DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 9) + +#define DSAFV2_SBM_CFG2_SET_BUF_NUM_S 0 +#define DSAFV2_SBM_CFG2_SET_BUF_NUM_M (((1ULL << 9) - 1) << 0) +#define DSAFV2_SBM_CFG2_RESET_BUF_NUM_S 9 +#define DSAFV2_SBM_CFG2_RESET_BUF_NUM_M (((1ULL << 9) - 1) << 9) + +#define DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S 0 +#define DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 0) +#define DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S 9 +#define DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9) + +#define DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S 0 +#define DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 0) +#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S 9 +#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9) + #define DSAF_TBL_TCAM_ADDR_S 0 #define DSAF_TBL_TCAM_ADDR_M ((1ULL << 9) - 1) @@ -797,11 +846,30 @@ #define PPE_CFG_QID_MODE_CF_QID_MODE_S 8 #define PPE_CFG_QID_MODE_CF_QID_MODE_M (0x7 << PPE_CFG_QID_MODE_CF_QID_MODE_S) +#define PPEV2_CFG_RSS_TBL_4N0_S 0 +#define PPEV2_CFG_RSS_TBL_4N0_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N0_S) + +#define PPEV2_CFG_RSS_TBL_4N1_S 8 +#define PPEV2_CFG_RSS_TBL_4N1_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N1_S) + +#define PPEV2_CFG_RSS_TBL_4N2_S 16 +#define PPEV2_CFG_RSS_TBL_4N2_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N2_S) + +#define PPEV2_CFG_RSS_TBL_4N3_S 24 +#define PPEV2_CFG_RSS_TBL_4N3_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N3_S) + +#define DSAFV2_SERDES_LBK_EN_B 8 +#define DSAFV2_SERDES_LBK_QID_S 0 +#define DSAFV2_SERDES_LBK_QID_M (((1UL << 8) - 1) << DSAFV2_SERDES_LBK_QID_S) + #define PPE_CNT_CLR_CE_B 0 #define PPE_CNT_CLR_SNAP_EN_B 1 #define PPE_COMMON_CNT_CLR_CE_B 0 #define PPE_COMMON_CNT_CLR_SNAP_EN_B 1 +#define RCB_COM_TSO_MODE_B 0 +#define RCB_COM_CFG_FNA_B 1 +#define RCB_COM_CFG_FA_B 0 #define GMAC_DUPLEX_TYPE_B 0 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 08cef0dfb..3f77ff77a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -33,10 +33,105 @@ 
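Editor's note, not part of the patch: a standalone model of how hns_ppe_set_indir_table() (in the hns_dsaf_ppe.c hunk earlier) packs the 256-entry shadow RSS indirection table into hardware registers, four 5-bit queue indices per 32-bit word at bit offsets 0/8/16/24, matching the PPEV2_CFG_RSS_TBL_4N* fields defined above. set_field() below stands in for dsaf_set_field(); the real code read-modify-writes each register rather than starting from zero, and the default table written by hns_ppe_init_hw() is the identity mapping shown here.

#include <stdint.h>
#include <stdio.h>

#define HNS_PPEV2_RSS_IND_TBL_SIZE 256

#define RSS_TBL_4N0_S   0
#define RSS_TBL_4N1_S   8
#define RSS_TBL_4N2_S   16
#define RSS_TBL_4N3_S   24
#define RSS_TBL_ENTRY_M 0x1f	/* each queue index is 5 bits wide */

/* model of dsaf_set_field(): clear the field, then OR in the new value */
static void set_field(uint32_t *reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	*reg = (*reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t tbl[HNS_PPEV2_RSS_IND_TBL_SIZE];
	uint32_t reg;
	int i;

	/* default set up by hns_ppe_init_hw(): identity hash->queue mapping */
	for (i = 0; i < HNS_PPEV2_RSS_IND_TBL_SIZE; i++)
		tbl[i] = i;

	for (i = 0; i < HNS_PPEV2_RSS_IND_TBL_SIZE / 4; i++) {
		reg = 0;
		set_field(&reg, RSS_TBL_ENTRY_M << RSS_TBL_4N0_S,
			  RSS_TBL_4N0_S, tbl[i * 4 + 0] & RSS_TBL_ENTRY_M);
		set_field(&reg, RSS_TBL_ENTRY_M << RSS_TBL_4N1_S,
			  RSS_TBL_4N1_S, tbl[i * 4 + 1] & RSS_TBL_ENTRY_M);
		set_field(&reg, RSS_TBL_ENTRY_M << RSS_TBL_4N2_S,
			  RSS_TBL_4N2_S, tbl[i * 4 + 2] & RSS_TBL_ENTRY_M);
		set_field(&reg, RSS_TBL_ENTRY_M << RSS_TBL_4N3_S,
			  RSS_TBL_4N3_S, tbl[i * 4 + 3] & RSS_TBL_ENTRY_M);

		if (i < 2)	/* 0x03020100 then 0x07060504 */
			printf("indir reg %d = 0x%08x\n", i, reg);
	}
	return 0;
}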
#define RCB_IRQ_NOT_INITED 0 #define RCB_IRQ_INITED 1 +#define HNS_BUFFER_SIZE_2048 2048 + +#define BD_MAX_SEND_SIZE 8191 +#define SKB_TMP_LEN(SKB) \ + (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB)) + +static void fill_v2_desc(struct hnae_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + int buf_num, enum hns_desc_type type, int mtu) +{ + struct hnae_desc *desc = &ring->desc[ring->next_to_use]; + struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; + struct iphdr *iphdr; + struct ipv6hdr *ipv6hdr; + struct sk_buff *skb; + int skb_tmp_len; + __be16 protocol; + u8 bn_pid = 0; + u8 rrcfv = 0; + u8 ip_offset = 0; + u8 tvsvsn = 0; + u16 mss = 0; + u8 l4_len = 0; + u16 paylen = 0; + + desc_cb->priv = priv; + desc_cb->length = size; + desc_cb->dma = dma; + desc_cb->type = type; + + desc->addr = cpu_to_le64(dma); + desc->tx.send_size = cpu_to_le16((u16)size); + + /*config bd buffer end */ + hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1); + hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1); + + if (type == DESC_TYPE_SKB) { + skb = (struct sk_buff *)priv; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + skb_reset_mac_len(skb); + protocol = skb->protocol; + ip_offset = ETH_HLEN; + + if (protocol == htons(ETH_P_8021Q)) { + ip_offset += VLAN_HLEN; + protocol = vlan_get_protocol(skb); + skb->protocol = protocol; + } + + if (skb->protocol == htons(ETH_P_IP)) { + iphdr = ip_hdr(skb); + hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1); + hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1); + + /* check for tcp/udp header */ + if (iphdr->protocol == IPPROTO_TCP) { + hnae_set_bit(tvsvsn, + HNSV2_TXD_TSE_B, 1); + skb_tmp_len = SKB_TMP_LEN(skb); + l4_len = tcp_hdrlen(skb); + mss = mtu - skb_tmp_len - ETH_FCS_LEN; + paylen = skb->len - skb_tmp_len; + } + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1); + ipv6hdr = ipv6_hdr(skb); + hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1); + + /* check for tcp/udp header */ + if (ipv6hdr->nexthdr == IPPROTO_TCP) { + hnae_set_bit(tvsvsn, + HNSV2_TXD_TSE_B, 1); + skb_tmp_len = SKB_TMP_LEN(skb); + l4_len = tcp_hdrlen(skb); + mss = mtu - skb_tmp_len - ETH_FCS_LEN; + paylen = skb->len - skb_tmp_len; + } + } + desc->tx.ip_offset = ip_offset; + desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn; + desc->tx.mss = cpu_to_le16(mss); + desc->tx.l4_len = l4_len; + desc->tx.paylen = cpu_to_le16(paylen); + } + } + + hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end); + + desc->tx.bn_pid = bn_pid; + desc->tx.ra_ri_cs_fe_vld = rrcfv; + + ring_ptr_move_fw(ring, next_to_use); +} static void fill_desc(struct hnae_ring *ring, void *priv, int size, dma_addr_t dma, int frag_end, - int buf_num, enum hns_desc_type type) + int buf_num, enum hns_desc_type type, int mtu) { struct hnae_desc *desc = &ring->desc[ring->next_to_use]; struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; @@ -100,47 +195,129 @@ static void unfill_desc(struct hnae_ring *ring) ring_ptr_move_bw(ring, next_to_use); } -int hns_nic_net_xmit_hw(struct net_device *ndev, - struct sk_buff *skb, - struct hns_nic_ring_data *ring_data) +static int hns_nic_maybe_stop_tx( + struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring) { - struct hns_nic_priv *priv = netdev_priv(ndev); - struct device *dev = priv->dev; - struct hnae_ring *ring = ring_data->ring; - struct netdev_queue *dev_queue; - struct skb_frag_struct *frag; + struct sk_buff *skb = *out_skb; + struct sk_buff *new_skb = NULL; int buf_num; - dma_addr_t dma; - int size, next_to_use; - int i, j; - struct 
sk_buff *new_skb; - - assert(ring->max_desc_num_per_pkt <= ring->desc_num); /* no. of segments (plus a header) */ buf_num = skb_shinfo(skb)->nr_frags + 1; if (unlikely(buf_num > ring->max_desc_num_per_pkt)) { - if (ring_space(ring) < 1) { - ring->stats.tx_busy++; - goto out_net_tx_busy; - } + if (ring_space(ring) < 1) + return -EBUSY; new_skb = skb_copy(skb, GFP_ATOMIC); - if (!new_skb) { - ring->stats.sw_err_cnt++; - netdev_err(ndev, "no memory to xmit!\n"); - goto out_err_tx_ok; - } + if (!new_skb) + return -ENOMEM; dev_kfree_skb_any(skb); - skb = new_skb; + *out_skb = new_skb; buf_num = 1; - assert(skb_shinfo(skb)->nr_frags == 1); } else if (buf_num > ring_space(ring)) { + return -EBUSY; + } + + *bnum = buf_num; + return 0; +} + +static int hns_nic_maybe_stop_tso( + struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring) +{ + int i; + int size; + int buf_num; + int frag_num; + struct sk_buff *skb = *out_skb; + struct sk_buff *new_skb = NULL; + struct skb_frag_struct *frag; + + size = skb_headlen(skb); + buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE; + + frag_num = skb_shinfo(skb)->nr_frags; + for (i = 0; i < frag_num; i++) { + frag = &skb_shinfo(skb)->frags[i]; + size = skb_frag_size(frag); + buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE; + } + + if (unlikely(buf_num > ring->max_desc_num_per_pkt)) { + buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE; + if (ring_space(ring) < buf_num) + return -EBUSY; + /* manual split the send packet */ + new_skb = skb_copy(skb, GFP_ATOMIC); + if (!new_skb) + return -ENOMEM; + dev_kfree_skb_any(skb); + *out_skb = new_skb; + + } else if (ring_space(ring) < buf_num) { + return -EBUSY; + } + + *bnum = buf_num; + return 0; +} + +static void fill_tso_desc(struct hnae_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + int buf_num, enum hns_desc_type type, int mtu) +{ + int frag_buf_num; + int sizeoflast; + int k; + + frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE; + sizeoflast = size % BD_MAX_SEND_SIZE; + sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE; + + /* when the frag size is bigger than hardware, split this frag */ + for (k = 0; k < frag_buf_num; k++) + fill_v2_desc(ring, priv, + (k == frag_buf_num - 1) ? + sizeoflast : BD_MAX_SEND_SIZE, + dma + BD_MAX_SEND_SIZE * k, + frag_end && (k == frag_buf_num - 1) ? 1 : 0, + buf_num, + (type == DESC_TYPE_SKB && !k) ? + DESC_TYPE_SKB : DESC_TYPE_PAGE, + mtu); +} + +int hns_nic_net_xmit_hw(struct net_device *ndev, + struct sk_buff *skb, + struct hns_nic_ring_data *ring_data) +{ + struct hns_nic_priv *priv = netdev_priv(ndev); + struct device *dev = priv->dev; + struct hnae_ring *ring = ring_data->ring; + struct netdev_queue *dev_queue; + struct skb_frag_struct *frag; + int buf_num; + int seg_num; + dma_addr_t dma; + int size, next_to_use; + int i; + + switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { + case -EBUSY: ring->stats.tx_busy++; goto out_net_tx_busy; + case -ENOMEM: + ring->stats.sw_err_cnt++; + netdev_err(ndev, "no memory to xmit!\n"); + goto out_err_tx_ok; + default: + break; } + + /* no. of segments (plus a header) */ + seg_num = skb_shinfo(skb)->nr_frags + 1; next_to_use = ring->next_to_use; /* fill the first part */ @@ -151,11 +328,11 @@ int hns_nic_net_xmit_hw(struct net_device *ndev, ring->stats.sw_err_cnt++; goto out_err_tx_ok; } - fill_desc(ring, skb, size, dma, buf_num == 1 ? 1 : 0, buf_num, - DESC_TYPE_SKB); + priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 
1 : 0, + buf_num, DESC_TYPE_SKB, ndev->mtu); /* fill the fragments */ - for (i = 1; i < buf_num; i++) { + for (i = 1; i < seg_num; i++) { frag = &skb_shinfo(skb)->frags[i - 1]; size = skb_frag_size(frag); dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); @@ -164,8 +341,9 @@ int hns_nic_net_xmit_hw(struct net_device *ndev, ring->stats.sw_err_cnt++; goto out_map_frag_fail; } - fill_desc(ring, skb_frag_page(frag), size, dma, - buf_num - 1 == i ? 1 : 0, buf_num, DESC_TYPE_PAGE); + priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, + seg_num - 1 == i ? 1 : 0, buf_num, + DESC_TYPE_PAGE, ndev->mtu); } /*complete translate all packets*/ @@ -182,19 +360,20 @@ int hns_nic_net_xmit_hw(struct net_device *ndev, out_map_frag_fail: - for (j = i - 1; j > 0; j--) { + while (ring->next_to_use != next_to_use) { unfill_desc(ring); - next_to_use = ring->next_to_use; - dma_unmap_page(dev, ring->desc_cb[next_to_use].dma, - ring->desc_cb[next_to_use].length, - DMA_TO_DEVICE); + if (ring->next_to_use != next_to_use) + dma_unmap_page(dev, + ring->desc_cb[ring->next_to_use].dma, + ring->desc_cb[ring->next_to_use].length, + DMA_TO_DEVICE); + else + dma_unmap_single(dev, + ring->desc_cb[next_to_use].dma, + ring->desc_cb[next_to_use].length, + DMA_TO_DEVICE); } - unfill_desc(ring); - next_to_use = ring->next_to_use; - dma_unmap_single(dev, ring->desc_cb[next_to_use].dma, - ring->desc_cb[next_to_use].length, DMA_TO_DEVICE); - out_err_tx_ok: dev_kfree_skb_any(skb); @@ -313,20 +492,67 @@ static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag, return max_size; } -static void -hns_nic_reuse_page(struct hnae_desc_cb *desc_cb, int tsize, int last_offset) +static void hns_nic_reuse_page(struct sk_buff *skb, int i, + struct hnae_ring *ring, int pull_len, + struct hnae_desc_cb *desc_cb) { + struct hnae_desc *desc; + int truesize, size; + int last_offset; + bool twobufs; + + twobufs = ((PAGE_SIZE < 8192) && hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048); + + desc = &ring->desc[ring->next_to_clean]; + size = le16_to_cpu(desc->rx.size); + + if (twobufs) { + truesize = hnae_buf_size(ring); + } else { + truesize = ALIGN(size, L1_CACHE_BYTES); + last_offset = hnae_page_size(ring) - hnae_buf_size(ring); + } + + skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, + size - pull_len, truesize - pull_len); + /* avoid re-using remote pages,flag default unreuse */ - if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) { - /* move offset up to the next cache line */ - desc_cb->page_offset += tsize; + if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) + return; + + if (twobufs) { + /* if we are only owner of page we can reuse it */ + if (likely(page_count(desc_cb->priv) == 1)) { + /* flip page offset to other buffer */ + desc_cb->page_offset ^= truesize; - if (desc_cb->page_offset <= last_offset) { desc_cb->reuse_flag = 1; /* bump ref count on page before it is given*/ get_page(desc_cb->priv); } + return; } + + /* move offset up to the next cache line */ + desc_cb->page_offset += truesize; + + if (desc_cb->page_offset <= last_offset) { + desc_cb->reuse_flag = 1; + /* bump ref count on page before it is given*/ + get_page(desc_cb->priv); + } +} + +static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum) +{ + *out_bnum = hnae_get_field(bnum_flag, + HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1; +} + +static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum) +{ + *out_bnum = hnae_get_field(bnum_flag, + HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S); } static int hns_nic_poll_rx_skb(struct 
hns_nic_ring_data *ring_data, @@ -334,30 +560,42 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, { struct hnae_ring *ring = ring_data->ring; struct net_device *ndev = ring_data->napi.dev; + struct hns_nic_priv *priv = netdev_priv(ndev); struct sk_buff *skb; struct hnae_desc *desc; struct hnae_desc_cb *desc_cb; unsigned char *va; - int bnum, length, size, i, truesize, last_offset; + int bnum, length, i; int pull_len; u32 bnum_flag; - last_offset = hnae_page_size(ring) - hnae_buf_size(ring); desc = &ring->desc[ring->next_to_clean]; desc_cb = &ring->desc_cb[ring->next_to_clean]; - length = le16_to_cpu(desc->rx.pkt_len); - bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag); - bnum = hnae_get_field(bnum_flag, HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S); - *out_bnum = bnum; + + prefetch(desc); + va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; - skb = *out_skb = napi_alloc_skb(&ring_data->napi, HNS_RX_HEAD_SIZE); + /* prefetch first cache line of first page */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + skb = *out_skb = napi_alloc_skb(&ring_data->napi, + HNS_RX_HEAD_SIZE); if (unlikely(!skb)) { netdev_err(ndev, "alloc rx skb fail\n"); ring->stats.sw_err_cnt++; return -ENOMEM; } + prefetchw(skb->data); + length = le16_to_cpu(desc->rx.pkt_len); + bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag); + priv->ops.get_rxd_bnum(bnum_flag, &bnum); + *out_bnum = bnum; + if (length <= HNS_RX_HEAD_SIZE) { memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); @@ -380,13 +618,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long))); - size = le16_to_cpu(desc->rx.size); - truesize = ALIGN(size, L1_CACHE_BYTES); - skb_add_rx_frag(skb, 0, desc_cb->priv, - desc_cb->page_offset + pull_len, - size - pull_len, truesize - pull_len); - - hns_nic_reuse_page(desc_cb, truesize, last_offset); + hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); ring_ptr_move_fw(ring, next_to_clean); if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err*/ @@ -396,13 +628,8 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data, for (i = 1; i < bnum; i++) { desc = &ring->desc[ring->next_to_clean]; desc_cb = &ring->desc_cb[ring->next_to_clean]; - size = le16_to_cpu(desc->rx.size); - truesize = ALIGN(size, L1_CACHE_BYTES); - skb_add_rx_frag(skb, i, desc_cb->priv, - desc_cb->page_offset, - size, truesize); - hns_nic_reuse_page(desc_cb, truesize, last_offset); + hns_nic_reuse_page(skb, i, ring, 0, desc_cb); ring_ptr_move_fw(ring, next_to_clean); } } @@ -540,20 +767,20 @@ recv: } /* make all data has been write before submit */ - if (clean_count > 0) { - hns_nic_alloc_rx_buffers(ring_data, clean_count); - clean_count = 0; - } - if (recv_pkts < budget) { ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); - rmb(); /*complete read rx ring bd number*/ - if (ex_num > 0) { - num += ex_num; + + if (ex_num > clean_count) { + num += ex_num - clean_count; + rmb(); /*complete read rx ring bd number*/ goto recv; } } + /* make all data has been write before submit */ + if (clean_count > 0) + hns_nic_alloc_rx_buffers(ring_data, clean_count); + return recv_pkts; } @@ -642,14 +869,20 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data, bytes = 0; pkts = 0; - while (head != ring->next_to_clean) + while (head != ring->next_to_clean) { hns_nic_reclaim_one_desc(ring, &bytes, &pkts); + /* issue prefetch for next Tx descriptor */ + 
prefetch(&ring->desc_cb[ring->next_to_clean]); + } NETIF_TX_UNLOCK(ndev); dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index); netdev_tx_completed_queue(dev_queue, pkts, bytes); + if (unlikely(priv->link && !netif_carrier_ok(ndev))) + netif_carrier_on(ndev); + if (unlikely(pkts && netif_carrier_ok(ndev) && (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) { /* Make sure that anybody stopping the queue after this @@ -716,6 +949,7 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget) ring_data->ring, 0); ring_data->fini_process(ring_data); + return 0; } return clean_complete; @@ -848,15 +1082,58 @@ static void hns_nic_ring_close(struct net_device *netdev, int idx) napi_disable(&priv->ring_data[idx].napi); } -static int hns_nic_init_irq(struct hns_nic_priv *priv) +static void hns_set_irq_affinity(struct hns_nic_priv *priv) { struct hnae_handle *h = priv->ae_handle; struct hns_nic_ring_data *rd; int i; - int ret; int cpu; cpumask_t mask; + /*diffrent irq banlance for 16core and 32core*/ + if (h->q_num == num_possible_cpus()) { + for (i = 0; i < h->q_num * 2; i++) { + rd = &priv->ring_data[i]; + if (cpu_online(rd->queue_index)) { + cpumask_clear(&mask); + cpu = rd->queue_index; + cpumask_set_cpu(cpu, &mask); + (void)irq_set_affinity_hint(rd->ring->irq, + &mask); + } + } + } else { + for (i = 0; i < h->q_num; i++) { + rd = &priv->ring_data[i]; + if (cpu_online(rd->queue_index * 2)) { + cpumask_clear(&mask); + cpu = rd->queue_index * 2; + cpumask_set_cpu(cpu, &mask); + (void)irq_set_affinity_hint(rd->ring->irq, + &mask); + } + } + + for (i = h->q_num; i < h->q_num * 2; i++) { + rd = &priv->ring_data[i]; + if (cpu_online(rd->queue_index * 2 + 1)) { + cpumask_clear(&mask); + cpu = rd->queue_index * 2 + 1; + cpumask_set_cpu(cpu, &mask); + (void)irq_set_affinity_hint(rd->ring->irq, + &mask); + } + } + } +} + +static int hns_nic_init_irq(struct hns_nic_priv *priv) +{ + struct hnae_handle *h = priv->ae_handle; + struct hns_nic_ring_data *rd; + int i; + int ret; + for (i = 0; i < h->q_num * 2; i++) { rd = &priv->ring_data[i]; @@ -878,16 +1155,11 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv) } disable_irq(rd->ring->irq); rd->ring->irq_init_flag = RCB_IRQ_INITED; - - /*set cpu affinity*/ - if (cpu_online(rd->queue_index)) { - cpumask_clear(&mask); - cpu = rd->queue_index; - cpumask_set_cpu(cpu, &mask); - irq_set_affinity_hint(rd->ring->irq, &mask); - } } + /*set cpu affinity*/ + hns_set_irq_affinity(priv); + return 0; } @@ -1136,6 +1408,51 @@ static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu) return ret; } +static int hns_nic_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_handle *h = priv->ae_handle; + + switch (priv->enet_ver) { + case AE_VERSION_1: + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) + netdev_info(netdev, "enet v1 do not support tso!\n"); + break; + default: + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { + priv->ops.fill_desc = fill_tso_desc; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; + /* The chip only support 7*4096 */ + netif_set_gso_max_size(netdev, 7 * 4096); + h->dev->ops->set_tso_stats(h, 1); + } else { + priv->ops.fill_desc = fill_v2_desc; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; + h->dev->ops->set_tso_stats(h, 0); + } + break; + } + netdev->features = features; + return 0; +} + +static netdev_features_t hns_nic_fix_features( + struct net_device *netdev, netdev_features_t features) +{ + struct hns_nic_priv *priv = 
netdev_priv(netdev); + + switch (priv->enet_ver) { + case AE_VERSION_1: + features &= ~(NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_HW_VLAN_CTAG_FILTER); + break; + default: + break; + } + return features; +} + /** * nic_set_multicast_list - set mutl mac address * @netdev: net device @@ -1231,6 +1548,8 @@ static const struct net_device_ops hns_nic_netdev_ops = { .ndo_set_mac_address = hns_nic_net_set_mac_address, .ndo_change_mtu = hns_nic_change_mtu, .ndo_do_ioctl = hns_nic_do_ioctl, + .ndo_set_features = hns_nic_set_features, + .ndo_fix_features = hns_nic_fix_features, .ndo_get_stats64 = hns_nic_get_stats64, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = hns_nic_poll_controller, @@ -1315,22 +1634,26 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv) return; hns_nic_dump(priv); - netdev_info(priv->netdev, "Reset %s port\n", - (type == HNAE_PORT_DEBUG ? "debug" : "business")); + netdev_info(priv->netdev, "try to reset %s port!\n", + (type == HNAE_PORT_DEBUG ? "debug" : "service")); rtnl_lock(); /* put off any impending NetWatchDogTimeout */ priv->netdev->trans_start = jiffies; - if (type == HNAE_PORT_DEBUG) + if (type == HNAE_PORT_DEBUG) { hns_nic_net_reinit(priv->netdev); + } else { + netif_carrier_off(priv->netdev); + netif_tx_disable(priv->netdev); + } rtnl_unlock(); } /* for doing service complete*/ static void hns_nic_service_event_complete(struct hns_nic_priv *priv) { - assert(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state)); + WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state)); smp_mb__before_atomic(); clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state); @@ -1435,8 +1758,9 @@ static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv) for (i = 0; i < h->q_num * 2; i++) { netif_napi_del(&priv->ring_data[i].napi); if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) { - irq_set_affinity_hint(priv->ring_data[i].ring->irq, - NULL); + (void)irq_set_affinity_hint( + priv->ring_data[i].ring->irq, + NULL); free_irq(priv->ring_data[i].ring->irq, &priv->ring_data[i]); } @@ -1446,6 +1770,31 @@ static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv) kfree(priv->ring_data); } +static void hns_nic_set_priv_ops(struct net_device *netdev) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_handle *h = priv->ae_handle; + + if (AE_IS_VER1(priv->enet_ver)) { + priv->ops.fill_desc = fill_desc; + priv->ops.get_rxd_bnum = get_rx_desc_bnum; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; + } else { + priv->ops.get_rxd_bnum = get_v2rx_desc_bnum; + if ((netdev->features & NETIF_F_TSO) || + (netdev->features & NETIF_F_TSO6)) { + priv->ops.fill_desc = fill_tso_desc; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso; + /* This chip only support 7*4096 */ + netif_set_gso_max_size(netdev, 7 * 4096); + h->dev->ops->set_tso_stats(h, 1); + } else { + priv->ops.fill_desc = fill_v2_desc; + priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx; + } + } +} + static int hns_nic_try_get_ae(struct net_device *ndev) { struct hns_nic_priv *priv = netdev_priv(ndev); @@ -1453,7 +1802,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev) int ret; h = hnae_get_handle(&priv->netdev->dev, - priv->ae_name, priv->port_id, NULL); + priv->ae_node, priv->port_id, NULL); if (IS_ERR_OR_NULL(h)) { ret = PTR_ERR(h); dev_dbg(priv->dev, "has not handle, register notifier!\n"); @@ -1473,6 +1822,8 @@ static int hns_nic_try_get_ae(struct net_device *ndev) goto out_init_ring_data; } + hns_nic_set_priv_ops(ndev); + ret = register_netdev(ndev); if (ret) { dev_err(priv->dev, "probe 
register netdev fail!\n"); @@ -1524,18 +1875,21 @@ static int hns_nic_dev_probe(struct platform_device *pdev) priv->dev = dev; priv->netdev = ndev; - if (of_device_is_compatible(node, "hisilicon,hns-nic-v2")) - priv->enet_ver = AE_VERSION_2; - else + if (of_device_is_compatible(node, "hisilicon,hns-nic-v1")) priv->enet_ver = AE_VERSION_1; + else + priv->enet_ver = AE_VERSION_2; - ret = of_property_read_string(node, "ae-name", &priv->ae_name); - if (ret) - goto out_read_string_fail; + priv->ae_node = (void *)of_parse_phandle(node, "ae-handle", 0); + if (IS_ERR_OR_NULL(priv->ae_node)) { + ret = PTR_ERR(priv->ae_node); + dev_err(dev, "not find ae-handle\n"); + goto out_read_prop_fail; + } ret = of_property_read_u32(node, "port-id", &priv->port_id); if (ret) - goto out_read_string_fail; + goto out_read_prop_fail; hns_init_mac_addr(ndev); @@ -1543,6 +1897,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev) ndev->priv_flags |= IFF_UNICAST_FLT; ndev->netdev_ops = &hns_nic_netdev_ops; hns_ethtool_set_ops(ndev); + ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; @@ -1550,6 +1905,17 @@ static int hns_nic_dev_probe(struct platform_device *pdev) NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO; + switch (priv->enet_ver) { + case AE_VERSION_2: + ndev->features |= NETIF_F_TSO | NETIF_F_TSO6; + ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6; + break; + default: + break; + } + SET_NETDEV_DEV(ndev, dev); if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) @@ -1582,7 +1948,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev) out_notify_fail: (void)cancel_work_sync(&priv->service_task); -out_read_string_fail: +out_read_prop_fail: free_netdev(ndev); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h index dae0ed19a..c68ab3d34 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h @@ -40,8 +40,18 @@ struct hns_nic_ring_data { void (*fini_process)(struct hns_nic_ring_data *); }; +/* compatible the difference between two versions */ +struct hns_nic_ops { + void (*fill_desc)(struct hnae_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + int buf_num, enum hns_desc_type type, int mtu); + int (*maybe_stop_tx)(struct sk_buff **out_skb, + int *bnum, struct hnae_ring *ring); + void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); +}; + struct hns_nic_priv { - const char *ae_name; + const struct device_node *ae_node; u32 enet_ver; u32 port_id; int phy_mode; @@ -51,6 +61,8 @@ struct hns_nic_priv { struct device *dev; struct hnae_handle *ae_handle; + struct hns_nic_ops ops; + /* the cb for nic to manage the ring buffer, the first half of the * array is for tx_ring and vice versa for the second half */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index a03321299..3c4a3bc31 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -11,7 +11,6 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/platform_device.h> - #include "hns_enet.h" #define HNS_PHY_PAGE_MDIX 0 @@ -72,24 +71,22 @@ static void hns_get_mdix_mode(struct net_device *net_dev, struct hns_nic_priv *priv = 
netdev_priv(net_dev); struct phy_device *phy_dev = priv->phy; - if (!phy_dev || !phy_dev->bus) { + if (!phy_dev || !phy_dev->mdio.bus) { cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; return; } - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_MDIX); + phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_MDIX); - retval = mdiobus_read(phy_dev->bus, phy_dev->addr, HNS_PHY_CSC_REG); + retval = phy_read(phy_dev, HNS_PHY_CSC_REG); mdix_ctrl = hnae_get_field(retval, PHY_MDIX_CTRL_M, PHY_MDIX_CTRL_S); - retval = mdiobus_read(phy_dev->bus, phy_dev->addr, HNS_PHY_CSS_REG); + retval = phy_read(phy_dev, HNS_PHY_CSS_REG); mdix = hnae_get_bit(retval, PHY_MDIX_STATUS_B); is_resolved = hnae_get_bit(retval, PHY_SPEED_DUP_RESOLVE_B); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_COPPER); + phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER); switch (mdix_ctrl) { case 0x0: @@ -254,53 +251,36 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) if (en) { /* speed : 1000M */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, 2); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 21, 0x1046); + phy_write(phy_dev, HNS_PHY_PAGE_REG, 2); + phy_write(phy_dev, 21, 0x1046); /* Force Master */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 9, 0x1F00); + phy_write(phy_dev, 9, 0x1F00); /* Soft-reset */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 0, 0x9140); + phy_write(phy_dev, 0, 0x9140); /* If autoneg disabled,two soft-reset operations */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 0, 0x9140); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 22, 0xFA); + phy_write(phy_dev, 0, 0x9140); + phy_write(phy_dev, 22, 0xFA); /* Default is 0x0400 */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 1, 0x418); + phy_write(phy_dev, 1, 0x418); /* Force 1000M Link, Default is 0x0200 */ - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 7, 0x20C); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 22, 0); + phy_write(phy_dev, 7, 0x20C); + phy_write(phy_dev, 22, 0); /* Enable MAC loop-back */ - val = (u16)mdiobus_read(phy_dev->bus, phy_dev->addr, - COPPER_CONTROL_REG); + val = phy_read(phy_dev, COPPER_CONTROL_REG); val |= PHY_LOOP_BACK; - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - COPPER_CONTROL_REG, val); + phy_write(phy_dev, COPPER_CONTROL_REG, val); } else { - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 22, 0xFA); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 1, 0x400); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 7, 0x200); - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - 22, 0); - - val = (u16)mdiobus_read(phy_dev->bus, phy_dev->addr, - COPPER_CONTROL_REG); + phy_write(phy_dev, 22, 0xFA); + phy_write(phy_dev, 1, 0x400); + phy_write(phy_dev, 7, 0x200); + phy_write(phy_dev, 22, 0); + + val = phy_read(phy_dev, COPPER_CONTROL_REG); val &= ~PHY_LOOP_BACK; - (void)mdiobus_write(phy_dev->bus, phy_dev->addr, - COPPER_CONTROL_REG, val); + phy_write(phy_dev, COPPER_CONTROL_REG, val); } return 0; } @@ -315,8 +295,10 @@ static int __lb_setup(struct net_device *ndev, switch (loop) { case MAC_INTERNALLOOP_PHY: - if ((phy_dev) && (!phy_dev->is_c45)) + if ((phy_dev) && (!phy_dev->is_c45)) { ret = hns_nic_config_phy_loopback(phy_dev, 0x1); + ret |= h->dev->ops->set_loopback(h, loop, 0x1); + } break; case MAC_INTERNALLOOP_MAC: if ((h->dev->ops->set_loopback) && @@ -396,6 +378,7 @@ static void __lb_other_process(struct 
hns_nic_ring_data *ring_data, struct sk_buff *skb) { struct net_device *ndev; + struct hns_nic_priv *priv; struct hnae_ring *ring; struct netdev_queue *dev_queue; struct sk_buff *new_skb; @@ -405,8 +388,17 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data, char buff[33]; /* 32B data and the last character '\0' */ if (!ring_data) { /* Just for doing create frame*/ + ndev = skb->dev; + priv = netdev_priv(ndev); + frame_size = skb->len; memset(skb->data, 0xFF, frame_size); + if ((!AE_IS_VER1(priv->enet_ver)) && + (priv->ae_handle->port_type == HNAE_PORT_SERVICE)) { + memcpy(skb->data, ndev->dev_addr, 6); + skb->data[5] += 0x1f; + } + frame_size &= ~1ul; memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); memset(&skb->data[frame_size / 2 + 10], 0xBE, @@ -506,6 +498,7 @@ static int __lb_run_test(struct net_device *ndev, /* place data into test skb */ (void)skb_put(skb, size); + skb->dev = ndev; __lb_other_process(NULL, skb); skb->queue_mapping = NIC_LB_TEST_RING_ID; @@ -667,6 +660,7 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev, drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0'; strncpy(drvinfo->fw_version, "N/A", ETHTOOL_FWVERS_LEN); + drvinfo->eedump_len = 0; } /** @@ -1018,16 +1012,9 @@ int hns_phy_led_set(struct net_device *netdev, int value) struct hns_nic_priv *priv = netdev_priv(netdev); struct phy_device *phy_dev = priv->phy; - if (!phy_dev->bus) { - netdev_err(netdev, "phy_dev->bus is null!\n"); - return -EINVAL; - } - retval = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED); - retval = mdiobus_write(phy_dev->bus, phy_dev->addr, HNS_LED_FC_REG, - value); - retval = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER); + retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED); + retval = phy_write(phy_dev, HNS_LED_FC_REG, value); + retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER); if (retval) { netdev_err(netdev, "mdiobus_write fail !\n"); return retval; @@ -1052,19 +1039,15 @@ int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) if (phy_dev) switch (state) { case ETHTOOL_ID_ACTIVE: - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_LED); + ret = phy_write(phy_dev, HNS_PHY_PAGE_REG, + HNS_PHY_PAGE_LED); if (ret) return ret; - priv->phy_led_val = (u16)mdiobus_read(phy_dev->bus, - phy_dev->addr, - HNS_LED_FC_REG); + priv->phy_led_val = phy_read(phy_dev, HNS_LED_FC_REG); - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_COPPER); + ret = phy_write(phy_dev, HNS_PHY_PAGE_REG, + HNS_PHY_PAGE_COPPER); if (ret) return ret; return 2; @@ -1079,20 +1062,18 @@ int hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) return ret; break; case ETHTOOL_ID_INACTIVE: - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_LED); + ret = phy_write(phy_dev, HNS_PHY_PAGE_REG, + HNS_PHY_PAGE_LED); if (ret) return ret; - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_LED_FC_REG, priv->phy_led_val); + ret = phy_write(phy_dev, HNS_LED_FC_REG, + priv->phy_led_val); if (ret) return ret; - ret = mdiobus_write(phy_dev->bus, phy_dev->addr, - HNS_PHY_PAGE_REG, - HNS_PHY_PAGE_COPPER); + ret = phy_write(phy_dev, HNS_PHY_PAGE_REG, + HNS_PHY_PAGE_COPPER); if (ret) return ret; break; @@ -1187,6 +1168,95 @@ static int hns_nic_nway_reset(struct net_device *netdev) return ret; } +static u32 +hns_get_rss_key_size(struct net_device 
*netdev) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_ae_ops *ops; + u32 ret; + + if (AE_IS_VER1(priv->enet_ver)) { + netdev_err(netdev, + "RSS feature is not supported on this hardware\n"); + return -EOPNOTSUPP; + } + + ops = priv->ae_handle->dev->ops; + ret = ops->get_rss_key_size(priv->ae_handle); + + return ret; +} + +static u32 +hns_get_rss_indir_size(struct net_device *netdev) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_ae_ops *ops; + u32 ret; + + if (AE_IS_VER1(priv->enet_ver)) { + netdev_err(netdev, + "RSS feature is not supported on this hardware\n"); + return -EOPNOTSUPP; + } + + ops = priv->ae_handle->dev->ops; + ret = ops->get_rss_indir_size(priv->ae_handle); + + return ret; +} + +static int +hns_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_ae_ops *ops; + int ret; + + if (AE_IS_VER1(priv->enet_ver)) { + netdev_err(netdev, + "RSS feature is not supported on this hardware\n"); + return -EOPNOTSUPP; + } + + ops = priv->ae_handle->dev->ops; + + if (!indir) + return 0; + + ret = ops->get_rss(priv->ae_handle, indir, key, hfunc); + + return 0; +} + +static int +hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key, + const u8 hfunc) +{ + struct hns_nic_priv *priv = netdev_priv(netdev); + struct hnae_ae_ops *ops; + int ret; + + if (AE_IS_VER1(priv->enet_ver)) { + netdev_err(netdev, + "RSS feature is not supported on this hardware\n"); + return -EOPNOTSUPP; + } + + ops = priv->ae_handle->dev->ops; + + /* currently hfunc can only be Toeplitz hash */ + if (key || + (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) + return -EOPNOTSUPP; + if (!indir) + return 0; + + ret = ops->set_rss(priv->ae_handle, indir, key, hfunc); + + return 0; +} + static struct ethtool_ops hns_ethtool_ops = { .get_drvinfo = hns_nic_get_drvinfo, .get_link = hns_nic_get_link, @@ -1206,6 +1276,10 @@ static struct ethtool_ops hns_ethtool_ops = { .get_regs_len = hns_get_regs_len, .get_regs = hns_get_regs, .nway_reset = hns_nic_nway_reset, + .get_rxfh_key_size = hns_get_rss_key_size, + .get_rxfh_indir_size = hns_get_rss_indir_size, + .get_rxfh = hns_get_rss, + .set_rxfh = hns_set_rss, }; void hns_ethtool_set_ops(struct net_device *ndev) diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 37491c85b..765ddb3dc 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c @@ -458,16 +458,11 @@ static int hns_mdio_probe(struct platform_device *pdev) } mdio_dev->subctrl_vbase = - syscon_node_to_regmap(of_parse_phandle(np, "subctrl_vbase", 0)); + syscon_node_to_regmap(of_parse_phandle(np, "subctrl-vbase", 0)); if (IS_ERR(mdio_dev->subctrl_vbase)) { dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n"); mdio_dev->subctrl_vbase = NULL; } - new_bus->irq = devm_kcalloc(&pdev->dev, PHY_MAX_ADDR, - sizeof(int), GFP_KERNEL); - if (!new_bus->irq) - return -ENOMEM; - new_bus->parent = &pdev->dev; platform_set_drvdata(pdev, new_bus); |
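
The PPEV2_CFG_RSS_TBL_4Nx defines above pack four 5-bit RSS indirection entries into one 32-bit register, following the driver's _S (shift) / _M (mask) naming convention. The standalone sketch below models only that packing; set_field() and get_field() are illustrative stand-ins for the hnae_set_field()/hnae_get_field() helpers, not driver code.

/*
 * Pack and unpack four 5-bit RSS indirection entries in one 32-bit
 * register, mirroring the PPEV2_CFG_RSS_TBL_4N0..4N3 shift/mask pairs.
 */
#include <stdint.h>
#include <stdio.h>

#define RSS_TBL_4N0_S 0
#define RSS_TBL_4N0_M (((1u << 5) - 1) << RSS_TBL_4N0_S)
#define RSS_TBL_4N1_S 8
#define RSS_TBL_4N1_M (((1u << 5) - 1) << RSS_TBL_4N1_S)
#define RSS_TBL_4N2_S 16
#define RSS_TBL_4N2_M (((1u << 5) - 1) << RSS_TBL_4N2_S)
#define RSS_TBL_4N3_S 24
#define RSS_TBL_4N3_M (((1u << 5) - 1) << RSS_TBL_4N3_S)

/* illustrative equivalents of hnae_set_field()/hnae_get_field() */
static void set_field(uint32_t *reg, uint32_t mask, int shift, uint32_t val)
{
        *reg = (*reg & ~mask) | ((val << shift) & mask);
}

static uint32_t get_field(uint32_t reg, uint32_t mask, int shift)
{
        return (reg & mask) >> shift;
}

int main(void)
{
        uint32_t reg = 0;

        /* program indirection entries 3, 7, 12 and 31 into one register */
        set_field(&reg, RSS_TBL_4N0_M, RSS_TBL_4N0_S, 3);
        set_field(&reg, RSS_TBL_4N1_M, RSS_TBL_4N1_S, 7);
        set_field(&reg, RSS_TBL_4N2_M, RSS_TBL_4N2_S, 12);
        set_field(&reg, RSS_TBL_4N3_M, RSS_TBL_4N3_S, 31);

        printf("reg = 0x%08x, entry 2 = %u\n", reg,
               get_field(reg, RSS_TBL_4N2_M, RSS_TBL_4N2_S));
        return 0;
}

With 5-bit entries, each table slot can select one of up to 32 queues.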
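
fill_v2_desc() derives the TSO fields of the v2 TX descriptor from the packet headers: SKB_TMP_LEN() is the L2 plus L3 header length plus the TCP header, mss is computed from the MTU, and paylen is the frame length minus those headers. Below is a worked example of that arithmetic with assumed values (IPv4 and TCP without options, 1500-byte MTU, an arbitrary GSO skb length); plain integers stand in for the skb fields.

/*
 * Worked example of the header/MSS/payload arithmetic used when
 * filling a v2 TX descriptor for a TSO frame. The header sizes and
 * skb length are assumptions for illustration only.
 */
#include <stdio.h>

#define ETH_HLEN        14
#define ETH_FCS_LEN     4

int main(void)
{
        int mtu = 1500;
        int ip_hdr_len = 20;    /* IPv4 header, no options (assumed) */
        int tcp_hdr_len = 20;   /* TCP header, no options (assumed) */
        int skb_len = 65226;    /* large GSO skb handed down by the stack */

        /* SKB_TMP_LEN: (transport_header - mac_header) + tcp_hdrlen */
        int skb_tmp_len = ETH_HLEN + ip_hdr_len + tcp_hdr_len;
        int l4_len = tcp_hdr_len;
        int mss = mtu - skb_tmp_len - ETH_FCS_LEN;
        int paylen = skb_len - skb_tmp_len;

        printf("headers %d, l4_len %d, mss %d, paylen %d\n",
               skb_tmp_len, l4_len, mss, paylen);
        return 0;
}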
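
hns_nic_maybe_stop_tso() and fill_tso_desc() share the same ceiling-divide arithmetic: a buffer larger than BD_MAX_SEND_SIZE (8191 bytes) takes one full-sized descriptor per 8191-byte chunk plus a shorter final one. The sketch below models only that split; DMA mapping and ring state are left out, and the sizes in main() are arbitrary examples.

/*
 * Split a buffer into hardware descriptors of at most BD_MAX_SEND_SIZE
 * bytes, the way fill_tso_desc() does; only the length arithmetic is
 * modelled here.
 */
#include <stdio.h>

#define BD_MAX_SEND_SIZE 8191

static void split_frag(int size)
{
        int frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
        int sizeoflast = size % BD_MAX_SEND_SIZE;
        int k;

        sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;

        printf("%d bytes -> %d descriptor(s)\n", size, frag_buf_num);
        for (k = 0; k < frag_buf_num; k++)
                printf("  BD %d: offset %d, len %d\n", k,
                       k * BD_MAX_SEND_SIZE,
                       (k == frag_buf_num - 1) ? sizeoflast : BD_MAX_SEND_SIZE);
}

int main(void)
{
        split_frag(20000);      /* 8191 + 8191 + 3618 */
        split_frag(8191);       /* exactly one full descriptor */
        return 0;
}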
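
In the two-buffers-per-page case of hns_nic_reuse_page() (PAGE_SIZE below 8192 with a 2048-byte RX buffer), the page offset is flipped with an XOR so that consecutive reuses alternate between the two halves of the same page. The toy model below shows only that flip; the page refcount and NUMA-node checks of the real function are omitted.

/*
 * Alternate between the two 2048-byte halves of a 4K RX page by
 * XOR-ing the offset, as the two-buffer reuse path does.
 */
#include <stdio.h>

#define HNS_BUFFER_SIZE_2048 2048

int main(void)
{
        unsigned int page_offset = 0;
        int i;

        for (i = 0; i < 4; i++) {
                printf("rx buffer %d handed up at page offset %u\n",
                       i, page_offset);
                /* flip page offset to the other buffer in the page */
                page_offset ^= HNS_BUFFER_SIZE_2048;
        }
        return 0;
}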
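
hns_set_irq_affinity() spreads ring interrupts differently depending on the queue/CPU ratio: with one queue per CPU, a queue's TX and RX rings share that queue's CPU; with twice as many CPUs as queues, the TX rings (the first half of ring_data[], per the comment in hns_enet.h) go to even CPUs and the RX rings to odd CPUs. The sketch below is a rough model of that assignment rule only; the cpu_online() checks are dropped and a printout stands in for irq_set_affinity_hint().

/*
 * Model of the queue-to-CPU assignment rule used for IRQ affinity;
 * the queue and CPU counts below are illustrative.
 */
#include <stdio.h>

static int pick_cpu(int queue_index, int is_rx, int q_num, int num_cpus)
{
        if (q_num == num_cpus)
                return queue_index;               /* one queue per CPU */
        return queue_index * 2 + (is_rx ? 1 : 0); /* TX even, RX odd */
}

static void show(int q_num, int num_cpus)
{
        int q;

        printf("%d queues on %d CPUs:\n", q_num, num_cpus);
        for (q = 0; q < q_num; q++)
                printf("  queue %d: tx irq -> cpu %d, rx irq -> cpu %d\n",
                       q, pick_cpu(q, 0, q_num, num_cpus),
                       pick_cpu(q, 1, q_num, num_cpus));
}

int main(void)
{
        show(4, 4);
        show(4, 8);
        return 0;
}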
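
hns_set_rss() accepts only the Toeplitz hash function, rejects any attempt to change the hash key, and treats a NULL indirection table as a no-op. The standalone sketch below models just those argument checks; the ETH_RSS_HASH_* and EOPNOTSUPP constants mirror their Linux values, the table and key sizes are illustrative, and a comment marks where the driver would call its set_rss op.

/*
 * Argument checks of the set_rxfh path: Toeplitz-only hash function,
 * no key updates, NULL indirection table means nothing to do.
 */
#include <stddef.h>
#include <stdio.h>

#define ETH_RSS_HASH_NO_CHANGE  0
#define ETH_RSS_HASH_TOP        1       /* Toeplitz */
#define EOPNOTSUPP              95

static int set_rss_check(const unsigned int *indir, const unsigned char *key,
                         unsigned char hfunc)
{
        /* currently hfunc can only be Toeplitz hash */
        if (key ||
            (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
                return -EOPNOTSUPP;
        if (!indir)
                return 0;       /* nothing to program */

        /* here the driver would program the indirection table */
        return 0;
}

int main(void)
{
        unsigned int indir[128] = { 0 };
        unsigned char key[40] = { 0 };

        printf("toeplitz + table : %d\n",
               set_rss_check(indir, NULL, ETH_RSS_HASH_TOP));
        printf("other hash func  : %d\n", set_rss_check(indir, NULL, 2));
        printf("key change       : %d\n",
               set_rss_check(indir, key, ETH_RSS_HASH_TOP));
        printf("no table         : %d\n",
               set_rss_check(NULL, NULL, ETH_RSS_HASH_TOP));
        return 0;
}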