Diffstat (limited to 'drivers/infiniband/hw')
103 files changed, 30951 insertions, 6549 deletions
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile index aded2a5cc..c7ad0a4c8 100644 --- a/drivers/infiniband/hw/Makefile +++ b/drivers/infiniband/hw/Makefile @@ -2,6 +2,7 @@ obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/ obj-$(CONFIG_INFINIBAND_QIB) += qib/ obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/ obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/ +obj-$(CONFIG_INFINIBAND_I40IW) += i40iw/ obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/ obj-$(CONFIG_INFINIBAND_NES) += nes/ diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index f504ba73e..d403231a4 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -1877,7 +1877,7 @@ err: static int is_loopback_dst(struct iw_cm_id *cm_id) { struct net_device *dev; - struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr; + struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; dev = ip_dev_find(&init_net, raddr->sin_addr.s_addr); if (!dev) @@ -1892,10 +1892,10 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) struct iwch_ep *ep; struct rtable *rt; int err = 0; - struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr; - struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr; + struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr; + struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; - if (cm_id->remote_addr.ss_family != PF_INET) { + if (cm_id->m_remote_addr.ss_family != PF_INET) { err = -ENOSYS; goto out; } @@ -1961,9 +1961,9 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) state_set(&ep->com, CONNECTING); ep->tos = IPTOS_LOWDELAY; - memcpy(&ep->com.local_addr, &cm_id->local_addr, + memcpy(&ep->com.local_addr, &cm_id->m_local_addr, sizeof(ep->com.local_addr)); - memcpy(&ep->com.remote_addr, &cm_id->remote_addr, + memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr, sizeof(ep->com.remote_addr)); /* send connect request to rnic */ @@ -1992,7 +1992,7 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog) might_sleep(); - if (cm_id->local_addr.ss_family != PF_INET) { + if (cm_id->m_local_addr.ss_family != PF_INET) { err = -ENOSYS; goto fail1; } @@ -2008,7 +2008,7 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog) cm_id->add_ref(cm_id); ep->com.cm_id = cm_id; ep->backlog = backlog; - memcpy(&ep->com.local_addr, &cm_id->local_addr, + memcpy(&ep->com.local_addr, &cm_id->m_local_addr, sizeof(ep->com.local_addr)); /* diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 2734820d2..3234a8be1 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -657,7 +657,8 @@ err: return ERR_PTR(err); } -static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type) +static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, + struct ib_udata *udata) { struct iwch_dev *rhp; struct iwch_pd *php; @@ -1389,6 +1390,8 @@ int iwch_register_device(struct iwch_dev *dev) dev->ibdev.iwcm->add_ref = iwch_qp_add_ref; dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref; dev->ibdev.iwcm->get_qp = iwch_get_qp; + memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name, + sizeof(dev->ibdev.iwcm->ifname)); ret = ib_register_device(&dev->ibdev, NULL); if (ret) diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 
cd2ff5f95..651711370 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -302,7 +302,7 @@ void _c4iw_free_ep(struct kref *kref) if (ep->com.remote_addr.ss_family == AF_INET6) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) - &ep->com.mapped_local_addr; + &ep->com.local_addr; cxgb4_clip_release( ep->com.dev->rdev.lldi.ports[0], @@ -314,12 +314,6 @@ void _c4iw_free_ep(struct kref *kref) dst_release(ep->dst); cxgb4_l2t_release(ep->l2t); } - if (test_bit(RELEASE_MAPINFO, &ep->com.flags)) { - print_addr(&ep->com, __func__, "remove_mapinfo/mapping"); - iwpm_remove_mapinfo(&ep->com.local_addr, - &ep->com.mapped_local_addr); - iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW); - } kfree(ep); } @@ -455,7 +449,7 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) state_set(&ep->com, DEAD); if (ep->com.remote_addr.ss_family == AF_INET6) { struct sockaddr_in6 *sin6 = - (struct sockaddr_in6 *)&ep->com.mapped_local_addr; + (struct sockaddr_in6 *)&ep->com.local_addr; cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], (const u32 *)&sin6->sin6_addr.s6_addr, 1); } @@ -485,12 +479,19 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) unsigned int flowclen = 80; struct fw_flowc_wr *flowc; int i; + u16 vlan = ep->l2t->vlan; + int nparams; + + if (vlan == CPL_L2T_VLAN_NONE) + nparams = 8; + else + nparams = 9; skb = get_skb(skb, flowclen, GFP_KERNEL); flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | - FW_FLOWC_WR_NPARAMS_V(8)); + FW_FLOWC_WR_NPARAMS_V(nparams)); flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen, 16)) | FW_WR_FLOWID_V(ep->hwtid)); @@ -511,9 +512,17 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) flowc->mnemval[6].val = cpu_to_be32(ep->snd_win); flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; flowc->mnemval[7].val = cpu_to_be32(ep->emss); - /* Pad WR to 16 byte boundary */ - flowc->mnemval[8].mnemonic = 0; - flowc->mnemval[8].val = 0; + if (nparams == 9) { + u16 pri; + + pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; + flowc->mnemval[8].val = cpu_to_be32(pri); + } else { + /* Pad WR to 16 byte boundary */ + flowc->mnemval[8].mnemonic = 0; + flowc->mnemval[8].val = 0; + } for (i = 0; i < 9; i++) { flowc->mnemval[i].r4[0] = 0; flowc->mnemval[i].r4[1] = 0; @@ -568,54 +577,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); } -/* - * c4iw_form_pm_msg - Form a port mapper message with mapping info - */ -static void c4iw_form_pm_msg(struct c4iw_ep *ep, - struct iwpm_sa_data *pm_msg) -{ - memcpy(&pm_msg->loc_addr, &ep->com.local_addr, - sizeof(ep->com.local_addr)); - memcpy(&pm_msg->rem_addr, &ep->com.remote_addr, - sizeof(ep->com.remote_addr)); -} - -/* - * c4iw_form_reg_msg - Form a port mapper message with dev info - */ -static void c4iw_form_reg_msg(struct c4iw_dev *dev, - struct iwpm_dev_data *pm_msg) -{ - memcpy(pm_msg->dev_name, dev->ibdev.name, IWPM_DEVNAME_SIZE); - memcpy(pm_msg->if_name, dev->rdev.lldi.ports[0]->name, - IWPM_IFNAME_SIZE); -} - -static void c4iw_record_pm_msg(struct c4iw_ep *ep, - struct iwpm_sa_data *pm_msg) -{ - memcpy(&ep->com.mapped_local_addr, &pm_msg->mapped_loc_addr, - sizeof(ep->com.mapped_local_addr)); - memcpy(&ep->com.mapped_remote_addr, &pm_msg->mapped_rem_addr, - sizeof(ep->com.mapped_remote_addr)); -} - -static int 
get_remote_addr(struct c4iw_ep *parent_ep, struct c4iw_ep *child_ep) -{ - int ret; - - print_addr(&parent_ep->com, __func__, "get_remote_addr parent_ep "); - print_addr(&child_ep->com, __func__, "get_remote_addr child_ep "); - - ret = iwpm_get_remote_info(&parent_ep->com.mapped_local_addr, - &child_ep->com.mapped_remote_addr, - &child_ep->com.remote_addr, RDMA_NL_C4IW); - if (ret) - PDBG("Unable to find remote peer addr info - err %d\n", ret); - - return ret; -} - static void best_mtu(const unsigned short *mtus, unsigned short mtu, unsigned int *idx, int use_ts, int ipv6) { @@ -645,13 +606,13 @@ static int send_connect(struct c4iw_ep *ep) int wscale; int win, sizev4, sizev6, wrlen; struct sockaddr_in *la = (struct sockaddr_in *) - &ep->com.mapped_local_addr; + &ep->com.local_addr; struct sockaddr_in *ra = (struct sockaddr_in *) - &ep->com.mapped_remote_addr; + &ep->com.remote_addr; struct sockaddr_in6 *la6 = (struct sockaddr_in6 *) - &ep->com.mapped_local_addr; + &ep->com.local_addr; struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *) - &ep->com.mapped_remote_addr; + &ep->com.remote_addr; int ret; enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type; u32 isn = (prandom_u32() & ~7UL) - 1; @@ -710,7 +671,7 @@ static int send_connect(struct c4iw_ep *ep) L2T_IDX_V(ep->l2t->idx) | TX_CHAN_V(ep->tx_chan) | SMAC_SEL_V(ep->smac_idx) | - DSCP_V(ep->tos) | + DSCP_V(ep->tos >> 2) | ULP_MODE_V(ULP_MODE_TCPDDP) | RCV_BUFSIZ_V(win); opt2 = RX_CHANNEL_V(0) | @@ -1829,10 +1790,10 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) req->le.filter = cpu_to_be32(cxgb4_select_ntuple( ep->com.dev->rdev.lldi.ports[0], ep->l2t)); - sin = (struct sockaddr_in *)&ep->com.mapped_local_addr; + sin = (struct sockaddr_in *)&ep->com.local_addr; req->le.lport = sin->sin_port; req->le.u.ipv4.lip = sin->sin_addr.s_addr; - sin = (struct sockaddr_in *)&ep->com.mapped_remote_addr; + sin = (struct sockaddr_in *)&ep->com.remote_addr; req->le.pport = sin->sin_port; req->le.u.ipv4.pip = sin->sin_addr.s_addr; req->tcb.t_state_to_astid = @@ -1864,7 +1825,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) L2T_IDX_V(ep->l2t->idx) | TX_CHAN_V(ep->tx_chan) | SMAC_SEL_V(ep->smac_idx) | - DSCP_V(ep->tos) | + DSCP_V(ep->tos >> 2) | ULP_MODE_V(ULP_MODE_TCPDDP) | RCV_BUFSIZ_V(win)); req->tcb.opt2 = (__force __be32) (PACE_V(1) | @@ -1928,7 +1889,7 @@ static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi) static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, struct dst_entry *dst, struct c4iw_dev *cdev, - bool clear_mpa_v1, enum chip_type adapter_type) + bool clear_mpa_v1, enum chip_type adapter_type, u8 tos) { struct neighbour *n; int err, step; @@ -1958,7 +1919,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, goto out; } ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, - n, pdev, 0); + n, pdev, rt_tos2priority(tos)); if (!ep->l2t) goto out; ep->mtu = pdev->mtu; @@ -2013,13 +1974,13 @@ static int c4iw_reconnect(struct c4iw_ep *ep) { int err = 0; struct sockaddr_in *laddr = (struct sockaddr_in *) - &ep->com.cm_id->local_addr; + &ep->com.cm_id->m_local_addr; struct sockaddr_in *raddr = (struct sockaddr_in *) - &ep->com.cm_id->remote_addr; + &ep->com.cm_id->m_remote_addr; struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *) - &ep->com.cm_id->local_addr; + &ep->com.cm_id->m_local_addr; struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *) - &ep->com.cm_id->remote_addr; + &ep->com.cm_id->m_remote_addr; int iptype; __u8 *ra; @@ -2038,10 
+1999,10 @@ static int c4iw_reconnect(struct c4iw_ep *ep) insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid); /* find a route */ - if (ep->com.cm_id->local_addr.ss_family == AF_INET) { + if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) { ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr, raddr->sin_addr.s_addr, laddr->sin_port, - raddr->sin_port, 0); + raddr->sin_port, ep->com.cm_id->tos); iptype = 4; ra = (__u8 *)&raddr->sin_addr; } else { @@ -2058,7 +2019,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep) goto fail3; } err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false, - ep->com.dev->rdev.lldi.adapter_type); + ep->com.dev->rdev.lldi.adapter_type, + ep->com.cm_id->tos); if (err) { pr_err("%s - cannot alloc l2e.\n", __func__); goto fail4; @@ -2069,7 +2031,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep) ep->l2t->idx); state_set(&ep->com, CONNECTING); - ep->tos = 0; + ep->tos = ep->com.cm_id->tos; /* send connect request to rnic */ err = send_connect(ep); @@ -2109,10 +2071,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) struct sockaddr_in6 *ra6; ep = lookup_atid(t, atid); - la = (struct sockaddr_in *)&ep->com.mapped_local_addr; - ra = (struct sockaddr_in *)&ep->com.mapped_remote_addr; - la6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr; - ra6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr; + la = (struct sockaddr_in *)&ep->com.local_addr; + ra = (struct sockaddr_in *)&ep->com.remote_addr; + la6 = (struct sockaddr_in6 *)&ep->com.local_addr; + ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr; PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, status, status2errno(status)); @@ -2154,7 +2116,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) if (ep->com.remote_addr.ss_family == AF_INET6) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) - &ep->com.mapped_local_addr; + &ep->com.local_addr; cxgb4_clip_release( ep->com.dev->rdev.lldi.ports[0], (const u32 *) @@ -2189,7 +2151,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) if (ep->com.remote_addr.ss_family == AF_INET6) { struct sockaddr_in6 *sin6 = - (struct sockaddr_in6 *)&ep->com.mapped_local_addr; + (struct sockaddr_in6 *)&ep->com.local_addr; cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], (const u32 *)&sin6->sin6_addr.s6_addr, 1); } @@ -2391,6 +2353,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) u16 peer_mss = ntohs(req->tcpopt.mss); int iptype; unsigned short hdrs; + u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); parent_ep = lookup_stid(t, stid); if (!parent_ep) { @@ -2399,8 +2362,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) } if (state_read(&parent_ep->com) != LISTEN) { - printk(KERN_ERR "%s - listening ep not in LISTEN\n", - __func__); + PDBG("%s - listening ep not in LISTEN\n", __func__); goto reject; } @@ -2415,7 +2377,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) ntohs(peer_port), peer_mss); dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip, local_port, peer_port, - PASS_OPEN_TOS_G(ntohl(req->tos_stid))); + tos); } else { PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n" , __func__, parent_ep, hwtid, @@ -2441,7 +2403,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) } err = import_ep(child_ep, iptype, peer_ip, dst, dev, false, - parent_ep->com.dev->rdev.lldi.adapter_type); + parent_ep->com.dev->rdev.lldi.adapter_type, tos); if (err) { 
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", __func__); @@ -2459,18 +2421,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) child_ep->com.dev = dev; child_ep->com.cm_id = NULL; - /* - * The mapped_local and mapped_remote addresses get setup with - * the actual 4-tuple. The local address will be based on the - * actual local address of the connection, but on the port number - * of the parent listening endpoint. The remote address is - * setup based on a query to the IWPM since we don't know what it - * originally was before mapping. If no mapping was done, then - * mapped_remote == remote, and mapped_local == local. - */ if (iptype == 4) { struct sockaddr_in *sin = (struct sockaddr_in *) - &child_ep->com.mapped_local_addr; + &child_ep->com.local_addr; sin->sin_family = PF_INET; sin->sin_port = local_port; @@ -2482,12 +2435,12 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) &parent_ep->com.local_addr)->sin_port; sin->sin_addr.s_addr = *(__be32 *)local_ip; - sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr; + sin = (struct sockaddr_in *)&child_ep->com.remote_addr; sin->sin_family = PF_INET; sin->sin_port = peer_port; sin->sin_addr.s_addr = *(__be32 *)peer_ip; } else { - sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_local_addr; + sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; sin6->sin6_family = PF_INET6; sin6->sin6_port = local_port; memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); @@ -2498,18 +2451,15 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) &parent_ep->com.local_addr)->sin6_port; memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); - sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr; + sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr; sin6->sin6_family = PF_INET6; sin6->sin6_port = peer_port; memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16); } - memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr, - sizeof(child_ep->com.remote_addr)); - get_remote_addr(parent_ep, child_ep); c4iw_get_ep(&parent_ep->com); child_ep->parent_ep = parent_ep; - child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); + child_ep->tos = tos; child_ep->dst = dst; child_ep->hwtid = hwtid; @@ -2522,7 +2472,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) accept_cr(child_ep, skb, req); set_bit(PASS_ACCEPT_REQ, &child_ep->com.history); if (iptype == 6) { - sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_local_addr; + sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0], (const u32 *)&sin6->sin6_addr.s6_addr, 1); } @@ -2765,7 +2715,7 @@ out: if (ep->com.remote_addr.ss_family == AF_INET6) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) - &ep->com.mapped_local_addr; + &ep->com.local_addr; cxgb4_clip_release( ep->com.dev->rdev.lldi.ports[0], (const u32 *)&sin6->sin6_addr.s6_addr, @@ -3026,8 +2976,8 @@ static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) { struct in_device *ind; int found = 0; - struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr; - struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr; + struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr; + struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; ind = in_dev_get(dev->rdev.lldi.ports[0]); if (!ind) @@ -3072,8 +3022,8 @@ static int get_lladdr(struct net_device *dev, struct in6_addr *addr, static int pick_local_ip6addrs(struct 
c4iw_dev *dev, struct iw_cm_id *cm_id) { struct in6_addr uninitialized_var(addr); - struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr; - struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr; + struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; + struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) { memcpy(la6->sin6_addr.s6_addr, &addr, 16); @@ -3092,11 +3042,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) struct sockaddr_in *raddr; struct sockaddr_in6 *laddr6; struct sockaddr_in6 *raddr6; - struct iwpm_dev_data pm_reg_msg; - struct iwpm_sa_data pm_msg; __u8 *ra; int iptype; - int iwpm_err = 0; if ((conn_param->ord > cur_max_read_depth(dev)) || (conn_param->ird > cur_max_read_depth(dev))) { @@ -3144,47 +3091,17 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) } insert_handle(dev, &dev->atid_idr, ep, ep->atid); - memcpy(&ep->com.local_addr, &cm_id->local_addr, + memcpy(&ep->com.local_addr, &cm_id->m_local_addr, sizeof(ep->com.local_addr)); - memcpy(&ep->com.remote_addr, &cm_id->remote_addr, + memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr, sizeof(ep->com.remote_addr)); - /* No port mapper available, go with the specified peer information */ - memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr, - sizeof(ep->com.mapped_local_addr)); - memcpy(&ep->com.mapped_remote_addr, &cm_id->remote_addr, - sizeof(ep->com.mapped_remote_addr)); - - c4iw_form_reg_msg(dev, &pm_reg_msg); - iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW); - if (iwpm_err) { - PDBG("%s: Port Mapper reg pid fail (err = %d).\n", - __func__, iwpm_err); - } - if (iwpm_valid_pid() && !iwpm_err) { - c4iw_form_pm_msg(ep, &pm_msg); - iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW); - if (iwpm_err) - PDBG("%s: Port Mapper query fail (err = %d).\n", - __func__, iwpm_err); - else - c4iw_record_pm_msg(ep, &pm_msg); - } - if (iwpm_create_mapinfo(&ep->com.local_addr, - &ep->com.mapped_local_addr, RDMA_NL_C4IW)) { - iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW); - err = -ENOMEM; - goto fail1; - } - print_addr(&ep->com, __func__, "add_query/create_mapinfo"); - set_bit(RELEASE_MAPINFO, &ep->com.flags); - - laddr = (struct sockaddr_in *)&ep->com.mapped_local_addr; - raddr = (struct sockaddr_in *)&ep->com.mapped_remote_addr; - laddr6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr; - raddr6 = (struct sockaddr_in6 *) &ep->com.mapped_remote_addr; + laddr = (struct sockaddr_in *)&ep->com.local_addr; + raddr = (struct sockaddr_in *)&ep->com.remote_addr; + laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr; + raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr; - if (cm_id->remote_addr.ss_family == AF_INET) { + if (cm_id->m_remote_addr.ss_family == AF_INET) { iptype = 4; ra = (__u8 *)&raddr->sin_addr; @@ -3203,7 +3120,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ra, ntohs(raddr->sin_port)); ep->dst = find_route(dev, laddr->sin_addr.s_addr, raddr->sin_addr.s_addr, laddr->sin_port, - raddr->sin_port, 0); + raddr->sin_port, cm_id->tos); } else { iptype = 6; ra = (__u8 *)&raddr6->sin6_addr; @@ -3234,7 +3151,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) } err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true, - ep->com.dev->rdev.lldi.adapter_type); + ep->com.dev->rdev.lldi.adapter_type, cm_id->tos); if (err) { printk(KERN_ERR MOD 
"%s - cannot alloc l2e.\n", __func__); goto fail3; @@ -3245,7 +3162,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ep->l2t->idx); state_set(&ep->com, CONNECTING); - ep->tos = 0; + ep->tos = cm_id->tos; /* send connect request to rnic */ err = send_connect(ep); @@ -3269,7 +3186,7 @@ static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) { int err; struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) - &ep->com.mapped_local_addr; + &ep->com.local_addr; if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) { err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0], @@ -3302,7 +3219,7 @@ static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) { int err; struct sockaddr_in *sin = (struct sockaddr_in *) - &ep->com.mapped_local_addr; + &ep->com.local_addr; if (dev->rdev.lldi.enable_fw_ofld_conn) { do { @@ -3343,9 +3260,6 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) int err = 0; struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); struct c4iw_listen_ep *ep; - struct iwpm_dev_data pm_reg_msg; - struct iwpm_sa_data pm_msg; - int iwpm_err = 0; might_sleep(); @@ -3360,7 +3274,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) ep->com.cm_id = cm_id; ep->com.dev = dev; ep->backlog = backlog; - memcpy(&ep->com.local_addr, &cm_id->local_addr, + memcpy(&ep->com.local_addr, &cm_id->m_local_addr, sizeof(ep->com.local_addr)); /* @@ -3369,10 +3283,10 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) if (dev->rdev.lldi.enable_fw_ofld_conn && ep->com.local_addr.ss_family == AF_INET) ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, - cm_id->local_addr.ss_family, ep); + cm_id->m_local_addr.ss_family, ep); else ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, - cm_id->local_addr.ss_family, ep); + cm_id->m_local_addr.ss_family, ep); if (ep->stid == -1) { printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); @@ -3381,36 +3295,9 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) } insert_handle(dev, &dev->stid_idr, ep, ep->stid); - /* No port mapper available, go with the specified info */ - memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr, - sizeof(ep->com.mapped_local_addr)); - - c4iw_form_reg_msg(dev, &pm_reg_msg); - iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW); - if (iwpm_err) { - PDBG("%s: Port Mapper reg pid fail (err = %d).\n", - __func__, iwpm_err); - } - if (iwpm_valid_pid() && !iwpm_err) { - memcpy(&pm_msg.loc_addr, &ep->com.local_addr, - sizeof(ep->com.local_addr)); - iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_C4IW); - if (iwpm_err) - PDBG("%s: Port Mapper query fail (err = %d).\n", - __func__, iwpm_err); - else - memcpy(&ep->com.mapped_local_addr, - &pm_msg.mapped_loc_addr, - sizeof(ep->com.mapped_local_addr)); - } - if (iwpm_create_mapinfo(&ep->com.local_addr, - &ep->com.mapped_local_addr, RDMA_NL_C4IW)) { - err = -ENOMEM; - goto fail3; - } - print_addr(&ep->com, __func__, "add_mapping/create_mapinfo"); + memcpy(&ep->com.local_addr, &cm_id->m_local_addr, + sizeof(ep->com.local_addr)); - set_bit(RELEASE_MAPINFO, &ep->com.flags); state_set(&ep->com, LISTEN); if (ep->com.local_addr.ss_family == AF_INET) err = create_server4(dev, ep); @@ -3421,7 +3308,6 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) goto out; } -fail3: cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, ep->com.local_addr.ss_family); fail2: @@ -3456,7 +3342,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id) goto done; err = c4iw_wait_for_reply(&ep->com.dev->rdev, 
&ep->com.wr_wait, 0, 0, __func__); - sin6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr; + sin6 = (struct sockaddr_in6 *)&ep->com.local_addr; cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], (const u32 *)&sin6->sin6_addr.s6_addr, 1); } @@ -3580,7 +3466,7 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, state_set(&ep->com, DEAD); if (ep->com.remote_addr.ss_family == AF_INET6) { struct sockaddr_in6 *sin6 = - (struct sockaddr_in6 *)&ep->com.mapped_local_addr; + (struct sockaddr_in6 *)&ep->com.local_addr; cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], (const u32 *)&sin6->sin6_addr.s6_addr, 1); } diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 4e94cff5b..b0b955724 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -815,8 +815,15 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) } } out: - if (wq) + if (wq) { + if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) { + if (t4_sq_empty(wq)) + complete(&qhp->sq_drained); + if (t4_rq_empty(wq)) + complete(&qhp->rq_drained); + } spin_unlock(&qhp->lock); + } return ret; } diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 8024ea441..ae2e8b23d 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -87,17 +87,6 @@ struct c4iw_debugfs_data { int pos; }; -/* registered cxgb4 netlink callbacks */ -static struct ibnl_client_cbs c4iw_nl_cb_table[] = { - [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, - [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, - [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, - [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb}, - [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb}, - [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb}, - [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb} -}; - static int count_idrs(int id, void *p, void *data) { int *countp = data; @@ -242,13 +231,13 @@ static int dump_qp(int id, void *p, void *data) if (qp->ep) { if (qp->ep->com.local_addr.ss_family == AF_INET) { struct sockaddr_in *lsin = (struct sockaddr_in *) - &qp->ep->com.local_addr; + &qp->ep->com.cm_id->local_addr; struct sockaddr_in *rsin = (struct sockaddr_in *) - &qp->ep->com.remote_addr; + &qp->ep->com.cm_id->remote_addr; struct sockaddr_in *mapped_lsin = (struct sockaddr_in *) - &qp->ep->com.mapped_local_addr; + &qp->ep->com.cm_id->m_local_addr; struct sockaddr_in *mapped_rsin = (struct sockaddr_in *) - &qp->ep->com.mapped_remote_addr; + &qp->ep->com.cm_id->m_remote_addr; cc = snprintf(qpd->buf + qpd->pos, space, "rc qp sq id %u rq id %u state %u " @@ -264,15 +253,15 @@ static int dump_qp(int id, void *p, void *data) ntohs(mapped_rsin->sin_port)); } else { struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *) - &qp->ep->com.local_addr; + &qp->ep->com.cm_id->local_addr; struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *) - &qp->ep->com.remote_addr; + &qp->ep->com.cm_id->remote_addr; struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *) - &qp->ep->com.mapped_local_addr; + &qp->ep->com.cm_id->m_local_addr; struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *) - &qp->ep->com.mapped_remote_addr; + &qp->ep->com.cm_id->m_remote_addr; cc = snprintf(qpd->buf + qpd->pos, space, "rc qp sq id %u rq id %u state %u " @@ -545,13 +534,13 @@ static int dump_ep(int id, void *p, void *data) if (ep->com.local_addr.ss_family == AF_INET) { struct 
sockaddr_in *lsin = (struct sockaddr_in *) - &ep->com.local_addr; + &ep->com.cm_id->local_addr; struct sockaddr_in *rsin = (struct sockaddr_in *) - &ep->com.remote_addr; + &ep->com.cm_id->remote_addr; struct sockaddr_in *mapped_lsin = (struct sockaddr_in *) - &ep->com.mapped_local_addr; + &ep->com.cm_id->m_local_addr; struct sockaddr_in *mapped_rsin = (struct sockaddr_in *) - &ep->com.mapped_remote_addr; + &ep->com.cm_id->m_remote_addr; cc = snprintf(epd->buf + epd->pos, space, "ep %p cm_id %p qp %p state %d flags 0x%lx " @@ -569,13 +558,13 @@ static int dump_ep(int id, void *p, void *data) ntohs(mapped_rsin->sin_port)); } else { struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *) - &ep->com.local_addr; + &ep->com.cm_id->local_addr; struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *) - &ep->com.remote_addr; + &ep->com.cm_id->remote_addr; struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *) - &ep->com.mapped_local_addr; + &ep->com.cm_id->m_local_addr; struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *) - &ep->com.mapped_remote_addr; + &ep->com.cm_id->m_remote_addr; cc = snprintf(epd->buf + epd->pos, space, "ep %p cm_id %p qp %p state %d flags 0x%lx " @@ -610,9 +599,9 @@ static int dump_listen_ep(int id, void *p, void *data) if (ep->com.local_addr.ss_family == AF_INET) { struct sockaddr_in *lsin = (struct sockaddr_in *) - &ep->com.local_addr; + &ep->com.cm_id->local_addr; struct sockaddr_in *mapped_lsin = (struct sockaddr_in *) - &ep->com.mapped_local_addr; + &ep->com.cm_id->m_local_addr; cc = snprintf(epd->buf + epd->pos, space, "ep %p cm_id %p state %d flags 0x%lx stid %d " @@ -623,9 +612,9 @@ static int dump_listen_ep(int id, void *p, void *data) ntohs(mapped_lsin->sin_port)); } else { struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *) - &ep->com.local_addr; + &ep->com.cm_id->local_addr; struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *) - &ep->com.mapped_local_addr; + &ep->com.cm_id->m_local_addr; cc = snprintf(epd->buf + epd->pos, space, "ep %p cm_id %p state %d flags 0x%lx stid %d " @@ -801,10 +790,9 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start, rdev->lldi.vr->cq.size); - PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p " + PDBG("udb %pR db_reg %p gts_reg %p " "qpmask 0x%x cqmask 0x%x\n", - (unsigned)pci_resource_len(rdev->lldi.pdev, 2), - (void *)pci_resource_start(rdev->lldi.pdev, 2), + &rdev->lldi.pdev->resource[2], rdev->lldi.db_reg, rdev->lldi.gts_reg, rdev->qpmask, rdev->cqmask); @@ -1506,20 +1494,6 @@ static int __init c4iw_init_module(void) printk(KERN_WARNING MOD "could not create debugfs entry, continuing\n"); - if (ibnl_add_client(RDMA_NL_C4IW, RDMA_NL_IWPM_NUM_OPS, - c4iw_nl_cb_table)) - pr_err("%s[%u]: Failed to add netlink callback\n" - , __func__, __LINE__); - - err = iwpm_init(RDMA_NL_C4IW); - if (err) { - pr_err("port mapper initialization failed with %d\n", err); - ibnl_remove_client(RDMA_NL_C4IW); - c4iw_cm_term(); - debugfs_remove_recursive(c4iw_debugfs_root); - return err; - } - cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info); return 0; @@ -1537,8 +1511,6 @@ static void __exit c4iw_exit_module(void) } mutex_unlock(&dev_mutex); cxgb4_unregister_uld(CXGB4_ULD_RDMA); - iwpm_exit(RDMA_NL_C4IW); - ibnl_remove_client(RDMA_NL_C4IW); c4iw_cm_term(); debugfs_remove_recursive(c4iw_debugfs_root); } diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index fb2de75a0..df43f871a 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ 
b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -476,6 +476,8 @@ struct c4iw_qp { wait_queue_head_t wait; struct timer_list timer; int sq_sig_all; + struct completion rq_drained; + struct completion sq_drained; }; static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp) @@ -753,7 +755,6 @@ enum c4iw_ep_flags { CLOSE_SENT = 3, TIMEOUT = 4, QP_REFERENCED = 5, - RELEASE_MAPINFO = 6, }; enum c4iw_ep_history { @@ -790,8 +791,6 @@ struct c4iw_ep_common { struct mutex mutex; struct sockaddr_storage local_addr; struct sockaddr_storage remote_addr; - struct sockaddr_storage mapped_local_addr; - struct sockaddr_storage mapped_remote_addr; struct c4iw_wr_wait wr_wait; unsigned long flags; unsigned long history; @@ -843,45 +842,6 @@ struct c4iw_ep { struct c4iw_ep_stats stats; }; -static inline void print_addr(struct c4iw_ep_common *epc, const char *func, - const char *msg) -{ - -#define SINA(a) (&(((struct sockaddr_in *)(a))->sin_addr.s_addr)) -#define SINP(a) ntohs(((struct sockaddr_in *)(a))->sin_port) -#define SIN6A(a) (&(((struct sockaddr_in6 *)(a))->sin6_addr)) -#define SIN6P(a) ntohs(((struct sockaddr_in6 *)(a))->sin6_port) - - if (c4iw_debug) { - switch (epc->local_addr.ss_family) { - case AF_INET: - PDBG("%s %s %pI4:%u/%u <-> %pI4:%u/%u\n", - func, msg, SINA(&epc->local_addr), - SINP(&epc->local_addr), - SINP(&epc->mapped_local_addr), - SINA(&epc->remote_addr), - SINP(&epc->remote_addr), - SINP(&epc->mapped_remote_addr)); - break; - case AF_INET6: - PDBG("%s %s %pI6:%u/%u <-> %pI6:%u/%u\n", - func, msg, SIN6A(&epc->local_addr), - SIN6P(&epc->local_addr), - SIN6P(&epc->mapped_local_addr), - SIN6A(&epc->remote_addr), - SIN6P(&epc->remote_addr), - SIN6P(&epc->mapped_remote_addr)); - break; - default: - break; - } - } -#undef SINA -#undef SINP -#undef SIN6A -#undef SIN6P -} - static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id) { return cm_id->provider_data; @@ -961,7 +921,8 @@ int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents); int c4iw_dealloc_mw(struct ib_mw *mw); -struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type); +struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, + struct ib_udata *udata); struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt, int acc, struct ib_udata *udata); @@ -1016,6 +977,8 @@ extern int c4iw_wr_log; extern int db_fc_threshold; extern int db_coalescing_threshold; extern int use_dsgl; +void c4iw_drain_rq(struct ib_qp *qp); +void c4iw_drain_sq(struct ib_qp *qp); #endif diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 7849890c4..008be07d5 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -34,6 +34,7 @@ #include <linux/moduleparam.h> #include <rdma/ib_umem.h> #include <linux/atomic.h> +#include <rdma/ib_user_verbs.h> #include "iw_cxgb4.h" @@ -552,7 +553,8 @@ err: return ERR_PTR(err); } -struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type) +struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, + struct ib_udata *udata) { struct c4iw_dev *rhp; struct c4iw_pd *php; @@ -617,12 +619,14 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, int ret = 0; int length = roundup(max_num_sg * sizeof(u64), 32); + php = to_c4iw_pd(pd); + rhp = php->rhp; + if (mr_type != IB_MR_TYPE_MEM_REG || - max_num_sg > t4_max_fr_depth(use_dsgl)) + max_num_sg > t4_max_fr_depth(&rhp->rdev.lldi.ulptx_memwrite_dsgl && + use_dsgl)) return ERR_PTR(-EINVAL); - php = to_c4iw_pd(pd); - rhp = php->rhp; mhp = 
kzalloc(sizeof(*mhp), GFP_KERNEL); if (!mhp) { ret = -ENOMEM; diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index ec04272fb..7574f394f 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -339,7 +339,8 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro props->max_mr = c4iw_num_stags(&dev->rdev); props->max_pd = T4_MAX_NUM_PD; props->local_ca_ack_delay = 0; - props->max_fast_reg_page_list_len = t4_max_fr_depth(use_dsgl); + props->max_fast_reg_page_list_len = + t4_max_fr_depth(dev->rdev.lldi.ulptx_memwrite_dsgl && use_dsgl); return 0; } @@ -564,6 +565,8 @@ int c4iw_register_device(struct c4iw_dev *dev) dev->ibdev.get_protocol_stats = c4iw_get_mib; dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; dev->ibdev.get_port_immutable = c4iw_port_immutable; + dev->ibdev.drain_sq = c4iw_drain_sq; + dev->ibdev.drain_rq = c4iw_drain_rq; dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); if (!dev->ibdev.iwcm) @@ -577,6 +580,8 @@ int c4iw_register_device(struct c4iw_dev *dev) dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref; dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref; dev->ibdev.iwcm->get_qp = c4iw_get_qp; + memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name, + sizeof(dev->ibdev.iwcm->ifname)); ret = ib_register_device(&dev->ibdev, NULL); if (ret) diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 8ff690bf0..e8993e49b 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -610,7 +610,7 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, } static int build_memreg(struct t4_sq *sq, union t4_wr *wqe, - struct ib_reg_wr *wr, u8 *len16, u8 t5dev) + struct ib_reg_wr *wr, u8 *len16, bool dsgl_supported) { struct c4iw_mr *mhp = to_c4iw_mr(wr->mr); struct fw_ri_immd *imdp; @@ -619,7 +619,7 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe, int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32); int rem; - if (mhp->mpl_len > t4_max_fr_depth(use_dsgl)) + if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl)) return -EINVAL; wqe->fr.qpbinde_to_dcacpu = 0; @@ -633,7 +633,7 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe, wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff); - if (t5dev && use_dsgl && (pbllen > max_fr_immd)) { + if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) { struct fw_ri_dsgl *sglp; for (i = 0; i < mhp->mpl_len; i++) @@ -812,9 +812,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, fw_opcode = FW_RI_FR_NSMR_WR; swsqe->opcode = FW_RI_FAST_REGISTER; err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), &len16, - is_t5( - qhp->rhp->rdev.lldi.adapter_type) ? 
- 1 : 0); + qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl); break; case IB_WR_LOCAL_INV: if (wr->send_flags & IB_SEND_FENCE) @@ -1625,7 +1623,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, unsigned int sqsize, rqsize; struct c4iw_ucontext *ucontext; int ret; - struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL; + struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm; + struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL; PDBG("%s ib_pd %p\n", __func__, pd); @@ -1701,6 +1700,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, qhp->attr.max_ird = 0; qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; spin_lock_init(&qhp->lock); + init_completion(&qhp->sq_drained); + init_completion(&qhp->rq_drained); mutex_init(&qhp->mutex); init_waitqueue_head(&qhp->wait); atomic_set(&qhp->refcnt, 1); @@ -1710,29 +1711,30 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, goto err2; if (udata) { - mm1 = kmalloc(sizeof *mm1, GFP_KERNEL); - if (!mm1) { + sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL); + if (!sq_key_mm) { ret = -ENOMEM; goto err3; } - mm2 = kmalloc(sizeof *mm2, GFP_KERNEL); - if (!mm2) { + rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL); + if (!rq_key_mm) { ret = -ENOMEM; goto err4; } - mm3 = kmalloc(sizeof *mm3, GFP_KERNEL); - if (!mm3) { + sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL); + if (!sq_db_key_mm) { ret = -ENOMEM; goto err5; } - mm4 = kmalloc(sizeof *mm4, GFP_KERNEL); - if (!mm4) { + rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL); + if (!rq_db_key_mm) { ret = -ENOMEM; goto err6; } if (t4_sq_onchip(&qhp->wq.sq)) { - mm5 = kmalloc(sizeof *mm5, GFP_KERNEL); - if (!mm5) { + ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm), + GFP_KERNEL); + if (!ma_sync_key_mm) { ret = -ENOMEM; goto err7; } @@ -1747,7 +1749,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, uresp.rq_size = qhp->wq.rq.size; uresp.rq_memsize = qhp->wq.rq.memsize; spin_lock(&ucontext->mmap_lock); - if (mm5) { + if (ma_sync_key_mm) { uresp.ma_sync_key = ucontext->key; ucontext->key += PAGE_SIZE; } else { @@ -1765,28 +1767,29 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, ret = ib_copy_to_udata(udata, &uresp, sizeof uresp); if (ret) goto err8; - mm1->key = uresp.sq_key; - mm1->addr = qhp->wq.sq.phys_addr; - mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize); - insert_mmap(ucontext, mm1); - mm2->key = uresp.rq_key; - mm2->addr = virt_to_phys(qhp->wq.rq.queue); - mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); - insert_mmap(ucontext, mm2); - mm3->key = uresp.sq_db_gts_key; - mm3->addr = (__force unsigned long)qhp->wq.sq.bar2_pa; - mm3->len = PAGE_SIZE; - insert_mmap(ucontext, mm3); - mm4->key = uresp.rq_db_gts_key; - mm4->addr = (__force unsigned long)qhp->wq.rq.bar2_pa; - mm4->len = PAGE_SIZE; - insert_mmap(ucontext, mm4); - if (mm5) { - mm5->key = uresp.ma_sync_key; - mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0) - + PCIE_MA_SYNC_A) & PAGE_MASK; - mm5->len = PAGE_SIZE; - insert_mmap(ucontext, mm5); + sq_key_mm->key = uresp.sq_key; + sq_key_mm->addr = qhp->wq.sq.phys_addr; + sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize); + insert_mmap(ucontext, sq_key_mm); + rq_key_mm->key = uresp.rq_key; + rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue); + rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize); + insert_mmap(ucontext, rq_key_mm); + sq_db_key_mm->key = uresp.sq_db_gts_key; + sq_db_key_mm->addr = (u64)(unsigned 
long)qhp->wq.sq.bar2_pa; + sq_db_key_mm->len = PAGE_SIZE; + insert_mmap(ucontext, sq_db_key_mm); + rq_db_key_mm->key = uresp.rq_db_gts_key; + rq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.rq.bar2_pa; + rq_db_key_mm->len = PAGE_SIZE; + insert_mmap(ucontext, rq_db_key_mm); + if (ma_sync_key_mm) { + ma_sync_key_mm->key = uresp.ma_sync_key; + ma_sync_key_mm->addr = + (pci_resource_start(rhp->rdev.lldi.pdev, 0) + + PCIE_MA_SYNC_A) & PAGE_MASK; + ma_sync_key_mm->len = PAGE_SIZE; + insert_mmap(ucontext, ma_sync_key_mm); } } qhp->ibqp.qp_num = qhp->wq.sq.qid; @@ -1799,15 +1802,15 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, qhp->wq.rq.memsize, attrs->cap.max_recv_wr); return &qhp->ibqp; err8: - kfree(mm5); + kfree(ma_sync_key_mm); err7: - kfree(mm4); + kfree(rq_db_key_mm); err6: - kfree(mm3); + kfree(sq_db_key_mm); err5: - kfree(mm2); + kfree(rq_key_mm); err4: - kfree(mm1); + kfree(sq_key_mm); err3: remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); err2: @@ -1892,3 +1895,31 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; return 0; } + +void c4iw_drain_sq(struct ib_qp *ibqp) +{ + struct c4iw_qp *qp = to_c4iw_qp(ibqp); + unsigned long flag; + bool need_to_wait; + + spin_lock_irqsave(&qp->lock, flag); + need_to_wait = !t4_sq_empty(&qp->wq); + spin_unlock_irqrestore(&qp->lock, flag); + + if (need_to_wait) + wait_for_completion(&qp->sq_drained); +} + +void c4iw_drain_rq(struct ib_qp *ibqp) +{ + struct c4iw_qp *qp = to_c4iw_qp(ibqp); + unsigned long flag; + bool need_to_wait; + + spin_lock_irqsave(&qp->lock, flag); + need_to_wait = !t4_rq_empty(&qp->wq); + spin_unlock_irqrestore(&qp->lock, flag); + + if (need_to_wait) + wait_for_completion(&qp->rq_drained); +} diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h index 343e8daf2..1e2666979 100644 --- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h +++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h @@ -753,103 +753,4 @@ struct fw_ri_wr { #define FW_RI_WR_P2PTYPE_G(x) \ (((x) >> FW_RI_WR_P2PTYPE_S) & FW_RI_WR_P2PTYPE_M) -struct tcp_options { - __be16 mss; - __u8 wsf; -#if defined(__LITTLE_ENDIAN_BITFIELD) - __u8:4; - __u8 unknown:1; - __u8:1; - __u8 sack:1; - __u8 tstamp:1; -#else - __u8 tstamp:1; - __u8 sack:1; - __u8:1; - __u8 unknown:1; - __u8:4; -#endif -}; - -struct cpl_pass_accept_req { - union opcode_tid ot; - __be16 rsvd; - __be16 len; - __be32 hdr_len; - __be16 vlan; - __be16 l2info; - __be32 tos_stid; - struct tcp_options tcpopt; -}; - -/* cpl_pass_accept_req.hdr_len fields */ -#define SYN_RX_CHAN_S 0 -#define SYN_RX_CHAN_M 0xF -#define SYN_RX_CHAN_V(x) ((x) << SYN_RX_CHAN_S) -#define SYN_RX_CHAN_G(x) (((x) >> SYN_RX_CHAN_S) & SYN_RX_CHAN_M) - -#define TCP_HDR_LEN_S 10 -#define TCP_HDR_LEN_M 0x3F -#define TCP_HDR_LEN_V(x) ((x) << TCP_HDR_LEN_S) -#define TCP_HDR_LEN_G(x) (((x) >> TCP_HDR_LEN_S) & TCP_HDR_LEN_M) - -#define IP_HDR_LEN_S 16 -#define IP_HDR_LEN_M 0x3FF -#define IP_HDR_LEN_V(x) ((x) << IP_HDR_LEN_S) -#define IP_HDR_LEN_G(x) (((x) >> IP_HDR_LEN_S) & IP_HDR_LEN_M) - -#define ETH_HDR_LEN_S 26 -#define ETH_HDR_LEN_M 0x1F -#define ETH_HDR_LEN_V(x) ((x) << ETH_HDR_LEN_S) -#define ETH_HDR_LEN_G(x) (((x) >> ETH_HDR_LEN_S) & ETH_HDR_LEN_M) - -/* cpl_pass_accept_req.l2info fields */ -#define SYN_MAC_IDX_S 0 -#define SYN_MAC_IDX_M 0x1FF -#define SYN_MAC_IDX_V(x) ((x) << SYN_MAC_IDX_S) -#define SYN_MAC_IDX_G(x) (((x) >> SYN_MAC_IDX_S) & SYN_MAC_IDX_M) - -#define SYN_XACT_MATCH_S 9 
-#define SYN_XACT_MATCH_V(x) ((x) << SYN_XACT_MATCH_S) -#define SYN_XACT_MATCH_F SYN_XACT_MATCH_V(1U) - -#define SYN_INTF_S 12 -#define SYN_INTF_M 0xF -#define SYN_INTF_V(x) ((x) << SYN_INTF_S) -#define SYN_INTF_G(x) (((x) >> SYN_INTF_S) & SYN_INTF_M) - -struct ulptx_idata { - __be32 cmd_more; - __be32 len; -}; - -#define ULPTX_NSGE_S 0 -#define ULPTX_NSGE_M 0xFFFF -#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S) - -#define RX_DACK_MODE_S 29 -#define RX_DACK_MODE_M 0x3 -#define RX_DACK_MODE_V(x) ((x) << RX_DACK_MODE_S) -#define RX_DACK_MODE_G(x) (((x) >> RX_DACK_MODE_S) & RX_DACK_MODE_M) - -#define RX_DACK_CHANGE_S 31 -#define RX_DACK_CHANGE_V(x) ((x) << RX_DACK_CHANGE_S) -#define RX_DACK_CHANGE_F RX_DACK_CHANGE_V(1U) - -enum { /* TCP congestion control algorithms */ - CONG_ALG_RENO, - CONG_ALG_TAHOE, - CONG_ALG_NEWRENO, - CONG_ALG_HIGHSPEED -}; - -#define CONG_CNTRL_S 14 -#define CONG_CNTRL_M 0x3 -#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S) -#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M) - -#define T5_ISS_S 18 -#define T5_ISS_V(x) ((x) << T5_ISS_S) -#define T5_ISS_F T5_ISS_V(1U) - #endif /* _T4FW_RI_API_H_ */ diff --git a/drivers/infiniband/hw/i40iw/Kconfig b/drivers/infiniband/hw/i40iw/Kconfig new file mode 100644 index 000000000..6e7d27a14 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/Kconfig @@ -0,0 +1,7 @@ +config INFINIBAND_I40IW + tristate "Intel(R) Ethernet X722 iWARP Driver" + depends on INET && I40E + select GENERIC_ALLOCATOR + ---help--- + Intel(R) Ethernet X722 iWARP Driver + INET && I40IW && INFINIBAND && I40E diff --git a/drivers/infiniband/hw/i40iw/Makefile b/drivers/infiniband/hw/i40iw/Makefile new file mode 100644 index 000000000..90068c03d --- /dev/null +++ b/drivers/infiniband/hw/i40iw/Makefile @@ -0,0 +1,9 @@ +ccflags-y := -Idrivers/net/ethernet/intel/i40e + +obj-$(CONFIG_INFINIBAND_I40IW) += i40iw.o + +i40iw-objs :=\ + i40iw_cm.o i40iw_ctrl.o \ + i40iw_hmc.o i40iw_hw.o i40iw_main.o \ + i40iw_pble.o i40iw_puda.o i40iw_uk.o i40iw_utils.o \ + i40iw_verbs.o i40iw_virtchnl.o i40iw_vf.o diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h new file mode 100644 index 000000000..819767681 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw.h @@ -0,0 +1,570 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#ifndef I40IW_IW_H +#define I40IW_IW_H +#include <linux/netdevice.h> +#include <linux/inetdevice.h> +#include <linux/spinlock.h> +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/dma-mapping.h> +#include <linux/workqueue.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/crc32c.h> +#include <rdma/ib_smi.h> +#include <rdma/ib_verbs.h> +#include <rdma/ib_pack.h> +#include <rdma/rdma_cm.h> +#include <rdma/iw_cm.h> +#include <rdma/iw_portmap.h> +#include <rdma/rdma_netlink.h> +#include <crypto/hash.h> + +#include "i40iw_status.h" +#include "i40iw_osdep.h" +#include "i40iw_d.h" +#include "i40iw_hmc.h" + +#include <i40e_client.h> +#include "i40iw_type.h" +#include "i40iw_p.h" +#include "i40iw_ucontext.h" +#include "i40iw_pble.h" +#include "i40iw_verbs.h" +#include "i40iw_cm.h" +#include "i40iw_user.h" +#include "i40iw_puda.h" + +#define I40IW_FW_VERSION 2 +#define I40IW_HW_VERSION 2 + +#define I40IW_ARP_ADD 1 +#define I40IW_ARP_DELETE 2 +#define I40IW_ARP_RESOLVE 3 + +#define I40IW_MACIP_ADD 1 +#define I40IW_MACIP_DELETE 2 + +#define IW_CCQ_SIZE (I40IW_CQP_SW_SQSIZE_2048 + 1) +#define IW_CEQ_SIZE 2048 +#define IW_AEQ_SIZE 2048 + +#define RX_BUF_SIZE (1536 + 8) +#define IW_REG0_SIZE (4 * 1024) +#define IW_TX_TIMEOUT (6 * HZ) +#define IW_FIRST_QPN 1 +#define IW_SW_CONTEXT_ALIGN 1024 + +#define MAX_DPC_ITERATIONS 128 + +#define I40IW_EVENT_TIMEOUT 100000 +#define I40IW_VCHNL_EVENT_TIMEOUT 100000 + +#define I40IW_NO_VLAN 0xffff +#define I40IW_NO_QSET 0xffff + +/* access to mcast filter list */ +#define IW_ADD_MCAST false +#define IW_DEL_MCAST true + +#define I40IW_DRV_OPT_ENABLE_MPA_VER_0 0x00000001 +#define I40IW_DRV_OPT_DISABLE_MPA_CRC 0x00000002 +#define I40IW_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004 +#define I40IW_DRV_OPT_DISABLE_INTF 0x00000008 +#define I40IW_DRV_OPT_ENABLE_MSI 0x00000010 +#define I40IW_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020 +#define I40IW_DRV_OPT_NO_INLINE_DATA 0x00000080 +#define I40IW_DRV_OPT_DISABLE_INT_MOD 0x00000100 +#define I40IW_DRV_OPT_DISABLE_VIRT_WQ 0x00000200 +#define I40IW_DRV_OPT_ENABLE_PAU 0x00000400 +#define I40IW_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800 + +#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types) +#define IW_CFG_FPM_QP_COUNT 32768 + +#define I40IW_MTU_TO_MSS 40 +#define I40IW_DEFAULT_MSS 1460 + +struct i40iw_cqp_compl_info { + u32 op_ret_val; + u16 maj_err_code; + u16 min_err_code; + bool error; + u8 op_code; +}; + +#define i40iw_pr_err(fmt, args ...) pr_err("%s: "fmt, __func__, ## args) + +#define i40iw_pr_info(fmt, args ...) pr_info("%s: " fmt, __func__, ## args) + +#define i40iw_pr_warn(fmt, args ...) 
pr_warn("%s: " fmt, __func__, ## args) + +struct i40iw_cqp_request { + struct cqp_commands_info info; + wait_queue_head_t waitq; + struct list_head list; + atomic_t refcount; + void (*callback_fcn)(struct i40iw_cqp_request*, u32); + void *param; + struct i40iw_cqp_compl_info compl_info; + bool waiting; + bool request_done; + bool dynamic; +}; + +struct i40iw_cqp { + struct i40iw_sc_cqp sc_cqp; + spinlock_t req_lock; /*cqp request list */ + wait_queue_head_t waitq; + struct i40iw_dma_mem sq; + struct i40iw_dma_mem host_ctx; + u64 *scratch_array; + struct i40iw_cqp_request *cqp_requests; + struct list_head cqp_avail_reqs; + struct list_head cqp_pending_reqs; +}; + +struct i40iw_device; + +struct i40iw_ccq { + struct i40iw_sc_cq sc_cq; + spinlock_t lock; /* ccq control */ + wait_queue_head_t waitq; + struct i40iw_dma_mem mem_cq; + struct i40iw_dma_mem shadow_area; +}; + +struct i40iw_ceq { + struct i40iw_sc_ceq sc_ceq; + struct i40iw_dma_mem mem; + u32 irq; + u32 msix_idx; + struct i40iw_device *iwdev; + struct tasklet_struct dpc_tasklet; +}; + +struct i40iw_aeq { + struct i40iw_sc_aeq sc_aeq; + struct i40iw_dma_mem mem; +}; + +struct i40iw_arp_entry { + u32 ip_addr[4]; + u8 mac_addr[ETH_ALEN]; +}; + +enum init_completion_state { + INVALID_STATE = 0, + INITIAL_STATE, + CQP_CREATED, + HMC_OBJS_CREATED, + PBLE_CHUNK_MEM, + CCQ_CREATED, + AEQ_CREATED, + CEQ_CREATED, + ILQ_CREATED, + IEQ_CREATED, + INET_NOTIFIER, + IP_ADDR_REGISTERED, + RDMA_DEV_REGISTERED +}; + +struct i40iw_msix_vector { + u32 idx; + u32 irq; + u32 cpu_affinity; + u32 ceq_id; +}; + +#define I40IW_MSIX_TABLE_SIZE 65 + +struct virtchnl_work { + struct work_struct work; + union { + struct i40iw_cqp_request *cqp_request; + struct i40iw_virtchnl_work_info work_info; + }; +}; + +struct i40e_qvlist_info; + +struct i40iw_device { + struct i40iw_ib_device *iwibdev; + struct net_device *netdev; + wait_queue_head_t vchnl_waitq; + struct i40iw_sc_dev sc_dev; + struct i40iw_handler *hdl; + struct i40e_info *ldev; + struct i40e_client *client; + struct i40iw_hw hw; + struct i40iw_cm_core cm_core; + unsigned long *mem_resources; + unsigned long *allocated_qps; + unsigned long *allocated_cqs; + unsigned long *allocated_mrs; + unsigned long *allocated_pds; + unsigned long *allocated_arps; + struct i40iw_qp **qp_table; + bool msix_shared; + u32 msix_count; + struct i40iw_msix_vector *iw_msixtbl; + struct i40e_qvlist_info *iw_qvlist; + + struct i40iw_hmc_pble_rsrc *pble_rsrc; + struct i40iw_arp_entry *arp_table; + struct i40iw_cqp cqp; + struct i40iw_ccq ccq; + u32 ceqs_count; + struct i40iw_ceq *ceqlist; + struct i40iw_aeq aeq; + u32 arp_table_size; + u32 next_arp_index; + spinlock_t resource_lock; /* hw resource access */ + u32 vendor_id; + u32 vendor_part_id; + u32 of_device_registered; + + u32 device_cap_flags; + unsigned long db_start; + u8 resource_profile; + u8 max_rdma_vfs; + u8 max_enabled_vfs; + u8 max_sge; + u8 iw_status; + u8 send_term_ok; + bool push_mode; /* Initialized from parameter passed to driver */ + + /* x710 specific */ + struct mutex pbl_mutex; + struct tasklet_struct dpc_tasklet; + struct workqueue_struct *virtchnl_wq; + struct virtchnl_work virtchnl_w[I40IW_MAX_PE_ENABLED_VF_COUNT]; + struct i40iw_dma_mem obj_mem; + struct i40iw_dma_mem obj_next; + u8 *hmc_info_mem; + u32 sd_type; + struct workqueue_struct *param_wq; + atomic_t params_busy; + u32 mss; + enum init_completion_state init_state; + u16 mac_ip_table_idx; + atomic_t vchnl_msgs; + u32 max_mr; + u32 max_qp; + u32 max_cq; + u32 max_pd; + u32 next_qp; + u32 next_cq; 
+ u32 next_pd; + u32 max_mr_size; + u32 max_qp_wr; + u32 max_cqe; + u32 mr_stagmask; + u32 mpa_version; + bool dcb; +}; + +struct i40iw_ib_device { + struct ib_device ibdev; + struct i40iw_device *iwdev; +}; + +struct i40iw_handler { + struct list_head list; + struct i40e_client *client; + struct i40iw_device device; + struct i40e_info ldev; +}; + +/** + * to_iwdev - get device + * @ibdev: ib device + **/ +static inline struct i40iw_device *to_iwdev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct i40iw_ib_device, ibdev)->iwdev; +} + +/** + * to_ucontext - get user context + * @ibucontext: ib user context + **/ +static inline struct i40iw_ucontext *to_ucontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct i40iw_ucontext, ibucontext); +} + +/** + * to_iwpd - get protection domain + * @ibpd: ib pd + **/ +static inline struct i40iw_pd *to_iwpd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct i40iw_pd, ibpd); +} + +/** + * to_iwmr - get device memory region + * @ibdev: ib memory region + **/ +static inline struct i40iw_mr *to_iwmr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct i40iw_mr, ibmr); +} + +/** + * to_iwmr_from_ibfmr - get device memory region + * @ibfmr: ib fmr + **/ +static inline struct i40iw_mr *to_iwmr_from_ibfmr(struct ib_fmr *ibfmr) +{ + return container_of(ibfmr, struct i40iw_mr, ibfmr); +} + +/** + * to_iwmw - get device memory window + * @ibmw: ib memory window + **/ +static inline struct i40iw_mr *to_iwmw(struct ib_mw *ibmw) +{ + return container_of(ibmw, struct i40iw_mr, ibmw); +} + +/** + * to_iwcq - get completion queue + * @ibcq: ib cqdevice + **/ +static inline struct i40iw_cq *to_iwcq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct i40iw_cq, ibcq); +} + +/** + * to_iwqp - get device qp + * @ibqp: ib qp + **/ +static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct i40iw_qp, ibqp); +} + +/* i40iw.c */ +void i40iw_add_ref(struct ib_qp *); +void i40iw_rem_ref(struct ib_qp *); +struct ib_qp *i40iw_get_qp(struct ib_device *, int); + +void i40iw_flush_wqes(struct i40iw_device *iwdev, + struct i40iw_qp *qp); + +void i40iw_manage_arp_cache(struct i40iw_device *iwdev, + unsigned char *mac_addr, + __be32 *ip_addr, + bool ipv4, + u32 action); + +int i40iw_manage_apbvt(struct i40iw_device *iwdev, + u16 accel_local_port, + bool add_port); + +struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait); +void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request); +void i40iw_put_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request); + +/** + * i40iw_alloc_resource - allocate a resource + * @iwdev: device pointer + * @resource_array: resource bit array: + * @max_resources: maximum resource number + * @req_resources_num: Allocated resource number + * @next: next free id + **/ +static inline int i40iw_alloc_resource(struct i40iw_device *iwdev, + unsigned long *resource_array, + u32 max_resources, + u32 *req_resource_num, + u32 *next) +{ + u32 resource_num; + unsigned long flags; + + spin_lock_irqsave(&iwdev->resource_lock, flags); + resource_num = find_next_zero_bit(resource_array, max_resources, *next); + if (resource_num >= max_resources) { + resource_num = find_first_zero_bit(resource_array, max_resources); + if (resource_num >= max_resources) { + spin_unlock_irqrestore(&iwdev->resource_lock, flags); + return -EOVERFLOW; + } + } + set_bit(resource_num, resource_array); + *next = resource_num + 1; 
+ if (*next == max_resources) + *next = 0; + spin_unlock_irqrestore(&iwdev->resource_lock, flags); + *req_resource_num = resource_num; + + return 0; +} + +/** + * i40iw_is_resource_allocated - detrmine if resource is + * allocated + * @iwdev: device pointer + * @resource_array: resource array for the resource_num + * @resource_num: resource number to check + **/ +static inline bool i40iw_is_resource_allocated(struct i40iw_device *iwdev, + unsigned long *resource_array, + u32 resource_num) +{ + bool bit_is_set; + unsigned long flags; + + spin_lock_irqsave(&iwdev->resource_lock, flags); + + bit_is_set = test_bit(resource_num, resource_array); + spin_unlock_irqrestore(&iwdev->resource_lock, flags); + + return bit_is_set; +} + +/** + * i40iw_free_resource - free a resource + * @iwdev: device pointer + * @resource_array: resource array for the resource_num + * @resource_num: resource number to free + **/ +static inline void i40iw_free_resource(struct i40iw_device *iwdev, + unsigned long *resource_array, + u32 resource_num) +{ + unsigned long flags; + + spin_lock_irqsave(&iwdev->resource_lock, flags); + clear_bit(resource_num, resource_array); + spin_unlock_irqrestore(&iwdev->resource_lock, flags); +} + +/** + * to_iwhdl - Get the handler from the device pointer + * @iwdev: device pointer + **/ +static inline struct i40iw_handler *to_iwhdl(struct i40iw_device *iw_dev) +{ + return container_of(iw_dev, struct i40iw_handler, device); +} + +struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev); + +/** + * iw_init_resources - + */ +u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev); + +int i40iw_register_rdma_device(struct i40iw_device *iwdev); +void i40iw_port_ibevent(struct i40iw_device *iwdev); +int i40iw_cm_disconn(struct i40iw_qp *); +void i40iw_cm_disconn_worker(void *); +int mini_cm_recv_pkt(struct i40iw_cm_core *, struct i40iw_device *, + struct sk_buff *); + +enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev, + struct i40iw_cqp_request *cqp_request); +enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev, + u8 *mac_addr, u8 *mac_index); +int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); + +void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev); +void i40iw_add_pdusecount(struct i40iw_pd *iwpd); +void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp, + struct i40iw_modify_qp_info *info, bool wait); + +enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev, + struct i40iw_cm_info *cminfo, + enum i40iw_quad_entry_type etype, + enum i40iw_quad_hash_manage_type mtype, + void *cmnode, + bool wait); +void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf); +void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp); +void i40iw_free_qp_resources(struct i40iw_device *iwdev, + struct i40iw_qp *iwqp, + u32 qp_num); +enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev, + struct i40iw_dma_mem *memptr, + u32 size, u32 mask); + +void i40iw_request_reset(struct i40iw_device *iwdev); +void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev); +void i40iw_setup_cm_core(struct i40iw_device *iwdev); +void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core); +void i40iw_process_ceq(struct i40iw_device *, struct i40iw_ceq *iwceq); +void i40iw_process_aeq(struct i40iw_device *); +void i40iw_next_iw_state(struct i40iw_qp *iwqp, + u8 state, u8 del_hash, + u8 term, u8 term_len); +int i40iw_send_syn(struct i40iw_cm_node 
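As a point of reference, the round-robin id allocation that i40iw_alloc_resource() implements with find_next_zero_bit() (scan from the saved hint, wrap once, remember the next hint) can be mirrored in a small standalone userspace sketch; the plain bool array below stands in for the kernel bitmap and spinlock, and all names are illustrative only, not part of the patch.

#include <stdio.h>
#include <stdbool.h>

#define MAX_RES 8

/* Illustrative stand-in for the driver's resource bitmap: one flag per id. */
static bool used[MAX_RES];
static unsigned int next_hint;	/* mirrors the *next cursor in the driver */

/* Allocate the first free id at or after the hint, wrapping once, the way
 * i40iw_alloc_resource() does; returns -1 when the table is exhausted
 * (the driver returns -EOVERFLOW). */
static int alloc_id(void)
{
	unsigned int i, idx;

	for (i = 0; i < MAX_RES; i++) {
		idx = (next_hint + i) % MAX_RES;
		if (!used[idx]) {
			used[idx] = true;
			next_hint = (idx + 1) % MAX_RES;
			return (int)idx;
		}
	}
	return -1;
}

static void free_id(int idx)
{
	used[idx] = false;	/* clear_bit() equivalent */
}

int main(void)
{
	int a = alloc_id(), b = alloc_id();

	free_id(a);
	printf("a=%d b=%d next=%d\n", a, b, alloc_id());
	return 0;
}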
*cm_node, u32 sendack); +struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core, + u16 rem_port, + u32 *rem_addr, + u16 loc_port, + u32 *loc_addr, + bool add_refcnt); + +enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev, + struct i40iw_sc_qp *qp, + struct i40iw_qp_flush_info *info, + bool wait); + +void i40iw_copy_ip_ntohl(u32 *dst, u32 *src); +struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *ib_pd, + u64 addr, + u64 size, + int acc, + u64 *iova_start); + +int i40iw_inetaddr_event(struct notifier_block *notifier, + unsigned long event, + void *ptr); +int i40iw_inet6addr_event(struct notifier_block *notifier, + unsigned long event, + void *ptr); +int i40iw_net_event(struct notifier_block *notifier, + unsigned long event, + void *ptr); + +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c new file mode 100644 index 000000000..38f917a6c --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -0,0 +1,4137 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#include <linux/atomic.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/init.h> +#include <linux/if_arp.h> +#include <linux/if_vlan.h> +#include <linux/notifier.h> +#include <linux/net.h> +#include <linux/types.h> +#include <linux/timer.h> +#include <linux/time.h> +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/random.h> +#include <linux/list.h> +#include <linux/threads.h> +#include <linux/highmem.h> +#include <net/arp.h> +#include <net/ndisc.h> +#include <net/neighbour.h> +#include <net/route.h> +#include <net/addrconf.h> +#include <net/ip6_route.h> +#include <net/ip_fib.h> +#include <net/tcp.h> +#include <asm/checksum.h> + +#include "i40iw.h" + +static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *); +static void i40iw_cm_post_event(struct i40iw_cm_event *event); +static void i40iw_disconnect_worker(struct work_struct *work); + +/** + * i40iw_free_sqbuf - put back puda buffer if refcount = 0 + * @dev: FPK device + * @buf: puda buffer to free + */ +void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp) +{ + struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp; + struct i40iw_puda_rsrc *ilq = dev->ilq; + + if (!atomic_dec_return(&buf->refcount)) + i40iw_puda_ret_bufpool(ilq, buf); +} + +/** + * i40iw_derive_hw_ird_setting - Calculate IRD + * + * @cm_ird: IRD of connection's node + * + * The ird from the connection is rounded to a supported HW + * setting (2,8,32,64) and then encoded for ird_size field of + * qp_ctx + */ +static u8 i40iw_derive_hw_ird_setting(u16 cm_ird) +{ + u8 encoded_ird_size; + u8 pof2_cm_ird = 1; + + /* round-off to next powerof2 */ + while (pof2_cm_ird < cm_ird) + pof2_cm_ird *= 2; + + /* ird_size field is encoded in qp_ctx */ + switch (pof2_cm_ird) { + case I40IW_HW_IRD_SETTING_64: + encoded_ird_size = 3; + break; + case I40IW_HW_IRD_SETTING_32: + case I40IW_HW_IRD_SETTING_16: + encoded_ird_size = 2; + break; + case I40IW_HW_IRD_SETTING_8: + case I40IW_HW_IRD_SETTING_4: + encoded_ird_size = 1; + break; + case I40IW_HW_IRD_SETTING_2: + default: + encoded_ird_size = 0; + break; + } + return encoded_ird_size; +} + +/** + * i40iw_record_ird_ord - Record IRD/ORD passed in + * @cm_node: connection's node + * @conn_ird: connection IRD + * @conn_ord: connection ORD + */ +static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u16 conn_ird, u16 conn_ord) +{ + if (conn_ird > I40IW_MAX_IRD_SIZE) + conn_ird = I40IW_MAX_IRD_SIZE; + + if (conn_ord > I40IW_MAX_ORD_SIZE) + conn_ord = I40IW_MAX_ORD_SIZE; + + cm_node->ird_size = conn_ird; + cm_node->ord_size = conn_ord; +} + +/** + * i40iw_copy_ip_ntohl - change network to host ip + * @dst: host ip + * @src: big endian + */ +void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src) +{ + *dst++ = ntohl(*src++); + *dst++ = ntohl(*src++); + *dst++ = ntohl(*src++); + *dst = ntohl(*src); +} + +/** + * i40iw_copy_ip_htonl - change host addr to network ip + * @dst: host ip + * @src: little endian + */ +static inline void i40iw_copy_ip_htonl(__be32 *dst, u32 *src) +{ + *dst++ = htonl(*src++); + *dst++ = htonl(*src++); + *dst++ = htonl(*src++); + *dst = htonl(*src); +} + +/** + * i40iw_fill_sockaddr4 - get addr info for passive connection + * @cm_node: connection's node + * @event: upper layer's cm event + */ +static inline void i40iw_fill_sockaddr4(struct i40iw_cm_node *cm_node, + struct iw_cm_event *event) +{ + struct sockaddr_in *laddr = (struct sockaddr_in 
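The 2-bit ird_size encoding computed by i40iw_derive_hw_ird_setting() (round the requested IRD up to a power of two, then bucket the supported sizes 2/4/8/16/32/64) can be checked with a standalone userspace sketch; this mirrors the switch in the patch, but it is illustrative only and does not use the driver's I40IW_HW_IRD_SETTING_* names.

#include <stdio.h>

/* Round cm_ird up to a power of two, then map the supported buckets onto
 * the 2-bit hardware encoding, as the driver does. */
static unsigned char encode_ird(unsigned short cm_ird)
{
	unsigned int pof2 = 1;

	while (pof2 < cm_ird)
		pof2 *= 2;

	switch (pof2) {
	case 64:
		return 3;
	case 32:
	case 16:
		return 2;
	case 8:
	case 4:
		return 1;
	case 2:
	default:
		return 0;
	}
}

int main(void)
{
	unsigned short samples[] = { 1, 2, 3, 8, 9, 33, 64 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("ird %u -> encoding %u\n",
		       (unsigned int)samples[i],
		       (unsigned int)encode_ird(samples[i]));
	return 0;
}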
*)&event->local_addr; + struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr; + + laddr->sin_family = AF_INET; + raddr->sin_family = AF_INET; + + laddr->sin_port = htons(cm_node->loc_port); + raddr->sin_port = htons(cm_node->rem_port); + + laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]); + raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]); +} + +/** + * i40iw_fill_sockaddr6 - get ipv6 addr info for passive side + * @cm_node: connection's node + * @event: upper layer's cm event + */ +static inline void i40iw_fill_sockaddr6(struct i40iw_cm_node *cm_node, + struct iw_cm_event *event) +{ + struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr; + struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr; + + laddr6->sin6_family = AF_INET6; + raddr6->sin6_family = AF_INET6; + + laddr6->sin6_port = htons(cm_node->loc_port); + raddr6->sin6_port = htons(cm_node->rem_port); + + i40iw_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32, + cm_node->loc_addr); + i40iw_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32, + cm_node->rem_addr); +} + +/** + * i40iw_get_addr_info + * @cm_node: contains ip/tcp info + * @cm_info: to get a copy of the cm_node ip/tcp info +*/ +static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node, + struct i40iw_cm_info *cm_info) +{ + cm_info->ipv4 = cm_node->ipv4; + cm_info->vlan_id = cm_node->vlan_id; + memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr)); + memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr)); + cm_info->loc_port = cm_node->loc_port; + cm_info->rem_port = cm_node->rem_port; +} + +/** + * i40iw_get_cmevent_info - for cm event upcall + * @cm_node: connection's node + * @cm_id: upper layers cm struct for the event + * @event: upper layer's cm event + */ +static inline void i40iw_get_cmevent_info(struct i40iw_cm_node *cm_node, + struct iw_cm_id *cm_id, + struct iw_cm_event *event) +{ + memcpy(&event->local_addr, &cm_id->m_local_addr, + sizeof(event->local_addr)); + memcpy(&event->remote_addr, &cm_id->m_remote_addr, + sizeof(event->remote_addr)); + if (cm_node) { + event->private_data = (void *)cm_node->pdata_buf; + event->private_data_len = (u8)cm_node->pdata.size; + event->ird = cm_node->ird_size; + event->ord = cm_node->ord_size; + } +} + +/** + * i40iw_send_cm_event - upcall cm's event handler + * @cm_node: connection's node + * @cm_id: upper layer's cm info struct + * @type: Event type to indicate + * @status: status for the event type + */ +static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node, + struct iw_cm_id *cm_id, + enum iw_cm_event_type type, + int status) +{ + struct iw_cm_event event; + + memset(&event, 0, sizeof(event)); + event.event = type; + event.status = status; + switch (type) { + case IW_CM_EVENT_CONNECT_REQUEST: + if (cm_node->ipv4) + i40iw_fill_sockaddr4(cm_node, &event); + else + i40iw_fill_sockaddr6(cm_node, &event); + event.provider_data = (void *)cm_node; + event.private_data = (void *)cm_node->pdata_buf; + event.private_data_len = (u8)cm_node->pdata.size; + break; + case IW_CM_EVENT_CONNECT_REPLY: + i40iw_get_cmevent_info(cm_node, cm_id, &event); + break; + case IW_CM_EVENT_ESTABLISHED: + event.ird = cm_node->ird_size; + event.ord = cm_node->ord_size; + break; + case IW_CM_EVENT_DISCONNECT: + break; + case IW_CM_EVENT_CLOSE: + break; + default: + i40iw_pr_err("event type received type = %d\n", type); + return -1; + } + return cm_id->event_handler(cm_id, &event); +} + +/** + * i40iw_create_event - create cm event + * @cm_node: 
connection's node + * @type: Event type to generate + */ +static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node, + enum i40iw_cm_event_type type) +{ + struct i40iw_cm_event *event; + + if (!cm_node->cm_id) + return NULL; + + event = kzalloc(sizeof(*event), GFP_ATOMIC); + + if (!event) + return NULL; + + event->type = type; + event->cm_node = cm_node; + memcpy(event->cm_info.rem_addr, cm_node->rem_addr, sizeof(event->cm_info.rem_addr)); + memcpy(event->cm_info.loc_addr, cm_node->loc_addr, sizeof(event->cm_info.loc_addr)); + event->cm_info.rem_port = cm_node->rem_port; + event->cm_info.loc_port = cm_node->loc_port; + event->cm_info.cm_id = cm_node->cm_id; + + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "node=%p event=%p type=%u dst=%pI4 src=%pI4\n", + cm_node, + event, + type, + event->cm_info.loc_addr, + event->cm_info.rem_addr); + + i40iw_cm_post_event(event); + return event; +} + +/** + * i40iw_free_retrans_entry - free send entry + * @cm_node: connection's node + */ +static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node) +{ + struct i40iw_sc_dev *dev = cm_node->dev; + struct i40iw_timer_entry *send_entry; + + send_entry = cm_node->send_entry; + if (send_entry) { + cm_node->send_entry = NULL; + i40iw_free_sqbuf(dev, (void *)send_entry->sqbuf); + kfree(send_entry); + atomic_dec(&cm_node->ref_count); + } +} + +/** + * i40iw_cleanup_retrans_entry - free send entry with lock + * @cm_node: connection's node + */ +static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node) +{ + unsigned long flags; + + spin_lock_irqsave(&cm_node->retrans_list_lock, flags); + i40iw_free_retrans_entry(cm_node); + spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); +} + +static bool is_remote_ne020_or_chelsio(struct i40iw_cm_node *cm_node) +{ + if ((cm_node->rem_mac[0] == 0x0) && + (((cm_node->rem_mac[1] == 0x12) && (cm_node->rem_mac[2] == 0x55)) || + ((cm_node->rem_mac[1] == 0x07 && (cm_node->rem_mac[2] == 0x43))))) + return true; + return false; +} + +/** + * i40iw_form_cm_frame - get a free packet and build frame + * @cm_node: connection's node ionfo to use in frame + * @options: pointer to options info + * @hdr: pointer mpa header + * @pdata: pointer to private data + * @flags: indicates FIN or ACK + */ +static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node, + struct i40iw_kmem_info *options, + struct i40iw_kmem_info *hdr, + struct i40iw_kmem_info *pdata, + u8 flags) +{ + struct i40iw_puda_buf *sqbuf; + struct i40iw_sc_dev *dev = cm_node->dev; + u8 *buf; + + struct tcphdr *tcph; + struct iphdr *iph; + struct ipv6hdr *ip6h; + struct ethhdr *ethh; + u16 packetsize; + u16 eth_hlen = ETH_HLEN; + u32 opts_len = 0; + u32 pd_len = 0; + u32 hdr_len = 0; + + sqbuf = i40iw_puda_get_bufpool(dev->ilq); + if (!sqbuf) + return NULL; + buf = sqbuf->mem.va; + + if (options) + opts_len = (u32)options->size; + + if (hdr) + hdr_len = hdr->size; + + if (pdata) { + pd_len = pdata->size; + if (!is_remote_ne020_or_chelsio(cm_node)) + pd_len += MPA_ZERO_PAD_LEN; + } + + if (cm_node->vlan_id < VLAN_TAG_PRESENT) + eth_hlen += 4; + + if (cm_node->ipv4) + packetsize = sizeof(*iph) + sizeof(*tcph); + else + packetsize = sizeof(*ip6h) + sizeof(*tcph); + packetsize += opts_len + hdr_len + pd_len; + + memset(buf, 0x00, eth_hlen + packetsize); + + sqbuf->totallen = packetsize + eth_hlen; + sqbuf->maclen = eth_hlen; + sqbuf->tcphlen = sizeof(*tcph) + opts_len; + sqbuf->scratch = (void *)cm_node; + + ethh = (struct ethhdr *)buf; + buf += eth_hlen; + + if 
(cm_node->ipv4) { + sqbuf->ipv4 = true; + + iph = (struct iphdr *)buf; + buf += sizeof(*iph); + tcph = (struct tcphdr *)buf; + buf += sizeof(*tcph); + + ether_addr_copy(ethh->h_dest, cm_node->rem_mac); + ether_addr_copy(ethh->h_source, cm_node->loc_mac); + if (cm_node->vlan_id < VLAN_TAG_PRESENT) { + ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q); + ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id); + + ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP); + } else { + ethh->h_proto = htons(ETH_P_IP); + } + + iph->version = IPVERSION; + iph->ihl = 5; /* 5 * 4Byte words, IP headr len */ + iph->tos = 0; + iph->tot_len = htons(packetsize); + iph->id = htons(++cm_node->tcp_cntxt.loc_id); + + iph->frag_off = htons(0x4000); + iph->ttl = 0x40; + iph->protocol = IPPROTO_TCP; + iph->saddr = htonl(cm_node->loc_addr[0]); + iph->daddr = htonl(cm_node->rem_addr[0]); + } else { + sqbuf->ipv4 = false; + ip6h = (struct ipv6hdr *)buf; + buf += sizeof(*ip6h); + tcph = (struct tcphdr *)buf; + buf += sizeof(*tcph); + + ether_addr_copy(ethh->h_dest, cm_node->rem_mac); + ether_addr_copy(ethh->h_source, cm_node->loc_mac); + if (cm_node->vlan_id < VLAN_TAG_PRESENT) { + ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q); + ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id); + ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6); + } else { + ethh->h_proto = htons(ETH_P_IPV6); + } + ip6h->version = 6; + ip6h->flow_lbl[0] = 0; + ip6h->flow_lbl[1] = 0; + ip6h->flow_lbl[2] = 0; + ip6h->payload_len = htons(packetsize - sizeof(*ip6h)); + ip6h->nexthdr = 6; + ip6h->hop_limit = 128; + i40iw_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32, + cm_node->loc_addr); + i40iw_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32, + cm_node->rem_addr); + } + + tcph->source = htons(cm_node->loc_port); + tcph->dest = htons(cm_node->rem_port); + + tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num); + + if (flags & SET_ACK) { + cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt; + tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num); + tcph->ack = 1; + } else { + tcph->ack_seq = 0; + } + + if (flags & SET_SYN) { + cm_node->tcp_cntxt.loc_seq_num++; + tcph->syn = 1; + } else { + cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len; + } + + if (flags & SET_FIN) { + cm_node->tcp_cntxt.loc_seq_num++; + tcph->fin = 1; + } + + if (flags & SET_RST) + tcph->rst = 1; + + tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2); + sqbuf->tcphlen = tcph->doff << 2; + tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd); + tcph->urg_ptr = 0; + + if (opts_len) { + memcpy(buf, options->addr, opts_len); + buf += opts_len; + } + + if (hdr_len) { + memcpy(buf, hdr->addr, hdr_len); + buf += hdr_len; + } + + if (pd_len) + memcpy(buf, pdata->addr, pd_len); + + atomic_set(&sqbuf->refcount, 1); + + return sqbuf; +} + +/** + * i40iw_send_reset - Send RST packet + * @cm_node: connection's node + */ +static int i40iw_send_reset(struct i40iw_cm_node *cm_node) +{ + struct i40iw_puda_buf *sqbuf; + int flags = SET_RST | SET_ACK; + + sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, flags); + if (!sqbuf) { + i40iw_pr_err("no sqbuf\n"); + return -1; + } + + return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 0, 1); +} + +/** + * i40iw_active_open_err - send event for active side cm error + * @cm_node: connection's node + * @reset: Flag to send reset or not + */ +static void i40iw_active_open_err(struct i40iw_cm_node *cm_node, bool reset) +{ + 
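The TCP header length arithmetic in i40iw_form_cm_frame() is worth calling out: doff counts 32-bit words, so any option bytes are implicitly padded up to the next 4-byte boundary. A standalone sketch of the same computation (the 20-byte base is the on-wire struct tcphdr size; everything else is a hypothetical example, not driver code):

#include <stdio.h>

#define TCP_BASE_HDR 20	/* sizeof(struct tcphdr) on the wire */

/* Same arithmetic as the driver: doff = (base + opts + 3) >> 2, and the
 * resulting header length is doff << 2, i.e. options padded to 4 bytes. */
static unsigned int tcp_hdr_len(unsigned int opts_len)
{
	unsigned int doff = (TCP_BASE_HDR + opts_len + 3) >> 2;

	return doff << 2;
}

int main(void)
{
	/* e.g. MSS (4) + window scale (3) + END (1) = 8 bytes of options */
	printf("opts 0 -> hdr %u\n", tcp_hdr_len(0));	/* 20 */
	printf("opts 8 -> hdr %u\n", tcp_hdr_len(8));	/* 28 */
	printf("opts 7 -> hdr %u\n", tcp_hdr_len(7));	/* 28, padded */
	return 0;
}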
i40iw_cleanup_retrans_entry(cm_node); + cm_node->cm_core->stats_connect_errs++; + if (reset) { + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "%s cm_node=%p state=%d\n", + __func__, + cm_node, + cm_node->state); + atomic_inc(&cm_node->ref_count); + i40iw_send_reset(cm_node); + } + + cm_node->state = I40IW_CM_STATE_CLOSED; + i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED); +} + +/** + * i40iw_passive_open_err - handle passive side cm error + * @cm_node: connection's node + * @reset: send reset or just free cm_node + */ +static void i40iw_passive_open_err(struct i40iw_cm_node *cm_node, bool reset) +{ + i40iw_cleanup_retrans_entry(cm_node); + cm_node->cm_core->stats_passive_errs++; + cm_node->state = I40IW_CM_STATE_CLOSED; + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "%s cm_node=%p state =%d\n", + __func__, + cm_node, + cm_node->state); + if (reset) + i40iw_send_reset(cm_node); + else + i40iw_rem_ref_cm_node(cm_node); +} + +/** + * i40iw_event_connect_error - to create connect error event + * @event: cm information for connect event + */ +static void i40iw_event_connect_error(struct i40iw_cm_event *event) +{ + struct i40iw_qp *iwqp; + struct iw_cm_id *cm_id; + + cm_id = event->cm_node->cm_id; + if (!cm_id) + return; + + iwqp = cm_id->provider_data; + + if (!iwqp || !iwqp->iwdev) + return; + + iwqp->cm_id = NULL; + cm_id->provider_data = NULL; + i40iw_send_cm_event(event->cm_node, cm_id, + IW_CM_EVENT_CONNECT_REPLY, + -ECONNRESET); + cm_id->rem_ref(cm_id); + i40iw_rem_ref_cm_node(event->cm_node); +} + +/** + * i40iw_process_options + * @cm_node: connection's node + * @optionsloc: point to start of options + * @optionsize: size of all options + * @syn_packet: flag if syn packet + */ +static int i40iw_process_options(struct i40iw_cm_node *cm_node, + u8 *optionsloc, + u32 optionsize, + u32 syn_packet) +{ + u32 tmp; + u32 offset = 0; + union all_known_options *all_options; + char got_mss_option = 0; + + while (offset < optionsize) { + all_options = (union all_known_options *)(optionsloc + offset); + switch (all_options->as_base.optionnum) { + case OPTION_NUMBER_END: + offset = optionsize; + break; + case OPTION_NUMBER_NONE: + offset += 1; + continue; + case OPTION_NUMBER_MSS: + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "%s: MSS Length: %d Offset: %d Size: %d\n", + __func__, + all_options->as_mss.length, + offset, + optionsize); + got_mss_option = 1; + if (all_options->as_mss.length != 4) + return -1; + tmp = ntohs(all_options->as_mss.mss); + if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss) + cm_node->tcp_cntxt.mss = tmp; + break; + case OPTION_NUMBER_WINDOW_SCALE: + cm_node->tcp_cntxt.snd_wscale = + all_options->as_windowscale.shiftcount; + break; + default: + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "TCP Option not understood: %x\n", + all_options->as_base.optionnum); + break; + } + offset += all_options->as_base.length; + } + if (!got_mss_option && syn_packet) + cm_node->tcp_cntxt.mss = I40IW_CM_DEFAULT_MSS; + return 0; +} + +/** + * i40iw_handle_tcp_options - + * @cm_node: connection's node + * @tcph: pointer tcp header + * @optionsize: size of options rcvd + * @passive: active or passive flag + */ +static int i40iw_handle_tcp_options(struct i40iw_cm_node *cm_node, + struct tcphdr *tcph, + int optionsize, + int passive) +{ + u8 *optionsloc = (u8 *)&tcph[1]; + + if (optionsize) { + if (i40iw_process_options(cm_node, + optionsloc, + optionsize, + (u32)tcph->syn)) { + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "%s: Node %p, Sending RESET\n", + __func__, + cm_node); + if (passive) + 
i40iw_passive_open_err(cm_node, true); + else + i40iw_active_open_err(cm_node, true); + return -1; + } + } + + cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) << + cm_node->tcp_cntxt.snd_wscale; + + if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd) + cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd; + return 0; +} + +/** + * i40iw_build_mpa_v1 - build a MPA V1 frame + * @cm_node: connection's node + * @mpa_key: to do read0 or write0 + */ +static void i40iw_build_mpa_v1(struct i40iw_cm_node *cm_node, + void *start_addr, + u8 mpa_key) +{ + struct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr; + + switch (mpa_key) { + case MPA_KEY_REQUEST: + memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE); + break; + case MPA_KEY_REPLY: + memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE); + break; + default: + break; + } + mpa_frame->flags = IETF_MPA_FLAGS_CRC; + mpa_frame->rev = cm_node->mpa_frame_rev; + mpa_frame->priv_data_len = htons(cm_node->pdata.size); +} + +/** + * i40iw_build_mpa_v2 - build a MPA V2 frame + * @cm_node: connection's node + * @start_addr: buffer start address + * @mpa_key: to do read0 or write0 + */ +static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node, + void *start_addr, + u8 mpa_key) +{ + struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr; + struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg; + + /* initialize the upper 5 bytes of the frame */ + i40iw_build_mpa_v1(cm_node, start_addr, mpa_key); + mpa_frame->flags |= IETF_MPA_V2_FLAG; + mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE); + + /* initialize RTR msg */ + if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) { + rtr_msg->ctrl_ird = IETF_NO_IRD_ORD; + rtr_msg->ctrl_ord = IETF_NO_IRD_ORD; + } else { + rtr_msg->ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ? + IETF_NO_IRD_ORD : cm_node->ird_size; + rtr_msg->ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ? 
+ IETF_NO_IRD_ORD : cm_node->ord_size; + } + + rtr_msg->ctrl_ird |= IETF_PEER_TO_PEER; + rtr_msg->ctrl_ird |= IETF_FLPDU_ZERO_LEN; + + switch (mpa_key) { + case MPA_KEY_REQUEST: + rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE; + rtr_msg->ctrl_ord |= IETF_RDMA0_READ; + break; + case MPA_KEY_REPLY: + switch (cm_node->send_rdma0_op) { + case SEND_RDMA_WRITE_ZERO: + rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE; + break; + case SEND_RDMA_READ_ZERO: + rtr_msg->ctrl_ord |= IETF_RDMA0_READ; + break; + } + break; + default: + break; + } + rtr_msg->ctrl_ird = htons(rtr_msg->ctrl_ird); + rtr_msg->ctrl_ord = htons(rtr_msg->ctrl_ord); +} + +/** + * i40iw_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2 + * @cm_node: connection's node + * @mpa: mpa: data buffer + * @mpa_key: to do read0 or write0 + */ +static int i40iw_cm_build_mpa_frame(struct i40iw_cm_node *cm_node, + struct i40iw_kmem_info *mpa, + u8 mpa_key) +{ + int hdr_len = 0; + + switch (cm_node->mpa_frame_rev) { + case IETF_MPA_V1: + hdr_len = sizeof(struct ietf_mpa_v1); + i40iw_build_mpa_v1(cm_node, mpa->addr, mpa_key); + break; + case IETF_MPA_V2: + hdr_len = sizeof(struct ietf_mpa_v2); + i40iw_build_mpa_v2(cm_node, mpa->addr, mpa_key); + break; + default: + break; + } + + return hdr_len; +} + +/** + * i40iw_send_mpa_request - active node send mpa request to passive node + * @cm_node: connection's node + */ +static int i40iw_send_mpa_request(struct i40iw_cm_node *cm_node) +{ + struct i40iw_puda_buf *sqbuf; + + if (!cm_node) { + i40iw_pr_err("cm_node == NULL\n"); + return -1; + } + + cm_node->mpa_hdr.addr = &cm_node->mpa_frame; + cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node, + &cm_node->mpa_hdr, + MPA_KEY_REQUEST); + if (!cm_node->mpa_hdr.size) { + i40iw_pr_err("mpa size = %d\n", cm_node->mpa_hdr.size); + return -1; + } + + sqbuf = i40iw_form_cm_frame(cm_node, + NULL, + &cm_node->mpa_hdr, + &cm_node->pdata, + SET_ACK); + if (!sqbuf) { + i40iw_pr_err("sq_buf == NULL\n"); + return -1; + } + return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0); +} + +/** + * i40iw_send_mpa_reject - + * @cm_node: connection's node + * @pdata: reject data for connection + * @plen: length of reject data + */ +static int i40iw_send_mpa_reject(struct i40iw_cm_node *cm_node, + const void *pdata, + u8 plen) +{ + struct i40iw_puda_buf *sqbuf; + struct i40iw_kmem_info priv_info; + + cm_node->mpa_hdr.addr = &cm_node->mpa_frame; + cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node, + &cm_node->mpa_hdr, + MPA_KEY_REPLY); + + cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT; + priv_info.addr = (void *)pdata; + priv_info.size = plen; + + sqbuf = i40iw_form_cm_frame(cm_node, + NULL, + &cm_node->mpa_hdr, + &priv_info, + SET_ACK | SET_FIN); + if (!sqbuf) { + i40iw_pr_err("no sqbuf\n"); + return -ENOMEM; + } + cm_node->state = I40IW_CM_STATE_FIN_WAIT1; + return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0); +} + +/** + * recv_mpa - process an IETF MPA frame + * @cm_node: connection's node + * @buffer: Data pointer + * @type: to return accept or reject + * @len: Len of mpa buffer + */ +static int i40iw_parse_mpa(struct i40iw_cm_node *cm_node, u8 *buffer, u32 *type, u32 len) +{ + struct ietf_mpa_v1 *mpa_frame; + struct ietf_mpa_v2 *mpa_v2_frame; + struct ietf_rtr_msg *rtr_msg; + int mpa_hdr_len; + int priv_data_len; + + *type = I40IW_MPA_REQUEST_ACCEPT; + + if (len < sizeof(struct ietf_mpa_v1)) { + i40iw_pr_err("ietf buffer small (%x)\n", len); + return -1; + } + + mpa_frame = (struct ietf_mpa_v1 *)buffer; + 
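The RTR control words that i40iw_build_mpa_v2() assembles pack an IRD/ORD value and a handful of flag bits into two 16-bit fields before byte-swapping them. A standalone sketch of that packing is below; the flag and mask values are assumptions chosen for illustration (a 14-bit value field with control bits in the top two bits), not necessarily the driver's IETF_* constants.

#include <stdio.h>
#include <stdint.h>

/* Assumed layout for illustration: low 14 bits carry IRD/ORD, top bits of
 * ctrl_ird carry peer-to-peer / zero-length-FPDU, top bits of ctrl_ord
 * carry the RDMA0 read/write selection. */
#define NO_IRD_ORD	0x3fff
#define PEER_TO_PEER	0x8000
#define FLPDU_ZERO_LEN	0x4000
#define RDMA0_WRITE	0x8000
#define RDMA0_READ	0x4000

static void build_rtr(uint16_t ird, uint16_t ord, int rdma0_read,
		      uint16_t *ctrl_ird, uint16_t *ctrl_ord)
{
	*ctrl_ird = (uint16_t)((ird > NO_IRD_ORD ? NO_IRD_ORD : ird) |
			       PEER_TO_PEER | FLPDU_ZERO_LEN);
	*ctrl_ord = (uint16_t)((ord > NO_IRD_ORD ? NO_IRD_ORD : ord) |
			       (rdma0_read ? RDMA0_READ : RDMA0_WRITE));
}

int main(void)
{
	uint16_t ird, ord;

	build_rtr(63, 64, 1, &ird, &ord);
	printf("ctrl_ird=0x%04x ctrl_ord=0x%04x\n",
	       (unsigned int)ird, (unsigned int)ord);
	return 0;
}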
mpa_hdr_len = sizeof(struct ietf_mpa_v1); + priv_data_len = ntohs(mpa_frame->priv_data_len); + + if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) { + i40iw_pr_err("large pri_data %d\n", priv_data_len); + return -1; + } + if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) { + i40iw_pr_err("unsupported mpa rev = %d\n", mpa_frame->rev); + return -1; + } + if (mpa_frame->rev > cm_node->mpa_frame_rev) { + i40iw_pr_err("rev %d\n", mpa_frame->rev); + return -1; + } + cm_node->mpa_frame_rev = mpa_frame->rev; + + if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) { + if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) { + i40iw_pr_err("Unexpected MPA Key received\n"); + return -1; + } + } else { + if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) { + i40iw_pr_err("Unexpected MPA Key received\n"); + return -1; + } + } + + if (priv_data_len + mpa_hdr_len > len) { + i40iw_pr_err("ietf buffer len(%x + %x != %x)\n", + priv_data_len, mpa_hdr_len, len); + return -1; + } + if (len > MAX_CM_BUFFER) { + i40iw_pr_err("ietf buffer large len = %d\n", len); + return -1; + } + + switch (mpa_frame->rev) { + case IETF_MPA_V2:{ + u16 ird_size; + u16 ord_size; + u16 ctrl_ord; + u16 ctrl_ird; + + mpa_v2_frame = (struct ietf_mpa_v2 *)buffer; + mpa_hdr_len += IETF_RTR_MSG_SIZE; + rtr_msg = &mpa_v2_frame->rtr_msg; + + /* parse rtr message */ + ctrl_ord = ntohs(rtr_msg->ctrl_ord); + ctrl_ird = ntohs(rtr_msg->ctrl_ird); + ird_size = ctrl_ird & IETF_NO_IRD_ORD; + ord_size = ctrl_ord & IETF_NO_IRD_ORD; + + if (!(ctrl_ird & IETF_PEER_TO_PEER)) + return -1; + + if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) { + cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD; + goto negotiate_done; + } + + if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) { + /* responder */ + if (!ord_size && (ctrl_ord & IETF_RDMA0_READ)) + cm_node->ird_size = 1; + if (cm_node->ord_size > ird_size) + cm_node->ord_size = ird_size; + } else { + /* initiator */ + if (!ird_size && (ctrl_ord & IETF_RDMA0_READ)) + return -1; + if (cm_node->ord_size > ird_size) + cm_node->ord_size = ird_size; + + if (cm_node->ird_size < ord_size) + /* no resources available */ + return -1; + } + +negotiate_done: + if (ctrl_ord & IETF_RDMA0_READ) + cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; + else if (ctrl_ord & IETF_RDMA0_WRITE) + cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO; + else /* Not supported RDMA0 operation */ + return -1; + i40iw_debug(cm_node->dev, I40IW_DEBUG_CM, + "MPAV2: Negotiated ORD: %d, IRD: %d\n", + cm_node->ord_size, cm_node->ird_size); + break; + } + break; + case IETF_MPA_V1: + default: + break; + } + + memcpy(cm_node->pdata_buf, buffer + mpa_hdr_len, priv_data_len); + cm_node->pdata.size = priv_data_len; + + if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT) + *type = I40IW_MPA_REQUEST_REJECT; + + if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS) + cm_node->snd_mark_en = true; + + return 0; +} + +/** + * i40iw_schedule_cm_timer + * @@cm_node: connection's node + * @sqbuf: buffer to send + * @type: if it es send ot close + * @send_retrans: if rexmits to be done + * @close_when_complete: is cm_node to be removed + * + * note - cm_node needs to be protected before calling this. Encase in: + * i40iw_rem_ref_cm_node(cm_core, cm_node); + * i40iw_schedule_cm_timer(...) 
+ * atomic_inc(&cm_node->ref_count); + */ +int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node, + struct i40iw_puda_buf *sqbuf, + enum i40iw_timer_type type, + int send_retrans, + int close_when_complete) +{ + struct i40iw_sc_dev *dev = cm_node->dev; + struct i40iw_cm_core *cm_core = cm_node->cm_core; + struct i40iw_timer_entry *new_send; + int ret = 0; + u32 was_timer_set; + unsigned long flags; + + new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); + if (!new_send) { + i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf); + return -ENOMEM; + } + new_send->retrycount = I40IW_DEFAULT_RETRYS; + new_send->retranscount = I40IW_DEFAULT_RETRANS; + new_send->sqbuf = sqbuf; + new_send->timetosend = jiffies; + new_send->type = type; + new_send->send_retrans = send_retrans; + new_send->close_when_complete = close_when_complete; + + if (type == I40IW_TIMER_TYPE_CLOSE) { + new_send->timetosend += (HZ / 10); + if (cm_node->close_entry) { + kfree(new_send); + i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf); + i40iw_pr_err("already close entry\n"); + return -EINVAL; + } + cm_node->close_entry = new_send; + } + + if (type == I40IW_TIMER_TYPE_SEND) { + spin_lock_irqsave(&cm_node->retrans_list_lock, flags); + cm_node->send_entry = new_send; + atomic_inc(&cm_node->ref_count); + spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); + new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT; + + atomic_inc(&sqbuf->refcount); + i40iw_puda_send_buf(dev->ilq, sqbuf); + if (!send_retrans) { + i40iw_cleanup_retrans_entry(cm_node); + if (close_when_complete) + i40iw_rem_ref_cm_node(cm_node); + return ret; + } + } + + spin_lock_irqsave(&cm_core->ht_lock, flags); + was_timer_set = timer_pending(&cm_core->tcp_timer); + + if (!was_timer_set) { + cm_core->tcp_timer.expires = new_send->timetosend; + add_timer(&cm_core->tcp_timer); + } + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + + return ret; +} + +/** + * i40iw_retrans_expired - Could not rexmit the packet + * @cm_node: connection's node + */ +static void i40iw_retrans_expired(struct i40iw_cm_node *cm_node) +{ + struct iw_cm_id *cm_id = cm_node->cm_id; + enum i40iw_cm_node_state state = cm_node->state; + + cm_node->state = I40IW_CM_STATE_CLOSED; + switch (state) { + case I40IW_CM_STATE_SYN_RCVD: + case I40IW_CM_STATE_CLOSING: + i40iw_rem_ref_cm_node(cm_node); + break; + case I40IW_CM_STATE_FIN_WAIT1: + case I40IW_CM_STATE_LAST_ACK: + if (cm_node->cm_id) + cm_id->rem_ref(cm_id); + i40iw_send_reset(cm_node); + break; + default: + atomic_inc(&cm_node->ref_count); + i40iw_send_reset(cm_node); + i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED); + break; + } +} + +/** + * i40iw_handle_close_entry - for handling retry/timeouts + * @cm_node: connection's node + * @rem_node: flag for remove cm_node + */ +static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node) +{ + struct i40iw_timer_entry *close_entry = cm_node->close_entry; + struct iw_cm_id *cm_id = cm_node->cm_id; + struct i40iw_qp *iwqp; + unsigned long flags; + + if (!close_entry) + return; + iwqp = (struct i40iw_qp *)close_entry->sqbuf; + if (iwqp) { + spin_lock_irqsave(&iwqp->lock, flags); + if (iwqp->cm_id) { + iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED; + iwqp->hw_iwarp_state = I40IW_QP_STATE_ERROR; + iwqp->last_aeq = I40IW_AE_RESET_SENT; + iwqp->ibqp_state = IB_QPS_ERR; + spin_unlock_irqrestore(&iwqp->lock, flags); + i40iw_cm_disconn(iwqp); + } else { + spin_unlock_irqrestore(&iwqp->lock, flags); + } + } else if (rem_node) { + /* TIME_WAIT state */ + i40iw_rem_ref_cm_node(cm_node); 
+ } + if (cm_id) + cm_id->rem_ref(cm_id); + kfree(close_entry); + cm_node->close_entry = NULL; +} + +/** + * i40iw_cm_timer_tick - system's timer expired callback + * @pass: Pointing to cm_core + */ +static void i40iw_cm_timer_tick(unsigned long pass) +{ + unsigned long nexttimeout = jiffies + I40IW_LONG_TIME; + struct i40iw_cm_node *cm_node; + struct i40iw_timer_entry *send_entry, *close_entry; + struct list_head *list_core_temp; + struct list_head *list_node; + struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass; + u32 settimer = 0; + unsigned long timetosend; + struct i40iw_sc_dev *dev; + unsigned long flags; + + struct list_head timer_list; + + INIT_LIST_HEAD(&timer_list); + spin_lock_irqsave(&cm_core->ht_lock, flags); + + list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) { + cm_node = container_of(list_node, struct i40iw_cm_node, list); + if (cm_node->close_entry || cm_node->send_entry) { + atomic_inc(&cm_node->ref_count); + list_add(&cm_node->timer_entry, &timer_list); + } + } + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + + list_for_each_safe(list_node, list_core_temp, &timer_list) { + cm_node = container_of(list_node, + struct i40iw_cm_node, + timer_entry); + close_entry = cm_node->close_entry; + + if (close_entry) { + if (time_after(close_entry->timetosend, jiffies)) { + if (nexttimeout > close_entry->timetosend || + !settimer) { + nexttimeout = close_entry->timetosend; + settimer = 1; + } + } else { + i40iw_handle_close_entry(cm_node, 1); + } + } + + spin_lock_irqsave(&cm_node->retrans_list_lock, flags); + + send_entry = cm_node->send_entry; + if (!send_entry) + goto done; + if (time_after(send_entry->timetosend, jiffies)) { + if (cm_node->state != I40IW_CM_STATE_OFFLOADED) { + if ((nexttimeout > send_entry->timetosend) || + !settimer) { + nexttimeout = send_entry->timetosend; + settimer = 1; + } + } else { + i40iw_free_retrans_entry(cm_node); + } + goto done; + } + + if ((cm_node->state == I40IW_CM_STATE_OFFLOADED) || + (cm_node->state == I40IW_CM_STATE_CLOSED)) { + i40iw_free_retrans_entry(cm_node); + goto done; + } + + if (!send_entry->retranscount || !send_entry->retrycount) { + i40iw_free_retrans_entry(cm_node); + + spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); + i40iw_retrans_expired(cm_node); + cm_node->state = I40IW_CM_STATE_CLOSED; + spin_lock_irqsave(&cm_node->retrans_list_lock, flags); + goto done; + } + cm_node->cm_core->stats_pkt_retrans++; + spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); + + dev = cm_node->dev; + atomic_inc(&send_entry->sqbuf->refcount); + i40iw_puda_send_buf(dev->ilq, send_entry->sqbuf); + spin_lock_irqsave(&cm_node->retrans_list_lock, flags); + if (send_entry->send_retrans) { + send_entry->retranscount--; + timetosend = (I40IW_RETRY_TIMEOUT << + (I40IW_DEFAULT_RETRANS - + send_entry->retranscount)); + + send_entry->timetosend = jiffies + + min(timetosend, I40IW_MAX_TIMEOUT); + if (nexttimeout > send_entry->timetosend || !settimer) { + nexttimeout = send_entry->timetosend; + settimer = 1; + } + } else { + int close_when_complete; + + close_when_complete = send_entry->close_when_complete; + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "cm_node=%p state=%d\n", + cm_node, + cm_node->state); + i40iw_free_retrans_entry(cm_node); + if (close_when_complete) + i40iw_rem_ref_cm_node(cm_node); + } +done: + spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); + i40iw_rem_ref_cm_node(cm_node); + } + + if (settimer) { + spin_lock_irqsave(&cm_core->ht_lock, flags); + if 
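The doubling retransmit interval used by i40iw_cm_timer_tick() (shift the base timeout left by the number of retransmissions already consumed, then clamp to a maximum) is easy to see in isolation. In this sketch the jiffies-style constants are placeholders, not the driver's real I40IW_* values:

#include <stdio.h>

#define RETRY_TIMEOUT	 10	/* placeholder for I40IW_RETRY_TIMEOUT */
#define DEFAULT_RETRANS	  4	/* placeholder for I40IW_DEFAULT_RETRANS */
#define MAX_TIMEOUT	100	/* placeholder for I40IW_MAX_TIMEOUT */

/* Backoff after one more retransmission, mirroring the driver's
 * timetosend = RETRY_TIMEOUT << (DEFAULT_RETRANS - retranscount),
 * capped at MAX_TIMEOUT. */
static unsigned long next_timeout(unsigned int retranscount)
{
	unsigned long t = (unsigned long)RETRY_TIMEOUT <<
			  (DEFAULT_RETRANS - retranscount);

	return t < MAX_TIMEOUT ? t : MAX_TIMEOUT;
}

int main(void)
{
	unsigned int left;

	for (left = DEFAULT_RETRANS; left > 0; left--)
		printf("retranscount=%u -> wait %lu\n",
		       left - 1, next_timeout(left - 1));
	return 0;
}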
(!timer_pending(&cm_core->tcp_timer)) { + cm_core->tcp_timer.expires = nexttimeout; + add_timer(&cm_core->tcp_timer); + } + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + } +} + +/** + * i40iw_send_syn - send SYN packet + * @cm_node: connection's node + * @sendack: flag to set ACK bit or not + */ +int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack) +{ + struct i40iw_puda_buf *sqbuf; + int flags = SET_SYN; + char optionsbuffer[sizeof(struct option_mss) + + sizeof(struct option_windowscale) + + sizeof(struct option_base) + TCP_OPTIONS_PADDING]; + struct i40iw_kmem_info opts; + + int optionssize = 0; + /* Sending MSS option */ + union all_known_options *options; + + opts.addr = optionsbuffer; + if (!cm_node) { + i40iw_pr_err("no cm_node\n"); + return -EINVAL; + } + + options = (union all_known_options *)&optionsbuffer[optionssize]; + options->as_mss.optionnum = OPTION_NUMBER_MSS; + options->as_mss.length = sizeof(struct option_mss); + options->as_mss.mss = htons(cm_node->tcp_cntxt.mss); + optionssize += sizeof(struct option_mss); + + options = (union all_known_options *)&optionsbuffer[optionssize]; + options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE; + options->as_windowscale.length = sizeof(struct option_windowscale); + options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale; + optionssize += sizeof(struct option_windowscale); + options = (union all_known_options *)&optionsbuffer[optionssize]; + options->as_end = OPTION_NUMBER_END; + optionssize += 1; + + if (sendack) + flags |= SET_ACK; + + opts.size = optionssize; + + sqbuf = i40iw_form_cm_frame(cm_node, &opts, NULL, NULL, flags); + if (!sqbuf) { + i40iw_pr_err("no sqbuf\n"); + return -1; + } + return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0); +} + +/** + * i40iw_send_ack - Send ACK packet + * @cm_node: connection's node + */ +static void i40iw_send_ack(struct i40iw_cm_node *cm_node) +{ + struct i40iw_puda_buf *sqbuf; + + sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK); + if (sqbuf) + i40iw_puda_send_buf(cm_node->dev->ilq, sqbuf); + else + i40iw_pr_err("no sqbuf\n"); +} + +/** + * i40iw_send_fin - Send FIN pkt + * @cm_node: connection's node + */ +static int i40iw_send_fin(struct i40iw_cm_node *cm_node) +{ + struct i40iw_puda_buf *sqbuf; + + sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK | SET_FIN); + if (!sqbuf) { + i40iw_pr_err("no sqbuf\n"); + return -1; + } + return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0); +} + +/** + * i40iw_find_node - find a cm node that matches the reference cm node + * @cm_core: cm's core + * @rem_port: remote tcp port num + * @rem_addr: remote ip addr + * @loc_port: local tcp port num + * @loc_addr: loc ip addr + * @add_refcnt: flag to increment refcount of cm_node + */ +struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core, + u16 rem_port, + u32 *rem_addr, + u16 loc_port, + u32 *loc_addr, + bool add_refcnt) +{ + struct list_head *hte; + struct i40iw_cm_node *cm_node; + unsigned long flags; + + hte = &cm_core->connected_nodes; + + /* walk list and find cm_node associated with this session ID */ + spin_lock_irqsave(&cm_core->ht_lock, flags); + list_for_each_entry(cm_node, hte, list) { + if (!memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) && + (cm_node->loc_port == loc_port) && + !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr)) && + (cm_node->rem_port == rem_port)) { + if (add_refcnt) + atomic_inc(&cm_node->ref_count); + 
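i40iw_send_syn() lays its TCP options out back to back in a small stack buffer before handing them to i40iw_form_cm_frame(). A standalone sketch of the same layout, using the standard TCP option kinds (2 = MSS, 3 = window scale, 0 = end of list), is shown here; the MSS and scale values are arbitrary and this is plain userspace C, not the driver's option structs.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Build the same option sequence the driver sends on a SYN:
 * MSS (kind 2, len 4), window scale (kind 3, len 3), end of list (kind 0). */
static size_t build_syn_options(uint8_t *buf, uint16_t mss, uint8_t wscale)
{
	size_t off = 0;
	uint16_t mss_be = htons(mss);

	buf[off++] = 2;			/* MSS option */
	buf[off++] = 4;
	memcpy(&buf[off], &mss_be, 2);
	off += 2;

	buf[off++] = 3;			/* window scale option */
	buf[off++] = 3;
	buf[off++] = wscale;

	buf[off++] = 0;			/* end of option list */
	return off;			/* 8 bytes before doff padding */
}

int main(void)
{
	uint8_t opts[16];
	size_t i, n = build_syn_options(opts, 1460, 2);

	for (i = 0; i < n; i++)
		printf("%02x ", opts[i]);
	printf("(%zu bytes)\n", n);
	return 0;
}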
spin_unlock_irqrestore(&cm_core->ht_lock, flags); + return cm_node; + } + } + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + + /* no owner node */ + return NULL; +} + +/** + * i40iw_find_listener - find a cm node listening on this addr-port pair + * @cm_core: cm's core + * @dst_port: listener tcp port num + * @dst_addr: listener ip addr + * @listener_state: state to match with listen node's + */ +static struct i40iw_cm_listener *i40iw_find_listener( + struct i40iw_cm_core *cm_core, + u32 *dst_addr, + u16 dst_port, + u16 vlan_id, + enum i40iw_cm_listener_state + listener_state) +{ + struct i40iw_cm_listener *listen_node; + static const u32 ip_zero[4] = { 0, 0, 0, 0 }; + u32 listen_addr[4]; + u16 listen_port; + unsigned long flags; + + /* walk list and find cm_node associated with this session ID */ + spin_lock_irqsave(&cm_core->listen_list_lock, flags); + list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { + memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr)); + listen_port = listen_node->loc_port; + /* compare node pair, return node handle if a match */ + if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) || + !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) && + (listen_port == dst_port) && + (listener_state & listen_node->listener_state)) { + atomic_inc(&listen_node->ref_count); + spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); + return listen_node; + } + } + spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); + return NULL; +} + +/** + * i40iw_add_hte_node - add a cm node to the hash table + * @cm_core: cm's core + * @cm_node: connection's node + */ +static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core, + struct i40iw_cm_node *cm_node) +{ + struct list_head *hte; + unsigned long flags; + + if (!cm_node || !cm_core) { + i40iw_pr_err("cm_node or cm_core == NULL\n"); + return; + } + spin_lock_irqsave(&cm_core->ht_lock, flags); + + /* get a handle on the hash table element (list head for this slot) */ + hte = &cm_core->connected_nodes; + list_add_tail(&cm_node->list, hte); + spin_unlock_irqrestore(&cm_core->ht_lock, flags); +} + +/** + * listen_port_in_use - determine if port is in use + * @port: Listen port number + */ +static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port) +{ + struct i40iw_cm_listener *listen_node; + unsigned long flags; + bool ret = false; + + spin_lock_irqsave(&cm_core->listen_list_lock, flags); + list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { + if (listen_node->loc_port == port) { + ret = true; + break; + } + } + spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); + return ret; +} + +/** + * i40iw_del_multiple_qhash - Remove qhash and child listens + * @iwdev: iWarp device + * @cm_info: CM info for parent listen node + * @cm_parent_listen_node: The parent listen node + */ +static enum i40iw_status_code i40iw_del_multiple_qhash( + struct i40iw_device *iwdev, + struct i40iw_cm_info *cm_info, + struct i40iw_cm_listener *cm_parent_listen_node) +{ + struct i40iw_cm_listener *child_listen_node; + enum i40iw_status_code ret = I40IW_ERR_CONFIG; + struct list_head *pos, *tpos; + unsigned long flags; + + spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags); + list_for_each_safe(pos, tpos, &cm_parent_listen_node->child_listen_list) { + child_listen_node = list_entry(pos, struct i40iw_cm_listener, child_listen_list); + if (child_listen_node->ipv4) + i40iw_debug(&iwdev->sc_dev, + I40IW_DEBUG_CM, + "removing child listen for IP=%pI4, port=%d, vlan=%d\n", + 
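The listener lookup just above treats an all-zero local address as a wildcard, so a listen bound to 0.0.0.0 (or ::) matches any destination address as long as the port matches. A reduced sketch of that comparison, using plain 4-word addresses instead of the driver's listen-node structures:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>

/* Match rule mirrored from the lookup: exact address match OR a listener
 * bound to the all-zero (wildcard) address, plus an exact port match. */
static bool listener_matches(const uint32_t listen_addr[4], uint16_t listen_port,
			     const uint32_t dst_addr[4], uint16_t dst_port)
{
	static const uint32_t any[4];

	if (listen_port != dst_port)
		return false;
	return !memcmp(listen_addr, dst_addr, sizeof(any)) ||
	       !memcmp(listen_addr, any, sizeof(any));
}

int main(void)
{
	uint32_t wildcard[4] = { 0 };
	uint32_t host[4] = { 0xc0a80001, 0, 0, 0 };

	printf("%d %d\n",
	       listener_matches(wildcard, 4000, host, 4000),	/* 1 */
	       listener_matches(host, 4000, host, 4001));	/* 0 */
	return 0;
}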
child_listen_node->loc_addr, + child_listen_node->loc_port, + child_listen_node->vlan_id); + else + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, + "removing child listen for IP=%pI6, port=%d, vlan=%d\n", + child_listen_node->loc_addr, + child_listen_node->loc_port, + child_listen_node->vlan_id); + list_del(pos); + memcpy(cm_info->loc_addr, child_listen_node->loc_addr, + sizeof(cm_info->loc_addr)); + cm_info->vlan_id = child_listen_node->vlan_id; + ret = i40iw_manage_qhash(iwdev, cm_info, + I40IW_QHASH_TYPE_TCP_SYN, + I40IW_QHASH_MANAGE_TYPE_DELETE, NULL, false); + kfree(child_listen_node); + cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++; + i40iw_debug(&iwdev->sc_dev, + I40IW_DEBUG_CM, + "freed pointer = %p\n", + child_listen_node); + } + spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags); + + return ret; +} + +/** + * i40iw_netdev_vlan_ipv6 - Gets the netdev and mac + * @addr: local IPv6 address + * @vlan_id: vlan id for the given IPv6 address + * @mac: mac address for the given IPv6 address + * + * Returns the net_device of the IPv6 address and also sets the + * vlan id and mac for that address. + */ +static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac) +{ + struct net_device *ip_dev = NULL; +#if IS_ENABLED(CONFIG_IPV6) + struct in6_addr laddr6; + + i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr); + if (vlan_id) + *vlan_id = I40IW_NO_VLAN; + if (mac) + eth_zero_addr(mac); + rcu_read_lock(); + for_each_netdev_rcu(&init_net, ip_dev) { + if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) { + if (vlan_id) + *vlan_id = rdma_vlan_dev_vlan_id(ip_dev); + if (ip_dev->dev_addr && mac) + ether_addr_copy(mac, ip_dev->dev_addr); + break; + } + } + rcu_read_unlock(); +#endif + return ip_dev; +} + +/** + * i40iw_get_vlan_ipv4 - Returns the vlan_id for IPv4 address + * @addr: local IPv4 address + */ +static u16 i40iw_get_vlan_ipv4(u32 *addr) +{ + struct net_device *netdev; + u16 vlan_id = I40IW_NO_VLAN; + + netdev = ip_dev_find(&init_net, htonl(addr[0])); + if (netdev) { + vlan_id = rdma_vlan_dev_vlan_id(netdev); + dev_put(netdev); + } + return vlan_id; +} + +/** + * i40iw_add_mqh_6 - Adds multiple qhashes for IPv6 + * @iwdev: iWarp device + * @cm_info: CM info for parent listen node + * @cm_parent_listen_node: The parent listen node + * + * Adds a qhash and a child listen node for every IPv6 address + * on the adapter and adds the associated qhash filter + */ +static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev, + struct i40iw_cm_info *cm_info, + struct i40iw_cm_listener *cm_parent_listen_node) +{ + struct net_device *ip_dev; + struct inet6_dev *idev; + struct inet6_ifaddr *ifp; + enum i40iw_status_code ret = 0; + struct i40iw_cm_listener *child_listen_node; + unsigned long flags; + + rtnl_lock(); + for_each_netdev_rcu(&init_net, ip_dev) { + if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) && + (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) || + (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) { + idev = __in6_dev_get(ip_dev); + if (!idev) { + i40iw_pr_err("idev == NULL\n"); + break; + } + list_for_each_entry(ifp, &idev->addr_list, if_list) { + i40iw_debug(&iwdev->sc_dev, + I40IW_DEBUG_CM, + "IP=%pI6, vlan_id=%d, MAC=%pM\n", + &ifp->addr, + rdma_vlan_dev_vlan_id(ip_dev), + ip_dev->dev_addr); + child_listen_node = + kzalloc(sizeof(*child_listen_node), GFP_ATOMIC); + i40iw_debug(&iwdev->sc_dev, + I40IW_DEBUG_CM, + "Allocating child listener %p\n", + child_listen_node); + if (!child_listen_node) { + 
i40iw_pr_err("listener memory allocation\n"); + ret = I40IW_ERR_NO_MEMORY; + goto exit; + } + cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev); + cm_parent_listen_node->vlan_id = cm_info->vlan_id; + + memcpy(child_listen_node, cm_parent_listen_node, + sizeof(*child_listen_node)); + + i40iw_copy_ip_ntohl(child_listen_node->loc_addr, + ifp->addr.in6_u.u6_addr32); + memcpy(cm_info->loc_addr, child_listen_node->loc_addr, + sizeof(cm_info->loc_addr)); + + ret = i40iw_manage_qhash(iwdev, cm_info, + I40IW_QHASH_TYPE_TCP_SYN, + I40IW_QHASH_MANAGE_TYPE_ADD, + NULL, true); + if (!ret) { + spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags); + list_add(&child_listen_node->child_listen_list, + &cm_parent_listen_node->child_listen_list); + spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags); + cm_parent_listen_node->cm_core->stats_listen_nodes_created++; + } else { + kfree(child_listen_node); + } + } + } + } +exit: + rtnl_unlock(); + return ret; +} + +/** + * i40iw_add_mqh_4 - Adds multiple qhashes for IPv4 + * @iwdev: iWarp device + * @cm_info: CM info for parent listen node + * @cm_parent_listen_node: The parent listen node + * + * Adds a qhash and a child listen node for every IPv4 address + * on the adapter and adds the associated qhash filter + */ +static enum i40iw_status_code i40iw_add_mqh_4( + struct i40iw_device *iwdev, + struct i40iw_cm_info *cm_info, + struct i40iw_cm_listener *cm_parent_listen_node) +{ + struct net_device *dev; + struct in_device *idev; + struct i40iw_cm_listener *child_listen_node; + enum i40iw_status_code ret = 0; + unsigned long flags; + + rtnl_lock(); + for_each_netdev(&init_net, dev) { + if ((((rdma_vlan_dev_vlan_id(dev) < I40IW_NO_VLAN) && + (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) || + (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) { + idev = in_dev_get(dev); + for_ifa(idev) { + i40iw_debug(&iwdev->sc_dev, + I40IW_DEBUG_CM, + "Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n", + &ifa->ifa_address, + rdma_vlan_dev_vlan_id(dev), + dev->dev_addr); + child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC); + cm_parent_listen_node->cm_core->stats_listen_nodes_created++; + i40iw_debug(&iwdev->sc_dev, + I40IW_DEBUG_CM, + "Allocating child listener %p\n", + child_listen_node); + if (!child_listen_node) { + i40iw_pr_err("listener memory allocation\n"); + in_dev_put(idev); + ret = I40IW_ERR_NO_MEMORY; + goto exit; + } + cm_info->vlan_id = rdma_vlan_dev_vlan_id(dev); + cm_parent_listen_node->vlan_id = cm_info->vlan_id; + memcpy(child_listen_node, + cm_parent_listen_node, + sizeof(*child_listen_node)); + + child_listen_node->loc_addr[0] = ntohl(ifa->ifa_address); + memcpy(cm_info->loc_addr, child_listen_node->loc_addr, + sizeof(cm_info->loc_addr)); + + ret = i40iw_manage_qhash(iwdev, + cm_info, + I40IW_QHASH_TYPE_TCP_SYN, + I40IW_QHASH_MANAGE_TYPE_ADD, + NULL, + true); + if (!ret) { + spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags); + list_add(&child_listen_node->child_listen_list, + &cm_parent_listen_node->child_listen_list); + spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags); + } else { + kfree(child_listen_node); + cm_parent_listen_node->cm_core->stats_listen_nodes_created--; + } + } + endfor_ifa(idev); + in_dev_put(idev); + } + } +exit: + rtnl_unlock(); + return ret; +} + +/** + * i40iw_dec_refcnt_listen - delete listener and associated cm nodes + * @cm_core: cm's core + * @free_hanging_nodes: to free associated cm_nodes + * @apbvt_del: flag to delete the apbvt + */ +static int 
i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core, + struct i40iw_cm_listener *listener, + int free_hanging_nodes, bool apbvt_del) +{ + int ret = -EINVAL; + int err = 0; + struct list_head *list_pos; + struct list_head *list_temp; + struct i40iw_cm_node *cm_node; + struct list_head reset_list; + struct i40iw_cm_info nfo; + struct i40iw_cm_node *loopback; + enum i40iw_cm_node_state old_state; + unsigned long flags; + + /* free non-accelerated child nodes for this listener */ + INIT_LIST_HEAD(&reset_list); + if (free_hanging_nodes) { + spin_lock_irqsave(&cm_core->ht_lock, flags); + list_for_each_safe(list_pos, list_temp, &cm_core->connected_nodes) { + cm_node = container_of(list_pos, struct i40iw_cm_node, list); + if ((cm_node->listener == listener) && !cm_node->accelerated) { + atomic_inc(&cm_node->ref_count); + list_add(&cm_node->reset_entry, &reset_list); + } + } + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + } + + list_for_each_safe(list_pos, list_temp, &reset_list) { + cm_node = container_of(list_pos, struct i40iw_cm_node, reset_entry); + loopback = cm_node->loopbackpartner; + if (cm_node->state >= I40IW_CM_STATE_FIN_WAIT1) { + i40iw_rem_ref_cm_node(cm_node); + } else { + if (!loopback) { + i40iw_cleanup_retrans_entry(cm_node); + err = i40iw_send_reset(cm_node); + if (err) { + cm_node->state = I40IW_CM_STATE_CLOSED; + i40iw_pr_err("send reset\n"); + } else { + old_state = cm_node->state; + cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED; + if (old_state != I40IW_CM_STATE_MPAREQ_RCVD) + i40iw_rem_ref_cm_node(cm_node); + } + } else { + struct i40iw_cm_event event; + + event.cm_node = loopback; + memcpy(event.cm_info.rem_addr, + loopback->rem_addr, sizeof(event.cm_info.rem_addr)); + memcpy(event.cm_info.loc_addr, + loopback->loc_addr, sizeof(event.cm_info.loc_addr)); + event.cm_info.rem_port = loopback->rem_port; + event.cm_info.loc_port = loopback->loc_port; + event.cm_info.cm_id = loopback->cm_id; + event.cm_info.ipv4 = loopback->ipv4; + atomic_inc(&loopback->ref_count); + loopback->state = I40IW_CM_STATE_CLOSED; + i40iw_event_connect_error(&event); + cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED; + i40iw_rem_ref_cm_node(cm_node); + } + } + } + + if (!atomic_dec_return(&listener->ref_count)) { + spin_lock_irqsave(&cm_core->listen_list_lock, flags); + list_del(&listener->list); + spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); + + if (listener->iwdev) { + if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port)) + i40iw_manage_apbvt(listener->iwdev, + listener->loc_port, + I40IW_MANAGE_APBVT_DEL); + + memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr)); + nfo.loc_port = listener->loc_port; + nfo.ipv4 = listener->ipv4; + nfo.vlan_id = listener->vlan_id; + + if (!list_empty(&listener->child_listen_list)) { + i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener); + } else { + if (listener->qhash_set) + i40iw_manage_qhash(listener->iwdev, + &nfo, + I40IW_QHASH_TYPE_TCP_SYN, + I40IW_QHASH_MANAGE_TYPE_DELETE, + NULL, + false); + } + } + + cm_core->stats_listen_destroyed++; + kfree(listener); + cm_core->stats_listen_nodes_destroyed++; + listener = NULL; + ret = 0; + } + + if (listener) { + if (atomic_read(&listener->pend_accepts_cnt) > 0) + i40iw_debug(cm_core->dev, + I40IW_DEBUG_CM, + "%s: listener (%p) pending accepts=%u\n", + __func__, + listener, + atomic_read(&listener->pend_accepts_cnt)); + } + + return ret; +} + +/** + * i40iw_cm_del_listen - delete a linstener + * @cm_core: cm's core + * @listener: passive connection's listener 
+ * @apbvt_del: flag to delete apbvt + */ +static int i40iw_cm_del_listen(struct i40iw_cm_core *cm_core, + struct i40iw_cm_listener *listener, + bool apbvt_del) +{ + listener->listener_state = I40IW_CM_LISTENER_PASSIVE_STATE; + listener->cm_id = NULL; /* going to be destroyed pretty soon */ + return i40iw_dec_refcnt_listen(cm_core, listener, 1, apbvt_del); +} + +/** + * i40iw_addr_resolve_neigh - resolve neighbor address + * @iwdev: iwarp device structure + * @src_ip: local ip address + * @dst_ip: remote ip address + * @arpindex: if there is an arp entry + */ +static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev, + u32 src_ip, + u32 dst_ip, + int arpindex) +{ + struct rtable *rt; + struct neighbour *neigh; + int rc = arpindex; + struct net_device *netdev = iwdev->netdev; + __be32 dst_ipaddr = htonl(dst_ip); + __be32 src_ipaddr = htonl(src_ip); + + rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0); + if (IS_ERR(rt)) { + i40iw_pr_err("ip_route_output\n"); + return rc; + } + + if (netif_is_bond_slave(netdev)) + netdev = netdev_master_upper_dev_get(netdev); + + neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr); + + rcu_read_lock(); + if (neigh) { + if (neigh->nud_state & NUD_VALID) { + if (arpindex >= 0) { + if (ether_addr_equal(iwdev->arp_table[arpindex].mac_addr, + neigh->ha)) + /* Mac address same as arp table */ + goto resolve_neigh_exit; + i40iw_manage_arp_cache(iwdev, + iwdev->arp_table[arpindex].mac_addr, + &dst_ip, + true, + I40IW_ARP_DELETE); + } + + i40iw_manage_arp_cache(iwdev, neigh->ha, &dst_ip, true, I40IW_ARP_ADD); + rc = i40iw_arp_table(iwdev, &dst_ip, true, NULL, I40IW_ARP_RESOLVE); + } else { + neigh_event_send(neigh, NULL); + } + } + resolve_neigh_exit: + + rcu_read_unlock(); + if (neigh) + neigh_release(neigh); + + ip_rt_put(rt); + return rc; +} + +/** + * i40iw_get_dst_ipv6 + */ +static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr, + struct sockaddr_in6 *dst_addr) +{ + struct dst_entry *dst; + struct flowi6 fl6; + + memset(&fl6, 0, sizeof(fl6)); + fl6.daddr = dst_addr->sin6_addr; + fl6.saddr = src_addr->sin6_addr; + if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) + fl6.flowi6_oif = dst_addr->sin6_scope_id; + + dst = ip6_route_output(&init_net, NULL, &fl6); + return dst; +} + +/** + * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address + * @iwdev: iwarp device structure + * @dst_ip: remote ip address + * @arpindex: if there is an arp entry + */ +static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, + u32 *src, + u32 *dest, + int arpindex) +{ + struct neighbour *neigh; + int rc = arpindex; + struct net_device *netdev = iwdev->netdev; + struct dst_entry *dst; + struct sockaddr_in6 dst_addr; + struct sockaddr_in6 src_addr; + + memset(&dst_addr, 0, sizeof(dst_addr)); + dst_addr.sin6_family = AF_INET6; + i40iw_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest); + memset(&src_addr, 0, sizeof(src_addr)); + src_addr.sin6_family = AF_INET6; + i40iw_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src); + dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr); + if (!dst || dst->error) { + if (dst) { + dst_release(dst); + i40iw_pr_err("ip6_route_output returned dst->error = %d\n", + dst->error); + } + return rc; + } + + if (netif_is_bond_slave(netdev)) + netdev = netdev_master_upper_dev_get(netdev); + + neigh = dst_neigh_lookup(dst, &dst_addr); + + rcu_read_lock(); + if (neigh) { + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "dst_neigh_lookup MAC=%pM\n", neigh->ha); + if (neigh->nud_state & NUD_VALID) { + if 
(arpindex >= 0) { + if (ether_addr_equal + (iwdev->arp_table[arpindex].mac_addr, + neigh->ha)) { + /* Mac address same as in arp table */ + goto resolve_neigh_exit6; + } + i40iw_manage_arp_cache(iwdev, + iwdev->arp_table[arpindex].mac_addr, + dest, + false, + I40IW_ARP_DELETE); + } + i40iw_manage_arp_cache(iwdev, + neigh->ha, + dest, + false, + I40IW_ARP_ADD); + rc = i40iw_arp_table(iwdev, + dest, + false, + NULL, + I40IW_ARP_RESOLVE); + } else { + neigh_event_send(neigh, NULL); + } + } + + resolve_neigh_exit6: + rcu_read_unlock(); + if (neigh) + neigh_release(neigh); + dst_release(dst); + return rc; +} + +/** + * i40iw_ipv4_is_loopback - check if loopback + * @loc_addr: local addr to compare + * @rem_addr: remote address + */ +static bool i40iw_ipv4_is_loopback(u32 loc_addr, u32 rem_addr) +{ + return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr); +} + +/** + * i40iw_ipv6_is_loopback - check if loopback + * @loc_addr: local addr to compare + * @rem_addr: remote address + */ +static bool i40iw_ipv6_is_loopback(u32 *loc_addr, u32 *rem_addr) +{ + struct in6_addr raddr6; + + i40iw_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr); + return (!memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6)); +} + +/** + * i40iw_make_cm_node - create a new instance of a cm node + * @cm_core: cm's core + * @iwdev: iwarp device structure + * @cm_info: quad info for connection + * @listener: passive connection's listener + */ +static struct i40iw_cm_node *i40iw_make_cm_node( + struct i40iw_cm_core *cm_core, + struct i40iw_device *iwdev, + struct i40iw_cm_info *cm_info, + struct i40iw_cm_listener *listener) +{ + struct i40iw_cm_node *cm_node; + struct timespec ts; + int oldarpindex; + int arpindex; + struct net_device *netdev = iwdev->netdev; + + /* create an hte and cm_node for this instance */ + cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC); + if (!cm_node) + return NULL; + + /* set our node specific transport info */ + cm_node->ipv4 = cm_info->ipv4; + cm_node->vlan_id = cm_info->vlan_id; + memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr)); + memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr)); + cm_node->loc_port = cm_info->loc_port; + cm_node->rem_port = cm_info->rem_port; + + cm_node->mpa_frame_rev = iwdev->mpa_version; + cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; + cm_node->ird_size = I40IW_MAX_IRD_SIZE; + cm_node->ord_size = I40IW_MAX_ORD_SIZE; + + cm_node->listener = listener; + cm_node->cm_id = cm_info->cm_id; + ether_addr_copy(cm_node->loc_mac, netdev->dev_addr); + spin_lock_init(&cm_node->retrans_list_lock); + + atomic_set(&cm_node->ref_count, 1); + /* associate our parent CM core */ + cm_node->cm_core = cm_core; + cm_node->tcp_cntxt.loc_id = I40IW_CM_DEF_LOCAL_ID; + cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE; + cm_node->tcp_cntxt.rcv_wnd = + I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE; + ts = current_kernel_time(); + cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec); + cm_node->tcp_cntxt.mss = iwdev->mss; + + cm_node->iwdev = iwdev; + cm_node->dev = &iwdev->sc_dev; + + if ((cm_node->ipv4 && + i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) || + (!cm_node->ipv4 && i40iw_ipv6_is_loopback(cm_node->loc_addr, + cm_node->rem_addr))) { + arpindex = i40iw_arp_table(iwdev, + cm_node->rem_addr, + false, + NULL, + I40IW_ARP_RESOLVE); + } else { + oldarpindex = i40iw_arp_table(iwdev, + cm_node->rem_addr, + false, + NULL, + I40IW_ARP_RESOLVE); + if (cm_node->ipv4) + arpindex = 
i40iw_addr_resolve_neigh(iwdev, + cm_info->loc_addr[0], + cm_info->rem_addr[0], + oldarpindex); + else if (IS_ENABLED(CONFIG_IPV6)) + arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev, + cm_info->loc_addr, + cm_info->rem_addr, + oldarpindex); + else + arpindex = -EINVAL; + } + if (arpindex < 0) { + i40iw_pr_err("cm_node arpindex\n"); + kfree(cm_node); + return NULL; + } + ether_addr_copy(cm_node->rem_mac, iwdev->arp_table[arpindex].mac_addr); + i40iw_add_hte_node(cm_core, cm_node); + cm_core->stats_nodes_created++; + return cm_node; +} + +/** + * i40iw_rem_ref_cm_node - destroy an instance of a cm node + * @cm_node: connection's node + */ +static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node) +{ + struct i40iw_cm_core *cm_core = cm_node->cm_core; + struct i40iw_qp *iwqp; + struct i40iw_cm_info nfo; + unsigned long flags; + + spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags); + if (atomic_dec_return(&cm_node->ref_count)) { + spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags); + return; + } + list_del(&cm_node->list); + spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags); + + /* if the node is destroyed before connection was accelerated */ + if (!cm_node->accelerated && cm_node->accept_pend) { + pr_err("node destroyed before established\n"); + atomic_dec(&cm_node->listener->pend_accepts_cnt); + } + if (cm_node->close_entry) + i40iw_handle_close_entry(cm_node, 0); + if (cm_node->listener) { + i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true); + } else { + if (!i40iw_listen_port_in_use(cm_core, htons(cm_node->loc_port)) && + cm_node->apbvt_set && cm_node->iwdev) { + i40iw_manage_apbvt(cm_node->iwdev, + cm_node->loc_port, + I40IW_MANAGE_APBVT_DEL); + i40iw_get_addr_info(cm_node, &nfo); + if (cm_node->qhash_set) { + i40iw_manage_qhash(cm_node->iwdev, + &nfo, + I40IW_QHASH_TYPE_TCP_ESTABLISHED, + I40IW_QHASH_MANAGE_TYPE_DELETE, + NULL, + false); + cm_node->qhash_set = 0; + } + } + } + + iwqp = cm_node->iwqp; + if (iwqp) { + iwqp->cm_node = NULL; + i40iw_rem_ref(&iwqp->ibqp); + cm_node->iwqp = NULL; + } else if (cm_node->qhash_set) { + i40iw_get_addr_info(cm_node, &nfo); + i40iw_manage_qhash(cm_node->iwdev, + &nfo, + I40IW_QHASH_TYPE_TCP_ESTABLISHED, + I40IW_QHASH_MANAGE_TYPE_DELETE, + NULL, + false); + cm_node->qhash_set = 0; + } + + cm_node->cm_core->stats_nodes_destroyed++; + kfree(cm_node); +} + +/** + * i40iw_handle_fin_pkt - FIN packet received + * @cm_node: connection's node + */ +static void i40iw_handle_fin_pkt(struct i40iw_cm_node *cm_node) +{ + u32 ret; + + switch (cm_node->state) { + case I40IW_CM_STATE_SYN_RCVD: + case I40IW_CM_STATE_SYN_SENT: + case I40IW_CM_STATE_ESTABLISHED: + case I40IW_CM_STATE_MPAREJ_RCVD: + cm_node->tcp_cntxt.rcv_nxt++; + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_LAST_ACK; + i40iw_send_fin(cm_node); + break; + case I40IW_CM_STATE_MPAREQ_SENT: + i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED); + cm_node->tcp_cntxt.rcv_nxt++; + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_CLOSED; + atomic_inc(&cm_node->ref_count); + i40iw_send_reset(cm_node); + break; + case I40IW_CM_STATE_FIN_WAIT1: + cm_node->tcp_cntxt.rcv_nxt++; + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_CLOSING; + i40iw_send_ack(cm_node); + /* + * Wait for ACK as this is simultaneous close. + * After we receive ACK, do not send anything. + * Just rm the node. 
+ */ + break; + case I40IW_CM_STATE_FIN_WAIT2: + cm_node->tcp_cntxt.rcv_nxt++; + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_TIME_WAIT; + i40iw_send_ack(cm_node); + ret = + i40iw_schedule_cm_timer(cm_node, NULL, I40IW_TIMER_TYPE_CLOSE, 1, 0); + if (ret) + i40iw_pr_err("node %p state = %d\n", cm_node, cm_node->state); + break; + case I40IW_CM_STATE_TIME_WAIT: + cm_node->tcp_cntxt.rcv_nxt++; + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_CLOSED; + i40iw_rem_ref_cm_node(cm_node); + break; + case I40IW_CM_STATE_OFFLOADED: + default: + i40iw_pr_err("bad state node %p state = %d\n", cm_node, cm_node->state); + break; + } +} + +/** + * i40iw_handle_rst_pkt - process received RST packet + * @cm_node: connection's node + * @rbuf: receive buffer + */ +static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node, + struct i40iw_puda_buf *rbuf) +{ + i40iw_cleanup_retrans_entry(cm_node); + switch (cm_node->state) { + case I40IW_CM_STATE_SYN_SENT: + case I40IW_CM_STATE_MPAREQ_SENT: + switch (cm_node->mpa_frame_rev) { + case IETF_MPA_V2: + cm_node->mpa_frame_rev = IETF_MPA_V1; + /* send a syn and goto syn sent state */ + cm_node->state = I40IW_CM_STATE_SYN_SENT; + if (i40iw_send_syn(cm_node, 0)) + i40iw_active_open_err(cm_node, false); + break; + case IETF_MPA_V1: + default: + i40iw_active_open_err(cm_node, false); + break; + } + break; + case I40IW_CM_STATE_MPAREQ_RCVD: + atomic_add_return(1, &cm_node->passive_state); + break; + case I40IW_CM_STATE_ESTABLISHED: + case I40IW_CM_STATE_SYN_RCVD: + case I40IW_CM_STATE_LISTENING: + i40iw_pr_err("Bad state state = %d\n", cm_node->state); + i40iw_passive_open_err(cm_node, false); + break; + case I40IW_CM_STATE_OFFLOADED: + i40iw_active_open_err(cm_node, false); + break; + case I40IW_CM_STATE_CLOSED: + break; + case I40IW_CM_STATE_FIN_WAIT2: + case I40IW_CM_STATE_FIN_WAIT1: + case I40IW_CM_STATE_LAST_ACK: + cm_node->cm_id->rem_ref(cm_node->cm_id); + case I40IW_CM_STATE_TIME_WAIT: + cm_node->state = I40IW_CM_STATE_CLOSED; + i40iw_rem_ref_cm_node(cm_node); + break; + default: + break; + } +} + +/** + * i40iw_handle_rcv_mpa - Process a recv'd mpa buffer + * @cm_node: connection's node + * @rbuf: receive buffer + */ +static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node, + struct i40iw_puda_buf *rbuf) +{ + int ret; + int datasize = rbuf->datalen; + u8 *dataloc = rbuf->data; + + enum i40iw_cm_event_type type = I40IW_CM_EVENT_UNKNOWN; + u32 res_type; + + ret = i40iw_parse_mpa(cm_node, dataloc, &res_type, datasize); + if (ret) { + if (cm_node->state == I40IW_CM_STATE_MPAREQ_SENT) + i40iw_active_open_err(cm_node, true); + else + i40iw_passive_open_err(cm_node, true); + return; + } + + switch (cm_node->state) { + case I40IW_CM_STATE_ESTABLISHED: + if (res_type == I40IW_MPA_REQUEST_REJECT) + i40iw_pr_err("state for reject\n"); + cm_node->state = I40IW_CM_STATE_MPAREQ_RCVD; + type = I40IW_CM_EVENT_MPA_REQ; + i40iw_send_ack(cm_node); /* ACK received MPA request */ + atomic_set(&cm_node->passive_state, + I40IW_PASSIVE_STATE_INDICATED); + break; + case I40IW_CM_STATE_MPAREQ_SENT: + i40iw_cleanup_retrans_entry(cm_node); + if (res_type == I40IW_MPA_REQUEST_REJECT) { + type = I40IW_CM_EVENT_MPA_REJECT; + cm_node->state = I40IW_CM_STATE_MPAREJ_RCVD; + } else { + type = I40IW_CM_EVENT_CONNECTED; + cm_node->state = I40IW_CM_STATE_OFFLOADED; + i40iw_send_ack(cm_node); + } + break; + default: + pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state); + break; + } + i40iw_create_event(cm_node, type); +} + +/** 
+ * i40iw_indicate_pkt_err - Send up err event to cm + * @cm_node: connection's node + */ +static void i40iw_indicate_pkt_err(struct i40iw_cm_node *cm_node) +{ + switch (cm_node->state) { + case I40IW_CM_STATE_SYN_SENT: + case I40IW_CM_STATE_MPAREQ_SENT: + i40iw_active_open_err(cm_node, true); + break; + case I40IW_CM_STATE_ESTABLISHED: + case I40IW_CM_STATE_SYN_RCVD: + i40iw_passive_open_err(cm_node, true); + break; + case I40IW_CM_STATE_OFFLOADED: + default: + break; + } +} + +/** + * i40iw_check_syn - Check for error on received syn ack + * @cm_node: connection's node + * @tcph: pointer tcp header + */ +static int i40iw_check_syn(struct i40iw_cm_node *cm_node, struct tcphdr *tcph) +{ + int err = 0; + + if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) { + err = 1; + i40iw_active_open_err(cm_node, true); + } + return err; +} + +/** + * i40iw_check_seq - check seq numbers if OK + * @cm_node: connection's node + * @tcph: pointer tcp header + */ +static int i40iw_check_seq(struct i40iw_cm_node *cm_node, struct tcphdr *tcph) +{ + int err = 0; + u32 seq; + u32 ack_seq; + u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num; + u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt; + u32 rcv_wnd; + + seq = ntohl(tcph->seq); + ack_seq = ntohl(tcph->ack_seq); + rcv_wnd = cm_node->tcp_cntxt.rcv_wnd; + if (ack_seq != loc_seq_num) + err = -1; + else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd))) + err = -1; + if (err) { + i40iw_pr_err("seq number\n"); + i40iw_indicate_pkt_err(cm_node); + } + return err; +} + +/** + * i40iw_handle_syn_pkt - is for Passive node + * @cm_node: connection's node + * @rbuf: receive buffer + */ +static void i40iw_handle_syn_pkt(struct i40iw_cm_node *cm_node, + struct i40iw_puda_buf *rbuf) +{ + struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph; + int ret; + u32 inc_sequence; + int optionsize; + struct i40iw_cm_info nfo; + + optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); + inc_sequence = ntohl(tcph->seq); + + switch (cm_node->state) { + case I40IW_CM_STATE_SYN_SENT: + case I40IW_CM_STATE_MPAREQ_SENT: + /* Rcvd syn on active open connection */ + i40iw_active_open_err(cm_node, 1); + break; + case I40IW_CM_STATE_LISTENING: + /* Passive OPEN */ + if (atomic_read(&cm_node->listener->pend_accepts_cnt) > + cm_node->listener->backlog) { + cm_node->cm_core->stats_backlog_drops++; + i40iw_passive_open_err(cm_node, false); + break; + } + ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1); + if (ret) { + i40iw_passive_open_err(cm_node, false); + /* drop pkt */ + break; + } + cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1; + cm_node->accept_pend = 1; + atomic_inc(&cm_node->listener->pend_accepts_cnt); + + cm_node->state = I40IW_CM_STATE_SYN_RCVD; + i40iw_get_addr_info(cm_node, &nfo); + ret = i40iw_manage_qhash(cm_node->iwdev, + &nfo, + I40IW_QHASH_TYPE_TCP_ESTABLISHED, + I40IW_QHASH_MANAGE_TYPE_ADD, + (void *)cm_node, + false); + cm_node->qhash_set = true; + break; + case I40IW_CM_STATE_CLOSED: + i40iw_cleanup_retrans_entry(cm_node); + atomic_inc(&cm_node->ref_count); + i40iw_send_reset(cm_node); + break; + case I40IW_CM_STATE_OFFLOADED: + case I40IW_CM_STATE_ESTABLISHED: + case I40IW_CM_STATE_FIN_WAIT1: + case I40IW_CM_STATE_FIN_WAIT2: + case I40IW_CM_STATE_MPAREQ_RCVD: + case I40IW_CM_STATE_LAST_ACK: + case I40IW_CM_STATE_CLOSING: + case I40IW_CM_STATE_UNKNOWN: + default: + break; + } +} + +/** + * i40iw_handle_synack_pkt - Process SYN+ACK packet (active side) + * @cm_node: connection's node + * @rbuf: receive buffer + */ +static void i40iw_handle_synack_pkt(struct 
i40iw_cm_node *cm_node, + struct i40iw_puda_buf *rbuf) +{ + struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph; + int ret; + u32 inc_sequence; + int optionsize; + + optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); + inc_sequence = ntohl(tcph->seq); + switch (cm_node->state) { + case I40IW_CM_STATE_SYN_SENT: + i40iw_cleanup_retrans_entry(cm_node); + /* active open */ + if (i40iw_check_syn(cm_node, tcph)) { + i40iw_pr_err("check syn fail\n"); + return; + } + cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); + /* setup options */ + ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 0); + if (ret) { + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "cm_node=%p tcp_options failed\n", + cm_node); + break; + } + i40iw_cleanup_retrans_entry(cm_node); + cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1; + i40iw_send_ack(cm_node); /* ACK for the syn_ack */ + ret = i40iw_send_mpa_request(cm_node); + if (ret) { + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "cm_node=%p i40iw_send_mpa_request failed\n", + cm_node); + break; + } + cm_node->state = I40IW_CM_STATE_MPAREQ_SENT; + break; + case I40IW_CM_STATE_MPAREQ_RCVD: + i40iw_passive_open_err(cm_node, true); + break; + case I40IW_CM_STATE_LISTENING: + cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq); + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_CLOSED; + i40iw_send_reset(cm_node); + break; + case I40IW_CM_STATE_CLOSED: + cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq); + i40iw_cleanup_retrans_entry(cm_node); + atomic_inc(&cm_node->ref_count); + i40iw_send_reset(cm_node); + break; + case I40IW_CM_STATE_ESTABLISHED: + case I40IW_CM_STATE_FIN_WAIT1: + case I40IW_CM_STATE_FIN_WAIT2: + case I40IW_CM_STATE_LAST_ACK: + case I40IW_CM_STATE_OFFLOADED: + case I40IW_CM_STATE_CLOSING: + case I40IW_CM_STATE_UNKNOWN: + case I40IW_CM_STATE_MPAREQ_SENT: + default: + break; + } +} + +/** + * i40iw_handle_ack_pkt - process packet with ACK + * @cm_node: connection's node + * @rbuf: receive buffer + */ +static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node, + struct i40iw_puda_buf *rbuf) +{ + struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph; + u32 inc_sequence; + int ret = 0; + int optionsize; + u32 datasize = rbuf->datalen; + + optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); + + if (i40iw_check_seq(cm_node, tcph)) + return -EINVAL; + + inc_sequence = ntohl(tcph->seq); + switch (cm_node->state) { + case I40IW_CM_STATE_SYN_RCVD: + i40iw_cleanup_retrans_entry(cm_node); + ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1); + if (ret) + break; + cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); + cm_node->state = I40IW_CM_STATE_ESTABLISHED; + if (datasize) { + cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; + i40iw_handle_rcv_mpa(cm_node, rbuf); + } + break; + case I40IW_CM_STATE_ESTABLISHED: + i40iw_cleanup_retrans_entry(cm_node); + if (datasize) { + cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; + i40iw_handle_rcv_mpa(cm_node, rbuf); + } + break; + case I40IW_CM_STATE_MPAREQ_SENT: + cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); + if (datasize) { + cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; + i40iw_handle_rcv_mpa(cm_node, rbuf); + } + break; + case I40IW_CM_STATE_LISTENING: + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_CLOSED; + i40iw_send_reset(cm_node); + break; + case I40IW_CM_STATE_CLOSED: + i40iw_cleanup_retrans_entry(cm_node); + atomic_inc(&cm_node->ref_count); + i40iw_send_reset(cm_node); + break; + case 
I40IW_CM_STATE_LAST_ACK: + case I40IW_CM_STATE_CLOSING: + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_CLOSED; + if (!cm_node->accept_pend) + cm_node->cm_id->rem_ref(cm_node->cm_id); + i40iw_rem_ref_cm_node(cm_node); + break; + case I40IW_CM_STATE_FIN_WAIT1: + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_FIN_WAIT2; + break; + case I40IW_CM_STATE_SYN_SENT: + case I40IW_CM_STATE_FIN_WAIT2: + case I40IW_CM_STATE_OFFLOADED: + case I40IW_CM_STATE_MPAREQ_RCVD: + case I40IW_CM_STATE_UNKNOWN: + default: + i40iw_cleanup_retrans_entry(cm_node); + break; + } + return ret; +} + +/** + * i40iw_process_packet - process cm packet + * @cm_node: connection's node + * @rbuf: receive buffer + */ +static void i40iw_process_packet(struct i40iw_cm_node *cm_node, + struct i40iw_puda_buf *rbuf) +{ + enum i40iw_tcpip_pkt_type pkt_type = I40IW_PKT_TYPE_UNKNOWN; + struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph; + u32 fin_set = 0; + int ret; + + if (tcph->rst) { + pkt_type = I40IW_PKT_TYPE_RST; + } else if (tcph->syn) { + pkt_type = I40IW_PKT_TYPE_SYN; + if (tcph->ack) + pkt_type = I40IW_PKT_TYPE_SYNACK; + } else if (tcph->ack) { + pkt_type = I40IW_PKT_TYPE_ACK; + } + if (tcph->fin) + fin_set = 1; + + switch (pkt_type) { + case I40IW_PKT_TYPE_SYN: + i40iw_handle_syn_pkt(cm_node, rbuf); + break; + case I40IW_PKT_TYPE_SYNACK: + i40iw_handle_synack_pkt(cm_node, rbuf); + break; + case I40IW_PKT_TYPE_ACK: + ret = i40iw_handle_ack_pkt(cm_node, rbuf); + if (fin_set && !ret) + i40iw_handle_fin_pkt(cm_node); + break; + case I40IW_PKT_TYPE_RST: + i40iw_handle_rst_pkt(cm_node, rbuf); + break; + default: + if (fin_set && + (!i40iw_check_seq(cm_node, (struct tcphdr *)rbuf->tcph))) + i40iw_handle_fin_pkt(cm_node); + break; + } +} + +/** + * i40iw_make_listen_node - create a listen node with params + * @cm_core: cm's core + * @iwdev: iwarp device structure + * @cm_info: quad info for connection + */ +static struct i40iw_cm_listener *i40iw_make_listen_node( + struct i40iw_cm_core *cm_core, + struct i40iw_device *iwdev, + struct i40iw_cm_info *cm_info) +{ + struct i40iw_cm_listener *listener; + unsigned long flags; + + /* cannot have multiple matching listeners */ + listener = i40iw_find_listener(cm_core, cm_info->loc_addr, + cm_info->loc_port, + cm_info->vlan_id, + I40IW_CM_LISTENER_EITHER_STATE); + if (listener && + (listener->listener_state == I40IW_CM_LISTENER_ACTIVE_STATE)) { + atomic_dec(&listener->ref_count); + i40iw_debug(cm_core->dev, + I40IW_DEBUG_CM, + "Not creating listener since it already exists\n"); + return NULL; + } + + if (!listener) { + /* create a CM listen node (1/2 node to compare incoming traffic to) */ + listener = kzalloc(sizeof(*listener), GFP_ATOMIC); + if (!listener) + return NULL; + cm_core->stats_listen_nodes_created++; + memcpy(listener->loc_addr, cm_info->loc_addr, sizeof(listener->loc_addr)); + listener->loc_port = cm_info->loc_port; + + INIT_LIST_HEAD(&listener->child_listen_list); + + atomic_set(&listener->ref_count, 1); + } else { + listener->reused_node = 1; + } + + listener->cm_id = cm_info->cm_id; + listener->ipv4 = cm_info->ipv4; + listener->vlan_id = cm_info->vlan_id; + atomic_set(&listener->pend_accepts_cnt, 0); + listener->cm_core = cm_core; + listener->iwdev = iwdev; + + listener->backlog = cm_info->backlog; + listener->listener_state = I40IW_CM_LISTENER_ACTIVE_STATE; + + if (!listener->reused_node) { + spin_lock_irqsave(&cm_core->listen_list_lock, flags); + list_add(&listener->list, &cm_core->listen_nodes); + 
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); + } + + return listener; +} + +/** + * i40iw_create_cm_node - make a connection node with params + * @cm_core: cm's core + * @iwdev: iwarp device structure + * @private_data_len: len to provate data for mpa request + * @private_data: pointer to private data for connection + * @cm_info: quad info for connection + */ +static struct i40iw_cm_node *i40iw_create_cm_node( + struct i40iw_cm_core *cm_core, + struct i40iw_device *iwdev, + u16 private_data_len, + void *private_data, + struct i40iw_cm_info *cm_info) +{ + int ret; + struct i40iw_cm_node *cm_node; + struct i40iw_cm_listener *loopback_remotelistener; + struct i40iw_cm_node *loopback_remotenode; + struct i40iw_cm_info loopback_cm_info; + + /* create a CM connection node */ + cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL); + if (!cm_node) + return NULL; + /* set our node side to client (active) side */ + cm_node->tcp_cntxt.client = 1; + cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE; + + if (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) { + loopback_remotelistener = i40iw_find_listener( + cm_core, + cm_info->rem_addr, + cm_node->rem_port, + cm_node->vlan_id, + I40IW_CM_LISTENER_ACTIVE_STATE); + if (!loopback_remotelistener) { + i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED); + } else { + loopback_cm_info = *cm_info; + loopback_cm_info.loc_port = cm_info->rem_port; + loopback_cm_info.rem_port = cm_info->loc_port; + loopback_cm_info.cm_id = loopback_remotelistener->cm_id; + loopback_cm_info.ipv4 = cm_info->ipv4; + loopback_remotenode = i40iw_make_cm_node(cm_core, + iwdev, + &loopback_cm_info, + loopback_remotelistener); + if (!loopback_remotenode) { + i40iw_rem_ref_cm_node(cm_node); + return NULL; + } + cm_core->stats_loopbacks++; + loopback_remotenode->loopbackpartner = cm_node; + loopback_remotenode->tcp_cntxt.rcv_wscale = + I40IW_CM_DEFAULT_RCV_WND_SCALE; + cm_node->loopbackpartner = loopback_remotenode; + memcpy(loopback_remotenode->pdata_buf, private_data, + private_data_len); + loopback_remotenode->pdata.size = private_data_len; + + cm_node->state = I40IW_CM_STATE_OFFLOADED; + cm_node->tcp_cntxt.rcv_nxt = + loopback_remotenode->tcp_cntxt.loc_seq_num; + loopback_remotenode->tcp_cntxt.rcv_nxt = + cm_node->tcp_cntxt.loc_seq_num; + cm_node->tcp_cntxt.max_snd_wnd = + loopback_remotenode->tcp_cntxt.rcv_wnd; + loopback_remotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd; + cm_node->tcp_cntxt.snd_wnd = loopback_remotenode->tcp_cntxt.rcv_wnd; + loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd; + cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale; + loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale; + loopback_remotenode->state = I40IW_CM_STATE_MPAREQ_RCVD; + i40iw_create_event(loopback_remotenode, I40IW_CM_EVENT_MPA_REQ); + } + return cm_node; + } + + cm_node->pdata.size = private_data_len; + cm_node->pdata.addr = cm_node->pdata_buf; + + memcpy(cm_node->pdata_buf, private_data, private_data_len); + + cm_node->state = I40IW_CM_STATE_SYN_SENT; + ret = i40iw_send_syn(cm_node, 0); + + if (ret) { + if (cm_node->ipv4) + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "Api - connect() FAILED: dest addr=%pI4", + cm_node->rem_addr); + else + i40iw_debug(cm_node->dev, I40IW_DEBUG_CM, + "Api - connect() FAILED: dest addr=%pI6", + cm_node->rem_addr); + i40iw_rem_ref_cm_node(cm_node); + cm_node = NULL; + } + + if (cm_node) + i40iw_debug(cm_node->dev, + 
I40IW_DEBUG_CM, + "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n", + cm_node->rem_port, + cm_node, + cm_node->cm_id); + + return cm_node; +} + +/** + * i40iw_cm_reject - reject and teardown a connection + * @cm_node: connection's node + * @pdate: ptr to private data for reject + * @plen: size of private data + */ +static int i40iw_cm_reject(struct i40iw_cm_node *cm_node, const void *pdata, u8 plen) +{ + int ret = 0; + int err; + int passive_state; + struct iw_cm_id *cm_id = cm_node->cm_id; + struct i40iw_cm_node *loopback = cm_node->loopbackpartner; + + if (cm_node->tcp_cntxt.client) + return ret; + i40iw_cleanup_retrans_entry(cm_node); + + if (!loopback) { + passive_state = atomic_add_return(1, &cm_node->passive_state); + if (passive_state == I40IW_SEND_RESET_EVENT) { + cm_node->state = I40IW_CM_STATE_CLOSED; + i40iw_rem_ref_cm_node(cm_node); + } else { + if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) { + i40iw_rem_ref_cm_node(cm_node); + } else { + ret = i40iw_send_mpa_reject(cm_node, pdata, plen); + if (ret) { + cm_node->state = I40IW_CM_STATE_CLOSED; + err = i40iw_send_reset(cm_node); + if (err) + i40iw_pr_err("send reset failed\n"); + } else { + cm_id->add_ref(cm_id); + } + } + } + } else { + cm_node->cm_id = NULL; + if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) { + i40iw_rem_ref_cm_node(cm_node); + i40iw_rem_ref_cm_node(loopback); + } else { + ret = i40iw_send_cm_event(loopback, + loopback->cm_id, + IW_CM_EVENT_CONNECT_REPLY, + -ECONNREFUSED); + i40iw_rem_ref_cm_node(cm_node); + loopback->state = I40IW_CM_STATE_CLOSING; + + cm_id = loopback->cm_id; + i40iw_rem_ref_cm_node(loopback); + cm_id->rem_ref(cm_id); + } + } + + return ret; +} + +/** + * i40iw_cm_close - close of cm connection + * @cm_node: connection's node + */ +static int i40iw_cm_close(struct i40iw_cm_node *cm_node) +{ + int ret = 0; + + if (!cm_node) + return -EINVAL; + + switch (cm_node->state) { + case I40IW_CM_STATE_SYN_RCVD: + case I40IW_CM_STATE_SYN_SENT: + case I40IW_CM_STATE_ONE_SIDE_ESTABLISHED: + case I40IW_CM_STATE_ESTABLISHED: + case I40IW_CM_STATE_ACCEPTING: + case I40IW_CM_STATE_MPAREQ_SENT: + case I40IW_CM_STATE_MPAREQ_RCVD: + i40iw_cleanup_retrans_entry(cm_node); + i40iw_send_reset(cm_node); + break; + case I40IW_CM_STATE_CLOSE_WAIT: + cm_node->state = I40IW_CM_STATE_LAST_ACK; + i40iw_send_fin(cm_node); + break; + case I40IW_CM_STATE_FIN_WAIT1: + case I40IW_CM_STATE_FIN_WAIT2: + case I40IW_CM_STATE_LAST_ACK: + case I40IW_CM_STATE_TIME_WAIT: + case I40IW_CM_STATE_CLOSING: + ret = -1; + break; + case I40IW_CM_STATE_LISTENING: + i40iw_cleanup_retrans_entry(cm_node); + i40iw_send_reset(cm_node); + break; + case I40IW_CM_STATE_MPAREJ_RCVD: + case I40IW_CM_STATE_UNKNOWN: + case I40IW_CM_STATE_INITED: + case I40IW_CM_STATE_CLOSED: + case I40IW_CM_STATE_LISTENER_DESTROYED: + i40iw_rem_ref_cm_node(cm_node); + break; + case I40IW_CM_STATE_OFFLOADED: + if (cm_node->send_entry) + i40iw_pr_err("send_entry\n"); + i40iw_rem_ref_cm_node(cm_node); + break; + } + return ret; +} + +/** + * i40iw_receive_ilq - recv an ETHERNET packet, and process it + * through CM + * @dev: FPK dev struct + * @rbuf: receive buffer + */ +void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf) +{ + struct i40iw_cm_node *cm_node; + struct i40iw_cm_listener *listener; + struct iphdr *iph; + struct ipv6hdr *ip6h; + struct tcphdr *tcph; + struct i40iw_cm_info cm_info; + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + struct i40iw_cm_core *cm_core = &iwdev->cm_core; + struct 
vlan_ethhdr *ethh; + + /* if vlan, then maclen = 18 else 14 */ + iph = (struct iphdr *)rbuf->iph; + memset(&cm_info, 0, sizeof(cm_info)); + + i40iw_debug_buf(dev, + I40IW_DEBUG_ILQ, + "RECEIVE ILQ BUFFER", + rbuf->mem.va, + rbuf->totallen); + ethh = (struct vlan_ethhdr *)rbuf->mem.va; + + if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) { + cm_info.vlan_id = ntohs(ethh->h_vlan_TCI) & VLAN_VID_MASK; + i40iw_debug(cm_core->dev, + I40IW_DEBUG_CM, + "%s vlan_id=%d\n", + __func__, + cm_info.vlan_id); + } else { + cm_info.vlan_id = I40IW_NO_VLAN; + } + tcph = (struct tcphdr *)rbuf->tcph; + + if (rbuf->ipv4) { + cm_info.loc_addr[0] = ntohl(iph->daddr); + cm_info.rem_addr[0] = ntohl(iph->saddr); + cm_info.ipv4 = true; + } else { + ip6h = (struct ipv6hdr *)rbuf->iph; + i40iw_copy_ip_ntohl(cm_info.loc_addr, + ip6h->daddr.in6_u.u6_addr32); + i40iw_copy_ip_ntohl(cm_info.rem_addr, + ip6h->saddr.in6_u.u6_addr32); + cm_info.ipv4 = false; + } + cm_info.loc_port = ntohs(tcph->dest); + cm_info.rem_port = ntohs(tcph->source); + cm_node = i40iw_find_node(cm_core, + cm_info.rem_port, + cm_info.rem_addr, + cm_info.loc_port, + cm_info.loc_addr, + true); + + if (!cm_node) { + /* Only type of packet accepted are for */ + /* the PASSIVE open (syn only) */ + if (!tcph->syn || tcph->ack) + return; + listener = + i40iw_find_listener(cm_core, + cm_info.loc_addr, + cm_info.loc_port, + cm_info.vlan_id, + I40IW_CM_LISTENER_ACTIVE_STATE); + if (!listener) { + cm_info.cm_id = NULL; + i40iw_debug(cm_core->dev, + I40IW_DEBUG_CM, + "%s no listener found\n", + __func__); + return; + } + cm_info.cm_id = listener->cm_id; + cm_node = i40iw_make_cm_node(cm_core, iwdev, &cm_info, listener); + if (!cm_node) { + i40iw_debug(cm_core->dev, + I40IW_DEBUG_CM, + "%s allocate node failed\n", + __func__); + atomic_dec(&listener->ref_count); + return; + } + if (!tcph->rst && !tcph->fin) { + cm_node->state = I40IW_CM_STATE_LISTENING; + } else { + i40iw_rem_ref_cm_node(cm_node); + return; + } + atomic_inc(&cm_node->ref_count); + } else if (cm_node->state == I40IW_CM_STATE_OFFLOADED) { + i40iw_rem_ref_cm_node(cm_node); + return; + } + i40iw_process_packet(cm_node, rbuf); + i40iw_rem_ref_cm_node(cm_node); +} + +/** + * i40iw_setup_cm_core - allocate a top level instance of a cm + * core + * @iwdev: iwarp device structure + */ +void i40iw_setup_cm_core(struct i40iw_device *iwdev) +{ + struct i40iw_cm_core *cm_core = &iwdev->cm_core; + + cm_core->iwdev = iwdev; + cm_core->dev = &iwdev->sc_dev; + + INIT_LIST_HEAD(&cm_core->connected_nodes); + INIT_LIST_HEAD(&cm_core->listen_nodes); + + init_timer(&cm_core->tcp_timer); + cm_core->tcp_timer.function = i40iw_cm_timer_tick; + cm_core->tcp_timer.data = (unsigned long)cm_core; + + spin_lock_init(&cm_core->ht_lock); + spin_lock_init(&cm_core->listen_list_lock); + + cm_core->event_wq = create_singlethread_workqueue("iwewq"); + cm_core->disconn_wq = create_singlethread_workqueue("iwdwq"); +} + +/** + * i40iw_cleanup_cm_core - deallocate a top level instance of a + * cm core + * @cm_core: cm's core + */ +void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core) +{ + unsigned long flags; + + if (!cm_core) + return; + + spin_lock_irqsave(&cm_core->ht_lock, flags); + if (timer_pending(&cm_core->tcp_timer)) + del_timer_sync(&cm_core->tcp_timer); + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + + destroy_workqueue(cm_core->event_wq); + destroy_workqueue(cm_core->disconn_wq); +} + +/** + * i40iw_init_tcp_ctx - setup qp context + * @cm_node: connection's node + * @tcp_info: offload info for tcp + * @iwqp: 
associate qp for the connection + */ +static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node, + struct i40iw_tcp_offload_info *tcp_info, + struct i40iw_qp *iwqp) +{ + tcp_info->ipv4 = cm_node->ipv4; + tcp_info->drop_ooo_seg = true; + tcp_info->wscale = true; + tcp_info->ignore_tcp_opt = true; + tcp_info->ignore_tcp_uns_opt = true; + tcp_info->no_nagle = false; + + tcp_info->ttl = I40IW_DEFAULT_TTL; + tcp_info->rtt_var = cpu_to_le32(I40IW_DEFAULT_RTT_VAR); + tcp_info->ss_thresh = cpu_to_le32(I40IW_DEFAULT_SS_THRESH); + tcp_info->rexmit_thresh = I40IW_DEFAULT_REXMIT_THRESH; + + tcp_info->tcp_state = I40IW_TCP_STATE_ESTABLISHED; + tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale; + tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale; + + tcp_info->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); + tcp_info->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd); + tcp_info->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt); + tcp_info->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); + + tcp_info->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); + tcp_info->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss); + tcp_info->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt); + tcp_info->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); + tcp_info->max_snd_window = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd); + tcp_info->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd << + cm_node->tcp_cntxt.rcv_wscale); + + tcp_info->flow_label = 0; + tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss)); + if (cm_node->vlan_id < VLAN_TAG_PRESENT) { + tcp_info->insert_vlan_tag = true; + tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id); + } + if (cm_node->ipv4) { + tcp_info->src_port = cpu_to_le16(cm_node->loc_port); + tcp_info->dst_port = cpu_to_le16(cm_node->rem_port); + + tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[0]); + tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[0]); + tcp_info->arp_idx = cpu_to_le32(i40iw_arp_table(iwqp->iwdev, + &tcp_info->dest_ip_addr3, + true, + NULL, + I40IW_ARP_RESOLVE)); + } else { + tcp_info->src_port = cpu_to_le16(cm_node->loc_port); + tcp_info->dst_port = cpu_to_le16(cm_node->rem_port); + tcp_info->dest_ip_addr0 = cpu_to_le32(cm_node->rem_addr[0]); + tcp_info->dest_ip_addr1 = cpu_to_le32(cm_node->rem_addr[1]); + tcp_info->dest_ip_addr2 = cpu_to_le32(cm_node->rem_addr[2]); + tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[3]); + tcp_info->local_ipaddr0 = cpu_to_le32(cm_node->loc_addr[0]); + tcp_info->local_ipaddr1 = cpu_to_le32(cm_node->loc_addr[1]); + tcp_info->local_ipaddr2 = cpu_to_le32(cm_node->loc_addr[2]); + tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[3]); + tcp_info->arp_idx = cpu_to_le32(i40iw_arp_table( + iwqp->iwdev, + &tcp_info->dest_ip_addr0, + false, + NULL, + I40IW_ARP_RESOLVE)); + } +} + +/** + * i40iw_cm_init_tsa_conn - setup qp for RTS + * @iwqp: associate qp for the connection + * @cm_node: connection's node + */ +static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp, + struct i40iw_cm_node *cm_node) +{ + struct i40iw_tcp_offload_info tcp_info; + struct i40iwarp_offload_info *iwarp_info; + struct i40iw_qp_host_ctx_info *ctx_info; + struct i40iw_device *iwdev = iwqp->iwdev; + struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev; + + memset(&tcp_info, 0x00, sizeof(struct i40iw_tcp_offload_info)); + iwarp_info = &iwqp->iwarp_info; + ctx_info = &iwqp->ctx_info; + + ctx_info->tcp_info = &tcp_info; + ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; + ctx_info->rcv_cq_num = 
iwqp->iwrcq->sc_cq.cq_uk.cq_id; + + iwarp_info->ord_size = cm_node->ord_size; + iwarp_info->ird_size = i40iw_derive_hw_ird_setting(cm_node->ird_size); + + if (iwarp_info->ord_size == 1) + iwarp_info->ord_size = 2; + + iwarp_info->rd_enable = true; + iwarp_info->rdmap_ver = 1; + iwarp_info->ddp_ver = 1; + + iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id; + + ctx_info->tcp_info_valid = true; + ctx_info->iwarp_info_valid = true; + + i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp); + if (cm_node->snd_mark_en) { + iwarp_info->snd_mark_en = true; + iwarp_info->snd_mark_offset = (tcp_info.snd_nxt & + SNDMARKER_SEQNMASK) + cm_node->lsmm_size; + } + + cm_node->state = I40IW_CM_STATE_OFFLOADED; + tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED; + tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx; + + dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info); + + /* once tcp_info is set, no need to do it again */ + ctx_info->tcp_info_valid = false; + ctx_info->iwarp_info_valid = false; +} + +/** + * i40iw_cm_disconn - when a connection is being closed + * @iwqp: associate qp for the connection + */ +int i40iw_cm_disconn(struct i40iw_qp *iwqp) +{ + struct disconn_work *work; + struct i40iw_device *iwdev = iwqp->iwdev; + struct i40iw_cm_core *cm_core = &iwdev->cm_core; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return -ENOMEM; /* Timer will clean up */ + + i40iw_add_ref(&iwqp->ibqp); + work->iwqp = iwqp; + INIT_WORK(&work->work, i40iw_disconnect_worker); + queue_work(cm_core->disconn_wq, &work->work); + return 0; +} + +/** + * i40iw_loopback_nop - Send a nop + * @qp: associated hw qp + */ +static void i40iw_loopback_nop(struct i40iw_sc_qp *qp) +{ + u64 *wqe; + u64 header; + + wqe = qp->qp_uk.sq_base->elem; + set_64bit_val(wqe, 0, 0); + set_64bit_val(wqe, 8, 0); + set_64bit_val(wqe, 16, 0); + + header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) | + LS_64(0, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); + set_64bit_val(wqe, 24, header); +} + +/** + * i40iw_qp_disconnect - free qp and close cm + * @iwqp: associate qp for the connection + */ +static void i40iw_qp_disconnect(struct i40iw_qp *iwqp) +{ + struct i40iw_device *iwdev; + struct i40iw_ib_device *iwibdev; + + iwdev = to_iwdev(iwqp->ibqp.device); + if (!iwdev) { + i40iw_pr_err("iwdev == NULL\n"); + return; + } + + iwibdev = iwdev->iwibdev; + + if (iwqp->active_conn) { + /* indicate this connection is NOT active */ + iwqp->active_conn = 0; + } else { + /* Need to free the Last Streaming Mode Message */ + if (iwqp->ietf_mem.va) { + if (iwqp->lsmm_mr) + iwibdev->ibdev.dereg_mr(iwqp->lsmm_mr); + i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem); + } + } + + /* close the CM node down if it is still active */ + if (iwqp->cm_node) { + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "%s Call close API\n", __func__); + i40iw_cm_close(iwqp->cm_node); + } +} + +/** + * i40iw_cm_disconn_true - called by worker thread to disconnect qp + * @iwqp: associate qp for the connection + */ +static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp) +{ + struct iw_cm_id *cm_id; + struct i40iw_device *iwdev; + struct i40iw_sc_qp *qp = &iwqp->sc_qp; + u16 last_ae; + u8 original_hw_tcp_state; + u8 original_ibqp_state; + int disconn_status = 0; + int issue_disconn = 0; + int issue_close = 0; + int issue_flush = 0; + struct ib_event ibevent; + unsigned long flags; + int ret; + + if (!iwqp) { + i40iw_pr_err("iwqp == NULL\n"); + return; + } + + spin_lock_irqsave(&iwqp->lock, flags); + cm_id = iwqp->cm_id; + /* make 
sure we havent already closed this connection */ + if (!cm_id) { + spin_unlock_irqrestore(&iwqp->lock, flags); + return; + } + + iwdev = to_iwdev(iwqp->ibqp.device); + + original_hw_tcp_state = iwqp->hw_tcp_state; + original_ibqp_state = iwqp->ibqp_state; + last_ae = iwqp->last_aeq; + + if (qp->term_flags) { + issue_disconn = 1; + issue_close = 1; + iwqp->cm_id = NULL; + /*When term timer expires after cm_timer, don't want + *terminate-handler to issue cm_disconn which can re-free + *a QP even after its refcnt=0. + */ + del_timer(&iwqp->terminate_timer); + if (!iwqp->flush_issued) { + iwqp->flush_issued = 1; + issue_flush = 1; + } + } else if ((original_hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) || + ((original_ibqp_state == IB_QPS_RTS) && + (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) { + issue_disconn = 1; + if (last_ae == I40IW_AE_LLP_CONNECTION_RESET) + disconn_status = -ECONNRESET; + } + + if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) || + (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) || + (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) || + (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) { + issue_close = 1; + iwqp->cm_id = NULL; + if (!iwqp->flush_issued) { + iwqp->flush_issued = 1; + issue_flush = 1; + } + } + + spin_unlock_irqrestore(&iwqp->lock, flags); + if (issue_flush && !iwqp->destroyed) { + /* Flush the queues */ + i40iw_flush_wqes(iwdev, iwqp); + + if (qp->term_flags) { + ibevent.device = iwqp->ibqp.device; + ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ? + IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR; + ibevent.element.qp = &iwqp->ibqp; + iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context); + } + } + + if (cm_id && cm_id->event_handler) { + if (issue_disconn) { + ret = i40iw_send_cm_event(NULL, + cm_id, + IW_CM_EVENT_DISCONNECT, + disconn_status); + + if (ret) + i40iw_debug(&iwdev->sc_dev, + I40IW_DEBUG_CM, + "disconnect event failed %s: - cm_id = %p\n", + __func__, cm_id); + } + if (issue_close) { + i40iw_qp_disconnect(iwqp); + cm_id->provider_data = iwqp; + ret = i40iw_send_cm_event(NULL, cm_id, IW_CM_EVENT_CLOSE, 0); + if (ret) + i40iw_debug(&iwdev->sc_dev, + I40IW_DEBUG_CM, + "close event failed %s: - cm_id = %p\n", + __func__, cm_id); + cm_id->rem_ref(cm_id); + } + } +} + +/** + * i40iw_disconnect_worker - worker for connection close + * @work: points or disconn structure + */ +static void i40iw_disconnect_worker(struct work_struct *work) +{ + struct disconn_work *dwork = container_of(work, struct disconn_work, work); + struct i40iw_qp *iwqp = dwork->iwqp; + + kfree(dwork); + i40iw_cm_disconn_true(iwqp); + i40iw_rem_ref(&iwqp->ibqp); +} + +/** + * i40iw_accept - registered call for connection to be accepted + * @cm_id: cm information for passive connection + * @conn_param: accpet parameters + */ +int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) +{ + struct ib_qp *ibqp; + struct i40iw_qp *iwqp; + struct i40iw_device *iwdev; + struct i40iw_sc_dev *dev; + struct i40iw_cm_node *cm_node; + struct ib_qp_attr attr; + int passive_state; + struct i40iw_ib_device *iwibdev; + struct ib_mr *ibmr; + struct i40iw_pd *iwpd; + u16 buf_len = 0; + struct i40iw_kmem_info accept; + enum i40iw_status_code status; + u64 tagged_offset; + + memset(&attr, 0, sizeof(attr)); + ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn); + if (!ibqp) + return -EINVAL; + + iwqp = to_iwqp(ibqp); + iwdev = iwqp->iwdev; + dev = &iwdev->sc_dev; + cm_node = (struct i40iw_cm_node *)cm_id->provider_data; + + if (((struct sockaddr_in 
*)&cm_id->local_addr)->sin_family == AF_INET) { + cm_node->ipv4 = true; + cm_node->vlan_id = i40iw_get_vlan_ipv4(cm_node->loc_addr); + } else { + cm_node->ipv4 = false; + i40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id, NULL); + } + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "Accept vlan_id=%d\n", + cm_node->vlan_id); + if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) { + if (cm_node->loopbackpartner) + i40iw_rem_ref_cm_node(cm_node->loopbackpartner); + i40iw_rem_ref_cm_node(cm_node); + return -EINVAL; + } + + passive_state = atomic_add_return(1, &cm_node->passive_state); + if (passive_state == I40IW_SEND_RESET_EVENT) { + i40iw_rem_ref_cm_node(cm_node); + return -ECONNRESET; + } + + cm_node->cm_core->stats_accepts++; + iwqp->cm_node = (void *)cm_node; + cm_node->iwqp = iwqp; + + buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE + MPA_ZERO_PAD_LEN; + + status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1); + + if (status) + return -ENOMEM; + cm_node->pdata.size = conn_param->private_data_len; + accept.addr = iwqp->ietf_mem.va; + accept.size = i40iw_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY); + memcpy(accept.addr + accept.size, conn_param->private_data, + conn_param->private_data_len); + + /* setup our first outgoing iWarp send WQE (the IETF frame response) */ + if ((cm_node->ipv4 && + !i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) || + (!cm_node->ipv4 && + !i40iw_ipv6_is_loopback(cm_node->loc_addr, cm_node->rem_addr))) { + iwibdev = iwdev->iwibdev; + iwpd = iwqp->iwpd; + tagged_offset = (uintptr_t)iwqp->ietf_mem.va; + ibmr = i40iw_reg_phys_mr(&iwpd->ibpd, + iwqp->ietf_mem.pa, + buf_len, + IB_ACCESS_LOCAL_WRITE, + &tagged_offset); + if (IS_ERR(ibmr)) { + i40iw_free_dma_mem(dev->hw, &iwqp->ietf_mem); + return -ENOMEM; + } + + ibmr->pd = &iwpd->ibpd; + ibmr->device = iwpd->ibpd.device; + iwqp->lsmm_mr = ibmr; + if (iwqp->page) + iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page); + if (is_remote_ne020_or_chelsio(cm_node)) + dev->iw_priv_qp_ops->qp_send_lsmm( + &iwqp->sc_qp, + iwqp->ietf_mem.va, + (accept.size + conn_param->private_data_len), + ibmr->lkey); + else + dev->iw_priv_qp_ops->qp_send_lsmm( + &iwqp->sc_qp, + iwqp->ietf_mem.va, + (accept.size + conn_param->private_data_len + MPA_ZERO_PAD_LEN), + ibmr->lkey); + + } else { + if (iwqp->page) + iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page); + i40iw_loopback_nop(&iwqp->sc_qp); + } + + if (iwqp->page) + kunmap(iwqp->page); + + iwqp->cm_id = cm_id; + cm_node->cm_id = cm_id; + + cm_id->provider_data = (void *)iwqp; + iwqp->active_conn = 0; + + cm_node->lsmm_size = accept.size + conn_param->private_data_len; + i40iw_cm_init_tsa_conn(iwqp, cm_node); + cm_id->add_ref(cm_id); + i40iw_add_ref(&iwqp->ibqp); + + i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0); + + attr.qp_state = IB_QPS_RTS; + cm_node->qhash_set = false; + i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL); + if (cm_node->loopbackpartner) { + cm_node->loopbackpartner->pdata.size = conn_param->private_data_len; + + /* copy entire MPA frame to our cm_node's frame */ + memcpy(cm_node->loopbackpartner->pdata_buf, + conn_param->private_data, + conn_param->private_data_len); + i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED); + } + + cm_node->accelerated = 1; + if (cm_node->accept_pend) { + if (!cm_node->listener) + i40iw_pr_err("cm_node->listener NULL for passive node\n"); + atomic_dec(&cm_node->listener->pend_accepts_cnt); + cm_node->accept_pend = 0; + } + return 0; +} + 
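
For reference between hunks: the i40iw_accept() path above stages the MPA reply frame and the caller's private data in one DMA buffer (ietf_mem) before posting it as the Last Streaming Mode Message, sizing the buffer as private_data_len + I40IW_MAX_IETF_SIZE + MPA_ZERO_PAD_LEN and appending the private data directly after the frame built by i40iw_cm_build_mpa_frame(). The standalone C sketch below is editorial illustration only, not part of the patch; build_mpa_reply() and the size constants are hypothetical stand-ins for the driver's helpers, and plain calloc() stands in for the DMA allocation. It only demonstrates the buffer-layout arithmetic, including the two LSMM lengths (with and without the zero pad) seen in the qp_send_lsmm calls.

/* Illustrative only: mirrors the accept-buffer layout used above, with
 * hypothetical sizes standing in for the driver's constants and helpers. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_IETF_SIZE 32 /* stand-in for I40IW_MAX_IETF_SIZE */
#define ZERO_PAD_LEN   4 /* stand-in for MPA_ZERO_PAD_LEN    */

/* Hypothetical stand-in for i40iw_cm_build_mpa_frame(): writes a fixed-size
 * MPA reply header at the start of the buffer and returns the bytes used. */
static size_t build_mpa_reply(uint8_t *buf)
{
	memset(buf, 0, MAX_IETF_SIZE);
	return MAX_IETF_SIZE;
}

int main(void)
{
	const uint8_t priv[] = "private data";            /* conn_param->private_data   */
	size_t priv_len = sizeof(priv);                   /* conn_param->private_data_len */
	size_t buf_len = priv_len + MAX_IETF_SIZE + ZERO_PAD_LEN;
	uint8_t *ietf_mem = calloc(1, buf_len);           /* driver allocates DMA memory */
	size_t hdr_len;

	if (!ietf_mem)
		return 1;

	hdr_len = build_mpa_reply(ietf_mem);
	memcpy(ietf_mem + hdr_len, priv, priv_len);       /* private data follows the frame */

	printf("LSMM length without pad: %zu, with pad: %zu\n",
	       hdr_len + priv_len, hdr_len + priv_len + ZERO_PAD_LEN);
	free(ietf_mem);
	return 0;
}
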
+/** + * i40iw_reject - registered call for connection to be rejected + * @cm_id: cm information for passive connection + * @pdata: private data to be sent + * @pdata_len: private data length + */ +int i40iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) +{ + struct i40iw_device *iwdev; + struct i40iw_cm_node *cm_node; + struct i40iw_cm_node *loopback; + + cm_node = (struct i40iw_cm_node *)cm_id->provider_data; + loopback = cm_node->loopbackpartner; + cm_node->cm_id = cm_id; + cm_node->pdata.size = pdata_len; + + iwdev = to_iwdev(cm_id->device); + if (!iwdev) + return -EINVAL; + cm_node->cm_core->stats_rejects++; + + if (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER) + return -EINVAL; + + if (loopback) { + memcpy(&loopback->pdata_buf, pdata, pdata_len); + loopback->pdata.size = pdata_len; + } + + return i40iw_cm_reject(cm_node, pdata, pdata_len); +} + +/** + * i40iw_connect - registered call for connection to be established + * @cm_id: cm information for passive connection + * @conn_param: Information about the connection + */ +int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) +{ + struct ib_qp *ibqp; + struct i40iw_qp *iwqp; + struct i40iw_device *iwdev; + struct i40iw_cm_node *cm_node; + struct i40iw_cm_info cm_info; + struct sockaddr_in *laddr; + struct sockaddr_in *raddr; + struct sockaddr_in6 *laddr6; + struct sockaddr_in6 *raddr6; + int apbvt_set = 0; + enum i40iw_status_code status; + + ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn); + if (!ibqp) + return -EINVAL; + iwqp = to_iwqp(ibqp); + if (!iwqp) + return -EINVAL; + iwdev = to_iwdev(iwqp->ibqp.device); + if (!iwdev) + return -EINVAL; + + laddr = (struct sockaddr_in *)&cm_id->m_local_addr; + raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; + laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; + raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; + + if (!(laddr->sin_port) || !(raddr->sin_port)) + return -EINVAL; + + iwqp->active_conn = 1; + iwqp->cm_id = NULL; + cm_id->provider_data = iwqp; + + /* set up the connection params for the node */ + if (cm_id->remote_addr.ss_family == AF_INET) { + cm_info.ipv4 = true; + memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr)); + memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr)); + cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr); + cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr); + cm_info.loc_port = ntohs(laddr->sin_port); + cm_info.rem_port = ntohs(raddr->sin_port); + cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr); + } else { + cm_info.ipv4 = false; + i40iw_copy_ip_ntohl(cm_info.loc_addr, + laddr6->sin6_addr.in6_u.u6_addr32); + i40iw_copy_ip_ntohl(cm_info.rem_addr, + raddr6->sin6_addr.in6_u.u6_addr32); + cm_info.loc_port = ntohs(laddr6->sin6_port); + cm_info.rem_port = ntohs(raddr6->sin6_port); + i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL); + } + cm_info.cm_id = cm_id; + if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) || + (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32, + raddr6->sin6_addr.in6_u.u6_addr32, + sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) { + status = i40iw_manage_qhash(iwdev, + &cm_info, + I40IW_QHASH_TYPE_TCP_ESTABLISHED, + I40IW_QHASH_MANAGE_TYPE_ADD, + NULL, + true); + if (status) + return -EINVAL; + } + status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD); + if (status) { + i40iw_manage_qhash(iwdev, + &cm_info, + I40IW_QHASH_TYPE_TCP_ESTABLISHED, + I40IW_QHASH_MANAGE_TYPE_DELETE, + NULL, + false); + 
return -EINVAL; + } + + apbvt_set = 1; + cm_id->add_ref(cm_id); + cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev, + conn_param->private_data_len, + (void *)conn_param->private_data, + &cm_info); + if (!cm_node) { + i40iw_manage_qhash(iwdev, + &cm_info, + I40IW_QHASH_TYPE_TCP_ESTABLISHED, + I40IW_QHASH_MANAGE_TYPE_DELETE, + NULL, + false); + + if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core, + cm_info.loc_port)) + i40iw_manage_apbvt(iwdev, + cm_info.loc_port, + I40IW_MANAGE_APBVT_DEL); + cm_id->rem_ref(cm_id); + iwdev->cm_core.stats_connect_errs++; + return -ENOMEM; + } + + i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord); + if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO && + !cm_node->ord_size) + cm_node->ord_size = 1; + + cm_node->apbvt_set = apbvt_set; + cm_node->qhash_set = true; + iwqp->cm_node = cm_node; + cm_node->iwqp = iwqp; + iwqp->cm_id = cm_id; + i40iw_add_ref(&iwqp->ibqp); + return 0; +} + +/** + * i40iw_create_listen - registered call creating listener + * @cm_id: cm information for passive connection + * @backlog: to max accept pending count + */ +int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog) +{ + struct i40iw_device *iwdev; + struct i40iw_cm_listener *cm_listen_node; + struct i40iw_cm_info cm_info; + enum i40iw_status_code ret; + struct sockaddr_in *laddr; + struct sockaddr_in6 *laddr6; + bool wildcard = false; + + iwdev = to_iwdev(cm_id->device); + if (!iwdev) + return -EINVAL; + + laddr = (struct sockaddr_in *)&cm_id->m_local_addr; + laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; + memset(&cm_info, 0, sizeof(cm_info)); + if (laddr->sin_family == AF_INET) { + cm_info.ipv4 = true; + cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr); + cm_info.loc_port = ntohs(laddr->sin_port); + + if (laddr->sin_addr.s_addr != INADDR_ANY) + cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr); + else + wildcard = true; + + } else { + cm_info.ipv4 = false; + i40iw_copy_ip_ntohl(cm_info.loc_addr, + laddr6->sin6_addr.in6_u.u6_addr32); + cm_info.loc_port = ntohs(laddr6->sin6_port); + if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY) + i40iw_netdev_vlan_ipv6(cm_info.loc_addr, + &cm_info.vlan_id, + NULL); + else + wildcard = true; + } + cm_info.backlog = backlog; + cm_info.cm_id = cm_id; + + cm_listen_node = i40iw_make_listen_node(&iwdev->cm_core, iwdev, &cm_info); + if (!cm_listen_node) { + i40iw_pr_err("cm_listen_node == NULL\n"); + return -ENOMEM; + } + + cm_id->provider_data = cm_listen_node; + + if (!cm_listen_node->reused_node) { + if (wildcard) { + if (cm_info.ipv4) + ret = i40iw_add_mqh_4(iwdev, + &cm_info, + cm_listen_node); + else + ret = i40iw_add_mqh_6(iwdev, + &cm_info, + cm_listen_node); + if (ret) + goto error; + + ret = i40iw_manage_apbvt(iwdev, + cm_info.loc_port, + I40IW_MANAGE_APBVT_ADD); + + if (ret) + goto error; + } else { + ret = i40iw_manage_qhash(iwdev, + &cm_info, + I40IW_QHASH_TYPE_TCP_SYN, + I40IW_QHASH_MANAGE_TYPE_ADD, + NULL, + true); + if (ret) + goto error; + cm_listen_node->qhash_set = true; + ret = i40iw_manage_apbvt(iwdev, + cm_info.loc_port, + I40IW_MANAGE_APBVT_ADD); + if (ret) + goto error; + } + } + cm_id->add_ref(cm_id); + cm_listen_node->cm_core->stats_listen_created++; + return 0; + error: + i40iw_cm_del_listen(&iwdev->cm_core, (void *)cm_listen_node, false); + return -EINVAL; +} + +/** + * i40iw_destroy_listen - registered call to destroy listener + * @cm_id: cm information for passive connection + */ +int i40iw_destroy_listen(struct iw_cm_id *cm_id) +{ + struct i40iw_device 
*iwdev; + + iwdev = to_iwdev(cm_id->device); + if (cm_id->provider_data) + i40iw_cm_del_listen(&iwdev->cm_core, cm_id->provider_data, true); + else + i40iw_pr_err("cm_id->provider_data was NULL\n"); + + cm_id->rem_ref(cm_id); + + return 0; +} + +/** + * i40iw_cm_event_connected - handle connected active node + * @event: the info for cm_node of connection + */ +static void i40iw_cm_event_connected(struct i40iw_cm_event *event) +{ + struct i40iw_qp *iwqp; + struct i40iw_device *iwdev; + struct i40iw_cm_node *cm_node; + struct i40iw_sc_dev *dev; + struct ib_qp_attr attr; + struct iw_cm_id *cm_id; + int status; + bool read0; + + cm_node = event->cm_node; + cm_id = cm_node->cm_id; + iwqp = (struct i40iw_qp *)cm_id->provider_data; + iwdev = to_iwdev(iwqp->ibqp.device); + dev = &iwdev->sc_dev; + + if (iwqp->destroyed) { + status = -ETIMEDOUT; + goto error; + } + i40iw_cm_init_tsa_conn(iwqp, cm_node); + read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO); + if (iwqp->page) + iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page); + dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0); + if (iwqp->page) + kunmap(iwqp->page); + status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0); + if (status) + i40iw_pr_err("send cm event\n"); + + memset(&attr, 0, sizeof(attr)); + attr.qp_state = IB_QPS_RTS; + cm_node->qhash_set = false; + i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL); + + cm_node->accelerated = 1; + if (cm_node->accept_pend) { + if (!cm_node->listener) + i40iw_pr_err("listener is null for passive node\n"); + atomic_dec(&cm_node->listener->pend_accepts_cnt); + cm_node->accept_pend = 0; + } + return; + +error: + iwqp->cm_id = NULL; + cm_id->provider_data = NULL; + i40iw_send_cm_event(event->cm_node, + cm_id, + IW_CM_EVENT_CONNECT_REPLY, + status); + cm_id->rem_ref(cm_id); + i40iw_rem_ref_cm_node(event->cm_node); +} + +/** + * i40iw_cm_event_reset - handle reset + * @event: the info for cm_node of connection + */ +static void i40iw_cm_event_reset(struct i40iw_cm_event *event) +{ + struct i40iw_cm_node *cm_node = event->cm_node; + struct iw_cm_id *cm_id = cm_node->cm_id; + struct i40iw_qp *iwqp; + + if (!cm_id) + return; + + iwqp = cm_id->provider_data; + if (!iwqp) + return; + + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "reset event %p - cm_id = %p\n", + event->cm_node, cm_id); + iwqp->cm_id = NULL; + + i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT, -ECONNRESET); + i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0); +} + +/** + * i40iw_cm_event_handler - worker thread callback to send event to cm upper layer + * @work: pointer of cm event info. 
+ */ +static void i40iw_cm_event_handler(struct work_struct *work) +{ + struct i40iw_cm_event *event = container_of(work, + struct i40iw_cm_event, + event_work); + struct i40iw_cm_node *cm_node; + + if (!event || !event->cm_node || !event->cm_node->cm_core) + return; + + cm_node = event->cm_node; + + switch (event->type) { + case I40IW_CM_EVENT_MPA_REQ: + i40iw_send_cm_event(cm_node, + cm_node->cm_id, + IW_CM_EVENT_CONNECT_REQUEST, + 0); + break; + case I40IW_CM_EVENT_RESET: + i40iw_cm_event_reset(event); + break; + case I40IW_CM_EVENT_CONNECTED: + if (!event->cm_node->cm_id || + (event->cm_node->state != I40IW_CM_STATE_OFFLOADED)) + break; + i40iw_cm_event_connected(event); + break; + case I40IW_CM_EVENT_MPA_REJECT: + if (!event->cm_node->cm_id || + (cm_node->state == I40IW_CM_STATE_OFFLOADED)) + break; + i40iw_send_cm_event(cm_node, + cm_node->cm_id, + IW_CM_EVENT_CONNECT_REPLY, + -ECONNREFUSED); + break; + case I40IW_CM_EVENT_ABORTED: + if (!event->cm_node->cm_id || + (event->cm_node->state == I40IW_CM_STATE_OFFLOADED)) + break; + i40iw_event_connect_error(event); + break; + default: + i40iw_pr_err("event type = %d\n", event->type); + break; + } + + event->cm_info.cm_id->rem_ref(event->cm_info.cm_id); + i40iw_rem_ref_cm_node(event->cm_node); + kfree(event); +} + +/** + * i40iw_cm_post_event - queue event request for worker thread + * @event: cm node's info for up event call + */ +static void i40iw_cm_post_event(struct i40iw_cm_event *event) +{ + atomic_inc(&event->cm_node->ref_count); + event->cm_info.cm_id->add_ref(event->cm_info.cm_id); + INIT_WORK(&event->event_work, i40iw_cm_event_handler); + + queue_work(event->cm_node->cm_core->event_wq, &event->event_work); +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h new file mode 100644 index 000000000..5f8ceb4a8 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h @@ -0,0 +1,456 @@ +/******************************************************************************* +* +* Copyright (c) 2015 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#ifndef I40IW_CM_H +#define I40IW_CM_H + +#define QUEUE_EVENTS + +#define I40IW_MANAGE_APBVT_DEL 0 +#define I40IW_MANAGE_APBVT_ADD 1 + +#define I40IW_MPA_REQUEST_ACCEPT 1 +#define I40IW_MPA_REQUEST_REJECT 2 + +/* IETF MPA -- defines, enums, structs */ +#define IEFT_MPA_KEY_REQ "MPA ID Req Frame" +#define IEFT_MPA_KEY_REP "MPA ID Rep Frame" +#define IETF_MPA_KEY_SIZE 16 +#define IETF_MPA_VERSION 1 +#define IETF_MAX_PRIV_DATA_LEN 512 +#define IETF_MPA_FRAME_SIZE 20 +#define IETF_RTR_MSG_SIZE 4 +#define IETF_MPA_V2_FLAG 0x10 +#define SNDMARKER_SEQNMASK 0x000001FF + +#define I40IW_MAX_IETF_SIZE 32 + +#define MPA_ZERO_PAD_LEN 4 + +/* IETF RTR MSG Fields */ +#define IETF_PEER_TO_PEER 0x8000 +#define IETF_FLPDU_ZERO_LEN 0x4000 +#define IETF_RDMA0_WRITE 0x8000 +#define IETF_RDMA0_READ 0x4000 +#define IETF_NO_IRD_ORD 0x3FFF + +/* HW-supported IRD sizes*/ +#define I40IW_HW_IRD_SETTING_2 2 +#define I40IW_HW_IRD_SETTING_4 4 +#define I40IW_HW_IRD_SETTING_8 8 +#define I40IW_HW_IRD_SETTING_16 16 +#define I40IW_HW_IRD_SETTING_32 32 +#define I40IW_HW_IRD_SETTING_64 64 + +enum ietf_mpa_flags { + IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ + IETF_MPA_FLAGS_CRC = 0x40, /* receive Markers */ + IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */ +}; + +struct ietf_mpa_v1 { + u8 key[IETF_MPA_KEY_SIZE]; + u8 flags; + u8 rev; + __be16 priv_data_len; + u8 priv_data[0]; +}; + +#define ietf_mpa_req_resp_frame ietf_mpa_frame + +struct ietf_rtr_msg { + __be16 ctrl_ird; + __be16 ctrl_ord; +}; + +struct ietf_mpa_v2 { + u8 key[IETF_MPA_KEY_SIZE]; + u8 flags; + u8 rev; + __be16 priv_data_len; + struct ietf_rtr_msg rtr_msg; + u8 priv_data[0]; +}; + +struct i40iw_cm_node; +enum i40iw_timer_type { + I40IW_TIMER_TYPE_SEND, + I40IW_TIMER_TYPE_RECV, + I40IW_TIMER_NODE_CLEANUP, + I40IW_TIMER_TYPE_CLOSE, +}; + +#define I40IW_PASSIVE_STATE_INDICATED 0 +#define I40IW_DO_NOT_SEND_RESET_EVENT 1 +#define I40IW_SEND_RESET_EVENT 2 + +#define MAX_I40IW_IFS 4 + +#define SET_ACK 0x1 +#define SET_SYN 0x2 +#define SET_FIN 0x4 +#define SET_RST 0x8 + +#define TCP_OPTIONS_PADDING 3 + +struct option_base { + u8 optionnum; + u8 length; +}; + +enum option_numbers { + OPTION_NUMBER_END, + OPTION_NUMBER_NONE, + OPTION_NUMBER_MSS, + OPTION_NUMBER_WINDOW_SCALE, + OPTION_NUMBER_SACK_PERM, + OPTION_NUMBER_SACK, + OPTION_NUMBER_WRITE0 = 0xbc +}; + +struct option_mss { + u8 optionnum; + u8 length; + __be16 mss; +}; + +struct option_windowscale { + u8 optionnum; + u8 length; + u8 shiftcount; +}; + +union all_known_options { + char as_end; + struct option_base as_base; + struct option_mss as_mss; + struct option_windowscale as_windowscale; +}; + +struct i40iw_timer_entry { + struct list_head list; + unsigned long timetosend; /* jiffies */ + struct i40iw_puda_buf *sqbuf; + u32 type; + u32 retrycount; + u32 retranscount; + u32 context; + u32 send_retrans; + int close_when_complete; +}; + +#define I40IW_DEFAULT_RETRYS 64 +#define I40IW_DEFAULT_RETRANS 8 +#define I40IW_DEFAULT_TTL 0x40 +#define I40IW_DEFAULT_RTT_VAR 0x6 +#define I40IW_DEFAULT_SS_THRESH 0x3FFFFFFF +#define I40IW_DEFAULT_REXMIT_THRESH 8 + +#define I40IW_RETRY_TIMEOUT HZ +#define I40IW_SHORT_TIME 10 +#define I40IW_LONG_TIME (2 * HZ) +#define I40IW_MAX_TIMEOUT ((unsigned long)(12 * HZ)) + +#define I40IW_CM_HASHTABLE_SIZE 1024 +#define I40IW_CM_TCP_TIMER_INTERVAL 3000 +#define I40IW_CM_DEFAULT_MTU 1540 +#define I40IW_CM_DEFAULT_FRAME_CNT 10 +#define I40IW_CM_THREAD_STACK_SIZE 256 +#define I40IW_CM_DEFAULT_RCV_WND 
64240 +#define I40IW_CM_DEFAULT_RCV_WND_SCALED 0x3fffc +#define I40IW_CM_DEFAULT_RCV_WND_SCALE 2 +#define I40IW_CM_DEFAULT_FREE_PKTS 0x000A +#define I40IW_CM_FREE_PKT_LO_WATERMARK 2 + +#define I40IW_CM_DEFAULT_MSS 536 + +#define I40IW_CM_DEF_SEQ 0x159bf75f +#define I40IW_CM_DEF_LOCAL_ID 0x3b47 + +#define I40IW_CM_DEF_SEQ2 0x18ed5740 +#define I40IW_CM_DEF_LOCAL_ID2 0xb807 +#define MAX_CM_BUFFER (I40IW_MAX_IETF_SIZE + IETF_MAX_PRIV_DATA_LEN) + +typedef u32 i40iw_addr_t; + +#define i40iw_cm_tsa_context i40iw_qp_context + +struct i40iw_qp; + +/* cm node transition states */ +enum i40iw_cm_node_state { + I40IW_CM_STATE_UNKNOWN, + I40IW_CM_STATE_INITED, + I40IW_CM_STATE_LISTENING, + I40IW_CM_STATE_SYN_RCVD, + I40IW_CM_STATE_SYN_SENT, + I40IW_CM_STATE_ONE_SIDE_ESTABLISHED, + I40IW_CM_STATE_ESTABLISHED, + I40IW_CM_STATE_ACCEPTING, + I40IW_CM_STATE_MPAREQ_SENT, + I40IW_CM_STATE_MPAREQ_RCVD, + I40IW_CM_STATE_MPAREJ_RCVD, + I40IW_CM_STATE_OFFLOADED, + I40IW_CM_STATE_FIN_WAIT1, + I40IW_CM_STATE_FIN_WAIT2, + I40IW_CM_STATE_CLOSE_WAIT, + I40IW_CM_STATE_TIME_WAIT, + I40IW_CM_STATE_LAST_ACK, + I40IW_CM_STATE_CLOSING, + I40IW_CM_STATE_LISTENER_DESTROYED, + I40IW_CM_STATE_CLOSED +}; + +enum mpa_frame_version { + IETF_MPA_V1 = 1, + IETF_MPA_V2 = 2 +}; + +enum mpa_frame_key { + MPA_KEY_REQUEST, + MPA_KEY_REPLY +}; + +enum send_rdma0 { + SEND_RDMA_READ_ZERO = 1, + SEND_RDMA_WRITE_ZERO = 2 +}; + +enum i40iw_tcpip_pkt_type { + I40IW_PKT_TYPE_UNKNOWN, + I40IW_PKT_TYPE_SYN, + I40IW_PKT_TYPE_SYNACK, + I40IW_PKT_TYPE_ACK, + I40IW_PKT_TYPE_FIN, + I40IW_PKT_TYPE_RST +}; + +/* CM context params */ +struct i40iw_cm_tcp_context { + u8 client; + + u32 loc_seq_num; + u32 loc_ack_num; + u32 rem_ack_num; + u32 rcv_nxt; + + u32 loc_id; + u32 rem_id; + + u32 snd_wnd; + u32 max_snd_wnd; + + u32 rcv_wnd; + u32 mss; + u8 snd_wscale; + u8 rcv_wscale; + + struct timeval sent_ts; +}; + +enum i40iw_cm_listener_state { + I40IW_CM_LISTENER_PASSIVE_STATE = 1, + I40IW_CM_LISTENER_ACTIVE_STATE = 2, + I40IW_CM_LISTENER_EITHER_STATE = 3 +}; + +struct i40iw_cm_listener { + struct list_head list; + struct i40iw_cm_core *cm_core; + u8 loc_mac[ETH_ALEN]; + u32 loc_addr[4]; + u16 loc_port; + u32 map_loc_addr[4]; + u16 map_loc_port; + struct iw_cm_id *cm_id; + atomic_t ref_count; + struct i40iw_device *iwdev; + atomic_t pend_accepts_cnt; + int backlog; + enum i40iw_cm_listener_state listener_state; + u32 reused_node; + u8 user_pri; + u16 vlan_id; + bool qhash_set; + bool ipv4; + struct list_head child_listen_list; + +}; + +struct i40iw_kmem_info { + void *addr; + u32 size; +}; + +/* per connection node and node state information */ +struct i40iw_cm_node { + u32 loc_addr[4], rem_addr[4]; + u16 loc_port, rem_port; + u32 map_loc_addr[4], map_rem_addr[4]; + u16 map_loc_port, map_rem_port; + u16 vlan_id; + enum i40iw_cm_node_state state; + u8 loc_mac[ETH_ALEN]; + u8 rem_mac[ETH_ALEN]; + atomic_t ref_count; + struct i40iw_qp *iwqp; + struct i40iw_device *iwdev; + struct i40iw_sc_dev *dev; + struct i40iw_cm_tcp_context tcp_cntxt; + struct i40iw_cm_core *cm_core; + struct i40iw_cm_node *loopbackpartner; + struct i40iw_timer_entry *send_entry; + struct i40iw_timer_entry *close_entry; + spinlock_t retrans_list_lock; /* cm transmit packet */ + enum send_rdma0 send_rdma0_op; + u16 ird_size; + u16 ord_size; + u16 mpav2_ird_ord; + struct iw_cm_id *cm_id; + struct list_head list; + int accelerated; + struct i40iw_cm_listener *listener; + int apbvt_set; + int accept_pend; + struct list_head timer_entry; + struct list_head reset_entry; + atomic_t passive_state; + 
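
The ietf_rtr_msg control words defined earlier in this header pack a 14-bit IRD/ORD value together with the MPA v2 negotiation flags (IETF_PEER_TO_PEER, IETF_FLPDU_ZERO_LEN, IETF_RDMA0_WRITE/READ); IETF_NO_IRD_ORD is simply the all-ones 14-bit value. A minimal userspace sketch of that decoding, using the masks defined above (the struct and helper names below are illustrative only, not part of the driver):

/* Illustrative decode of the MPA v2 RTR control words; field layout is
 * inferred from the IETF_* constants above, the driver's own parsing is
 * the authoritative version.
 */
#include <stdint.h>
#include <arpa/inet.h>   /* ntohs() */

struct rtr_decoded {
	uint16_t ird;
	uint16_t ord;
	int peer_to_peer;
	int zero_len_fpdu;
	int rdma0_write;
	int rdma0_read;
};

static struct rtr_decoded decode_rtr(uint16_t ctrl_ird_be, uint16_t ctrl_ord_be)
{
	uint16_t ctrl_ird = ntohs(ctrl_ird_be);
	uint16_t ctrl_ord = ntohs(ctrl_ord_be);
	struct rtr_decoded d;

	d.ird = ctrl_ird & 0x3FFF;               /* 14-bit IRD (IETF_NO_IRD_ORD = all ones) */
	d.ord = ctrl_ord & 0x3FFF;               /* 14-bit ORD */
	d.peer_to_peer  = !!(ctrl_ird & 0x8000); /* IETF_PEER_TO_PEER */
	d.zero_len_fpdu = !!(ctrl_ird & 0x4000); /* IETF_FLPDU_ZERO_LEN */
	d.rdma0_write   = !!(ctrl_ord & 0x8000); /* IETF_RDMA0_WRITE */
	d.rdma0_read    = !!(ctrl_ord & 0x4000); /* IETF_RDMA0_READ */
	return d;
}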
bool qhash_set; + u8 user_pri; + bool ipv4; + bool snd_mark_en; + u16 lsmm_size; + enum mpa_frame_version mpa_frame_rev; + struct i40iw_kmem_info pdata; + union { + struct ietf_mpa_v1 mpa_frame; + struct ietf_mpa_v2 mpa_v2_frame; + }; + + u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN]; + struct i40iw_kmem_info mpa_hdr; +}; + +/* structure for client or CM to fill when making CM api calls. */ +/* - only need to set relevant data, based on op. */ +struct i40iw_cm_info { + struct iw_cm_id *cm_id; + u16 loc_port; + u16 rem_port; + u32 loc_addr[4]; + u32 rem_addr[4]; + u16 map_loc_port; + u16 map_rem_port; + u32 map_loc_addr[4]; + u32 map_rem_addr[4]; + u16 vlan_id; + int backlog; + u16 user_pri; + bool ipv4; +}; + +/* CM event codes */ +enum i40iw_cm_event_type { + I40IW_CM_EVENT_UNKNOWN, + I40IW_CM_EVENT_ESTABLISHED, + I40IW_CM_EVENT_MPA_REQ, + I40IW_CM_EVENT_MPA_CONNECT, + I40IW_CM_EVENT_MPA_ACCEPT, + I40IW_CM_EVENT_MPA_REJECT, + I40IW_CM_EVENT_MPA_ESTABLISHED, + I40IW_CM_EVENT_CONNECTED, + I40IW_CM_EVENT_RESET, + I40IW_CM_EVENT_ABORTED +}; + +/* event to post to CM event handler */ +struct i40iw_cm_event { + enum i40iw_cm_event_type type; + struct i40iw_cm_info cm_info; + struct work_struct event_work; + struct i40iw_cm_node *cm_node; +}; + +struct i40iw_cm_core { + struct i40iw_device *iwdev; + struct i40iw_sc_dev *dev; + + struct list_head listen_nodes; + struct list_head connected_nodes; + + struct timer_list tcp_timer; + + struct workqueue_struct *event_wq; + struct workqueue_struct *disconn_wq; + + spinlock_t ht_lock; /* manage hash table */ + spinlock_t listen_list_lock; /* listen list */ + + u64 stats_nodes_created; + u64 stats_nodes_destroyed; + u64 stats_listen_created; + u64 stats_listen_destroyed; + u64 stats_listen_nodes_created; + u64 stats_listen_nodes_destroyed; + u64 stats_loopbacks; + u64 stats_accepts; + u64 stats_rejects; + u64 stats_connect_errs; + u64 stats_passive_errs; + u64 stats_pkt_retrans; + u64 stats_backlog_drops; +}; + +int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node, + struct i40iw_puda_buf *sqbuf, + enum i40iw_timer_type type, + int send_retrans, + int close_when_complete); + +int i40iw_accept(struct iw_cm_id *, struct iw_cm_conn_param *); +int i40iw_reject(struct iw_cm_id *, const void *, u8); +int i40iw_connect(struct iw_cm_id *, struct iw_cm_conn_param *); +int i40iw_create_listen(struct iw_cm_id *, int); +int i40iw_destroy_listen(struct iw_cm_id *); + +int i40iw_cm_start(struct i40iw_device *); +int i40iw_cm_stop(struct i40iw_device *); + +int i40iw_arp_table(struct i40iw_device *iwdev, + u32 *ip_addr, + bool ipv4, + u8 *mac_addr, + u32 action); + +#endif /* I40IW_CM_H */ diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c new file mode 100644 index 000000000..f05802bf6 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -0,0 +1,4743 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. 
You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include "i40iw_osdep.h" +#include "i40iw_register.h" +#include "i40iw_status.h" +#include "i40iw_hmc.h" + +#include "i40iw_d.h" +#include "i40iw_type.h" +#include "i40iw_p.h" +#include "i40iw_vf.h" +#include "i40iw_virtchnl.h" + +/** + * i40iw_insert_wqe_hdr - write wqe header + * @wqe: cqp wqe for header + * @header: header for the cqp wqe + */ +static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header) +{ + wmb(); /* make sure WQE is populated before polarity is set */ + set_64bit_val(wqe, 24, header); +} + +/** + * i40iw_get_cqp_reg_info - get head and tail for cqp using registers + * @cqp: struct for cqp hw + * @val: cqp tail register value + * @tail:wqtail register value + * @error: cqp processing err + */ +static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp, + u32 *val, + u32 *tail, + u32 *error) +{ + if (cqp->dev->is_pf) { + *val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL); + *tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL); + *error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR); + } else { + *val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1); + *tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL); + *error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR); + } +} + +/** + * i40iw_cqp_poll_registers - poll cqp registers + * @cqp: struct for cqp hw + * @tail:wqtail register value + * @count: how many times to try for completion + */ +static enum i40iw_status_code i40iw_cqp_poll_registers( + struct i40iw_sc_cqp *cqp, + u32 tail, + u32 count) +{ + u32 i = 0; + u32 newtail, error, val; + + while (i < count) { + i++; + i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error); + if (error) { + error = (cqp->dev->is_pf) ? 
+ i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) : + i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1); + return I40IW_ERR_CQP_COMPL_ERROR; + } + if (newtail != tail) { + /* SUCCESS */ + I40IW_RING_MOVE_TAIL(cqp->sq_ring); + return 0; + } + udelay(I40IW_SLEEP_COUNT); + } + return I40IW_ERR_TIMEOUT; +} + +/** + * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer + * @buf: ptr to fpm commit buffer + * @info: ptr to i40iw_hmc_obj_info struct + * + * parses fpm commit info and copy base value + * of hmc objects in hmc_info + */ +static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf( + u64 *buf, + struct i40iw_hmc_obj_info *info) +{ + u64 temp; + u32 i, j; + u32 low; + + /* copy base values in obj_info */ + for (i = I40IW_HMC_IW_QP, j = 0; + i <= I40IW_HMC_IW_PBLE; i++, j += 8) { + get_64bit_val(buf, j, &temp); + info[i].base = RS_64_1(temp, 32) * 512; + low = (u32)(temp); + if (low) + info[i].cnt = low; + } + return 0; +} + +/** + * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer + * @buf: ptr to fpm query buffer + * @info: ptr to i40iw_hmc_obj_info struct + * @hmc_fpm_misc: ptr to fpm data + * + * parses fpm query buffer and copy max_cnt and + * size value of hmc objects in hmc_info + */ +static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf( + u64 *buf, + struct i40iw_hmc_info *hmc_info, + struct i40iw_hmc_fpm_misc *hmc_fpm_misc) +{ + u64 temp; + struct i40iw_hmc_obj_info *obj_info; + u32 i, j, size; + u16 max_pe_sds; + + obj_info = hmc_info->hmc_obj; + + get_64bit_val(buf, 0, &temp); + hmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX); + max_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS); + + /* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */ + if (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID) + max_pe_sds--; + hmc_fpm_misc->max_sds = max_pe_sds; + hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index; + + for (i = I40IW_HMC_IW_QP, j = 8; + i <= I40IW_HMC_IW_ARP; i++, j += 8) { + get_64bit_val(buf, j, &temp); + if (i == I40IW_HMC_IW_QP) + obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS); + else if (i == I40IW_HMC_IW_CQ) + obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS); + else + obj_info[i].max_cnt = (u32)temp; + + size = (u32)RS_64_1(temp, 32); + obj_info[i].size = ((u64)1 << size); + } + for (i = I40IW_HMC_IW_MR, j = 48; + i <= I40IW_HMC_IW_PBLE; i++, j += 8) { + get_64bit_val(buf, j, &temp); + obj_info[i].max_cnt = (u32)temp; + size = (u32)RS_64_1(temp, 32); + obj_info[i].size = LS_64_1(1, size); + } + + get_64bit_val(buf, 120, &temp); + hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS); + get_64bit_val(buf, 120, &temp); + hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER); + get_64bit_val(buf, 120, &temp); + hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET); + get_64bit_val(buf, 64, &temp); + hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE); + if (!hmc_fpm_misc->xf_block_size) + return I40IW_ERR_INVALID_SIZE; + get_64bit_val(buf, 80, &temp); + hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE); + if (!hmc_fpm_misc->q1_block_size) + return I40IW_ERR_INVALID_SIZE; + return 0; +} + +/** + * i40iw_sc_pd_init - initialize sc pd struct + * @dev: sc device struct + * @pd: sc pd ptr + * @pd_id: pd_id for allocated pd + */ +static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev, + struct i40iw_sc_pd *pd, + u16 pd_id) +{ + pd->size = sizeof(*pd); + pd->pd_id = pd_id; + 
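
i40iw_sc_parse_fpm_commit_buf() above treats each 64-bit commit word as a packed pair: the low 32 bits carry the object count (applied only when non-zero) and the high 32 bits, scaled by 512, give the object base. A standalone sketch of that split, assuming RS_64_1() is a plain right-shift helper (the names below are illustrative, not part of the driver):

#include <stdint.h>

struct fpm_obj {
	uint64_t base;   /* HMC object base, in bytes */
	uint32_t cnt;    /* object count; the driver only applies it when non-zero */
};

static struct fpm_obj parse_commit_word(uint64_t word)
{
	struct fpm_obj obj;

	obj.base = (word >> 32) * 512;   /* high half, expressed in 512-byte units */
	obj.cnt  = (uint32_t)word;       /* low half */
	return obj;
}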
pd->dev = dev; +} + +/** + * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size + * @wqsize: size of the wq (sq, rq, srq) to encoded_size + * @cqpsq: encoded size for sq for cqp as its encoded size is 1+ other wq's + */ +u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq) +{ + u8 encoded_size = 0; + + /* cqp sq's hw coded value starts from 1 for size of 4 + * while it starts from 0 for qp' wq's. + */ + if (cqpsq) + encoded_size = 1; + wqsize >>= 2; + while (wqsize >>= 1) + encoded_size++; + return encoded_size; +} + +/** + * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair + * @cqp: IWARP control queue pair pointer + * @info: IWARP control queue pair init info pointer + * + * Initializes the object and context buffers for a control Queue Pair. + */ +static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp, + struct i40iw_cqp_init_info *info) +{ + u8 hw_sq_size; + + if ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) || + (info->sq_size < I40IW_CQP_SW_SQSIZE_4) || + ((info->sq_size & (info->sq_size - 1)))) + return I40IW_ERR_INVALID_SIZE; + + hw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true); + cqp->size = sizeof(*cqp); + cqp->sq_size = info->sq_size; + cqp->hw_sq_size = hw_sq_size; + cqp->sq_base = info->sq; + cqp->host_ctx = info->host_ctx; + cqp->sq_pa = info->sq_pa; + cqp->host_ctx_pa = info->host_ctx_pa; + cqp->dev = info->dev; + cqp->struct_ver = info->struct_ver; + cqp->scratch_array = info->scratch_array; + cqp->polarity = 0; + cqp->en_datacenter_tcp = info->en_datacenter_tcp; + cqp->enabled_vf_count = info->enabled_vf_count; + cqp->hmc_profile = info->hmc_profile; + info->dev->cqp = cqp; + + I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size); + i40iw_debug(cqp->dev, I40IW_DEBUG_WQE, + "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n", + __func__, cqp->sq_size, cqp->hw_sq_size, + cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity); + return 0; +} + +/** + * i40iw_sc_cqp_create - create cqp during bringup + * @cqp: struct for cqp hw + * @disable_pfpdus: if pfpdu to be disabled + * @maj_err: If error, major err number + * @min_err: If error, minor err number + */ +static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp, + bool disable_pfpdus, + u16 *maj_err, + u16 *min_err) +{ + u64 temp; + u32 cnt = 0, p1, p2, val = 0, err_code; + enum i40iw_status_code ret_code; + + ret_code = i40iw_allocate_dma_mem(cqp->dev->hw, + &cqp->sdbuf, + 128, + I40IW_SD_BUF_ALIGNMENT); + + if (ret_code) + goto exit; + + temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) | + LS_64(cqp->struct_ver, I40IW_CQPHC_SVER); + + if (disable_pfpdus) + temp |= LS_64(1, I40IW_CQPHC_DISABLE_PFPDUS); + + set_64bit_val(cqp->host_ctx, 0, temp); + set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa); + temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) | + LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE); + set_64bit_val(cqp->host_ctx, 16, temp); + set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp); + set_64bit_val(cqp->host_ctx, 32, 0); + set_64bit_val(cqp->host_ctx, 40, 0); + set_64bit_val(cqp->host_ctx, 48, 0); + set_64bit_val(cqp->host_ctx, 56, 0); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX", + cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8); + + p1 = RS_32_1(cqp->host_ctx_pa, 32); + p2 = (u32)cqp->host_ctx_pa; + + if (cqp->dev->is_pf) { + i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1); + i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2); + } else { + i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1); + 
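
i40iw_get_encoded_wqe_size() above reduces a power-of-two queue depth to the hardware's encoded form, effectively log2(size) - 2, with the CQP SQ offset by one because its smallest depth (4) encodes as 1. A self-contained version that prints the encodings for the CQP SQ sizes accepted by i40iw_sc_cqp_init() (I40IW_CQP_SW_SQSIZE_4 through I40IW_CQP_SW_SQSIZE_2048); the helper name here is illustrative:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint8_t encoded_wqe_size(uint32_t wqsize, bool cqpsq)
{
	uint8_t encoded = cqpsq ? 1 : 0;   /* CQP SQ encoding starts at 1 for depth 4 */

	wqsize >>= 2;
	while (wqsize >>= 1)
		encoded++;
	return encoded;
}

int main(void)
{
	for (uint32_t size = 4; size <= 2048; size <<= 1)
		printf("sq_size %4u -> hw_sq_size %u\n",
		       (unsigned)size, (unsigned)encoded_wqe_size(size, true));
	return 0;
}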
i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2); + } + do { + if (cnt++ > I40IW_DONE_COUNT) { + i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf); + ret_code = I40IW_ERR_TIMEOUT; + /* + * read PFPE_CQPERRORCODES register to get the minor + * and major error code + */ + if (cqp->dev->is_pf) + err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES); + else + err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1); + *min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE); + *maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE); + goto exit; + } + udelay(I40IW_SLEEP_COUNT); + if (cqp->dev->is_pf) + val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS); + else + val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1); + } while (!val); + +exit: + if (!ret_code) + cqp->process_cqp_sds = i40iw_update_sds_noccq; + return ret_code; +} + +/** + * i40iw_sc_cqp_post_sq - post of cqp's sq + * @cqp: struct for cqp hw + */ +void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp) +{ + if (cqp->dev->is_pf) + i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring)); + else + i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring)); + + i40iw_debug(cqp->dev, + I40IW_DEBUG_WQE, + "%s: HEAD_TAIL[%04d,%04d,%04d]\n", + __func__, + cqp->sq_ring.head, + cqp->sq_ring.tail, + cqp->sq_ring.size); +} + +/** + * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq + * @cqp: struct for cqp hw + * @wqe_idx: we index of cqp ring + */ +u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch) +{ + u64 *wqe = NULL; + u32 wqe_idx; + enum i40iw_status_code ret_code; + + if (I40IW_RING_FULL_ERR(cqp->sq_ring)) { + i40iw_debug(cqp->dev, + I40IW_DEBUG_WQE, + "%s: ring is full head %x tail %x size %x\n", + __func__, + cqp->sq_ring.head, + cqp->sq_ring.tail, + cqp->sq_ring.size); + return NULL; + } + I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code); + if (ret_code) + return NULL; + if (!wqe_idx) + cqp->polarity = !cqp->polarity; + + wqe = cqp->sq_base[wqe_idx].elem; + cqp->scratch_array[wqe_idx] = scratch; + I40IW_CQP_INIT_WQE(wqe); + + return wqe; +} + +/** + * i40iw_sc_cqp_destroy - destroy cqp during close + * @cqp: struct for cqp hw + */ +static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp) +{ + u32 cnt = 0, val = 1; + enum i40iw_status_code ret_code = 0; + u32 cqpstat_addr; + + if (cqp->dev->is_pf) { + i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0); + i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0); + cqpstat_addr = I40E_PFPE_CCQPSTATUS; + } else { + i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0); + i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0); + cqpstat_addr = I40E_VFPE_CCQPSTATUS1; + } + do { + if (cnt++ > I40IW_DONE_COUNT) { + ret_code = I40IW_ERR_TIMEOUT; + break; + } + udelay(I40IW_SLEEP_COUNT); + val = i40iw_rd32(cqp->dev->hw, cqpstat_addr); + } while (val); + + i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf); + return ret_code; +} + +/** + * i40iw_sc_ccq_arm - enable intr for control cq + * @ccq: ccq sc struct + */ +static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq) +{ + u64 temp_val; + u16 sw_cq_sel; + u8 arm_next_se; + u8 arm_seq_num; + + /* write to cq doorbell shadow area */ + /* arm next se should always be zero */ + get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val); + + sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT); + arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE); + + arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM); + arm_seq_num++; + + 
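
i40iw_sc_cqp_get_next_send_wqe() above flips cqp->polarity whenever the SQ index wraps to zero, and i40iw_sc_ccq_get_cqe_info() accepts a CQE only while its valid bit matches the CQ's polarity; this phase bit is what distinguishes freshly written descriptors from stale ones after a wrap. A simplified model of that convention (demo_ring and its fields are illustrative, and the ring macros are assumed to behave as plain modular counters):

#include <stdint.h>
#include <stdbool.h>

struct demo_ring {
	uint32_t head;
	uint32_t size;      /* power of two */
	bool polarity;      /* current phase / valid bit */
};

/* Advance the producer and report the valid bit the new entry must carry;
 * a simplified model of the flip-on-index-zero behaviour seen in
 * i40iw_sc_cqp_get_next_send_wqe().
 */
static uint32_t demo_ring_next(struct demo_ring *r, bool *valid)
{
	r->head = (r->head + 1) % r->size;
	if (r->head == 0)
		r->polarity = !r->polarity;   /* wrapped: start the next phase */
	*valid = r->polarity;                 /* consumer accepts entries matching its own phase */
	return r->head;
}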
temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) | + LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) | + LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) | + LS_64(1, I40IW_CQ_DBSA_ARM_NEXT); + + set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val); + + wmb(); /* make sure shadow area is updated before arming */ + + if (ccq->dev->is_pf) + i40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id); + else + i40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id); +} + +/** + * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry + * @ccq: ccq sc struct + * @info: completion q entry to return + */ +static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info( + struct i40iw_sc_cq *ccq, + struct i40iw_ccq_cqe_info *info) +{ + u64 qp_ctx, temp, temp1; + u64 *cqe; + struct i40iw_sc_cqp *cqp; + u32 wqe_idx; + u8 polarity; + enum i40iw_status_code ret_code = 0; + + if (ccq->cq_uk.avoid_mem_cflct) + cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk); + else + cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk); + + get_64bit_val(cqe, 24, &temp); + polarity = (u8)RS_64(temp, I40IW_CQ_VALID); + if (polarity != ccq->cq_uk.polarity) + return I40IW_ERR_QUEUE_EMPTY; + + get_64bit_val(cqe, 8, &qp_ctx); + cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx; + info->error = (bool)RS_64(temp, I40IW_CQ_ERROR); + info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR); + if (info->error) { + info->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR); + info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR); + } + wqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX); + info->scratch = cqp->scratch_array[wqe_idx]; + + get_64bit_val(cqe, 16, &temp1); + info->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL); + get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1); + info->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE); + info->cqp = cqp; + + /* move the head for cq */ + I40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code); + if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0) + ccq->cq_uk.polarity ^= 1; + + /* update cq tail in cq shadow memory also */ + I40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring); + set_64bit_val(ccq->cq_uk.shadow_area, + 0, + I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring)); + wmb(); /* write shadow area before tail */ + I40IW_RING_MOVE_TAIL(cqp->sq_ring); + return ret_code; +} + +/** + * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ + * @cqp: struct for cqp hw + * @op_code: cqp opcode for completion + * @info: completion q entry to return + */ +static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done( + struct i40iw_sc_cqp *cqp, + u8 op_code, + struct i40iw_ccq_cqe_info *compl_info) +{ + struct i40iw_ccq_cqe_info info; + struct i40iw_sc_cq *ccq; + enum i40iw_status_code ret_code = 0; + u32 cnt = 0; + + memset(&info, 0, sizeof(info)); + ccq = cqp->dev->ccq; + while (1) { + if (cnt++ > I40IW_DONE_COUNT) + return I40IW_ERR_TIMEOUT; + + if (i40iw_sc_ccq_get_cqe_info(ccq, &info)) { + udelay(I40IW_SLEEP_COUNT); + continue; + } + + if (info.error) { + ret_code = I40IW_ERR_CQP_COMPL_ERROR; + break; + } + /* check if opcode is cq create */ + if (op_code != info.op_code) { + i40iw_debug(cqp->dev, I40IW_DEBUG_WQE, + "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n", + __func__, op_code, info.op_code); + } + /* success, exit out of the loop */ + if (op_code == info.op_code) + break; + } + + if (compl_info) + memcpy(compl_info, &info, sizeof(*compl_info)); + + return ret_code; +} + +/** + * i40iw_sc_manage_push_page - Handle push page + * @cqp: struct for cqp hw 
+ * @info: push page info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_manage_push_page( + struct i40iw_sc_cqp *cqp, + struct i40iw_cqp_manage_push_page_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + u64 header; + + if (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT) + return I40IW_ERR_INVALID_PUSH_PAGE_INDEX; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 16, info->qs_handle); + + header = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) | + LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) | + LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_manage_hmc_pm_func_table - manage of function table + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @vf_index: vf index for cqp + * @free_pm_fcn: function number + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table( + struct i40iw_sc_cqp *cqp, + u64 scratch, + u8 vf_index, + bool free_pm_fcn, + bool post_sq) +{ + u64 *wqe; + u64 header; + + if (vf_index >= I40IW_MAX_VF_PER_PF) + return I40IW_ERR_INVALID_VF_ID; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + header = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) | + LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) | + LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @hmc_profile_type: type of profile to set + * @vf_num: vf number for profile + * @post_sq: flag for cqp db to ring + * @poll_registers: flag to poll register for cqp completion + */ +static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile( + struct i40iw_sc_cqp *cqp, + u64 scratch, + u8 hmc_profile_type, + u8 vf_num, bool post_sq, + bool poll_registers) +{ + u64 *wqe; + u64 header; + u32 val, tail, error; + enum i40iw_status_code ret_code = 0; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 16, + (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) | + LS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM))); + + header = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + i40iw_get_cqp_reg_info(cqp, &val, &tail, &error); + if (error) + return I40IW_ERR_CQP_COMPL_ERROR; + + if (post_sq) { + i40iw_sc_cqp_post_sq(cqp); + if (poll_registers) + ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000); + else + ret_code = i40iw_sc_poll_for_cqp_op_done(cqp, + I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, + NULL); + } + + return ret_code; +} + +/** + * 
i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table + * @cqp: struct for cqp hw + */ +static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp) +{ + return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL); +} + +/** + * i40iw_sc_commit_fpm_values_done - wait for cqp eqe completion for fpm commit + * @cqp: struct for cqp hw + */ +static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp) +{ + return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL); +} + +/** + * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @hmc_fn_id: hmc function id + * @commit_fpm_mem; Memory for fpm values + * @post_sq: flag for cqp db to ring + * @wait_type: poll ccq or cqp registers for cqp completion + */ +static enum i40iw_status_code i40iw_sc_commit_fpm_values( + struct i40iw_sc_cqp *cqp, + u64 scratch, + u8 hmc_fn_id, + struct i40iw_dma_mem *commit_fpm_mem, + bool post_sq, + u8 wait_type) +{ + u64 *wqe; + u64 header; + u32 tail, val, error; + enum i40iw_status_code ret_code = 0; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 16, hmc_fn_id); + set_64bit_val(wqe, 32, commit_fpm_mem->pa); + + header = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + i40iw_get_cqp_reg_info(cqp, &val, &tail, &error); + if (error) + return I40IW_ERR_CQP_COMPL_ERROR; + + if (post_sq) { + i40iw_sc_cqp_post_sq(cqp); + + if (wait_type == I40IW_CQP_WAIT_POLL_REGS) + ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT); + else if (wait_type == I40IW_CQP_WAIT_POLL_CQ) + ret_code = i40iw_sc_commit_fpm_values_done(cqp); + } + + return ret_code; +} + +/** + * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm + * @cqp: struct for cqp hw + */ +static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp) +{ + return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL); +} + +/** + * i40iw_sc_query_fpm_values - cqp wqe query fpm values + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @hmc_fn_id: hmc function id + * @query_fpm_mem: memory for return fpm values + * @post_sq: flag for cqp db to ring + * @wait_type: poll ccq or cqp registers for cqp completion + */ +static enum i40iw_status_code i40iw_sc_query_fpm_values( + struct i40iw_sc_cqp *cqp, + u64 scratch, + u8 hmc_fn_id, + struct i40iw_dma_mem *query_fpm_mem, + bool post_sq, + u8 wait_type) +{ + u64 *wqe; + u64 header; + u32 tail, val, error; + enum i40iw_status_code ret_code = 0; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 16, hmc_fn_id); + set_64bit_val(wqe, 32, query_fpm_mem->pa); + + header = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + /* read the tail from CQP_TAIL register */ + i40iw_get_cqp_reg_info(cqp, &val, &tail, &error); + + if (error) + return 
I40IW_ERR_CQP_COMPL_ERROR; + + if (post_sq) { + i40iw_sc_cqp_post_sq(cqp); + if (wait_type == I40IW_CQP_WAIT_POLL_REGS) + ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT); + else if (wait_type == I40IW_CQP_WAIT_POLL_CQ) + ret_code = i40iw_sc_query_fpm_values_done(cqp); + } + + return ret_code; +} + +/** + * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry + * @cqp: struct for cqp hw + * @info: arp entry information + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_add_arp_cache_entry( + struct i40iw_sc_cqp *cqp, + struct i40iw_add_arp_cache_entry_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + u64 temp, header; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 8, info->reach_max); + + temp = info->mac_addr[5] | + LS_64_1(info->mac_addr[4], 8) | + LS_64_1(info->mac_addr[3], 16) | + LS_64_1(info->mac_addr[2], 24) | + LS_64_1(info->mac_addr[1], 32) | + LS_64_1(info->mac_addr[0], 40); + + set_64bit_val(wqe, 16, temp); + + header = info->arp_index | + LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) | + LS_64((info->permanent ? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT) | + LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_del_arp_cache_entry - dele arp cache entry + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @arp_index: arp index to delete arp entry + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_del_arp_cache_entry( + struct i40iw_sc_cqp *cqp, + u64 scratch, + u16 arp_index, + bool post_sq) +{ + u64 *wqe; + u64 header; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + header = arp_index | + LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_query_arp_cache_entry - cqp wqe to query arp and arp index + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @arp_index: arp index to delete arp entry + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_query_arp_cache_entry( + struct i40iw_sc_cqp *cqp, + u64 scratch, + u16 arp_index, + bool post_sq) +{ + u64 *wqe; + u64 header; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + header = arp_index | + LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) | + LS_64(1, I40IW_CQPSQ_MAT_QUERY) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries + * @cqp: struct for cqp hw + * @info: info for apbvt entry to add or delete + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum 
i40iw_status_code i40iw_sc_manage_apbvt_entry( + struct i40iw_sc_cqp *cqp, + struct i40iw_apbvt_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + u64 header; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 16, info->port); + + header = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) | + LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_manage_qhash_table_entry - manage quad hash entries + * @cqp: struct for cqp hw + * @info: info for quad hash to manage + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + * + * This is called before connection establishment is started. For passive connections, when + * listener is created, it will call with entry type of I40IW_QHASH_TYPE_TCP_SYN with local + * ip address and tcp port. When SYN is received (passive connections) or + * sent (active connections), this routine is called with entry type of + * I40IW_QHASH_TYPE_TCP_ESTABLISHED and quad is passed in info. + * + * When iwarp connection is done and its state moves to RTS, the quad hash entry in + * the hardware will point to iwarp's qp number and requires no calls from the driver. + */ +static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry( + struct i40iw_sc_cqp *cqp, + struct i40iw_qhash_table_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + u64 qw1 = 0; + u64 qw2 = 0; + u64 temp; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + temp = info->mac_addr[5] | + LS_64_1(info->mac_addr[4], 8) | + LS_64_1(info->mac_addr[3], 16) | + LS_64_1(info->mac_addr[2], 24) | + LS_64_1(info->mac_addr[1], 32) | + LS_64_1(info->mac_addr[0], 40); + + set_64bit_val(wqe, 0, temp); + + qw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) | + LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT); + if (info->ipv4_valid) { + set_64bit_val(wqe, + 48, + LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3)); + } else { + set_64bit_val(wqe, + 56, + LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) | + LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1)); + + set_64bit_val(wqe, + 48, + LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) | + LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3)); + } + qw2 = LS_64(cqp->dev->qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE); + if (info->vlan_valid) + qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID); + set_64bit_val(wqe, 16, qw2); + if (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) { + qw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT); + if (!info->ipv4_valid) { + set_64bit_val(wqe, + 40, + LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) | + LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1)); + set_64bit_val(wqe, + 32, + LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) | + LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3)); + } else { + set_64bit_val(wqe, + 32, + LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3)); + } + } + + set_64bit_val(wqe, 8, qw1); + temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) | + LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) | + LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) | + LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) | + LS_64(info->vlan_valid, 
I40IW_CQPSQ_QHASH_VLANVALID) | + LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE); + + i40iw_insert_wqe_hdr(wqe, temp); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_alloc_local_mac_ipaddr_entry( + struct i40iw_sc_cqp *cqp, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + u64 header; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + header = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_add_local_mac_ipaddr_entry - add mac enry + * @cqp: struct for cqp hw + * @info:mac addr info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry( + struct i40iw_sc_cqp *cqp, + struct i40iw_local_mac_ipaddr_entry_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + u64 temp, header; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + temp = info->mac_addr[5] | + LS_64_1(info->mac_addr[4], 8) | + LS_64_1(info->mac_addr[3], 16) | + LS_64_1(info->mac_addr[2], 24) | + LS_64_1(info->mac_addr[1], 32) | + LS_64_1(info->mac_addr[0], 40); + + set_64bit_val(wqe, 32, temp); + + header = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) | + LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to dele local mac + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @entry_idx: index of mac entry + * @ ignore_ref_count: to force mac adde delete + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry( + struct i40iw_sc_cqp *cqp, + u64 scratch, + u8 entry_idx, + u8 ignore_ref_count, + bool post_sq) +{ + u64 *wqe; + u64 header; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + header = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) | + LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) | + LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) | + LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_cqp_nop - send a nop wqe + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp, + u64 
scratch, + bool post_sq) +{ + u64 *wqe; + u64 header; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + header = LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + i40iw_insert_wqe_hdr(wqe, header); + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_ceq_init - initialize ceq + * @ceq: ceq sc structure + * @info: ceq initialization info + */ +static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq, + struct i40iw_ceq_init_info *info) +{ + u32 pble_obj_cnt; + + if ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) || + (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES)) + return I40IW_ERR_INVALID_SIZE; + + if (info->ceq_id >= I40IW_MAX_CEQID) + return I40IW_ERR_INVALID_CEQ_ID; + + pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; + + if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt)) + return I40IW_ERR_INVALID_PBLE_INDEX; + + ceq->size = sizeof(*ceq); + ceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base; + ceq->ceq_id = info->ceq_id; + ceq->dev = info->dev; + ceq->elem_cnt = info->elem_cnt; + ceq->ceq_elem_pa = info->ceqe_pa; + ceq->virtual_map = info->virtual_map; + + ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0); + ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0); + ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL); + + ceq->tph_en = info->tph_en; + ceq->tph_val = info->tph_val; + ceq->polarity = 1; + I40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt); + ceq->dev->ceq[info->ceq_id] = ceq; + + return 0; +} + +/** + * i40iw_sc_ceq_create - create ceq wqe + * @ceq: ceq sc structure + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq, + u64 scratch, + bool post_sq) +{ + struct i40iw_sc_cqp *cqp; + u64 *wqe; + u64 header; + + cqp = ceq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 16, ceq->elem_cnt); + set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa)); + set_64bit_val(wqe, 48, (ceq->virtual_map ? 
ceq->first_pm_pbl_idx : 0)); + set_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL)); + + header = ceq->ceq_id | + LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) | + LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) | + LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) | + LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete + * @ceq: ceq sc structure + */ +static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq) +{ + struct i40iw_sc_cqp *cqp; + + cqp = ceq->dev->cqp; + return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL); +} + +/** + * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete + * @ceq: ceq sc structure + */ +static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq) +{ + struct i40iw_sc_cqp *cqp; + + cqp = ceq->dev->cqp; + cqp->process_cqp_sds = i40iw_update_sds_noccq; + return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL); +} + +/** + * i40iw_sc_cceq_create - create cceq + * @ceq: ceq sc structure + * @scratch: u64 saved to be used during cqp completion + */ +static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch) +{ + enum i40iw_status_code ret_code; + + ret_code = i40iw_sc_ceq_create(ceq, scratch, true); + if (!ret_code) + ret_code = i40iw_sc_cceq_create_done(ceq); + return ret_code; +} + +/** + * i40iw_sc_ceq_destroy - destroy ceq + * @ceq: ceq sc structure + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq, + u64 scratch, + bool post_sq) +{ + struct i40iw_sc_cqp *cqp; + u64 *wqe; + u64 header; + + cqp = ceq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 16, ceq->elem_cnt); + set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx); + header = ceq->ceq_id | + LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) | + LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) | + LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) | + LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + i40iw_insert_wqe_hdr(wqe, header); + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_process_ceq - process ceq + * @dev: sc device struct + * @ceq: ceq sc structure + */ +static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq) +{ + u64 temp; + u64 *ceqe; + struct i40iw_sc_cq *cq = NULL; + u8 polarity; + + ceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq); + get_64bit_val(ceqe, 0, &temp); + polarity = (u8)RS_64(temp, I40IW_CEQE_VALID); + if (polarity != ceq->polarity) + return cq; + + cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1); + + I40IW_RING_MOVE_TAIL(ceq->ceq_ring); + if (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 0) + ceq->polarity ^= 1; + + if (dev->is_pf) + i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id); + else + i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id); + + return cq; +} + +/** + * i40iw_sc_aeq_init - initialize aeq + * @aeq: aeq structure 
ptr + * @info: aeq initialization info + */ +static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq, + struct i40iw_aeq_init_info *info) +{ + u32 pble_obj_cnt; + + if ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) || + (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES)) + return I40IW_ERR_INVALID_SIZE; + pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; + + if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt)) + return I40IW_ERR_INVALID_PBLE_INDEX; + + aeq->size = sizeof(*aeq); + aeq->polarity = 1; + aeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base; + aeq->dev = info->dev; + aeq->elem_cnt = info->elem_cnt; + + aeq->aeq_elem_pa = info->aeq_elem_pa; + I40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt); + info->dev->aeq = aeq; + + aeq->virtual_map = info->virtual_map; + aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL); + aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0); + aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0); + info->dev->aeq = aeq; + return 0; +} + +/** + * i40iw_sc_aeq_create - create aeq + * @aeq: aeq structure ptr + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + + cqp = aeq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 16, aeq->elem_cnt); + set_64bit_val(wqe, 32, + (aeq->virtual_map ? 0 : aeq->aeq_elem_pa)); + set_64bit_val(wqe, 48, + (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0)); + + header = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) | + LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) | + LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_aeq_destroy - destroy aeq during close + * @aeq: aeq structure ptr + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + + cqp = aeq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 16, aeq->elem_cnt); + set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx); + header = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) | + LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) | + LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_get_next_aeqe - get next aeq entry + * @aeq: aeq structure ptr + * @info: aeqe info to be returned + */ +static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq, + struct i40iw_aeqe_info *info) +{ + u64 temp, compl_ctx; + u64 *aeqe; + u16 wqe_idx; + u8 ae_src; + u8 polarity; + + aeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq); + get_64bit_val(aeqe, 0, &compl_ctx); + get_64bit_val(aeqe, 8, &temp); + polarity = 
(u8)RS_64(temp, I40IW_AEQE_VALID); + + if (aeq->polarity != polarity) + return I40IW_ERR_QUEUE_EMPTY; + + i40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, "AEQ_ENTRY", aeqe, 16); + + ae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC); + wqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX); + info->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID); + info->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE); + info->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE); + info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE); + info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA); + info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW); + switch (ae_src) { + case I40IW_AE_SOURCE_RQ: + case I40IW_AE_SOURCE_RQ_0011: + info->qp = true; + info->wqe_idx = wqe_idx; + info->compl_ctx = compl_ctx; + break; + case I40IW_AE_SOURCE_CQ: + case I40IW_AE_SOURCE_CQ_0110: + case I40IW_AE_SOURCE_CQ_1010: + case I40IW_AE_SOURCE_CQ_1110: + info->cq = true; + info->compl_ctx = LS_64_1(compl_ctx, 1); + break; + case I40IW_AE_SOURCE_SQ: + case I40IW_AE_SOURCE_SQ_0111: + info->qp = true; + info->sq = true; + info->wqe_idx = wqe_idx; + info->compl_ctx = compl_ctx; + break; + case I40IW_AE_SOURCE_IN_RR_WR: + case I40IW_AE_SOURCE_IN_RR_WR_1011: + info->qp = true; + info->compl_ctx = compl_ctx; + info->in_rdrsp_wr = true; + break; + case I40IW_AE_SOURCE_OUT_RR: + case I40IW_AE_SOURCE_OUT_RR_1111: + info->qp = true; + info->compl_ctx = compl_ctx; + info->out_rdrsp = true; + break; + default: + break; + } + I40IW_RING_MOVE_TAIL(aeq->aeq_ring); + if (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0) + aeq->polarity ^= 1; + return 0; +} + +/** + * i40iw_sc_repost_aeq_entries - repost completed aeq entries + * @dev: sc device struct + * @count: allocate count + */ +static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev, + u32 count) +{ + if (count > I40IW_MAX_AEQ_ALLOCATE_COUNT) + return I40IW_ERR_INVALID_SIZE; + + if (dev->is_pf) + i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count); + else + i40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count); + + return 0; +} + +/** + * i40iw_sc_aeq_create_done - create aeq + * @aeq: aeq structure ptr + */ +static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq) +{ + struct i40iw_sc_cqp *cqp; + + cqp = aeq->dev->cqp; + return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL); +} + +/** + * i40iw_sc_aeq_destroy_done - destroy of aeq during close + * @aeq: aeq structure ptr + */ +static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq) +{ + struct i40iw_sc_cqp *cqp; + + cqp = aeq->dev->cqp; + return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL); +} + +/** + * i40iw_sc_ccq_init - initialize control cq + * @cq: sc's cq ctruct + * @info: info for control cq initialization + */ +static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq, + struct i40iw_ccq_init_info *info) +{ + u32 pble_obj_cnt; + + if (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE) + return I40IW_ERR_INVALID_SIZE; + + if (info->ceq_id > I40IW_MAX_CEQID) + return I40IW_ERR_INVALID_CEQ_ID; + + pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; + + if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt)) + return I40IW_ERR_INVALID_PBLE_INDEX; + + cq->cq_pa = info->cq_pa; + cq->cq_uk.cq_base = info->cq_base; + cq->shadow_area_pa = info->shadow_area_pa; + cq->cq_uk.shadow_area = info->shadow_area; + cq->shadow_read_threshold = info->shadow_read_threshold; + cq->dev = info->dev; + 
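/*
 * Illustrative sketch, not part of the patch: how the LS_64/RS_64 helpers used
 * throughout this file pack and unpack fields of a 64-bit descriptor word, and
 * how the valid-bit/polarity scheme lets i40iw_sc_get_next_aeqe() and
 * i40iw_sc_process_ceq() spot a freshly written entry without a shared head
 * pointer.  The DEMO_* shift/mask values are made up for illustration; the
 * real definitions live in the driver's i40iw_d.h.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_OPCODE_S	32
#define DEMO_OPCODE_M	(0x3fULL << DEMO_OPCODE_S)
#define DEMO_VALID_S	63
#define DEMO_VALID_M	(0x1ULL << DEMO_VALID_S)

/* pack a value into its field, as LS_64(val, field) does */
#define DEMO_LS_64(val, field)	(((uint64_t)(val) << field##_S) & field##_M)
/* pull a field back out, as RS_64(val, field) does */
#define DEMO_RS_64(val, field)	(((uint64_t)(val) & field##_M) >> field##_S)

int main(void)
{
	unsigned int polarity = 1;		/* rings start out expecting 1 */
	uint64_t header = 42 |			/* id in the low bits */
			  DEMO_LS_64(0x0b, DEMO_OPCODE) |
			  DEMO_LS_64(polarity, DEMO_VALID);

	/* consumer side: the entry is new only while its valid bit
	 * matches the polarity the consumer currently expects */
	if (DEMO_RS_64(header, DEMO_VALID) == polarity)
		printf("opcode 0x%llx is ready\n",
		       (unsigned long long)DEMO_RS_64(header, DEMO_OPCODE));

	/* once the ring tail wraps back to 0 the consumer flips its
	 * polarity, so entries left over from the previous lap stop
	 * matching and are treated as "queue empty" */
	polarity ^= 1;
	if (DEMO_RS_64(header, DEMO_VALID) != polarity)
		printf("same entry now reads as stale\n");
	return 0;
}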
cq->ceq_id = info->ceq_id; + cq->cq_uk.cq_size = info->num_elem; + cq->cq_type = I40IW_CQ_TYPE_CQP; + cq->ceqe_mask = info->ceqe_mask; + I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem); + + cq->cq_uk.cq_id = 0; /* control cq is id 0 always */ + cq->ceq_id_valid = info->ceq_id_valid; + cq->tph_en = info->tph_en; + cq->tph_val = info->tph_val; + cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct; + + cq->pbl_list = info->pbl_list; + cq->virtual_map = info->virtual_map; + cq->pbl_chunk_size = info->pbl_chunk_size; + cq->first_pm_pbl_idx = info->first_pm_pbl_idx; + cq->cq_uk.polarity = true; + + /* following are only for iw cqs so initialize them to zero */ + cq->cq_uk.cqe_alloc_reg = NULL; + info->dev->ccq = cq; + return 0; +} + +/** + * i40iw_sc_ccq_create_done - poll cqp for ccq create + * @ccq: ccq sc struct + */ +static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq) +{ + struct i40iw_sc_cqp *cqp; + + cqp = ccq->dev->cqp; + return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL); +} + +/** + * i40iw_sc_ccq_create - create control cq + * @ccq: ccq sc struct + * @scratch: u64 saved to be used during cqp completion + * @check_overflow: overlow flag for ccq + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq, + u64 scratch, + bool check_overflow, + bool post_sq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + enum i40iw_status_code ret_code; + + cqp = ccq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 0, ccq->cq_uk.cq_size); + set_64bit_val(wqe, 8, RS_64_1(ccq, 1)); + set_64bit_val(wqe, 16, + LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD)); + set_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa)); + set_64bit_val(wqe, 40, ccq->shadow_area_pa); + set_64bit_val(wqe, 48, + (ccq->virtual_map ? ccq->first_pm_pbl_idx : 0)); + set_64bit_val(wqe, 56, + LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL)); + + header = ccq->cq_uk.cq_id | + LS_64((ccq->ceq_id_valid ? 
ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) | + LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) | + LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) | + LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) | + LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) | + LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) | + LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) | + LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) | + LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) { + i40iw_sc_cqp_post_sq(cqp); + ret_code = i40iw_sc_ccq_create_done(ccq); + if (ret_code) + return ret_code; + } + cqp->process_cqp_sds = i40iw_cqp_sds_cmd; + + return 0; +} + +/** + * i40iw_sc_ccq_destroy - destroy ccq during close + * @ccq: ccq sc struct + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq, + u64 scratch, + bool post_sq) +{ + struct i40iw_sc_cqp *cqp; + u64 *wqe; + u64 header; + enum i40iw_status_code ret_code = 0; + u32 tail, val, error; + + cqp = ccq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 0, ccq->cq_uk.cq_size); + set_64bit_val(wqe, 8, RS_64_1(ccq, 1)); + set_64bit_val(wqe, 40, ccq->shadow_area_pa); + + header = ccq->cq_uk.cq_id | + LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) | + LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) | + LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) | + LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) | + LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) | + LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + i40iw_get_cqp_reg_info(cqp, &val, &tail, &error); + if (error) + return I40IW_ERR_CQP_COMPL_ERROR; + + if (post_sq) { + i40iw_sc_cqp_post_sq(cqp); + ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000); + } + + return ret_code; +} + +/** + * i40iw_sc_cq_init - initialize completion q + * @cq: cq struct + * @info: cq initialization info + */ +static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq, + struct i40iw_cq_init_info *info) +{ + u32 __iomem *cqe_alloc_reg = NULL; + enum i40iw_status_code ret_code; + u32 pble_obj_cnt; + u32 arm_offset; + + pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; + + if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt)) + return I40IW_ERR_INVALID_PBLE_INDEX; + + cq->cq_pa = info->cq_base_pa; + cq->dev = info->dev; + cq->ceq_id = info->ceq_id; + arm_offset = (info->dev->is_pf) ? I40E_PFPE_CQARM : I40E_VFPE_CQARM1; + if (i40iw_get_hw_addr(cq->dev)) + cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) + + arm_offset); + info->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg; + ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info); + if (ret_code) + return ret_code; + cq->virtual_map = info->virtual_map; + cq->pbl_chunk_size = info->pbl_chunk_size; + cq->ceqe_mask = info->ceqe_mask; + cq->cq_type = (info->type) ? 
info->type : I40IW_CQ_TYPE_IWARP; + + cq->shadow_area_pa = info->shadow_area_pa; + cq->shadow_read_threshold = info->shadow_read_threshold; + + cq->ceq_id_valid = info->ceq_id_valid; + cq->tph_en = info->tph_en; + cq->tph_val = info->tph_val; + + cq->first_pm_pbl_idx = info->first_pm_pbl_idx; + + return 0; +} + +/** + * i40iw_sc_cq_create - create completion q + * @cq: cq struct + * @scratch: u64 saved to be used during cqp completion + * @check_overflow: flag for overflow check + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq, + u64 scratch, + bool check_overflow, + bool post_sq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + + if (cq->cq_uk.cq_id > I40IW_MAX_CQID) + return I40IW_ERR_INVALID_CQ_ID; + + if (cq->ceq_id > I40IW_MAX_CEQID) + return I40IW_ERR_INVALID_CEQ_ID; + + cqp = cq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 0, cq->cq_uk.cq_size); + set_64bit_val(wqe, 8, RS_64_1(cq, 1)); + set_64bit_val(wqe, + 16, + LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD)); + + set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa)); + + set_64bit_val(wqe, 40, cq->shadow_area_pa); + set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)); + set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL)); + + header = cq->cq_uk.cq_id | + LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) | + LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) | + LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) | + LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) | + LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) | + LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) | + LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) | + LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) | + LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_cq_destroy - destroy completion q + * @cq: cq struct + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq, + u64 scratch, + bool post_sq) +{ + struct i40iw_sc_cqp *cqp; + u64 *wqe; + u64 header; + + cqp = cq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 0, cq->cq_uk.cq_size); + set_64bit_val(wqe, 8, RS_64_1(cq, 1)); + set_64bit_val(wqe, 40, cq->shadow_area_pa); + set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)); + + header = cq->cq_uk.cq_id | + LS_64((cq->ceq_id_valid ? 
cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) | + LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) | + LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) | + LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) | + LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) | + LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) | + LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) | + LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_cq_modify - modify a Completion Queue + * @cq: cq struct + * @info: modification info struct + * @scratch: + * @post_sq: flag to post to sq + */ +static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq, + struct i40iw_modify_cq_info *info, + u64 scratch, + bool post_sq) +{ + struct i40iw_sc_cqp *cqp; + u64 *wqe; + u64 header; + u32 cq_size, ceq_id, first_pm_pbl_idx; + u8 pbl_chunk_size; + bool virtual_map, ceq_id_valid, check_overflow; + u32 pble_obj_cnt; + + if (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID)) + return I40IW_ERR_INVALID_CEQ_ID; + + pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; + + if (info->cq_resize && info->virtual_map && + (info->first_pm_pbl_idx >= pble_obj_cnt)) + return I40IW_ERR_INVALID_PBLE_INDEX; + + cqp = cq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + cq->pbl_list = info->pbl_list; + cq->cq_pa = info->cq_pa; + cq->first_pm_pbl_idx = info->first_pm_pbl_idx; + + cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size; + if (info->ceq_change) { + ceq_id_valid = true; + ceq_id = info->ceq_id; + } else { + ceq_id_valid = cq->ceq_id_valid; + ceq_id = ceq_id_valid ? cq->ceq_id : 0; + } + virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map; + first_pm_pbl_idx = (info->cq_resize ? + (info->virtual_map ? info->first_pm_pbl_idx : 0) : + (cq->virtual_map ? cq->first_pm_pbl_idx : 0)); + pbl_chunk_size = (info->cq_resize ? + (info->virtual_map ? info->pbl_chunk_size : 0) : + (cq->virtual_map ? cq->pbl_chunk_size : 0)); + check_overflow = info->check_overflow_change ? info->check_overflow : + cq->check_overflow; + cq->cq_uk.cq_size = cq_size; + cq->ceq_id_valid = ceq_id_valid; + cq->ceq_id = ceq_id; + cq->virtual_map = virtual_map; + cq->first_pm_pbl_idx = first_pm_pbl_idx; + cq->pbl_chunk_size = pbl_chunk_size; + cq->check_overflow = check_overflow; + + set_64bit_val(wqe, 0, cq_size); + set_64bit_val(wqe, 8, RS_64_1(cq, 1)); + set_64bit_val(wqe, 16, + LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD)); + set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa)); + set_64bit_val(wqe, 40, cq->shadow_area_pa); + set_64bit_val(wqe, 48, (cq->virtual_map ? 
first_pm_pbl_idx : 0)); + set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL)); + + header = cq->cq_uk.cq_id | + LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) | + LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) | + LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) | + LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) | + LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) | + LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) | + LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) | + LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) | + LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) | + LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_qp_init - initialize qp + * @qp: sc qp + * @info: initialization qp info + */ +static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp, + struct i40iw_qp_init_info *info) +{ + u32 __iomem *wqe_alloc_reg = NULL; + enum i40iw_status_code ret_code; + u32 pble_obj_cnt; + u8 wqe_size; + u32 offset; + + qp->dev = info->pd->dev; + qp->sq_pa = info->sq_pa; + qp->rq_pa = info->rq_pa; + qp->hw_host_ctx_pa = info->host_ctx_pa; + qp->q2_pa = info->q2_pa; + qp->shadow_area_pa = info->shadow_area_pa; + + qp->q2_buf = info->q2; + qp->pd = info->pd; + qp->hw_host_ctx = info->host_ctx; + offset = (qp->pd->dev->is_pf) ? I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1; + if (i40iw_get_hw_addr(qp->pd->dev)) + wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) + + offset); + + info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg; + ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info); + if (ret_code) + return ret_code; + qp->virtual_map = info->virtual_map; + + pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; + + if ((info->virtual_map && (info->sq_pa >= pble_obj_cnt)) || + (info->virtual_map && (info->rq_pa >= pble_obj_cnt))) + return I40IW_ERR_INVALID_PBLE_INDEX; + + qp->llp_stream_handle = (void *)(-1); + qp->qp_type = (info->type) ? 
info->type : I40IW_QP_TYPE_IWARP; + + qp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size, + false); + i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n", + __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size); + ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt, + &wqe_size); + if (ret_code) + return ret_code; + qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size * + (wqe_size / I40IW_QP_WQE_MIN_SIZE), false); + i40iw_debug(qp->dev, I40IW_DEBUG_WQE, + "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n", + __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size); + qp->sq_tph_val = info->sq_tph_val; + qp->rq_tph_val = info->rq_tph_val; + qp->sq_tph_en = info->sq_tph_en; + qp->rq_tph_en = info->rq_tph_en; + qp->rcv_tph_en = info->rcv_tph_en; + qp->xmit_tph_en = info->xmit_tph_en; + qp->qs_handle = qp->pd->dev->qs_handle; + qp->exception_lan_queue = qp->pd->dev->exception_lan_queue; + + return 0; +} + +/** + * i40iw_sc_qp_create - create qp + * @qp: sc qp + * @info: qp create info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_qp_create( + struct i40iw_sc_qp *qp, + struct i40iw_create_qp_info *info, + u64 scratch, + bool post_sq) +{ + struct i40iw_sc_cqp *cqp; + u64 *wqe; + u64 header; + + if ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) || + (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID)) + return I40IW_ERR_INVALID_QP_ID; + + cqp = qp->pd->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); + + set_64bit_val(wqe, 40, qp->shadow_area_pa); + + header = qp->qp_uk.qp_id | + LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) | + LS_64((info->ord_valid ? 
1 : 0), I40IW_CQPSQ_QP_ORDVALID) | + LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) | + LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | + LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) | + LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) | + LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) | + LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) | + LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_qp_modify - modify qp cqp wqe + * @qp: sc qp + * @info: modify qp info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_qp_modify( + struct i40iw_sc_qp *qp, + struct i40iw_modify_qp_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + u8 term_actions = 0; + u8 term_len = 0; + + cqp = qp->pd->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + if (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) { + if (info->dont_send_fin) + term_actions += I40IWQP_TERM_SEND_TERM_ONLY; + if (info->dont_send_term) + term_actions += I40IWQP_TERM_SEND_FIN_ONLY; + if ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) || + (term_actions == I40IWQP_TERM_SEND_TERM_ONLY)) + term_len = info->termlen; + } + + set_64bit_val(wqe, + 8, + LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) | + LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN)); + + set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); + set_64bit_val(wqe, 40, qp->shadow_area_pa); + + header = qp->qp_uk.qp_id | + LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) | + LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) | + LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) | + LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) | + LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) | + LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) | + LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) | + LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | + LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) | + LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) | + LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) | + LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) | + LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) | + LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) | + LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_qp_destroy - cqp destroy qp + * @qp: sc qp + * @scratch: u64 saved to be used during cqp completion + * @remove_hash_idx: flag if to remove hash idx + * @ignore_mw_bnd: memory window bind flag + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_qp_destroy( + struct i40iw_sc_qp *qp, + u64 scratch, + bool remove_hash_idx, + bool ignore_mw_bnd, + bool post_sq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + + cqp = qp->pd->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); + 
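/*
 * Illustrative sketch, not part of the patch: the skeleton that every CQP
 * command in this file follows (i40iw_sc_qp_create/_modify/_destroy above are
 * typical).  The demo_* helpers are simplified stand-ins for
 * i40iw_sc_cqp_get_next_send_wqe(), i40iw_insert_wqe_hdr() and
 * i40iw_sc_cqp_post_sq().  What matters is the ordering: payload quadwords are
 * written first, the header carrying the opcode and the CQP polarity/valid bit
 * is written last (note that the real functions never write byte offset 24
 * with set_64bit_val; that slot is left for the header), and the doorbell is
 * rung only afterwards, and only when the caller sets post_sq.
 */
#include <stdint.h>
#include <string.h>

#define DEMO_WQE_QWORDS	8	/* one WQE is 8 x 64 bits in this sketch */

struct demo_cqp {
	uint64_t sq[4 * DEMO_WQE_QWORDS];
	unsigned int head;
	unsigned int size;		/* ring size in WQEs */
	unsigned int polarity;
};

static uint64_t *demo_get_next_send_wqe(struct demo_cqp *cqp)
{
	uint64_t *wqe;

	if (cqp->head >= cqp->size)
		return NULL;		/* maps to I40IW_ERR_RING_FULL */
	wqe = &cqp->sq[cqp->head++ * DEMO_WQE_QWORDS];
	memset(wqe, 0, DEMO_WQE_QWORDS * sizeof(*wqe));
	return wqe;
}

static void demo_insert_wqe_hdr(uint64_t *wqe, uint64_t header)
{
	/* the driver issues a write barrier before this store so the
	 * hardware never sees a valid header ahead of half-written
	 * payload words */
	wqe[3] = header;	/* quadword 3, i.e. the byte-24 header slot */
}

static int demo_cqp_cmd(struct demo_cqp *cqp, uint64_t ctx_pa,
			uint64_t opcode, int post_sq)
{
	uint64_t *wqe = demo_get_next_send_wqe(cqp);

	if (!wqe)
		return -1;
	wqe[2] = ctx_pa;		/* payload first ... */
	demo_insert_wqe_hdr(wqe, opcode << 32 |
			    (uint64_t)cqp->polarity << 63);
	if (post_sq) {
		/* ... then ring the doorbell (i40iw_sc_cqp_post_sq);
		 * callers may instead batch WQEs and ring once */
	}
	return 0;
}

int main(void)
{
	struct demo_cqp cqp = { .size = 4, .polarity = 1 };

	/* opcode value is arbitrary here */
	return demo_cqp_cmd(&cqp, 0x1000, 0x0d, 1);
}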
set_64bit_val(wqe, 40, qp->shadow_area_pa); + + header = qp->qp_uk.qp_id | + LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) | + LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | + LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) | + LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_qp_flush_wqes - flush qp's wqe + * @qp: sc qp + * @info: dlush information + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_qp_flush_wqes( + struct i40iw_sc_qp *qp, + struct i40iw_qp_flush_info *info, + u64 scratch, + bool post_sq) +{ + u64 temp = 0; + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + bool flush_sq = false, flush_rq = false; + + if (info->rq && !qp->flush_rq) + flush_rq = true; + + if (info->sq && !qp->flush_sq) + flush_sq = true; + + qp->flush_sq |= flush_sq; + qp->flush_rq |= flush_rq; + if (!flush_sq && !flush_rq) { + if (info->ae_code != I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR) + return 0; + } + + cqp = qp->pd->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + if (info->userflushcode) { + if (flush_rq) { + temp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) | + LS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR); + } + if (flush_sq) { + temp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) | + LS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR); + } + } + set_64bit_val(wqe, 16, temp); + + temp = (info->generate_ae) ? + info->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0; + + set_64bit_val(wqe, 8, temp); + + header = qp->qp_uk.qp_id | + LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) | + LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) | + LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) | + LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) | + LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_qp_upload_context - upload qp's context + * @dev: sc device struct + * @info: upload context info ptr for return + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_qp_upload_context( + struct i40iw_sc_dev *dev, + struct i40iw_upload_context_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + + cqp = dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 16, info->buf_pa); + + header = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) | + LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) | + LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) | + LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) | + LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QP_UPLOAD_CTX WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * 
i40iw_sc_qp_setctx - set qp's context + * @qp: sc qp + * @qp_ctx: context ptr + * @info: ctx info + */ +static enum i40iw_status_code i40iw_sc_qp_setctx( + struct i40iw_sc_qp *qp, + u64 *qp_ctx, + struct i40iw_qp_host_ctx_info *info) +{ + struct i40iwarp_offload_info *iw; + struct i40iw_tcp_offload_info *tcp; + u64 qw0, qw3, qw7 = 0; + + iw = info->iwarp_info; + tcp = info->tcp_info; + qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) | + LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) | + LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) | + LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) | + LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) | + LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) | + LS_64(info->push_idx, I40IWQPC_PPIDX) | + LS_64(info->push_mode_en, I40IWQPC_PMENA); + + set_64bit_val(qp_ctx, 8, qp->sq_pa); + set_64bit_val(qp_ctx, 16, qp->rq_pa); + + qw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) | + LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) | + LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE); + + set_64bit_val(qp_ctx, + 128, + LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX)); + + set_64bit_val(qp_ctx, + 136, + LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) | + LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM)); + + set_64bit_val(qp_ctx, + 168, + LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX)); + set_64bit_val(qp_ctx, + 176, + LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) | + LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) | + LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) | + LS_64(qp->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE)); + + if (info->iwarp_info_valid) { + qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) | + LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER); + + qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX); + set_64bit_val(qp_ctx, 144, qp->q2_pa); + set_64bit_val(qp_ctx, + 152, + LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT)); + + /* + * Hard-code IRD_SIZE to hw-limit, 128, in qpctx, i.e matching an + *advertisable IRD of 64 + */ + iw->ird_size = I40IW_QPCTX_ENCD_MAXIRD; + set_64bit_val(qp_ctx, + 160, + LS_64(iw->ord_size, I40IWQPC_ORDSIZE) | + LS_64(iw->ird_size, I40IWQPC_IRDSIZE) | + LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) | + LS_64(iw->rd_enable, I40IWQPC_RDOK) | + LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) | + LS_64(iw->bind_en, I40IWQPC_BINDEN) | + LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) | + LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) | + LS_64(1, I40IWQPC_IWARPMODE) | + LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) | + LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) | + LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) | + LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) | + LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET)); + } + if (info->tcp_info_valid) { + qw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) | + LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) | + LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) | + LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) | + LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) | + LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) | + LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH); + + qw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) | + LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) | + LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) | + LS_64(tcp->tos, I40IWQPC_TOS) | + LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) | + LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM); + + qp->src_mac_addr_idx = tcp->src_mac_addr_idx; + set_64bit_val(qp_ctx, + 32, + LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) | + LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3)); + + set_64bit_val(qp_ctx, + 40, + LS_64(tcp->dest_ip_addr0, 
I40IWQPC_DESTIPADDR0) | + LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1)); + + set_64bit_val(qp_ctx, + 48, + LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) | + LS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) | + LS_64(tcp->arp_idx, I40IWQPC_ARPIDX)); + + qw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) | + LS_64(tcp->wscale, I40IWQPC_WSCALE) | + LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) | + LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) | + LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) | + LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) | + LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE); + + set_64bit_val(qp_ctx, + 72, + LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) | + LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE)); + set_64bit_val(qp_ctx, + 80, + LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) | + LS_64(tcp->snd_wnd, I40IWQPC_SNDWND)); + + set_64bit_val(qp_ctx, + 88, + LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) | + LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND)); + set_64bit_val(qp_ctx, + 96, + LS_64(tcp->snd_max, I40IWQPC_SNDMAX) | + LS_64(tcp->snd_una, I40IWQPC_SNDUNA)); + set_64bit_val(qp_ctx, + 104, + LS_64(tcp->srtt, I40IWQPC_SRTT) | + LS_64(tcp->rtt_var, I40IWQPC_RTTVAR)); + set_64bit_val(qp_ctx, + 112, + LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) | + LS_64(tcp->cwnd, I40IWQPC_CWND)); + set_64bit_val(qp_ctx, + 120, + LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) | + LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2)); + set_64bit_val(qp_ctx, + 128, + LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) | + LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH)); + set_64bit_val(qp_ctx, + 184, + LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) | + LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2)); + set_64bit_val(qp_ctx, + 192, + LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) | + LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0)); + } + + set_64bit_val(qp_ctx, 0, qw0); + set_64bit_val(qp_ctx, 24, qw3); + set_64bit_val(qp_ctx, 56, qw7); + + i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "QP_HOST)CTX WQE", + qp_ctx, I40IW_QP_CTX_SIZE); + return 0; +} + +/** + * i40iw_sc_alloc_stag - mr stag alloc + * @dev: sc device struct + * @info: stag info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_alloc_stag( + struct i40iw_sc_dev *dev, + struct i40iw_allocate_stag_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + + cqp = dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, + 8, + LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) | + LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN)); + set_64bit_val(wqe, + 16, + LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX)); + set_64bit_val(wqe, + 40, + LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX)); + + header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) | + LS_64(1, I40IW_CQPSQ_STAG_MR) | + LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) | + LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) | + LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) | + LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) | + LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) | + LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "ALLOC_STAG WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * 
i40iw_sc_mr_reg_non_shared - non-shared mr registration + * @dev: sc device struct + * @info: mr info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_mr_reg_non_shared( + struct i40iw_sc_dev *dev, + struct i40iw_reg_ns_stag_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + u64 temp; + struct i40iw_sc_cqp *cqp; + u64 header; + u32 pble_obj_cnt; + bool remote_access; + u8 addr_type; + + if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY | + I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY)) + remote_access = true; + else + remote_access = false; + + pble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; + + if (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt)) + return I40IW_ERR_INVALID_PBLE_INDEX; + + cqp = dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo; + set_64bit_val(wqe, 0, temp); + + set_64bit_val(wqe, + 8, + LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) | + LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID)); + + set_64bit_val(wqe, + 16, + LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) | + LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX)); + if (!info->chunk_size) { + set_64bit_val(wqe, 32, info->reg_addr_pa); + set_64bit_val(wqe, 48, 0); + } else { + set_64bit_val(wqe, 32, 0); + set_64bit_val(wqe, 48, info->first_pm_pbl_index); + } + set_64bit_val(wqe, 40, info->hmc_fcn_index); + set_64bit_val(wqe, 56, 0); + + addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0; + header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) | + LS_64(1, I40IW_CQPSQ_STAG_MR) | + LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) | + LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) | + LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) | + LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) | + LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) | + LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) | + LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_NS WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_mr_reg_shared - registered shared memory region + * @dev: sc device struct + * @info: info for shared memory registeration + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_mr_reg_shared( + struct i40iw_sc_dev *dev, + struct i40iw_register_shared_stag *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 temp, va64, fbo, header; + u32 va32; + bool remote_access; + u8 addr_type; + + if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY | + I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY)) + remote_access = true; + else + remote_access = false; + cqp = dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + va64 = (uintptr_t)(info->va); + va32 = (u32)(va64 & 0x00000000FFFFFFFF); + fbo = (u64)(va32 & (4096 - 1)); + + set_64bit_val(wqe, + 0, + (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? 
(uintptr_t)info->va : fbo)); + + set_64bit_val(wqe, + 8, + LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID)); + temp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) | + LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) | + LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX); + set_64bit_val(wqe, 16, temp); + + addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0; + header = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) | + LS_64(1, I40IW_CQPSQ_STAG_MR) | + LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) | + LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) | + LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_SHARED WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_dealloc_stag - deallocate stag + * @dev: sc device struct + * @info: dealloc stag info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_dealloc_stag( + struct i40iw_sc_dev *dev, + struct i40iw_dealloc_stag_info *info, + u64 scratch, + bool post_sq) +{ + u64 header; + u64 *wqe; + struct i40iw_sc_cqp *cqp; + + cqp = dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, + 8, + LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID)); + set_64bit_val(wqe, + 16, + LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX)); + + header = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) | + LS_64(info->mr, I40IW_CQPSQ_STAG_MR) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "DEALLOC_STAG WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_query_stag - query hardware for stag + * @dev: sc device struct + * @scratch: u64 saved to be used during cqp completion + * @stag_index: stag index for query + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev, + u64 scratch, + u32 stag_index, + bool post_sq) +{ + u64 header; + u64 *wqe; + struct i40iw_sc_cqp *cqp; + + cqp = dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, + 16, + LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX)); + + header = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QUERY_STAG WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_mw_alloc - mw allocate + * @dev: sc device struct + * @scratch: u64 saved to be used during cqp completion + * @mw_stag_index:stag index + * @pd_id: pd is for this mw + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_mw_alloc( + struct i40iw_sc_dev *dev, + u64 scratch, + u32 mw_stag_index, + u16 pd_id, + bool post_sq) +{ + u64 header; + struct i40iw_sc_cqp *cqp; + u64 *wqe; + + cqp = dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID)); + set_64bit_val(wqe, + 16, + LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX)); + + header = LS_64(I40IW_CQP_OP_ALLOC_STAG, 
I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MW_ALLOC WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_send_lsmm - send last streaming mode message + * @qp: sc qp struct + * @lsmm_buf: buffer with lsmm message + * @size: size of lsmm buffer + * @stag: stag of lsmm buffer + */ +static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp, + void *lsmm_buf, + u32 size, + i40iw_stag stag) +{ + u64 *wqe; + u64 header; + struct i40iw_qp_uk *qp_uk; + + qp_uk = &qp->qp_uk; + wqe = qp_uk->sq_base->elem; + + set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf); + + set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG))); + + set_64bit_val(wqe, 16, 0); + + header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) | + LS_64(1, I40IWQPSQ_STREAMMODE) | + LS_64(1, I40IWQPSQ_WAITFORRCVPDU) | + LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, "SEND_LSMM WQE", + wqe, I40IW_QP_WQE_MIN_SIZE); +} + +/** + * i40iw_sc_send_lsmm_nostag - for privilege qp + * @qp: sc qp struct + * @lsmm_buf: buffer with lsmm message + * @size: size of lsmm buffer + */ +static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp, + void *lsmm_buf, + u32 size) +{ + u64 *wqe; + u64 header; + struct i40iw_qp_uk *qp_uk; + + qp_uk = &qp->qp_uk; + wqe = qp_uk->sq_base->elem; + + set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf); + + set_64bit_val(wqe, 8, size); + + set_64bit_val(wqe, 16, 0); + + header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) | + LS_64(1, I40IWQPSQ_STREAMMODE) | + LS_64(1, I40IWQPSQ_WAITFORRCVPDU) | + LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE", + wqe, I40IW_QP_WQE_MIN_SIZE); +} + +/** + * i40iw_sc_send_rtt - send last read0 or write0 + * @qp: sc qp struct + * @read: Do read0 or write0 + */ +static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read) +{ + u64 *wqe; + u64 header; + struct i40iw_qp_uk *qp_uk; + + qp_uk = &qp->qp_uk; + wqe = qp_uk->sq_base->elem; + + set_64bit_val(wqe, 0, 0); + set_64bit_val(wqe, 8, 0); + set_64bit_val(wqe, 16, 0); + if (read) { + header = LS_64(0x1234, I40IWQPSQ_REMSTAG) | + LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) | + LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); + set_64bit_val(wqe, 8, ((u64)0xabcd << 32)); + } else { + header = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) | + LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); + } + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "RTR WQE", + wqe, I40IW_QP_WQE_MIN_SIZE); +} + +/** + * i40iw_sc_post_wqe0 - send wqe with opcode + * @qp: sc qp struct + * @opcode: opcode to use for wqe0 + */ +static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode) +{ + u64 *wqe; + u64 header; + struct i40iw_qp_uk *qp_uk; + + qp_uk = &qp->qp_uk; + wqe = qp_uk->sq_base->elem; + + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + switch (opcode) { + case I40IWQP_OP_NOP: + set_64bit_val(wqe, 0, 0); + set_64bit_val(wqe, 8, 0); + set_64bit_val(wqe, 16, 0); + header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) | + LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); + + i40iw_insert_wqe_hdr(wqe, header); + break; + case I40IWQP_OP_RDMA_SEND: + set_64bit_val(wqe, 0, 0); + set_64bit_val(wqe, 8, 0); + set_64bit_val(wqe, 16, 0); + header = 
LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) | + LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) | + LS_64(1, I40IWQPSQ_STREAMMODE) | + LS_64(1, I40IWQPSQ_WAITFORRCVPDU); + + i40iw_insert_wqe_hdr(wqe, header); + break; + default: + i40iw_debug(qp->dev, I40IW_DEBUG_QP, "%s: Invalid WQE zero opcode\n", + __func__); + break; + } + return 0; +} + +/** + * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info + * @dev : ptr to i40iw_dev struct + * @hmc_fn_id: hmc function id + */ +enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id) +{ + struct i40iw_hmc_info *hmc_info; + struct i40iw_dma_mem query_fpm_mem; + struct i40iw_virt_mem virt_mem; + struct i40iw_vfdev *vf_dev = NULL; + u32 mem_size; + enum i40iw_status_code ret_code = 0; + bool poll_registers = true; + u16 iw_vf_idx; + u8 wait_type; + + if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID || + (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID)) + return I40IW_ERR_INVALID_HMCFN_ID; + + i40iw_debug(dev, I40IW_DEBUG_HMC, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id, + dev->hmc_fn_id); + if (hmc_fn_id == dev->hmc_fn_id) { + hmc_info = dev->hmc_info; + query_fpm_mem.pa = dev->fpm_query_buf_pa; + query_fpm_mem.va = dev->fpm_query_buf; + } else { + vf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id); + if (!vf_dev) + return I40IW_ERR_INVALID_VF_ID; + + hmc_info = &vf_dev->hmc_info; + iw_vf_idx = vf_dev->iw_vf_idx; + i40iw_debug(dev, I40IW_DEBUG_HMC, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev, + hmc_info, hmc_info->hmc_obj); + if (!vf_dev->fpm_query_buf) { + if (!dev->vf_fpm_query_buf[iw_vf_idx].va) { + ret_code = i40iw_alloc_query_fpm_buf(dev, + &dev->vf_fpm_query_buf[iw_vf_idx]); + if (ret_code) + return ret_code; + } + vf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va; + vf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa; + } + query_fpm_mem.pa = vf_dev->fpm_query_buf_pa; + query_fpm_mem.va = vf_dev->fpm_query_buf; + /** + * It is HARDWARE specific: + * this call is done by PF for VF and + * i40iw_sc_query_fpm_values needs ccq poll + * because PF ccq is already created. + */ + poll_registers = false; + } + + hmc_info->hmc_fn_id = hmc_fn_id; + + if (hmc_fn_id != dev->hmc_fn_id) { + ret_code = + i40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id); + } else { + wait_type = poll_registers ? 
(u8)I40IW_CQP_WAIT_POLL_REGS : + (u8)I40IW_CQP_WAIT_POLL_CQ; + + ret_code = i40iw_sc_query_fpm_values( + dev->cqp, + 0, + hmc_info->hmc_fn_id, + &query_fpm_mem, + true, + wait_type); + } + if (ret_code) + return ret_code; + + /* parse the fpm_query_buf and fill hmc obj info */ + ret_code = + i40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va, + hmc_info, + &dev->hmc_fpm_misc); + if (ret_code) + return ret_code; + i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "QUERY FPM BUFFER", + query_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE); + + if (hmc_fn_id != dev->hmc_fn_id) { + i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id); + + /* parse the fpm_commit_buf and fill hmc obj info */ + i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj); + mem_size = sizeof(struct i40iw_hmc_sd_entry) * + (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index); + ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size); + if (ret_code) + return ret_code; + hmc_info->sd_table.sd_entry = virt_mem.va; + } + + /* fill size of objects which are fixed */ + hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4; + hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4; + hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8; + hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192; + hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1; + + return ret_code; +} + +/** + * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and + * populates fpm base address in hmc_info + * @dev : ptr to i40iw_dev struct + * @hmc_fn_id: hmc function id + */ +static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev, + u8 hmc_fn_id) +{ + struct i40iw_hmc_info *hmc_info; + struct i40iw_hmc_obj_info *obj_info; + u64 *buf; + struct i40iw_dma_mem commit_fpm_mem; + u32 i, j; + enum i40iw_status_code ret_code = 0; + bool poll_registers = true; + u8 wait_type; + + if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID || + (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID)) + return I40IW_ERR_INVALID_HMCFN_ID; + + if (hmc_fn_id == dev->hmc_fn_id) { + hmc_info = dev->hmc_info; + } else { + hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id); + poll_registers = false; + } + if (!hmc_info) + return I40IW_ERR_BAD_PTR; + + obj_info = hmc_info->hmc_obj; + buf = dev->fpm_commit_buf; + + /* copy cnt values in commit buf */ + for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; + i++, j += 8) + set_64bit_val(buf, j, (u64)obj_info[i].cnt); + + set_64bit_val(buf, 40, 0); /* APBVT rsvd */ + + commit_fpm_mem.pa = dev->fpm_commit_buf_pa; + commit_fpm_mem.va = dev->fpm_commit_buf; + wait_type = poll_registers ? 
(u8)I40IW_CQP_WAIT_POLL_REGS : + (u8)I40IW_CQP_WAIT_POLL_CQ; + ret_code = i40iw_sc_commit_fpm_values( + dev->cqp, + 0, + hmc_info->hmc_fn_id, + &commit_fpm_mem, + true, + wait_type); + + /* parse the fpm_commit_buf and fill hmc obj info */ + if (!ret_code) + ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf, hmc_info->hmc_obj); + + i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER", + commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE); + + return ret_code; +} + +/** + * cqp_sds_wqe_fill - fill cqp wqe doe sd + * @cqp: struct for cqp hw + * @info; sd info for wqe + * @scratch: u64 saved to be used during cqp completion + */ +static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp, + struct i40iw_update_sds_info *info, + u64 scratch) +{ + u64 data; + u64 header; + u64 *wqe; + int mem_entries, wqe_entries; + struct i40iw_dma_mem *sdbuf = &cqp->sdbuf; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + I40IW_CQP_INIT_WQE(wqe); + wqe_entries = (info->cnt > 3) ? 3 : info->cnt; + mem_entries = info->cnt - wqe_entries; + + header = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) | + LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT); + + if (mem_entries) { + memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4)); + data = sdbuf->pa; + } else { + data = 0; + } + data |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID); + + set_64bit_val(wqe, 16, data); + + switch (wqe_entries) { + case 3: + set_64bit_val(wqe, 48, + (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) | + LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID))); + + set_64bit_val(wqe, 56, info->entry[2].data); + /* fallthrough */ + case 2: + set_64bit_val(wqe, 32, + (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) | + LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID))); + + set_64bit_val(wqe, 40, info->entry[1].data); + /* fallthrough */ + case 1: + set_64bit_val(wqe, 0, + LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD)); + + set_64bit_val(wqe, 8, info->entry[0].data); + break; + default: + break; + } + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + return 0; +} + +/** + * i40iw_update_pe_sds - cqp wqe for sd + * @dev: ptr to i40iw_dev struct + * @info: sd info for sd's + * @scratch: u64 saved to be used during cqp completion + */ +static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev, + struct i40iw_update_sds_info *info, + u64 scratch) +{ + struct i40iw_sc_cqp *cqp = dev->cqp; + enum i40iw_status_code ret_code; + + ret_code = cqp_sds_wqe_fill(cqp, info, scratch); + if (!ret_code) + i40iw_sc_cqp_post_sq(cqp); + + return ret_code; +} + +/** + * i40iw_update_sds_noccq - update sd before ccq created + * @dev: sc device struct + * @info: sd info for sd's + */ +enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev, + struct i40iw_update_sds_info *info) +{ + u32 error, val, tail; + struct i40iw_sc_cqp *cqp = dev->cqp; + enum i40iw_status_code ret_code; + + ret_code = cqp_sds_wqe_fill(cqp, info, 0); + if (ret_code) + return ret_code; + i40iw_get_cqp_reg_info(cqp, &val, &tail, &error); + if (error) + return I40IW_ERR_CQP_COMPL_ERROR; + + i40iw_sc_cqp_post_sq(cqp); + ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT); + + return ret_code; +} + +/** + * i40iw_sc_suspend_qp - suspend qp for param change + * @cqp: struct for cqp hw + * @qp: sc qp struct + * @scratch: u64 saved to be used 
during cqp completion + */ +enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp, + struct i40iw_sc_qp *qp, + u64 scratch) +{ + u64 header; + u64 *wqe; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) | + LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_resume_qp - resume qp after suspend + * @cqp: struct for cqp hw + * @qp: sc qp struct + * @scratch: u64 saved to be used during cqp completion + */ +enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp, + struct i40iw_sc_qp *qp, + u64 scratch) +{ + u64 header; + u64 *wqe; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, + 16, + LS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE)); + + header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) | + LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @hmc_fn_id: hmc function id + * @post_sq: flag for cqp db to ring + * @poll_registers: flag to poll register for cqp completion + */ +enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated( + struct i40iw_sc_cqp *cqp, + u64 scratch, + u8 hmc_fn_id, + bool post_sq, + bool poll_registers) +{ + u64 header; + u64 *wqe; + u32 tail, val, error; + enum i40iw_status_code ret_code = 0; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, + 16, + LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID)); + + header = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + i40iw_get_cqp_reg_info(cqp, &val, &tail, &error); + if (error) { + ret_code = I40IW_ERR_CQP_COMPL_ERROR; + return ret_code; + } + if (post_sq) { + i40iw_sc_cqp_post_sq(cqp); + if (poll_registers) + /* check for cqp sq tail update */ + ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000); + else + ret_code = i40iw_sc_poll_for_cqp_op_done(cqp, + I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, + NULL); + } + + return ret_code; +} + +/** + * i40iw_ring_full - check if cqp ring is full + * @cqp: struct for cqp hw + */ +static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp) +{ + return I40IW_RING_FULL_ERR(cqp->sq_ring); +} + +/** + * i40iw_config_fpm_values - configure HMC objects + * @dev: sc device struct + * @qp_count: desired qp count + */ +enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count) +{ + struct i40iw_virt_mem virt_mem; + u32 i, mem_size; + u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted; + u32 powerof2; + u64 sd_needed, bytes_needed; + u32 loop_count = 0; + + struct i40iw_hmc_info *hmc_info; + struct i40iw_hmc_fpm_misc *hmc_fpm_misc; + enum 
i40iw_status_code ret_code = 0; + + hmc_info = dev->hmc_info; + hmc_fpm_misc = &dev->hmc_fpm_misc; + + ret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id); + if (ret_code) { + i40iw_debug(dev, I40IW_DEBUG_HMC, + "i40iw_sc_init_iw_hmc returned error_code = %d\n", + ret_code); + return ret_code; + } + + bytes_needed = 0; + for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) { + hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt; + bytes_needed += + (hmc_info->hmc_obj[i].max_cnt) * (hmc_info->hmc_obj[i].size); + i40iw_debug(dev, I40IW_DEBUG_HMC, + "%s i[%04d] max_cnt[0x%04X] size[0x%04llx]\n", + __func__, i, hmc_info->hmc_obj[i].max_cnt, + hmc_info->hmc_obj[i].size); + } + sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1; /* round up */ + i40iw_debug(dev, I40IW_DEBUG_HMC, + "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n", + __func__, sd_needed, hmc_info->first_sd_index); + i40iw_debug(dev, I40IW_DEBUG_HMC, + "%s: bytes_needed=0x%llx sd count %d where max sd is %d\n", + __func__, bytes_needed, hmc_info->sd_table.sd_cnt, + hmc_fpm_misc->max_sds); + + qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt); + qpwantedoriginal = qpwanted; + mrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt; + pblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt; + + i40iw_debug(dev, I40IW_DEBUG_HMC, + "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n", + qp_count, hmc_fpm_misc->max_sds, + hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt, + hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt, + hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt, + hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt); + + do { + ++loop_count; + hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted; + hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt = + min(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt); + hmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00; /* Reserved */ + hmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt = + qpwanted * hmc_fpm_misc->ht_multiplier; + hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt = + hmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt; + hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1; + hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted; + + hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted; + hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted; + hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt = + hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size; + hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt = + hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size; + hmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt = + ((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket; + hmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00; + hmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00; + hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted; + + /* How much memory is needed for all the objects. 
*/ + bytes_needed = 0; + for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) + bytes_needed += + (hmc_info->hmc_obj[i].cnt) * (hmc_info->hmc_obj[i].size); + sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1; + if ((loop_count > 1000) || + ((!(loop_count % 10)) && + (qpwanted > qpwantedoriginal * 2 / 3))) { + if (qpwanted > FPM_MULTIPLIER) { + qpwanted -= FPM_MULTIPLIER; + powerof2 = 1; + while (powerof2 < qpwanted) + powerof2 *= 2; + powerof2 /= 2; + qpwanted = powerof2; + } else { + qpwanted /= 2; + } + } + if (mrwanted > FPM_MULTIPLIER * 10) + mrwanted -= FPM_MULTIPLIER * 10; + if (pblewanted > FPM_MULTIPLIER * 1000) + pblewanted -= FPM_MULTIPLIER * 1000; + } while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000); + + bytes_needed = 0; + for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) { + bytes_needed += (hmc_info->hmc_obj[i].cnt) * (hmc_info->hmc_obj[i].size); + i40iw_debug(dev, I40IW_DEBUG_HMC, + "%s i[%04d] cnt[0x%04x] size[0x%04llx]\n", + __func__, i, hmc_info->hmc_obj[i].cnt, + hmc_info->hmc_obj[i].size); + } + sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1; /* round up not truncate. */ + + i40iw_debug(dev, I40IW_DEBUG_HMC, + "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n", + loop_count, sd_needed, + hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt, + hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt, + hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt, + hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt); + + ret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id); + if (ret_code) { + i40iw_debug(dev, I40IW_DEBUG_HMC, + "configure_iw_fpm returned error_code[x%08X]\n", + i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1)); + return ret_code; + } + + hmc_info->sd_table.sd_cnt = (u32)sd_needed; + + mem_size = sizeof(struct i40iw_hmc_sd_entry) * + (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1); + ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size); + if (ret_code) { + i40iw_debug(dev, I40IW_DEBUG_HMC, + "%s: failed to allocate memory for sd_entry buffer\n", + __func__); + return ret_code; + } + hmc_info->sd_table.sd_entry = virt_mem.va; + + return ret_code; +} + +/** + * i40iw_exec_cqp_cmd - execute cqp cmd when wqe are available + * @dev: rdma device + * @pcmdinfo: cqp command info + */ +static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev, + struct cqp_commands_info *pcmdinfo) +{ + enum i40iw_status_code status; + struct i40iw_dma_mem values_mem; + + dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++; + switch (pcmdinfo->cqp_cmd) { + case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY: + status = i40iw_sc_del_local_mac_ipaddr_entry( + pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp, + pcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch, + pcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx, + pcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count, + pcmdinfo->post_sq); + break; + case OP_CEQ_DESTROY: + status = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq, + pcmdinfo->in.u.ceq_destroy.scratch, + pcmdinfo->post_sq); + break; + case OP_AEQ_DESTROY: + status = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq, + pcmdinfo->in.u.aeq_destroy.scratch, + pcmdinfo->post_sq); + + break; + case OP_DELETE_ARP_CACHE_ENTRY: + status = i40iw_sc_del_arp_cache_entry( + pcmdinfo->in.u.del_arp_cache_entry.cqp, + pcmdinfo->in.u.del_arp_cache_entry.scratch, + pcmdinfo->in.u.del_arp_cache_entry.arp_index, + pcmdinfo->post_sq); + break; + case OP_MANAGE_APBVT_ENTRY: + status = i40iw_sc_manage_apbvt_entry( + 
pcmdinfo->in.u.manage_apbvt_entry.cqp, + &pcmdinfo->in.u.manage_apbvt_entry.info, + pcmdinfo->in.u.manage_apbvt_entry.scratch, + pcmdinfo->post_sq); + break; + case OP_CEQ_CREATE: + status = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq, + pcmdinfo->in.u.ceq_create.scratch, + pcmdinfo->post_sq); + break; + case OP_AEQ_CREATE: + status = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq, + pcmdinfo->in.u.aeq_create.scratch, + pcmdinfo->post_sq); + break; + case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY: + status = i40iw_sc_alloc_local_mac_ipaddr_entry( + pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp, + pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch, + pcmdinfo->post_sq); + break; + case OP_ADD_LOCAL_MAC_IPADDR_ENTRY: + status = i40iw_sc_add_local_mac_ipaddr_entry( + pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp, + &pcmdinfo->in.u.add_local_mac_ipaddr_entry.info, + pcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch, + pcmdinfo->post_sq); + break; + case OP_MANAGE_QHASH_TABLE_ENTRY: + status = i40iw_sc_manage_qhash_table_entry( + pcmdinfo->in.u.manage_qhash_table_entry.cqp, + &pcmdinfo->in.u.manage_qhash_table_entry.info, + pcmdinfo->in.u.manage_qhash_table_entry.scratch, + pcmdinfo->post_sq); + + break; + case OP_QP_MODIFY: + status = i40iw_sc_qp_modify( + pcmdinfo->in.u.qp_modify.qp, + &pcmdinfo->in.u.qp_modify.info, + pcmdinfo->in.u.qp_modify.scratch, + pcmdinfo->post_sq); + + break; + case OP_QP_UPLOAD_CONTEXT: + status = i40iw_sc_qp_upload_context( + pcmdinfo->in.u.qp_upload_context.dev, + &pcmdinfo->in.u.qp_upload_context.info, + pcmdinfo->in.u.qp_upload_context.scratch, + pcmdinfo->post_sq); + + break; + case OP_CQ_CREATE: + status = i40iw_sc_cq_create( + pcmdinfo->in.u.cq_create.cq, + pcmdinfo->in.u.cq_create.scratch, + pcmdinfo->in.u.cq_create.check_overflow, + pcmdinfo->post_sq); + break; + case OP_CQ_DESTROY: + status = i40iw_sc_cq_destroy( + pcmdinfo->in.u.cq_destroy.cq, + pcmdinfo->in.u.cq_destroy.scratch, + pcmdinfo->post_sq); + + break; + case OP_QP_CREATE: + status = i40iw_sc_qp_create( + pcmdinfo->in.u.qp_create.qp, + &pcmdinfo->in.u.qp_create.info, + pcmdinfo->in.u.qp_create.scratch, + pcmdinfo->post_sq); + break; + case OP_QP_DESTROY: + status = i40iw_sc_qp_destroy( + pcmdinfo->in.u.qp_destroy.qp, + pcmdinfo->in.u.qp_destroy.scratch, + pcmdinfo->in.u.qp_destroy.remove_hash_idx, + pcmdinfo->in.u.qp_destroy. + ignore_mw_bnd, + pcmdinfo->post_sq); + + break; + case OP_ALLOC_STAG: + status = i40iw_sc_alloc_stag( + pcmdinfo->in.u.alloc_stag.dev, + &pcmdinfo->in.u.alloc_stag.info, + pcmdinfo->in.u.alloc_stag.scratch, + pcmdinfo->post_sq); + break; + case OP_MR_REG_NON_SHARED: + status = i40iw_sc_mr_reg_non_shared( + pcmdinfo->in.u.mr_reg_non_shared.dev, + &pcmdinfo->in.u.mr_reg_non_shared.info, + pcmdinfo->in.u.mr_reg_non_shared.scratch, + pcmdinfo->post_sq); + + break; + case OP_DEALLOC_STAG: + status = i40iw_sc_dealloc_stag( + pcmdinfo->in.u.dealloc_stag.dev, + &pcmdinfo->in.u.dealloc_stag.info, + pcmdinfo->in.u.dealloc_stag.scratch, + pcmdinfo->post_sq); + + break; + case OP_MW_ALLOC: + status = i40iw_sc_mw_alloc( + pcmdinfo->in.u.mw_alloc.dev, + pcmdinfo->in.u.mw_alloc.scratch, + pcmdinfo->in.u.mw_alloc.mw_stag_index, + pcmdinfo->in.u.mw_alloc.pd_id, + pcmdinfo->post_sq); + + break; + case OP_QP_FLUSH_WQES: + status = i40iw_sc_qp_flush_wqes( + pcmdinfo->in.u.qp_flush_wqes.qp, + &pcmdinfo->in.u.qp_flush_wqes.info, + pcmdinfo->in.u.qp_flush_wqes. 
+ scratch, pcmdinfo->post_sq); + break; + case OP_ADD_ARP_CACHE_ENTRY: + status = i40iw_sc_add_arp_cache_entry( + pcmdinfo->in.u.add_arp_cache_entry.cqp, + &pcmdinfo->in.u.add_arp_cache_entry.info, + pcmdinfo->in.u.add_arp_cache_entry.scratch, + pcmdinfo->post_sq); + break; + case OP_MANAGE_PUSH_PAGE: + status = i40iw_sc_manage_push_page( + pcmdinfo->in.u.manage_push_page.cqp, + &pcmdinfo->in.u.manage_push_page.info, + pcmdinfo->in.u.manage_push_page.scratch, + pcmdinfo->post_sq); + break; + case OP_UPDATE_PE_SDS: + /* case I40IW_CQP_OP_UPDATE_PE_SDS */ + status = i40iw_update_pe_sds( + pcmdinfo->in.u.update_pe_sds.dev, + &pcmdinfo->in.u.update_pe_sds.info, + pcmdinfo->in.u.update_pe_sds. + scratch); + + break; + case OP_MANAGE_HMC_PM_FUNC_TABLE: + status = i40iw_sc_manage_hmc_pm_func_table( + pcmdinfo->in.u.manage_hmc_pm.dev->cqp, + pcmdinfo->in.u.manage_hmc_pm.scratch, + (u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id, + pcmdinfo->in.u.manage_hmc_pm.info.free_fcn, + true); + break; + case OP_SUSPEND: + status = i40iw_sc_suspend_qp( + pcmdinfo->in.u.suspend_resume.cqp, + pcmdinfo->in.u.suspend_resume.qp, + pcmdinfo->in.u.suspend_resume.scratch); + break; + case OP_RESUME: + status = i40iw_sc_resume_qp( + pcmdinfo->in.u.suspend_resume.cqp, + pcmdinfo->in.u.suspend_resume.qp, + pcmdinfo->in.u.suspend_resume.scratch); + break; + case OP_MANAGE_VF_PBLE_BP: + status = i40iw_manage_vf_pble_bp( + pcmdinfo->in.u.manage_vf_pble_bp.cqp, + &pcmdinfo->in.u.manage_vf_pble_bp.info, + pcmdinfo->in.u.manage_vf_pble_bp.scratch, true); + break; + case OP_QUERY_FPM_VALUES: + values_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa; + values_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va; + status = i40iw_sc_query_fpm_values( + pcmdinfo->in.u.query_fpm_values.cqp, + pcmdinfo->in.u.query_fpm_values.scratch, + pcmdinfo->in.u.query_fpm_values.hmc_fn_id, + &values_mem, true, I40IW_CQP_WAIT_EVENT); + break; + case OP_COMMIT_FPM_VALUES: + values_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa; + values_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va; + status = i40iw_sc_commit_fpm_values( + pcmdinfo->in.u.commit_fpm_values.cqp, + pcmdinfo->in.u.commit_fpm_values.scratch, + pcmdinfo->in.u.commit_fpm_values.hmc_fn_id, + &values_mem, + true, + I40IW_CQP_WAIT_EVENT); + break; + default: + status = I40IW_NOT_SUPPORTED; + break; + } + + return status; +} + +/** + * i40iw_process_cqp_cmd - process all cqp commands + * @dev: sc device struct + * @pcmdinfo: cqp command info + */ +enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev, + struct cqp_commands_info *pcmdinfo) +{ + enum i40iw_status_code status = 0; + unsigned long flags; + + spin_lock_irqsave(&dev->cqp_lock, flags); + if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) + status = i40iw_exec_cqp_cmd(dev, pcmdinfo); + else + list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head); + spin_unlock_irqrestore(&dev->cqp_lock, flags); + return status; +} + +/** + * i40iw_process_bh - called from tasklet for cqp list + * @dev: sc device struct + */ +enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev) +{ + enum i40iw_status_code status = 0; + struct cqp_commands_info *pcmdinfo; + unsigned long flags; + + spin_lock_irqsave(&dev->cqp_lock, flags); + while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) { + pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head); + + status = i40iw_exec_cqp_cmd(dev, pcmdinfo); + if (status) + break; + } + 
spin_unlock_irqrestore(&dev->cqp_lock, flags); + return status; +} + +/** + * i40iw_iwarp_opcode - determine if incoming is rdma layer + * @info: aeq info for the packet + * @pkt: packet for error + */ +static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt) +{ + u16 *mpa; + u32 opcode = 0xffffffff; + + if (info->q2_data_written) { + mpa = (u16 *)pkt; + opcode = ntohs(mpa[1]) & 0xf; + } + return opcode; +} + +/** + * i40iw_locate_mpa - return pointer to mpa in the pkt + * @pkt: packet with data + */ +static u8 *i40iw_locate_mpa(u8 *pkt) +{ + /* skip over ethernet header */ + pkt += I40IW_MAC_HLEN; + + /* Skip over IP and TCP headers */ + pkt += 4 * (pkt[0] & 0x0f); + pkt += 4 * ((pkt[12] >> 4) & 0x0f); + return pkt; +} + +/** + * i40iw_setup_termhdr - termhdr for terminate pkt + * @qp: sc qp ptr for pkt + * @hdr: term hdr + * @opcode: flush opcode for termhdr + * @layer_etype: error layer + error type + * @err: error cod ein the header + */ +static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp, + struct i40iw_terminate_hdr *hdr, + enum i40iw_flush_opcode opcode, + u8 layer_etype, + u8 err) +{ + qp->flush_code = opcode; + hdr->layer_etype = layer_etype; + hdr->error_code = err; +} + +/** + * i40iw_bld_terminate_hdr - build terminate message header + * @qp: qp associated with received terminate AE + * @info: the struct contiaing AE information + */ +static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp, + struct i40iw_aeqe_info *info) +{ + u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET; + u16 ddp_seg_len; + int copy_len = 0; + u8 is_tagged = 0; + enum i40iw_flush_opcode flush_code = FLUSH_INVALID; + u32 opcode; + struct i40iw_terminate_hdr *termhdr; + + termhdr = (struct i40iw_terminate_hdr *)qp->q2_buf; + memset(termhdr, 0, Q2_BAD_FRAME_OFFSET); + + if (info->q2_data_written) { + /* Use data from offending packet to fill in ddp & rdma hdrs */ + pkt = i40iw_locate_mpa(pkt); + ddp_seg_len = ntohs(*(u16 *)pkt); + if (ddp_seg_len) { + copy_len = 2; + termhdr->hdrct = DDP_LEN_FLAG; + if (pkt[2] & 0x80) { + is_tagged = 1; + if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) { + copy_len += TERM_DDP_LEN_TAGGED; + termhdr->hdrct |= DDP_HDR_FLAG; + } + } else { + if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) { + copy_len += TERM_DDP_LEN_UNTAGGED; + termhdr->hdrct |= DDP_HDR_FLAG; + } + + if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) { + if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) { + copy_len += TERM_RDMA_LEN; + termhdr->hdrct |= RDMA_HDR_FLAG; + } + } + } + } + } + + opcode = i40iw_iwarp_opcode(info, pkt); + + switch (info->ae_id) { + case I40IW_AE_AMP_UNALLOCATED_STAG: + qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; + if (opcode == I40IW_OP_TYPE_RDMA_WRITE) + i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR, + (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG); + else + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG); + break; + case I40IW_AE_AMP_BOUNDS_VIOLATION: + qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; + if (info->q2_data_written) + i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR, + (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS); + else + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS); + break; + case I40IW_AE_AMP_BAD_PD: + switch (opcode) { + case I40IW_OP_TYPE_RDMA_WRITE: + i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR, + (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG); + break; + case 
I40IW_OP_TYPE_SEND_INV: + case I40IW_OP_TYPE_SEND_SOL_INV: + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG); + break; + default: + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG); + } + break; + case I40IW_AE_AMP_INVALID_STAG: + qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG); + break; + case I40IW_AE_AMP_BAD_QP: + i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR, + (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN); + break; + case I40IW_AE_AMP_BAD_STAG_KEY: + case I40IW_AE_AMP_BAD_STAG_INDEX: + qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; + switch (opcode) { + case I40IW_OP_TYPE_SEND_INV: + case I40IW_OP_TYPE_SEND_SOL_INV: + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG); + break; + default: + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG); + } + break; + case I40IW_AE_AMP_RIGHTS_VIOLATION: + case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: + case I40IW_AE_PRIV_OPERATION_DENIED: + qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS); + break; + case I40IW_AE_AMP_TO_WRAP: + qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP); + break; + case I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH: + i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR, + (LAYER_MPA << 4) | DDP_LLP, MPA_MARKER); + break; + case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR: + i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, + (LAYER_MPA << 4) | DDP_LLP, MPA_CRC); + break; + case I40IW_AE_LLP_SEGMENT_TOO_LARGE: + case I40IW_AE_LLP_SEGMENT_TOO_SMALL: + i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR, + (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL); + break; + case I40IW_AE_LCE_QP_CATASTROPHIC: + case I40IW_AE_DDP_NO_L_BIT: + i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR, + (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL); + break; + case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN: + case I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID: + i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, + (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE); + break; + case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: + qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; + i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR, + (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG); + break; + case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION: + if (is_tagged) + i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, + (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER); + else + i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, + (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER); + break; + case I40IW_AE_DDP_UBE_INVALID_MO: + i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, + (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO); + break; + case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE: + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR, + (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF); + break; + case I40IW_AE_DDP_UBE_INVALID_QN: + 
i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, + (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN); + break; + case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION: + i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER); + break; + case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE: + i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP); + break; + default: + i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED); + break; + } + + if (copy_len) + memcpy(termhdr + 1, pkt, copy_len); + + if (flush_code && !info->in_rdrsp_wr) + qp->sq_flush = (info->sq) ? true : false; + + return sizeof(struct i40iw_terminate_hdr) + copy_len; +} + +/** + * i40iw_terminate_send_fin() - Send fin for terminate message + * @qp: qp associated with received terminate AE + */ +void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp) +{ + /* Send the fin only */ + i40iw_term_modify_qp(qp, + I40IW_QP_STATE_TERMINATE, + I40IWQP_TERM_SEND_FIN_ONLY, + 0); +} + +/** + * i40iw_terminate_connection() - Bad AE and send terminate to remote QP + * @qp: qp associated with received terminate AE + * @info: the struct contiaing AE information + */ +void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info) +{ + u8 termlen = 0; + + if (qp->term_flags & I40IW_TERM_SENT) + return; /* Sanity check */ + + /* Eventtype can change from bld_terminate_hdr */ + qp->eventtype = TERM_EVENT_QP_FATAL; + termlen = i40iw_bld_terminate_hdr(qp, info); + i40iw_terminate_start_timer(qp); + qp->term_flags |= I40IW_TERM_SENT; + i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE, + I40IWQP_TERM_SEND_TERM_ONLY, termlen); +} + +/** + * i40iw_terminate_received - handle terminate received AE + * @qp: qp associated with received terminate AE + * @info: the struct contiaing AE information + */ +void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info) +{ + u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET; + u32 *mpa; + u8 ddp_ctl; + u8 rdma_ctl; + u16 aeq_id = 0; + struct i40iw_terminate_hdr *termhdr; + + mpa = (u32 *)i40iw_locate_mpa(pkt); + if (info->q2_data_written) { + /* did not validate the frame - do it now */ + ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff; + rdma_ctl = ntohl(mpa[0]) & 0xff; + if ((ddp_ctl & 0xc0) != 0x40) + aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC; + else if ((ddp_ctl & 0x03) != 1) + aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION; + else if (ntohl(mpa[2]) != 2) + aeq_id = I40IW_AE_DDP_UBE_INVALID_QN; + else if (ntohl(mpa[3]) != 1) + aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN; + else if (ntohl(mpa[4]) != 0) + aeq_id = I40IW_AE_DDP_UBE_INVALID_MO; + else if ((rdma_ctl & 0xc0) != 0x40) + aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION; + + info->ae_id = aeq_id; + if (info->ae_id) { + /* Bad terminate recvd - send back a terminate */ + i40iw_terminate_connection(qp, info); + return; + } + } + + qp->term_flags |= I40IW_TERM_RCVD; + qp->eventtype = TERM_EVENT_QP_FATAL; + termhdr = (struct i40iw_terminate_hdr *)&mpa[5]; + if (termhdr->layer_etype == RDMAP_REMOTE_PROT || + termhdr->layer_etype == RDMAP_REMOTE_OP) { + i40iw_terminate_done(qp, 0); + } else { + i40iw_terminate_start_timer(qp); + i40iw_terminate_send_fin(qp); + } +} + +/** + * i40iw_hw_stat_init - Initiliaze HW stats table + * @devstat: pestat struct + * @fcn_idx: PCI fn id + * @hw: PF i40iw_hw structure. + * @is_pf: Is it a PF? 
+ * + * Populate the HW stat table with register offset addr for each + * stat. And start the perioidic stats timer. + */ +static void i40iw_hw_stat_init(struct i40iw_dev_pestat *devstat, + u8 fcn_idx, + struct i40iw_hw *hw, bool is_pf) +{ + u32 stat_reg_offset; + u32 stat_index; + struct i40iw_dev_hw_stat_offsets *stat_table = + &devstat->hw_stat_offsets; + struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats; + + devstat->hw = hw; + + if (is_pf) { + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] = + I40E_GLPES_PFIP4RXDISCARD(fcn_idx); + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] = + I40E_GLPES_PFIP4RXTRUNC(fcn_idx); + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = + I40E_GLPES_PFIP4TXNOROUTE(fcn_idx); + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] = + I40E_GLPES_PFIP6RXDISCARD(fcn_idx); + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] = + I40E_GLPES_PFIP6RXTRUNC(fcn_idx); + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = + I40E_GLPES_PFIP6TXNOROUTE(fcn_idx); + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] = + I40E_GLPES_PFTCPRTXSEG(fcn_idx); + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] = + I40E_GLPES_PFTCPRXOPTERR(fcn_idx); + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = + I40E_GLPES_PFTCPRXPROTOERR(fcn_idx); + + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] = + I40E_GLPES_PFIP4RXOCTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] = + I40E_GLPES_PFIP4RXPKTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] = + I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] = + I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] = + I40E_GLPES_PFIP4TXOCTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] = + I40E_GLPES_PFIP4TXPKTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] = + I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] = + I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] = + I40E_GLPES_PFIP6RXOCTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] = + I40E_GLPES_PFIP6RXPKTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] = + I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] = + I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] = + I40E_GLPES_PFIP6TXOCTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] = + I40E_GLPES_PFIP6TXPKTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] = + I40E_GLPES_PFIP6TXPKTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] = + I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] = + I40E_GLPES_PFTCPRXSEGSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] = + I40E_GLPES_PFTCPTXSEGLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] = + I40E_GLPES_PFRDMARXRDSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] = + I40E_GLPES_PFRDMARXSNDSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] = + I40E_GLPES_PFRDMARXWRSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] = + 
I40E_GLPES_PFRDMATXRDSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] = + I40E_GLPES_PFRDMATXSNDSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] = + I40E_GLPES_PFRDMATXWRSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] = + I40E_GLPES_PFRDMAVBNDLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] = + I40E_GLPES_PFRDMAVINVLO(fcn_idx); + } else { + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] = + I40E_GLPES_VFIP4RXDISCARD(fcn_idx); + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] = + I40E_GLPES_VFIP4RXTRUNC(fcn_idx); + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = + I40E_GLPES_VFIP4TXNOROUTE(fcn_idx); + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] = + I40E_GLPES_VFIP6RXDISCARD(fcn_idx); + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] = + I40E_GLPES_VFIP6RXTRUNC(fcn_idx); + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = + I40E_GLPES_VFIP6TXNOROUTE(fcn_idx); + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] = + I40E_GLPES_VFTCPRTXSEG(fcn_idx); + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] = + I40E_GLPES_VFTCPRXOPTERR(fcn_idx); + stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = + I40E_GLPES_VFTCPRXPROTOERR(fcn_idx); + + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] = + I40E_GLPES_VFIP4RXOCTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] = + I40E_GLPES_VFIP4RXPKTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] = + I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] = + I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] = + I40E_GLPES_VFIP4TXOCTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] = + I40E_GLPES_VFIP4TXPKTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] = + I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] = + I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] = + I40E_GLPES_VFIP6RXOCTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] = + I40E_GLPES_VFIP6RXPKTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] = + I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] = + I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] = + I40E_GLPES_VFIP6TXOCTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] = + I40E_GLPES_VFIP6TXPKTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] = + I40E_GLPES_VFIP6TXPKTSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] = + I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] = + I40E_GLPES_VFTCPRXSEGSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] = + I40E_GLPES_VFTCPTXSEGLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] = + I40E_GLPES_VFRDMARXRDSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] = + I40E_GLPES_VFRDMARXSNDSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] = + I40E_GLPES_VFRDMARXWRSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] = + I40E_GLPES_VFRDMATXRDSLO(fcn_idx); + 
stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] = + I40E_GLPES_VFRDMATXSNDSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] = + I40E_GLPES_VFRDMATXWRSLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] = + I40E_GLPES_VFRDMAVBNDLO(fcn_idx); + stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] = + I40E_GLPES_VFRDMAVINVLO(fcn_idx); + } + + for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64; + stat_index++) { + stat_reg_offset = stat_table->stat_offset_64[stat_index]; + last_rd_stats->stat_value_64[stat_index] = + readq(devstat->hw->hw_addr + stat_reg_offset); + } + + for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32; + stat_index++) { + stat_reg_offset = stat_table->stat_offset_32[stat_index]; + last_rd_stats->stat_value_32[stat_index] = + i40iw_rd32(devstat->hw, stat_reg_offset); + } +} + +/** + * i40iw_hw_stat_read_32 - Read 32-bit HW stat counters and accommodates for roll-overs. + * @devstat: pestat struct + * @index: index in HW stat table which contains offset reg-addr + * @value: hw stat value + */ +static void i40iw_hw_stat_read_32(struct i40iw_dev_pestat *devstat, + enum i40iw_hw_stat_index_32b index, + u64 *value) +{ + struct i40iw_dev_hw_stat_offsets *stat_table = + &devstat->hw_stat_offsets; + struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats; + struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats; + u64 new_stat_value = 0; + u32 stat_reg_offset = stat_table->stat_offset_32[index]; + + new_stat_value = i40iw_rd32(devstat->hw, stat_reg_offset); + /*roll-over case */ + if (new_stat_value < last_rd_stats->stat_value_32[index]) + hw_stats->stat_value_32[index] += new_stat_value; + else + hw_stats->stat_value_32[index] += + new_stat_value - last_rd_stats->stat_value_32[index]; + last_rd_stats->stat_value_32[index] = new_stat_value; + *value = hw_stats->stat_value_32[index]; +} + +/** + * i40iw_hw_stat_read_64 - Read HW stat counters (greater than 32-bit) and accommodates for roll-overs. + * @devstat: pestat struct + * @index: index in HW stat table which contains offset reg-addr + * @value: hw stat value + */ +static void i40iw_hw_stat_read_64(struct i40iw_dev_pestat *devstat, + enum i40iw_hw_stat_index_64b index, + u64 *value) +{ + struct i40iw_dev_hw_stat_offsets *stat_table = + &devstat->hw_stat_offsets; + struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats; + struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats; + u64 new_stat_value = 0; + u32 stat_reg_offset = stat_table->stat_offset_64[index]; + + new_stat_value = readq(devstat->hw->hw_addr + stat_reg_offset); + /*roll-over case */ + if (new_stat_value < last_rd_stats->stat_value_64[index]) + hw_stats->stat_value_64[index] += new_stat_value; + else + hw_stats->stat_value_64[index] += + new_stat_value - last_rd_stats->stat_value_64[index]; + last_rd_stats->stat_value_64[index] = new_stat_value; + *value = hw_stats->stat_value_64[index]; +} + +/** + * i40iw_hw_stat_read_all - read all HW stat counters + * @devstat: pestat struct + * @stat_values: hw stats structure + * + * Read all the HW stat counters and populates hw_stats structure + * of passed-in dev's pestat as well as copy created in stat_values. 
+ */ +static void i40iw_hw_stat_read_all(struct i40iw_dev_pestat *devstat, + struct i40iw_dev_hw_stats *stat_values) +{ + u32 stat_index; + + for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32; + stat_index++) + i40iw_hw_stat_read_32(devstat, stat_index, + &stat_values->stat_value_32[stat_index]); + for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64; + stat_index++) + i40iw_hw_stat_read_64(devstat, stat_index, + &stat_values->stat_value_64[stat_index]); +} + +/** + * i40iw_hw_stat_refresh_all - Update all HW stat structs + * @devstat: pestat struct + * @stat_values: hw stats structure + * + * Read all the HW stat counters to refresh values in hw_stats structure + * of passed-in dev's pestat + */ +static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat) +{ + u64 stat_value; + u32 stat_index; + + for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32; + stat_index++) + i40iw_hw_stat_read_32(devstat, stat_index, &stat_value); + for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64; + stat_index++) + i40iw_hw_stat_read_64(devstat, stat_index, &stat_value); +} + +static struct i40iw_cqp_ops iw_cqp_ops = { + i40iw_sc_cqp_init, + i40iw_sc_cqp_create, + i40iw_sc_cqp_post_sq, + i40iw_sc_cqp_get_next_send_wqe, + i40iw_sc_cqp_destroy, + i40iw_sc_poll_for_cqp_op_done +}; + +static struct i40iw_ccq_ops iw_ccq_ops = { + i40iw_sc_ccq_init, + i40iw_sc_ccq_create, + i40iw_sc_ccq_destroy, + i40iw_sc_ccq_create_done, + i40iw_sc_ccq_get_cqe_info, + i40iw_sc_ccq_arm +}; + +static struct i40iw_ceq_ops iw_ceq_ops = { + i40iw_sc_ceq_init, + i40iw_sc_ceq_create, + i40iw_sc_cceq_create_done, + i40iw_sc_cceq_destroy_done, + i40iw_sc_cceq_create, + i40iw_sc_ceq_destroy, + i40iw_sc_process_ceq +}; + +static struct i40iw_aeq_ops iw_aeq_ops = { + i40iw_sc_aeq_init, + i40iw_sc_aeq_create, + i40iw_sc_aeq_destroy, + i40iw_sc_get_next_aeqe, + i40iw_sc_repost_aeq_entries, + i40iw_sc_aeq_create_done, + i40iw_sc_aeq_destroy_done +}; + +/* iwarp pd ops */ +static struct i40iw_pd_ops iw_pd_ops = { + i40iw_sc_pd_init, +}; + +static struct i40iw_priv_qp_ops iw_priv_qp_ops = { + i40iw_sc_qp_init, + i40iw_sc_qp_create, + i40iw_sc_qp_modify, + i40iw_sc_qp_destroy, + i40iw_sc_qp_flush_wqes, + i40iw_sc_qp_upload_context, + i40iw_sc_qp_setctx, + i40iw_sc_send_lsmm, + i40iw_sc_send_lsmm_nostag, + i40iw_sc_send_rtt, + i40iw_sc_post_wqe0, +}; + +static struct i40iw_priv_cq_ops iw_priv_cq_ops = { + i40iw_sc_cq_init, + i40iw_sc_cq_create, + i40iw_sc_cq_destroy, + i40iw_sc_cq_modify, +}; + +static struct i40iw_mr_ops iw_mr_ops = { + i40iw_sc_alloc_stag, + i40iw_sc_mr_reg_non_shared, + i40iw_sc_mr_reg_shared, + i40iw_sc_dealloc_stag, + i40iw_sc_query_stag, + i40iw_sc_mw_alloc +}; + +static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = { + i40iw_sc_manage_push_page, + i40iw_sc_manage_hmc_pm_func_table, + i40iw_sc_set_hmc_resource_profile, + i40iw_sc_commit_fpm_values, + i40iw_sc_query_fpm_values, + i40iw_sc_static_hmc_pages_allocated, + i40iw_sc_add_arp_cache_entry, + i40iw_sc_del_arp_cache_entry, + i40iw_sc_query_arp_cache_entry, + i40iw_sc_manage_apbvt_entry, + i40iw_sc_manage_qhash_table_entry, + i40iw_sc_alloc_local_mac_ipaddr_entry, + i40iw_sc_add_local_mac_ipaddr_entry, + i40iw_sc_del_local_mac_ipaddr_entry, + i40iw_sc_cqp_nop, + i40iw_sc_commit_fpm_values_done, + i40iw_sc_query_fpm_values_done, + i40iw_sc_manage_hmc_pm_func_table_done, + i40iw_sc_suspend_qp, + i40iw_sc_resume_qp +}; + +static struct i40iw_hmc_ops iw_hmc_ops = { + i40iw_sc_init_iw_hmc, + i40iw_sc_parse_fpm_query_buf, + 
i40iw_sc_configure_iw_fpm, + i40iw_sc_parse_fpm_commit_buf, + i40iw_sc_create_hmc_obj, + i40iw_sc_del_hmc_obj, + NULL, + NULL +}; + +static const struct i40iw_device_pestat_ops iw_device_pestat_ops = { + i40iw_hw_stat_init, + i40iw_hw_stat_read_32, + i40iw_hw_stat_read_64, + i40iw_hw_stat_read_all, + i40iw_hw_stat_refresh_all +}; + +/** + * i40iw_device_init_pestat - Initialize the pestat structure + * @dev: pestat struct + */ +enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat) +{ + devstat->ops = iw_device_pestat_ops; + return 0; +} + +/** + * i40iw_device_init - Initialize IWARP device + * @dev: IWARP device pointer + * @info: IWARP init info + */ +enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev, + struct i40iw_device_init_info *info) +{ + u32 val; + u32 vchnl_ver = 0; + u16 hmc_fcn = 0; + enum i40iw_status_code ret_code = 0; + u8 db_size; + + spin_lock_init(&dev->cqp_lock); + INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for the cqp commands backlog. */ + + i40iw_device_init_uk(&dev->dev_uk); + + dev->debug_mask = info->debug_mask; + + ret_code = i40iw_device_init_pestat(&dev->dev_pestat); + if (ret_code) { + i40iw_debug(dev, I40IW_DEBUG_DEV, + "%s: i40iw_device_init_pestat failed\n", __func__); + return ret_code; + } + dev->hmc_fn_id = info->hmc_fn_id; + dev->qs_handle = info->qs_handle; + dev->exception_lan_queue = info->exception_lan_queue; + dev->is_pf = info->is_pf; + + dev->fpm_query_buf_pa = info->fpm_query_buf_pa; + dev->fpm_query_buf = info->fpm_query_buf; + + dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa; + dev->fpm_commit_buf = info->fpm_commit_buf; + + dev->hw = info->hw; + dev->hw->hw_addr = info->bar0; + + val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID); + dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID); + + if (dev->is_pf) { + dev->dev_pestat.ops.iw_hw_stat_init(&dev->dev_pestat, + dev->hmc_fn_id, dev->hw, true); + spin_lock_init(&dev->dev_pestat.stats_lock); + /*start the periodic stats_timer */ + i40iw_hw_stats_start_timer(dev); + val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL); + db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE); + if ((db_size != I40IW_PE_DB_SIZE_4M) && + (db_size != I40IW_PE_DB_SIZE_8M)) { + i40iw_debug(dev, I40IW_DEBUG_DEV, + "%s: PE doorbell is not enabled in CSR val 0x%x\n", + __func__, val); + ret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED; + return ret_code; + } + dev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET; + dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf; + } else { + dev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET; + } + + dev->cqp_ops = &iw_cqp_ops; + dev->ccq_ops = &iw_ccq_ops; + dev->ceq_ops = &iw_ceq_ops; + dev->aeq_ops = &iw_aeq_ops; + dev->cqp_misc_ops = &iw_cqp_misc_ops; + dev->iw_pd_ops = &iw_pd_ops; + dev->iw_priv_qp_ops = &iw_priv_qp_ops; + dev->iw_priv_cq_ops = &iw_priv_cq_ops; + dev->mr_ops = &iw_mr_ops; + dev->hmc_ops = &iw_hmc_ops; + dev->vchnl_if.vchnl_send = info->vchnl_send; + if (dev->vchnl_if.vchnl_send) + dev->vchnl_up = true; + else + dev->vchnl_up = false; + if (!dev->is_pf) { + dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf; + ret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver); + if (!ret_code) { + i40iw_debug(dev, I40IW_DEBUG_DEV, + "%s: Get Channel version rc = 0x%0x, version is %u\n", + __func__, ret_code, vchnl_ver); + ret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn); + if (!ret_code) { + i40iw_debug(dev, I40IW_DEBUG_DEV, + "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n", + __func__, ret_code, hmc_fcn); + dev->hmc_fn_id = 
(u8)hmc_fcn; + } + } + } + dev->iw_vf_cqp_ops = &iw_vf_cqp_ops; + + return ret_code; +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h new file mode 100644 index 000000000..aab88d65f --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_d.h @@ -0,0 +1,1713 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#ifndef I40IW_D_H +#define I40IW_D_H + +#define I40IW_DB_ADDR_OFFSET (4 * 1024 * 1024 - 64 * 1024) +#define I40IW_VF_DB_ADDR_OFFSET (64 * 1024) + +#define I40IW_PUSH_OFFSET (4 * 1024 * 1024) +#define I40IW_PF_FIRST_PUSH_PAGE_INDEX 16 +#define I40IW_VF_PUSH_OFFSET ((8 + 64) * 1024) +#define I40IW_VF_FIRST_PUSH_PAGE_INDEX 2 + +#define I40IW_PE_DB_SIZE_4M 1 +#define I40IW_PE_DB_SIZE_8M 2 + +#define I40IW_DDP_VER 1 +#define I40IW_RDMAP_VER 1 + +#define I40IW_RDMA_MODE_RDMAC 0 +#define I40IW_RDMA_MODE_IETF 1 + +#define I40IW_QP_STATE_INVALID 0 +#define I40IW_QP_STATE_IDLE 1 +#define I40IW_QP_STATE_RTS 2 +#define I40IW_QP_STATE_CLOSING 3 +#define I40IW_QP_STATE_RESERVED 4 +#define I40IW_QP_STATE_TERMINATE 5 +#define I40IW_QP_STATE_ERROR 6 + +#define I40IW_STAG_STATE_INVALID 0 +#define I40IW_STAG_STATE_VALID 1 + +#define I40IW_STAG_TYPE_SHARED 0 +#define I40IW_STAG_TYPE_NONSHARED 1 + +#define I40IW_MAX_USER_PRIORITY 8 + +#define LS_64_1(val, bits) ((u64)(uintptr_t)val << bits) +#define RS_64_1(val, bits) ((u64)(uintptr_t)val >> bits) +#define LS_32_1(val, bits) (u32)(val << bits) +#define RS_32_1(val, bits) (u32)(val >> bits) +#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF)) + +#define LS_64(val, field) (((u64)val << field ## _SHIFT) & (field ## _MASK)) + +#define RS_64(val, field) ((u64)(val & field ## _MASK) >> field ## _SHIFT) +#define LS_32(val, field) ((val << field ## _SHIFT) & (field ## _MASK)) +#define RS_32(val, field) ((val & field ## _MASK) >> field ## _SHIFT) + +#define TERM_DDP_LEN_TAGGED 14 +#define TERM_DDP_LEN_UNTAGGED 18 +#define TERM_RDMA_LEN 28 +#define RDMA_OPCODE_MASK 0x0f +#define RDMA_READ_REQ_OPCODE 1 +#define Q2_BAD_FRAME_OFFSET 72 
+#define CQE_MAJOR_DRV 0x8000 + +#define I40IW_TERM_SENT 0x01 +#define I40IW_TERM_RCVD 0x02 +#define I40IW_TERM_DONE 0x04 +#define I40IW_MAC_HLEN 14 + +#define I40IW_INVALID_WQE_INDEX 0xffffffff + +#define I40IW_CQP_WAIT_POLL_REGS 1 +#define I40IW_CQP_WAIT_POLL_CQ 2 +#define I40IW_CQP_WAIT_EVENT 3 + +#define I40IW_CQP_INIT_WQE(wqe) memset(wqe, 0, 64) + +#define I40IW_GET_CURRENT_CQ_ELEMENT(_cq) \ + ( \ + &((_cq)->cq_base[I40IW_RING_GETCURRENT_HEAD((_cq)->cq_ring)]) \ + ) +#define I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(_cq) \ + ( \ + &(((struct i40iw_extended_cqe *) \ + ((_cq)->cq_base))[I40IW_RING_GETCURRENT_HEAD((_cq)->cq_ring)]) \ + ) + +#define I40IW_GET_CURRENT_AEQ_ELEMENT(_aeq) \ + ( \ + &_aeq->aeqe_base[I40IW_RING_GETCURRENT_TAIL(_aeq->aeq_ring)] \ + ) + +#define I40IW_GET_CURRENT_CEQ_ELEMENT(_ceq) \ + ( \ + &_ceq->ceqe_base[I40IW_RING_GETCURRENT_TAIL(_ceq->ceq_ring)] \ + ) + +#define I40IW_AE_SOURCE_RQ 0x1 +#define I40IW_AE_SOURCE_RQ_0011 0x3 + +#define I40IW_AE_SOURCE_CQ 0x2 +#define I40IW_AE_SOURCE_CQ_0110 0x6 +#define I40IW_AE_SOURCE_CQ_1010 0xA +#define I40IW_AE_SOURCE_CQ_1110 0xE + +#define I40IW_AE_SOURCE_SQ 0x5 +#define I40IW_AE_SOURCE_SQ_0111 0x7 + +#define I40IW_AE_SOURCE_IN_RR_WR 0x9 +#define I40IW_AE_SOURCE_IN_RR_WR_1011 0xB +#define I40IW_AE_SOURCE_OUT_RR 0xD +#define I40IW_AE_SOURCE_OUT_RR_1111 0xF + +#define I40IW_TCP_STATE_NON_EXISTENT 0 +#define I40IW_TCP_STATE_CLOSED 1 +#define I40IW_TCP_STATE_LISTEN 2 +#define I40IW_STATE_SYN_SEND 3 +#define I40IW_TCP_STATE_SYN_RECEIVED 4 +#define I40IW_TCP_STATE_ESTABLISHED 5 +#define I40IW_TCP_STATE_CLOSE_WAIT 6 +#define I40IW_TCP_STATE_FIN_WAIT_1 7 +#define I40IW_TCP_STATE_CLOSING 8 +#define I40IW_TCP_STATE_LAST_ACK 9 +#define I40IW_TCP_STATE_FIN_WAIT_2 10 +#define I40IW_TCP_STATE_TIME_WAIT 11 +#define I40IW_TCP_STATE_RESERVED_1 12 +#define I40IW_TCP_STATE_RESERVED_2 13 +#define I40IW_TCP_STATE_RESERVED_3 14 +#define I40IW_TCP_STATE_RESERVED_4 15 + +/* ILQ CQP hash table fields */ +#define I40IW_CQPSQ_QHASH_VLANID_SHIFT 32 +#define I40IW_CQPSQ_QHASH_VLANID_MASK \ + ((u64)0xfff << I40IW_CQPSQ_QHASH_VLANID_SHIFT) + +#define I40IW_CQPSQ_QHASH_QPN_SHIFT 32 +#define I40IW_CQPSQ_QHASH_QPN_MASK \ + ((u64)0x3ffff << I40IW_CQPSQ_QHASH_QPN_SHIFT) + +#define I40IW_CQPSQ_QHASH_QS_HANDLE_SHIFT 0 +#define I40IW_CQPSQ_QHASH_QS_HANDLE_MASK ((u64)0x3ff << I40IW_CQPSQ_QHASH_QS_HANDLE_SHIFT) + +#define I40IW_CQPSQ_QHASH_SRC_PORT_SHIFT 16 +#define I40IW_CQPSQ_QHASH_SRC_PORT_MASK \ + ((u64)0xffff << I40IW_CQPSQ_QHASH_SRC_PORT_SHIFT) + +#define I40IW_CQPSQ_QHASH_DEST_PORT_SHIFT 0 +#define I40IW_CQPSQ_QHASH_DEST_PORT_MASK \ + ((u64)0xffff << I40IW_CQPSQ_QHASH_DEST_PORT_SHIFT) + +#define I40IW_CQPSQ_QHASH_ADDR0_SHIFT 32 +#define I40IW_CQPSQ_QHASH_ADDR0_MASK \ + ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR0_SHIFT) + +#define I40IW_CQPSQ_QHASH_ADDR1_SHIFT 0 +#define I40IW_CQPSQ_QHASH_ADDR1_MASK \ + ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR1_SHIFT) + +#define I40IW_CQPSQ_QHASH_ADDR2_SHIFT 32 +#define I40IW_CQPSQ_QHASH_ADDR2_MASK \ + ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR2_SHIFT) + +#define I40IW_CQPSQ_QHASH_ADDR3_SHIFT 0 +#define I40IW_CQPSQ_QHASH_ADDR3_MASK \ + ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR3_SHIFT) + +#define I40IW_CQPSQ_QHASH_WQEVALID_SHIFT 63 +#define I40IW_CQPSQ_QHASH_WQEVALID_MASK \ + ((u64)0x1 << I40IW_CQPSQ_QHASH_WQEVALID_SHIFT) +#define I40IW_CQPSQ_QHASH_OPCODE_SHIFT 32 +#define I40IW_CQPSQ_QHASH_OPCODE_MASK \ + ((u64)0x3f << I40IW_CQPSQ_QHASH_OPCODE_SHIFT) + +#define I40IW_CQPSQ_QHASH_MANAGE_SHIFT 61 +#define 
I40IW_CQPSQ_QHASH_MANAGE_MASK \ + ((u64)0x3 << I40IW_CQPSQ_QHASH_MANAGE_SHIFT) + +#define I40IW_CQPSQ_QHASH_IPV4VALID_SHIFT 60 +#define I40IW_CQPSQ_QHASH_IPV4VALID_MASK \ + ((u64)0x1 << I40IW_CQPSQ_QHASH_IPV4VALID_SHIFT) + +#define I40IW_CQPSQ_QHASH_VLANVALID_SHIFT 59 +#define I40IW_CQPSQ_QHASH_VLANVALID_MASK \ + ((u64)0x1 << I40IW_CQPSQ_QHASH_VLANVALID_SHIFT) + +#define I40IW_CQPSQ_QHASH_ENTRYTYPE_SHIFT 42 +#define I40IW_CQPSQ_QHASH_ENTRYTYPE_MASK \ + ((u64)0x7 << I40IW_CQPSQ_QHASH_ENTRYTYPE_SHIFT) +/* CQP Host Context */ +#define I40IW_CQPHC_EN_DC_TCP_SHIFT 0 +#define I40IW_CQPHC_EN_DC_TCP_MASK (1UL << I40IW_CQPHC_EN_DC_TCP_SHIFT) + +#define I40IW_CQPHC_SQSIZE_SHIFT 8 +#define I40IW_CQPHC_SQSIZE_MASK (0xfUL << I40IW_CQPHC_SQSIZE_SHIFT) + +#define I40IW_CQPHC_DISABLE_PFPDUS_SHIFT 1 +#define I40IW_CQPHC_DISABLE_PFPDUS_MASK (0x1UL << I40IW_CQPHC_DISABLE_PFPDUS_SHIFT) + +#define I40IW_CQPHC_ENABLED_VFS_SHIFT 32 +#define I40IW_CQPHC_ENABLED_VFS_MASK (0x3fULL << I40IW_CQPHC_ENABLED_VFS_SHIFT) + +#define I40IW_CQPHC_HMC_PROFILE_SHIFT 0 +#define I40IW_CQPHC_HMC_PROFILE_MASK (0x7ULL << I40IW_CQPHC_HMC_PROFILE_SHIFT) + +#define I40IW_CQPHC_SVER_SHIFT 24 +#define I40IW_CQPHC_SVER_MASK (0xffUL << I40IW_CQPHC_SVER_SHIFT) + +#define I40IW_CQPHC_SQBASE_SHIFT 9 +#define I40IW_CQPHC_SQBASE_MASK \ + (0xfffffffffffffeULL << I40IW_CQPHC_SQBASE_SHIFT) + +#define I40IW_CQPHC_QPCTX_SHIFT 0 +#define I40IW_CQPHC_QPCTX_MASK \ + (0xffffffffffffffffULL << I40IW_CQPHC_QPCTX_SHIFT) +#define I40IW_CQPHC_SVER 1 + +#define I40IW_CQP_SW_SQSIZE_4 4 +#define I40IW_CQP_SW_SQSIZE_2048 2048 + +/* iWARP QP Doorbell shadow area */ +#define I40IW_QP_DBSA_HW_SQ_TAIL_SHIFT 0 +#define I40IW_QP_DBSA_HW_SQ_TAIL_MASK \ + (0x3fffUL << I40IW_QP_DBSA_HW_SQ_TAIL_SHIFT) + +/* Completion Queue Doorbell shadow area */ +#define I40IW_CQ_DBSA_CQEIDX_SHIFT 0 +#define I40IW_CQ_DBSA_CQEIDX_MASK (0xfffffUL << I40IW_CQ_DBSA_CQEIDX_SHIFT) + +#define I40IW_CQ_DBSA_SW_CQ_SELECT_SHIFT 0 +#define I40IW_CQ_DBSA_SW_CQ_SELECT_MASK \ + (0x3fffUL << I40IW_CQ_DBSA_SW_CQ_SELECT_SHIFT) + +#define I40IW_CQ_DBSA_ARM_NEXT_SHIFT 14 +#define I40IW_CQ_DBSA_ARM_NEXT_MASK (1UL << I40IW_CQ_DBSA_ARM_NEXT_SHIFT) + +#define I40IW_CQ_DBSA_ARM_NEXT_SE_SHIFT 15 +#define I40IW_CQ_DBSA_ARM_NEXT_SE_MASK (1UL << I40IW_CQ_DBSA_ARM_NEXT_SE_SHIFT) + +#define I40IW_CQ_DBSA_ARM_SEQ_NUM_SHIFT 16 +#define I40IW_CQ_DBSA_ARM_SEQ_NUM_MASK \ + (0x3UL << I40IW_CQ_DBSA_ARM_SEQ_NUM_SHIFT) + +/* CQP and iWARP Completion Queue */ +#define I40IW_CQ_QPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQ_QPCTX_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_CCQ_OPRETVAL_SHIFT 0 +#define I40IW_CCQ_OPRETVAL_MASK (0xffffffffUL << I40IW_CCQ_OPRETVAL_SHIFT) + +#define I40IW_CQ_MINERR_SHIFT 0 +#define I40IW_CQ_MINERR_MASK (0xffffUL << I40IW_CQ_MINERR_SHIFT) + +#define I40IW_CQ_MAJERR_SHIFT 16 +#define I40IW_CQ_MAJERR_MASK (0xffffUL << I40IW_CQ_MAJERR_SHIFT) + +#define I40IW_CQ_WQEIDX_SHIFT 32 +#define I40IW_CQ_WQEIDX_MASK (0x3fffULL << I40IW_CQ_WQEIDX_SHIFT) + +#define I40IW_CQ_ERROR_SHIFT 55 +#define I40IW_CQ_ERROR_MASK (1ULL << I40IW_CQ_ERROR_SHIFT) + +#define I40IW_CQ_SQ_SHIFT 62 +#define I40IW_CQ_SQ_MASK (1ULL << I40IW_CQ_SQ_SHIFT) + +#define I40IW_CQ_VALID_SHIFT 63 +#define I40IW_CQ_VALID_MASK (1ULL << I40IW_CQ_VALID_SHIFT) + +#define I40IWCQ_PAYLDLEN_SHIFT 0 +#define I40IWCQ_PAYLDLEN_MASK (0xffffffffUL << I40IWCQ_PAYLDLEN_SHIFT) + +#define I40IWCQ_TCPSEQNUM_SHIFT 32 +#define I40IWCQ_TCPSEQNUM_MASK (0xffffffffULL << I40IWCQ_TCPSEQNUM_SHIFT) + +#define I40IWCQ_INVSTAG_SHIFT 0 +#define 
I40IWCQ_INVSTAG_MASK (0xffffffffUL << I40IWCQ_INVSTAG_SHIFT) + +#define I40IWCQ_QPID_SHIFT 32 +#define I40IWCQ_QPID_MASK (0x3ffffULL << I40IWCQ_QPID_SHIFT) + +#define I40IWCQ_PSHDROP_SHIFT 51 +#define I40IWCQ_PSHDROP_MASK (1ULL << I40IWCQ_PSHDROP_SHIFT) + +#define I40IWCQ_SRQ_SHIFT 52 +#define I40IWCQ_SRQ_MASK (1ULL << I40IWCQ_SRQ_SHIFT) + +#define I40IWCQ_STAG_SHIFT 53 +#define I40IWCQ_STAG_MASK (1ULL << I40IWCQ_STAG_SHIFT) + +#define I40IWCQ_SOEVENT_SHIFT 54 +#define I40IWCQ_SOEVENT_MASK (1ULL << I40IWCQ_SOEVENT_SHIFT) + +#define I40IWCQ_OP_SHIFT 56 +#define I40IWCQ_OP_MASK (0x3fULL << I40IWCQ_OP_SHIFT) + +/* CEQE format */ +#define I40IW_CEQE_CQCTX_SHIFT 0 +#define I40IW_CEQE_CQCTX_MASK \ + (0x7fffffffffffffffULL << I40IW_CEQE_CQCTX_SHIFT) + +#define I40IW_CEQE_VALID_SHIFT 63 +#define I40IW_CEQE_VALID_MASK (1ULL << I40IW_CEQE_VALID_SHIFT) + +/* AEQE format */ +#define I40IW_AEQE_COMPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_AEQE_COMPCTX_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_AEQE_QPCQID_SHIFT 0 +#define I40IW_AEQE_QPCQID_MASK (0x3ffffUL << I40IW_AEQE_QPCQID_SHIFT) + +#define I40IW_AEQE_WQDESCIDX_SHIFT 18 +#define I40IW_AEQE_WQDESCIDX_MASK (0x3fffULL << I40IW_AEQE_WQDESCIDX_SHIFT) + +#define I40IW_AEQE_OVERFLOW_SHIFT 33 +#define I40IW_AEQE_OVERFLOW_MASK (1ULL << I40IW_AEQE_OVERFLOW_SHIFT) + +#define I40IW_AEQE_AECODE_SHIFT 34 +#define I40IW_AEQE_AECODE_MASK (0xffffULL << I40IW_AEQE_AECODE_SHIFT) + +#define I40IW_AEQE_AESRC_SHIFT 50 +#define I40IW_AEQE_AESRC_MASK (0xfULL << I40IW_AEQE_AESRC_SHIFT) + +#define I40IW_AEQE_IWSTATE_SHIFT 54 +#define I40IW_AEQE_IWSTATE_MASK (0x7ULL << I40IW_AEQE_IWSTATE_SHIFT) + +#define I40IW_AEQE_TCPSTATE_SHIFT 57 +#define I40IW_AEQE_TCPSTATE_MASK (0xfULL << I40IW_AEQE_TCPSTATE_SHIFT) + +#define I40IW_AEQE_Q2DATA_SHIFT 61 +#define I40IW_AEQE_Q2DATA_MASK (0x3ULL << I40IW_AEQE_Q2DATA_SHIFT) + +#define I40IW_AEQE_VALID_SHIFT 63 +#define I40IW_AEQE_VALID_MASK (1ULL << I40IW_AEQE_VALID_SHIFT) + +/* CQP SQ WQES */ +#define I40IW_QP_TYPE_IWARP 1 +#define I40IW_QP_TYPE_UDA 2 +#define I40IW_QP_TYPE_CQP 4 + +#define I40IW_CQ_TYPE_IWARP 1 +#define I40IW_CQ_TYPE_ILQ 2 +#define I40IW_CQ_TYPE_IEQ 3 +#define I40IW_CQ_TYPE_CQP 4 + +#define I40IWQP_TERM_SEND_TERM_AND_FIN 0 +#define I40IWQP_TERM_SEND_TERM_ONLY 1 +#define I40IWQP_TERM_SEND_FIN_ONLY 2 +#define I40IWQP_TERM_DONOT_SEND_TERM_OR_FIN 3 + +#define I40IW_CQP_OP_CREATE_QP 0 +#define I40IW_CQP_OP_MODIFY_QP 0x1 +#define I40IW_CQP_OP_DESTROY_QP 0x02 +#define I40IW_CQP_OP_CREATE_CQ 0x03 +#define I40IW_CQP_OP_MODIFY_CQ 0x04 +#define I40IW_CQP_OP_DESTROY_CQ 0x05 +#define I40IW_CQP_OP_CREATE_SRQ 0x06 +#define I40IW_CQP_OP_MODIFY_SRQ 0x07 +#define I40IW_CQP_OP_DESTROY_SRQ 0x08 +#define I40IW_CQP_OP_ALLOC_STAG 0x09 +#define I40IW_CQP_OP_REG_MR 0x0a +#define I40IW_CQP_OP_QUERY_STAG 0x0b +#define I40IW_CQP_OP_REG_SMR 0x0c +#define I40IW_CQP_OP_DEALLOC_STAG 0x0d +#define I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE 0x0e +#define I40IW_CQP_OP_MANAGE_ARP 0x0f +#define I40IW_CQP_OP_MANAGE_VF_PBLE_BP 0x10 +#define I40IW_CQP_OP_MANAGE_PUSH_PAGES 0x11 +#define I40IW_CQP_OP_MANAGE_PE_TEAM 0x12 +#define I40IW_CQP_OP_UPLOAD_CONTEXT 0x13 +#define I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY 0x14 +#define I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE 0x15 +#define I40IW_CQP_OP_CREATE_CEQ 0x16 +#define I40IW_CQP_OP_DESTROY_CEQ 0x18 +#define I40IW_CQP_OP_CREATE_AEQ 0x19 +#define I40IW_CQP_OP_DESTROY_AEQ 0x1b +#define I40IW_CQP_OP_CREATE_ADDR_VECT 0x1c +#define I40IW_CQP_OP_MODIFY_ADDR_VECT 0x1d +#define I40IW_CQP_OP_DESTROY_ADDR_VECT 0x1e +#define 
I40IW_CQP_OP_UPDATE_PE_SDS 0x1f +#define I40IW_CQP_OP_QUERY_FPM_VALUES 0x20 +#define I40IW_CQP_OP_COMMIT_FPM_VALUES 0x21 +#define I40IW_CQP_OP_FLUSH_WQES 0x22 +#define I40IW_CQP_OP_MANAGE_APBVT 0x23 +#define I40IW_CQP_OP_NOP 0x24 +#define I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY 0x25 +#define I40IW_CQP_OP_CREATE_UDA_MCAST_GROUP 0x26 +#define I40IW_CQP_OP_MODIFY_UDA_MCAST_GROUP 0x27 +#define I40IW_CQP_OP_DESTROY_UDA_MCAST_GROUP 0x28 +#define I40IW_CQP_OP_SUSPEND_QP 0x29 +#define I40IW_CQP_OP_RESUME_QP 0x2a +#define I40IW_CQP_OP_SHMC_PAGES_ALLOCATED 0x2b +#define I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE 0x2d + +#define I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT 16 +#define I40IW_UDA_QPSQ_NEXT_HEADER_MASK ((u64)0xff << I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT) + +#define I40IW_UDA_QPSQ_OPCODE_SHIFT 32 +#define I40IW_UDA_QPSQ_OPCODE_MASK ((u64)0x3f << I40IW_UDA_QPSQ_OPCODE_SHIFT) + +#define I40IW_UDA_QPSQ_MACLEN_SHIFT 56 +#define I40IW_UDA_QPSQ_MACLEN_MASK \ + ((u64)0x7f << I40IW_UDA_QPSQ_MACLEN_SHIFT) + +#define I40IW_UDA_QPSQ_IPLEN_SHIFT 48 +#define I40IW_UDA_QPSQ_IPLEN_MASK \ + ((u64)0x7f << I40IW_UDA_QPSQ_IPLEN_SHIFT) + +#define I40IW_UDA_QPSQ_L4T_SHIFT 30 +#define I40IW_UDA_QPSQ_L4T_MASK \ + ((u64)0x3 << I40IW_UDA_QPSQ_L4T_SHIFT) + +#define I40IW_UDA_QPSQ_IIPT_SHIFT 28 +#define I40IW_UDA_QPSQ_IIPT_MASK \ + ((u64)0x3 << I40IW_UDA_QPSQ_IIPT_SHIFT) + +#define I40IW_UDA_QPSQ_L4LEN_SHIFT 24 +#define I40IW_UDA_QPSQ_L4LEN_MASK ((u64)0xf << I40IW_UDA_QPSQ_L4LEN_SHIFT) + +#define I40IW_UDA_QPSQ_AVIDX_SHIFT 0 +#define I40IW_UDA_QPSQ_AVIDX_MASK ((u64)0xffff << I40IW_UDA_QPSQ_AVIDX_SHIFT) + +#define I40IW_UDA_QPSQ_VALID_SHIFT 63 +#define I40IW_UDA_QPSQ_VALID_MASK \ + ((u64)0x1 << I40IW_UDA_QPSQ_VALID_SHIFT) + +#define I40IW_UDA_QPSQ_SIGCOMPL_SHIFT 62 +#define I40IW_UDA_QPSQ_SIGCOMPL_MASK ((u64)0x1 << I40IW_UDA_QPSQ_SIGCOMPL_SHIFT) + +#define I40IW_UDA_PAYLOADLEN_SHIFT 0 +#define I40IW_UDA_PAYLOADLEN_MASK ((u64)0x3fff << I40IW_UDA_PAYLOADLEN_SHIFT) + +#define I40IW_UDA_HDRLEN_SHIFT 16 +#define I40IW_UDA_HDRLEN_MASK ((u64)0x1ff << I40IW_UDA_HDRLEN_SHIFT) + +#define I40IW_VLAN_TAG_VALID_SHIFT 50 +#define I40IW_VLAN_TAG_VALID_MASK ((u64)0x1 << I40IW_VLAN_TAG_VALID_SHIFT) + +#define I40IW_UDA_L3PROTO_SHIFT 0 +#define I40IW_UDA_L3PROTO_MASK ((u64)0x3 << I40IW_UDA_L3PROTO_SHIFT) + +#define I40IW_UDA_L4PROTO_SHIFT 16 +#define I40IW_UDA_L4PROTO_MASK ((u64)0x3 << I40IW_UDA_L4PROTO_SHIFT) + +#define I40IW_UDA_QPSQ_DOLOOPBACK_SHIFT 44 +#define I40IW_UDA_QPSQ_DOLOOPBACK_MASK \ + ((u64)0x1 << I40IW_UDA_QPSQ_DOLOOPBACK_SHIFT) + +/* CQP SQ WQE common fields */ +#define I40IW_CQPSQ_OPCODE_SHIFT 32 +#define I40IW_CQPSQ_OPCODE_MASK (0x3fULL << I40IW_CQPSQ_OPCODE_SHIFT) + +#define I40IW_CQPSQ_WQEVALID_SHIFT 63 +#define I40IW_CQPSQ_WQEVALID_MASK (1ULL << I40IW_CQPSQ_WQEVALID_SHIFT) + +#define I40IW_CQPSQ_TPHVAL_SHIFT 0 +#define I40IW_CQPSQ_TPHVAL_MASK (0xffUL << I40IW_CQPSQ_TPHVAL_SHIFT) + +#define I40IW_CQPSQ_TPHEN_SHIFT 60 +#define I40IW_CQPSQ_TPHEN_MASK (1ULL << I40IW_CQPSQ_TPHEN_SHIFT) + +#define I40IW_CQPSQ_PBUFADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_PBUFADDR_MASK I40IW_CQPHC_QPCTX_MASK + +/* Create/Modify/Destroy QP */ + +#define I40IW_CQPSQ_QP_NEWMSS_SHIFT 32 +#define I40IW_CQPSQ_QP_NEWMSS_MASK (0x3fffULL << I40IW_CQPSQ_QP_NEWMSS_SHIFT) + +#define I40IW_CQPSQ_QP_TERMLEN_SHIFT 48 +#define I40IW_CQPSQ_QP_TERMLEN_MASK (0xfULL << I40IW_CQPSQ_QP_TERMLEN_SHIFT) + +#define I40IW_CQPSQ_QP_QPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_QP_QPCTX_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_CQPSQ_QP_QPID_SHIFT 0 
+#define I40IW_CQPSQ_QP_QPID_MASK (0x3FFFFUL) +/* I40IWCQ_QPID_MASK */ + +#define I40IW_CQPSQ_QP_OP_SHIFT 32 +#define I40IW_CQPSQ_QP_OP_MASK I40IWCQ_OP_MASK + +#define I40IW_CQPSQ_QP_ORDVALID_SHIFT 42 +#define I40IW_CQPSQ_QP_ORDVALID_MASK (1ULL << I40IW_CQPSQ_QP_ORDVALID_SHIFT) + +#define I40IW_CQPSQ_QP_TOECTXVALID_SHIFT 43 +#define I40IW_CQPSQ_QP_TOECTXVALID_MASK \ + (1ULL << I40IW_CQPSQ_QP_TOECTXVALID_SHIFT) + +#define I40IW_CQPSQ_QP_CACHEDVARVALID_SHIFT 44 +#define I40IW_CQPSQ_QP_CACHEDVARVALID_MASK \ + (1ULL << I40IW_CQPSQ_QP_CACHEDVARVALID_SHIFT) + +#define I40IW_CQPSQ_QP_VQ_SHIFT 45 +#define I40IW_CQPSQ_QP_VQ_MASK (1ULL << I40IW_CQPSQ_QP_VQ_SHIFT) + +#define I40IW_CQPSQ_QP_FORCELOOPBACK_SHIFT 46 +#define I40IW_CQPSQ_QP_FORCELOOPBACK_MASK \ + (1ULL << I40IW_CQPSQ_QP_FORCELOOPBACK_SHIFT) + +#define I40IW_CQPSQ_QP_CQNUMVALID_SHIFT 47 +#define I40IW_CQPSQ_QP_CQNUMVALID_MASK \ + (1ULL << I40IW_CQPSQ_QP_CQNUMVALID_SHIFT) + +#define I40IW_CQPSQ_QP_QPTYPE_SHIFT 48 +#define I40IW_CQPSQ_QP_QPTYPE_MASK (0x3ULL << I40IW_CQPSQ_QP_QPTYPE_SHIFT) + +#define I40IW_CQPSQ_QP_MSSCHANGE_SHIFT 52 +#define I40IW_CQPSQ_QP_MSSCHANGE_MASK (1ULL << I40IW_CQPSQ_QP_MSSCHANGE_SHIFT) + +#define I40IW_CQPSQ_QP_STATRSRC_SHIFT 53 +#define I40IW_CQPSQ_QP_STATRSRC_MASK (1ULL << I40IW_CQPSQ_QP_STATRSRC_SHIFT) + +#define I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT 54 +#define I40IW_CQPSQ_QP_IGNOREMWBOUND_MASK \ + (1ULL << I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT) + +#define I40IW_CQPSQ_QP_REMOVEHASHENTRY_SHIFT 55 +#define I40IW_CQPSQ_QP_REMOVEHASHENTRY_MASK \ + (1ULL << I40IW_CQPSQ_QP_REMOVEHASHENTRY_SHIFT) + +#define I40IW_CQPSQ_QP_TERMACT_SHIFT 56 +#define I40IW_CQPSQ_QP_TERMACT_MASK (0x3ULL << I40IW_CQPSQ_QP_TERMACT_SHIFT) + +#define I40IW_CQPSQ_QP_RESETCON_SHIFT 58 +#define I40IW_CQPSQ_QP_RESETCON_MASK (1ULL << I40IW_CQPSQ_QP_RESETCON_SHIFT) + +#define I40IW_CQPSQ_QP_ARPTABIDXVALID_SHIFT 59 +#define I40IW_CQPSQ_QP_ARPTABIDXVALID_MASK \ + (1ULL << I40IW_CQPSQ_QP_ARPTABIDXVALID_SHIFT) + +#define I40IW_CQPSQ_QP_NEXTIWSTATE_SHIFT 60 +#define I40IW_CQPSQ_QP_NEXTIWSTATE_MASK \ + (0x7ULL << I40IW_CQPSQ_QP_NEXTIWSTATE_SHIFT) + +#define I40IW_CQPSQ_QP_DBSHADOWADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_QP_DBSHADOWADDR_MASK I40IW_CQPHC_QPCTX_MASK + +/* Create/Modify/Destroy CQ */ +#define I40IW_CQPSQ_CQ_CQSIZE_SHIFT 0 +#define I40IW_CQPSQ_CQ_CQSIZE_MASK (0x3ffffUL << I40IW_CQPSQ_CQ_CQSIZE_SHIFT) + +#define I40IW_CQPSQ_CQ_CQCTX_SHIFT 0 +#define I40IW_CQPSQ_CQ_CQCTX_MASK \ + (0x7fffffffffffffffULL << I40IW_CQPSQ_CQ_CQCTX_SHIFT) + +#define I40IW_CQPSQ_CQ_CQCTX_SHIFT 0 +#define I40IW_CQPSQ_CQ_CQCTX_MASK \ + (0x7fffffffffffffffULL << I40IW_CQPSQ_CQ_CQCTX_SHIFT) + +#define I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_SHIFT 0 +#define I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_MASK \ + (0x3ffff << I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_SHIFT) + +#define I40IW_CQPSQ_CQ_CEQID_SHIFT 24 +#define I40IW_CQPSQ_CQ_CEQID_MASK (0x7fUL << I40IW_CQPSQ_CQ_CEQID_SHIFT) + +#define I40IW_CQPSQ_CQ_OP_SHIFT 32 +#define I40IW_CQPSQ_CQ_OP_MASK (0x3fULL << I40IW_CQPSQ_CQ_OP_SHIFT) + +#define I40IW_CQPSQ_CQ_CQRESIZE_SHIFT 43 +#define I40IW_CQPSQ_CQ_CQRESIZE_MASK (1ULL << I40IW_CQPSQ_CQ_CQRESIZE_SHIFT) + +#define I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT 44 +#define I40IW_CQPSQ_CQ_LPBLSIZE_MASK (3ULL << I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT) + +#define I40IW_CQPSQ_CQ_CHKOVERFLOW_SHIFT 46 +#define I40IW_CQPSQ_CQ_CHKOVERFLOW_MASK \ + (1ULL << I40IW_CQPSQ_CQ_CHKOVERFLOW_SHIFT) + +#define I40IW_CQPSQ_CQ_VIRTMAP_SHIFT 47 +#define I40IW_CQPSQ_CQ_VIRTMAP_MASK (1ULL << I40IW_CQPSQ_CQ_VIRTMAP_SHIFT) + 
+#define I40IW_CQPSQ_CQ_ENCEQEMASK_SHIFT 48 +#define I40IW_CQPSQ_CQ_ENCEQEMASK_MASK \ + (1ULL << I40IW_CQPSQ_CQ_ENCEQEMASK_SHIFT) + +#define I40IW_CQPSQ_CQ_CEQIDVALID_SHIFT 49 +#define I40IW_CQPSQ_CQ_CEQIDVALID_MASK \ + (1ULL << I40IW_CQPSQ_CQ_CEQIDVALID_SHIFT) + +#define I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_SHIFT 61 +#define I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_MASK \ + (1ULL << I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_SHIFT) + +/* Create/Modify/Destroy Shared Receive Queue */ + +#define I40IW_CQPSQ_SRQ_RQSIZE_SHIFT 0 +#define I40IW_CQPSQ_SRQ_RQSIZE_MASK (0xfUL << I40IW_CQPSQ_SRQ_RQSIZE_SHIFT) + +#define I40IW_CQPSQ_SRQ_RQWQESIZE_SHIFT 4 +#define I40IW_CQPSQ_SRQ_RQWQESIZE_MASK \ + (0x7UL << I40IW_CQPSQ_SRQ_RQWQESIZE_SHIFT) + +#define I40IW_CQPSQ_SRQ_SRQLIMIT_SHIFT 32 +#define I40IW_CQPSQ_SRQ_SRQLIMIT_MASK \ + (0xfffULL << I40IW_CQPSQ_SRQ_SRQLIMIT_SHIFT) + +#define I40IW_CQPSQ_SRQ_SRQCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_SRQ_SRQCTX_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_CQPSQ_SRQ_PDID_SHIFT 16 +#define I40IW_CQPSQ_SRQ_PDID_MASK \ + (0x7fffULL << I40IW_CQPSQ_SRQ_PDID_SHIFT) + +#define I40IW_CQPSQ_SRQ_SRQID_SHIFT 0 +#define I40IW_CQPSQ_SRQ_SRQID_MASK (0x7fffUL << I40IW_CQPSQ_SRQ_SRQID_SHIFT) + +#define I40IW_CQPSQ_SRQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT +#define I40IW_CQPSQ_SRQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK + +#define I40IW_CQPSQ_SRQ_VIRTMAP_SHIFT I40IW_CQPSQ_CQ_VIRTMAP_SHIFT +#define I40IW_CQPSQ_SRQ_VIRTMAP_MASK I40IW_CQPSQ_CQ_VIRTMAP_MASK + +#define I40IW_CQPSQ_SRQ_TPHEN_SHIFT I40IW_CQPSQ_TPHEN_SHIFT +#define I40IW_CQPSQ_SRQ_TPHEN_MASK I40IW_CQPSQ_TPHEN_MASK + +#define I40IW_CQPSQ_SRQ_ARMLIMITEVENT_SHIFT 61 +#define I40IW_CQPSQ_SRQ_ARMLIMITEVENT_MASK \ + (1ULL << I40IW_CQPSQ_SRQ_ARMLIMITEVENT_SHIFT) + +#define I40IW_CQPSQ_SRQ_DBSHADOWAREA_SHIFT 6 +#define I40IW_CQPSQ_SRQ_DBSHADOWAREA_MASK \ + (0x3ffffffffffffffULL << I40IW_CQPSQ_SRQ_DBSHADOWAREA_SHIFT) + +#define I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_SHIFT 0 +#define I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_MASK \ + (0xfffffffUL << I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_SHIFT) + +/* Allocate/Register/Register Shared/Deallocate Stag */ +#define I40IW_CQPSQ_STAG_VA_FBO_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_STAG_VA_FBO_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_CQPSQ_STAG_STAGLEN_SHIFT 0 +#define I40IW_CQPSQ_STAG_STAGLEN_MASK \ + (0x3fffffffffffULL << I40IW_CQPSQ_STAG_STAGLEN_SHIFT) + +#define I40IW_CQPSQ_STAG_PDID_SHIFT 48 +#define I40IW_CQPSQ_STAG_PDID_MASK (0x7fffULL << I40IW_CQPSQ_STAG_PDID_SHIFT) + +#define I40IW_CQPSQ_STAG_KEY_SHIFT 0 +#define I40IW_CQPSQ_STAG_KEY_MASK (0xffUL << I40IW_CQPSQ_STAG_KEY_SHIFT) + +#define I40IW_CQPSQ_STAG_IDX_SHIFT 8 +#define I40IW_CQPSQ_STAG_IDX_MASK (0xffffffUL << I40IW_CQPSQ_STAG_IDX_SHIFT) + +#define I40IW_CQPSQ_STAG_PARENTSTAGIDX_SHIFT 32 +#define I40IW_CQPSQ_STAG_PARENTSTAGIDX_MASK \ + (0xffffffULL << I40IW_CQPSQ_STAG_PARENTSTAGIDX_SHIFT) + +#define I40IW_CQPSQ_STAG_MR_SHIFT 43 +#define I40IW_CQPSQ_STAG_MR_MASK (1ULL << I40IW_CQPSQ_STAG_MR_SHIFT) + +#define I40IW_CQPSQ_STAG_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT +#define I40IW_CQPSQ_STAG_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK + +#define I40IW_CQPSQ_STAG_HPAGESIZE_SHIFT 46 +#define I40IW_CQPSQ_STAG_HPAGESIZE_MASK \ + (1ULL << I40IW_CQPSQ_STAG_HPAGESIZE_SHIFT) + +#define I40IW_CQPSQ_STAG_ARIGHTS_SHIFT 48 +#define I40IW_CQPSQ_STAG_ARIGHTS_MASK \ + (0x1fULL << I40IW_CQPSQ_STAG_ARIGHTS_SHIFT) + +#define I40IW_CQPSQ_STAG_REMACCENABLED_SHIFT 53 +#define I40IW_CQPSQ_STAG_REMACCENABLED_MASK \ + (1ULL << I40IW_CQPSQ_STAG_REMACCENABLED_SHIFT) + 
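The STag field layout above matches the usual iWARP convention of an 8-bit consumer key in the low byte of the 32-bit STag with a 24-bit index above it. A short sketch of splitting an STag with these masks, again illustrative only and not part of this patch:

/* Illustrative sketch: split a 32-bit STag into the key and index fields
 * described by the I40IW_CQPSQ_STAG_* definitions above.
 */
static inline void i40iw_example_split_stag(u32 stag, u32 *key, u32 *index)
{
	*key = (u32)((stag & I40IW_CQPSQ_STAG_KEY_MASK) >>
		     I40IW_CQPSQ_STAG_KEY_SHIFT);
	*index = (u32)((stag & I40IW_CQPSQ_STAG_IDX_MASK) >>
		       I40IW_CQPSQ_STAG_IDX_SHIFT);
}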
+#define I40IW_CQPSQ_STAG_VABASEDTO_SHIFT 59 +#define I40IW_CQPSQ_STAG_VABASEDTO_MASK \ + (1ULL << I40IW_CQPSQ_STAG_VABASEDTO_SHIFT) + +#define I40IW_CQPSQ_STAG_USEHMCFNIDX_SHIFT 60 +#define I40IW_CQPSQ_STAG_USEHMCFNIDX_MASK \ + (1ULL << I40IW_CQPSQ_STAG_USEHMCFNIDX_SHIFT) + +#define I40IW_CQPSQ_STAG_USEPFRID_SHIFT 61 +#define I40IW_CQPSQ_STAG_USEPFRID_MASK \ + (1ULL << I40IW_CQPSQ_STAG_USEPFRID_SHIFT) + +#define I40IW_CQPSQ_STAG_PBA_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_STAG_PBA_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_CQPSQ_STAG_HMCFNIDX_SHIFT 0 +#define I40IW_CQPSQ_STAG_HMCFNIDX_MASK \ + (0x3fUL << I40IW_CQPSQ_STAG_HMCFNIDX_SHIFT) + +#define I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_SHIFT 0 +#define I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_MASK \ + (0xfffffffUL << I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_SHIFT) + +/* Query stag */ +#define I40IW_CQPSQ_QUERYSTAG_IDX_SHIFT I40IW_CQPSQ_STAG_IDX_SHIFT +#define I40IW_CQPSQ_QUERYSTAG_IDX_MASK I40IW_CQPSQ_STAG_IDX_MASK + +/* Allocate Local IP Address Entry */ + +/* Manage Local IP Address Table - MLIPA */ +#define I40IW_CQPSQ_MLIPA_IPV6LO_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_MLIPA_IPV6LO_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_CQPSQ_MLIPA_IPV6HI_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_MLIPA_IPV6HI_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_CQPSQ_MLIPA_IPV4_SHIFT 0 +#define I40IW_CQPSQ_MLIPA_IPV4_MASK \ + (0xffffffffUL << I40IW_CQPSQ_MLIPA_IPV4_SHIFT) + +#define I40IW_CQPSQ_MLIPA_IPTABLEIDX_SHIFT 0 +#define I40IW_CQPSQ_MLIPA_IPTABLEIDX_MASK \ + (0x3fUL << I40IW_CQPSQ_MLIPA_IPTABLEIDX_SHIFT) + +#define I40IW_CQPSQ_MLIPA_IPV4VALID_SHIFT 42 +#define I40IW_CQPSQ_MLIPA_IPV4VALID_MASK \ + (1ULL << I40IW_CQPSQ_MLIPA_IPV4VALID_SHIFT) + +#define I40IW_CQPSQ_MLIPA_IPV6VALID_SHIFT 43 +#define I40IW_CQPSQ_MLIPA_IPV6VALID_MASK \ + (1ULL << I40IW_CQPSQ_MLIPA_IPV6VALID_SHIFT) + +#define I40IW_CQPSQ_MLIPA_FREEENTRY_SHIFT 62 +#define I40IW_CQPSQ_MLIPA_FREEENTRY_MASK \ + (1ULL << I40IW_CQPSQ_MLIPA_FREEENTRY_SHIFT) + +#define I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_SHIFT 61 +#define I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_MASK \ + (1ULL << I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_SHIFT) + +#define I40IW_CQPSQ_MLIPA_MAC0_SHIFT 0 +#define I40IW_CQPSQ_MLIPA_MAC0_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC0_SHIFT) + +#define I40IW_CQPSQ_MLIPA_MAC1_SHIFT 8 +#define I40IW_CQPSQ_MLIPA_MAC1_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC1_SHIFT) + +#define I40IW_CQPSQ_MLIPA_MAC2_SHIFT 16 +#define I40IW_CQPSQ_MLIPA_MAC2_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC2_SHIFT) + +#define I40IW_CQPSQ_MLIPA_MAC3_SHIFT 24 +#define I40IW_CQPSQ_MLIPA_MAC3_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC3_SHIFT) + +#define I40IW_CQPSQ_MLIPA_MAC4_SHIFT 32 +#define I40IW_CQPSQ_MLIPA_MAC4_MASK (0xffULL << I40IW_CQPSQ_MLIPA_MAC4_SHIFT) + +#define I40IW_CQPSQ_MLIPA_MAC5_SHIFT 40 +#define I40IW_CQPSQ_MLIPA_MAC5_MASK (0xffULL << I40IW_CQPSQ_MLIPA_MAC5_SHIFT) + +/* Manage ARP Table - MAT */ +#define I40IW_CQPSQ_MAT_REACHMAX_SHIFT 0 +#define I40IW_CQPSQ_MAT_REACHMAX_MASK \ + (0xffffffffUL << I40IW_CQPSQ_MAT_REACHMAX_SHIFT) + +#define I40IW_CQPSQ_MAT_MACADDR_SHIFT 0 +#define I40IW_CQPSQ_MAT_MACADDR_MASK \ + (0xffffffffffffULL << I40IW_CQPSQ_MAT_MACADDR_SHIFT) + +#define I40IW_CQPSQ_MAT_ARPENTRYIDX_SHIFT 0 +#define I40IW_CQPSQ_MAT_ARPENTRYIDX_MASK \ + (0xfffUL << I40IW_CQPSQ_MAT_ARPENTRYIDX_SHIFT) + +#define I40IW_CQPSQ_MAT_ENTRYVALID_SHIFT 42 +#define I40IW_CQPSQ_MAT_ENTRYVALID_MASK \ + (1ULL << I40IW_CQPSQ_MAT_ENTRYVALID_SHIFT) + +#define I40IW_CQPSQ_MAT_PERMANENT_SHIFT 43 +#define I40IW_CQPSQ_MAT_PERMANENT_MASK \ + (1ULL << 
I40IW_CQPSQ_MAT_PERMANENT_SHIFT) + +#define I40IW_CQPSQ_MAT_QUERY_SHIFT 44 +#define I40IW_CQPSQ_MAT_QUERY_MASK (1ULL << I40IW_CQPSQ_MAT_QUERY_SHIFT) + +/* Manage VF PBLE Backing Pages - MVPBP*/ +#define I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_SHIFT 0 +#define I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_MASK \ + (0x3ffULL << I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_SHIFT) + +#define I40IW_CQPSQ_MVPBP_FIRST_PD_INX_SHIFT 16 +#define I40IW_CQPSQ_MVPBP_FIRST_PD_INX_MASK \ + (0x1ffULL << I40IW_CQPSQ_MVPBP_FIRST_PD_INX_SHIFT) + +#define I40IW_CQPSQ_MVPBP_SD_INX_SHIFT 32 +#define I40IW_CQPSQ_MVPBP_SD_INX_MASK \ + (0xfffULL << I40IW_CQPSQ_MVPBP_SD_INX_SHIFT) + +#define I40IW_CQPSQ_MVPBP_INV_PD_ENT_SHIFT 62 +#define I40IW_CQPSQ_MVPBP_INV_PD_ENT_MASK \ + (0x1ULL << I40IW_CQPSQ_MVPBP_INV_PD_ENT_SHIFT) + +#define I40IW_CQPSQ_MVPBP_PD_PLPBA_SHIFT 3 +#define I40IW_CQPSQ_MVPBP_PD_PLPBA_MASK \ + (0x1fffffffffffffffULL << I40IW_CQPSQ_MVPBP_PD_PLPBA_SHIFT) + +/* Manage Push Page - MPP */ +#define I40IW_INVALID_PUSH_PAGE_INDEX 0xffff + +#define I40IW_CQPSQ_MPP_QS_HANDLE_SHIFT 0 +#define I40IW_CQPSQ_MPP_QS_HANDLE_MASK (0xffffUL << \ + I40IW_CQPSQ_MPP_QS_HANDLE_SHIFT) + +#define I40IW_CQPSQ_MPP_PPIDX_SHIFT 0 +#define I40IW_CQPSQ_MPP_PPIDX_MASK (0x3ffUL << I40IW_CQPSQ_MPP_PPIDX_SHIFT) + +#define I40IW_CQPSQ_MPP_FREE_PAGE_SHIFT 62 +#define I40IW_CQPSQ_MPP_FREE_PAGE_MASK (1ULL << I40IW_CQPSQ_MPP_FREE_PAGE_SHIFT) + +/* Upload Context - UCTX */ +#define I40IW_CQPSQ_UCTX_QPCTXADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_UCTX_QPCTXADDR_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_CQPSQ_UCTX_QPID_SHIFT 0 +#define I40IW_CQPSQ_UCTX_QPID_MASK (0x3ffffUL << I40IW_CQPSQ_UCTX_QPID_SHIFT) + +#define I40IW_CQPSQ_UCTX_QPTYPE_SHIFT 48 +#define I40IW_CQPSQ_UCTX_QPTYPE_MASK (0xfULL << I40IW_CQPSQ_UCTX_QPTYPE_SHIFT) + +#define I40IW_CQPSQ_UCTX_RAWFORMAT_SHIFT 61 +#define I40IW_CQPSQ_UCTX_RAWFORMAT_MASK \ + (1ULL << I40IW_CQPSQ_UCTX_RAWFORMAT_SHIFT) + +#define I40IW_CQPSQ_UCTX_FREEZEQP_SHIFT 62 +#define I40IW_CQPSQ_UCTX_FREEZEQP_MASK \ + (1ULL << I40IW_CQPSQ_UCTX_FREEZEQP_SHIFT) + +/* Manage HMC PM Function Table - MHMC */ +#define I40IW_CQPSQ_MHMC_VFIDX_SHIFT 0 +#define I40IW_CQPSQ_MHMC_VFIDX_MASK (0x7fUL << I40IW_CQPSQ_MHMC_VFIDX_SHIFT) + +#define I40IW_CQPSQ_MHMC_FREEPMFN_SHIFT 62 +#define I40IW_CQPSQ_MHMC_FREEPMFN_MASK \ + (1ULL << I40IW_CQPSQ_MHMC_FREEPMFN_SHIFT) + +/* Set HMC Resource Profile - SHMCRP */ +#define I40IW_CQPSQ_SHMCRP_HMC_PROFILE_SHIFT 0 +#define I40IW_CQPSQ_SHMCRP_HMC_PROFILE_MASK \ + (0x7ULL << I40IW_CQPSQ_SHMCRP_HMC_PROFILE_SHIFT) +#define I40IW_CQPSQ_SHMCRP_VFNUM_SHIFT 32 +#define I40IW_CQPSQ_SHMCRP_VFNUM_MASK (0x3fULL << I40IW_CQPSQ_SHMCRP_VFNUM_SHIFT) + +/* Create/Destroy CEQ */ +#define I40IW_CQPSQ_CEQ_CEQSIZE_SHIFT 0 +#define I40IW_CQPSQ_CEQ_CEQSIZE_MASK \ + (0x1ffffUL << I40IW_CQPSQ_CEQ_CEQSIZE_SHIFT) + +#define I40IW_CQPSQ_CEQ_CEQID_SHIFT 0 +#define I40IW_CQPSQ_CEQ_CEQID_MASK (0x7fUL << I40IW_CQPSQ_CEQ_CEQID_SHIFT) + +#define I40IW_CQPSQ_CEQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT +#define I40IW_CQPSQ_CEQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK + +#define I40IW_CQPSQ_CEQ_VMAP_SHIFT 47 +#define I40IW_CQPSQ_CEQ_VMAP_MASK (1ULL << I40IW_CQPSQ_CEQ_VMAP_SHIFT) + +#define I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_SHIFT 0 +#define I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_MASK \ + (0xfffffffUL << I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_SHIFT) + +/* Create/Destroy AEQ */ +#define I40IW_CQPSQ_AEQ_AEQECNT_SHIFT 0 +#define I40IW_CQPSQ_AEQ_AEQECNT_MASK \ + (0x7ffffUL << I40IW_CQPSQ_AEQ_AEQECNT_SHIFT) + +#define I40IW_CQPSQ_AEQ_LPBLSIZE_SHIFT 
I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT +#define I40IW_CQPSQ_AEQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK + +#define I40IW_CQPSQ_AEQ_VMAP_SHIFT 47 +#define I40IW_CQPSQ_AEQ_VMAP_MASK (1ULL << I40IW_CQPSQ_AEQ_VMAP_SHIFT) + +#define I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_SHIFT 0 +#define I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_MASK \ + (0xfffffffUL << I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_SHIFT) + +/* Commit FPM Values - CFPM */ +#define I40IW_CQPSQ_CFPM_HMCFNID_SHIFT 0 +#define I40IW_CQPSQ_CFPM_HMCFNID_MASK (0x3fUL << I40IW_CQPSQ_CFPM_HMCFNID_SHIFT) + +/* Flush WQEs - FWQE */ +#define I40IW_CQPSQ_FWQE_AECODE_SHIFT 0 +#define I40IW_CQPSQ_FWQE_AECODE_MASK (0xffffUL << I40IW_CQPSQ_FWQE_AECODE_SHIFT) + +#define I40IW_CQPSQ_FWQE_AESOURCE_SHIFT 16 +#define I40IW_CQPSQ_FWQE_AESOURCE_MASK \ + (0xfUL << I40IW_CQPSQ_FWQE_AESOURCE_SHIFT) + +#define I40IW_CQPSQ_FWQE_RQMNERR_SHIFT 0 +#define I40IW_CQPSQ_FWQE_RQMNERR_MASK \ + (0xffffUL << I40IW_CQPSQ_FWQE_RQMNERR_SHIFT) + +#define I40IW_CQPSQ_FWQE_RQMJERR_SHIFT 16 +#define I40IW_CQPSQ_FWQE_RQMJERR_MASK \ + (0xffffUL << I40IW_CQPSQ_FWQE_RQMJERR_SHIFT) + +#define I40IW_CQPSQ_FWQE_SQMNERR_SHIFT 32 +#define I40IW_CQPSQ_FWQE_SQMNERR_MASK \ + (0xffffULL << I40IW_CQPSQ_FWQE_SQMNERR_SHIFT) + +#define I40IW_CQPSQ_FWQE_SQMJERR_SHIFT 48 +#define I40IW_CQPSQ_FWQE_SQMJERR_MASK \ + (0xffffULL << I40IW_CQPSQ_FWQE_SQMJERR_SHIFT) + +#define I40IW_CQPSQ_FWQE_QPID_SHIFT 0 +#define I40IW_CQPSQ_FWQE_QPID_MASK (0x3ffffULL << I40IW_CQPSQ_FWQE_QPID_SHIFT) + +#define I40IW_CQPSQ_FWQE_GENERATE_AE_SHIFT 59 +#define I40IW_CQPSQ_FWQE_GENERATE_AE_MASK (1ULL << \ + I40IW_CQPSQ_FWQE_GENERATE_AE_SHIFT) + +#define I40IW_CQPSQ_FWQE_USERFLCODE_SHIFT 60 +#define I40IW_CQPSQ_FWQE_USERFLCODE_MASK \ + (1ULL << I40IW_CQPSQ_FWQE_USERFLCODE_SHIFT) + +#define I40IW_CQPSQ_FWQE_FLUSHSQ_SHIFT 61 +#define I40IW_CQPSQ_FWQE_FLUSHSQ_MASK (1ULL << I40IW_CQPSQ_FWQE_FLUSHSQ_SHIFT) + +#define I40IW_CQPSQ_FWQE_FLUSHRQ_SHIFT 62 +#define I40IW_CQPSQ_FWQE_FLUSHRQ_MASK (1ULL << I40IW_CQPSQ_FWQE_FLUSHRQ_SHIFT) + +/* Manage Accelerated Port Table - MAPT */ +#define I40IW_CQPSQ_MAPT_PORT_SHIFT 0 +#define I40IW_CQPSQ_MAPT_PORT_MASK (0xffffUL << I40IW_CQPSQ_MAPT_PORT_SHIFT) + +#define I40IW_CQPSQ_MAPT_ADDPORT_SHIFT 62 +#define I40IW_CQPSQ_MAPT_ADDPORT_MASK (1ULL << I40IW_CQPSQ_MAPT_ADDPORT_SHIFT) + +/* Update Protocol Engine SDs */ +#define I40IW_CQPSQ_UPESD_SDCMD_SHIFT 0 +#define I40IW_CQPSQ_UPESD_SDCMD_MASK (0xffffffffUL << I40IW_CQPSQ_UPESD_SDCMD_SHIFT) + +#define I40IW_CQPSQ_UPESD_SDDATALOW_SHIFT 0 +#define I40IW_CQPSQ_UPESD_SDDATALOW_MASK \ + (0xffffffffUL << I40IW_CQPSQ_UPESD_SDDATALOW_SHIFT) + +#define I40IW_CQPSQ_UPESD_SDDATAHI_SHIFT 32 +#define I40IW_CQPSQ_UPESD_SDDATAHI_MASK \ + (0xffffffffULL << I40IW_CQPSQ_UPESD_SDDATAHI_SHIFT) +#define I40IW_CQPSQ_UPESD_HMCFNID_SHIFT 0 +#define I40IW_CQPSQ_UPESD_HMCFNID_MASK \ + (0x3fUL << I40IW_CQPSQ_UPESD_HMCFNID_SHIFT) + +#define I40IW_CQPSQ_UPESD_ENTRY_VALID_SHIFT 63 +#define I40IW_CQPSQ_UPESD_ENTRY_VALID_MASK \ + ((u64)1 << I40IW_CQPSQ_UPESD_ENTRY_VALID_SHIFT) + +#define I40IW_CQPSQ_UPESD_ENTRY_COUNT_SHIFT 0 +#define I40IW_CQPSQ_UPESD_ENTRY_COUNT_MASK \ + (0xfUL << I40IW_CQPSQ_UPESD_ENTRY_COUNT_SHIFT) + +#define I40IW_CQPSQ_UPESD_SKIP_ENTRY_SHIFT 7 +#define I40IW_CQPSQ_UPESD_SKIP_ENTRY_MASK \ + (0x1UL << I40IW_CQPSQ_UPESD_SKIP_ENTRY_SHIFT) + +/* Suspend QP */ +#define I40IW_CQPSQ_SUSPENDQP_QPID_SHIFT 0 +#define I40IW_CQPSQ_SUSPENDQP_QPID_MASK (0x3FFFFUL) +/* I40IWCQ_QPID_MASK */ + +/* Resume QP */ +#define I40IW_CQPSQ_RESUMEQP_QSHANDLE_SHIFT 0 +#define I40IW_CQPSQ_RESUMEQP_QSHANDLE_MASK \ + 
(0xffffffffUL << I40IW_CQPSQ_RESUMEQP_QSHANDLE_SHIFT) + +#define I40IW_CQPSQ_RESUMEQP_QPID_SHIFT 0 +#define I40IW_CQPSQ_RESUMEQP_QPID_MASK (0x3FFFFUL) +/* I40IWCQ_QPID_MASK */ + +/* IW QP Context */ +#define I40IWQPC_DDP_VER_SHIFT 0 +#define I40IWQPC_DDP_VER_MASK (3UL << I40IWQPC_DDP_VER_SHIFT) + +#define I40IWQPC_SNAP_SHIFT 2 +#define I40IWQPC_SNAP_MASK (1UL << I40IWQPC_SNAP_SHIFT) + +#define I40IWQPC_IPV4_SHIFT 3 +#define I40IWQPC_IPV4_MASK (1UL << I40IWQPC_IPV4_SHIFT) + +#define I40IWQPC_NONAGLE_SHIFT 4 +#define I40IWQPC_NONAGLE_MASK (1UL << I40IWQPC_NONAGLE_SHIFT) + +#define I40IWQPC_INSERTVLANTAG_SHIFT 5 +#define I40IWQPC_INSERTVLANTAG_MASK (1 << I40IWQPC_INSERTVLANTAG_SHIFT) + +#define I40IWQPC_USESRQ_SHIFT 6 +#define I40IWQPC_USESRQ_MASK (1UL << I40IWQPC_USESRQ_SHIFT) + +#define I40IWQPC_TIMESTAMP_SHIFT 7 +#define I40IWQPC_TIMESTAMP_MASK (1UL << I40IWQPC_TIMESTAMP_SHIFT) + +#define I40IWQPC_RQWQESIZE_SHIFT 8 +#define I40IWQPC_RQWQESIZE_MASK (3UL << I40IWQPC_RQWQESIZE_SHIFT) + +#define I40IWQPC_INSERTL2TAG2_SHIFT 11 +#define I40IWQPC_INSERTL2TAG2_MASK (1UL << I40IWQPC_INSERTL2TAG2_SHIFT) + +#define I40IWQPC_LIMIT_SHIFT 12 +#define I40IWQPC_LIMIT_MASK (3UL << I40IWQPC_LIMIT_SHIFT) + +#define I40IWQPC_DROPOOOSEG_SHIFT 15 +#define I40IWQPC_DROPOOOSEG_MASK (1UL << I40IWQPC_DROPOOOSEG_SHIFT) + +#define I40IWQPC_DUPACK_THRESH_SHIFT 16 +#define I40IWQPC_DUPACK_THRESH_MASK (7UL << I40IWQPC_DUPACK_THRESH_SHIFT) + +#define I40IWQPC_ERR_RQ_IDX_VALID_SHIFT 19 +#define I40IWQPC_ERR_RQ_IDX_VALID_MASK (1UL << I40IWQPC_ERR_RQ_IDX_VALID_SHIFT) + +#define I40IWQPC_DIS_VLAN_CHECKS_SHIFT 19 +#define I40IWQPC_DIS_VLAN_CHECKS_MASK (7UL << I40IWQPC_DIS_VLAN_CHECKS_SHIFT) + +#define I40IWQPC_RCVTPHEN_SHIFT 28 +#define I40IWQPC_RCVTPHEN_MASK (1UL << I40IWQPC_RCVTPHEN_SHIFT) + +#define I40IWQPC_XMITTPHEN_SHIFT 29 +#define I40IWQPC_XMITTPHEN_MASK (1ULL << I40IWQPC_XMITTPHEN_SHIFT) + +#define I40IWQPC_RQTPHEN_SHIFT 30 +#define I40IWQPC_RQTPHEN_MASK (1UL << I40IWQPC_RQTPHEN_SHIFT) + +#define I40IWQPC_SQTPHEN_SHIFT 31 +#define I40IWQPC_SQTPHEN_MASK (1ULL << I40IWQPC_SQTPHEN_SHIFT) + +#define I40IWQPC_PPIDX_SHIFT 32 +#define I40IWQPC_PPIDX_MASK (0x3ffULL << I40IWQPC_PPIDX_SHIFT) + +#define I40IWQPC_PMENA_SHIFT 47 +#define I40IWQPC_PMENA_MASK (1ULL << I40IWQPC_PMENA_SHIFT) + +#define I40IWQPC_RDMAP_VER_SHIFT 62 +#define I40IWQPC_RDMAP_VER_MASK (3ULL << I40IWQPC_RDMAP_VER_SHIFT) + +#define I40IWQPC_SQADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IWQPC_SQADDR_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IWQPC_RQADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IWQPC_RQADDR_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IWQPC_TTL_SHIFT 0 +#define I40IWQPC_TTL_MASK (0xffUL << I40IWQPC_TTL_SHIFT) + +#define I40IWQPC_RQSIZE_SHIFT 8 +#define I40IWQPC_RQSIZE_MASK (0xfUL << I40IWQPC_RQSIZE_SHIFT) + +#define I40IWQPC_SQSIZE_SHIFT 12 +#define I40IWQPC_SQSIZE_MASK (0xfUL << I40IWQPC_SQSIZE_SHIFT) + +#define I40IWQPC_SRCMACADDRIDX_SHIFT 16 +#define I40IWQPC_SRCMACADDRIDX_MASK (0x3fUL << I40IWQPC_SRCMACADDRIDX_SHIFT) + +#define I40IWQPC_AVOIDSTRETCHACK_SHIFT 23 +#define I40IWQPC_AVOIDSTRETCHACK_MASK (1UL << I40IWQPC_AVOIDSTRETCHACK_SHIFT) + +#define I40IWQPC_TOS_SHIFT 24 +#define I40IWQPC_TOS_MASK (0xffUL << I40IWQPC_TOS_SHIFT) + +#define I40IWQPC_SRCPORTNUM_SHIFT 32 +#define I40IWQPC_SRCPORTNUM_MASK (0xffffULL << I40IWQPC_SRCPORTNUM_SHIFT) + +#define I40IWQPC_DESTPORTNUM_SHIFT 48 +#define I40IWQPC_DESTPORTNUM_MASK (0xffffULL << I40IWQPC_DESTPORTNUM_SHIFT) + +#define I40IWQPC_DESTIPADDR0_SHIFT 32 +#define I40IWQPC_DESTIPADDR0_MASK \ + 
(0xffffffffULL << I40IWQPC_DESTIPADDR0_SHIFT) + +#define I40IWQPC_DESTIPADDR1_SHIFT 0 +#define I40IWQPC_DESTIPADDR1_MASK \ + (0xffffffffULL << I40IWQPC_DESTIPADDR1_SHIFT) + +#define I40IWQPC_DESTIPADDR2_SHIFT 32 +#define I40IWQPC_DESTIPADDR2_MASK \ + (0xffffffffULL << I40IWQPC_DESTIPADDR2_SHIFT) + +#define I40IWQPC_DESTIPADDR3_SHIFT 0 +#define I40IWQPC_DESTIPADDR3_MASK \ + (0xffffffffULL << I40IWQPC_DESTIPADDR3_SHIFT) + +#define I40IWQPC_SNDMSS_SHIFT 16 +#define I40IWQPC_SNDMSS_MASK (0x3fffUL << I40IWQPC_SNDMSS_SHIFT) + +#define I40IWQPC_VLANTAG_SHIFT 32 +#define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT) + +#define I40IWQPC_ARPIDX_SHIFT 48 +#define I40IWQPC_ARPIDX_MASK (0xfffULL << I40IWQPC_ARPIDX_SHIFT) + +#define I40IWQPC_FLOWLABEL_SHIFT 0 +#define I40IWQPC_FLOWLABEL_MASK (0xfffffUL << I40IWQPC_FLOWLABEL_SHIFT) + +#define I40IWQPC_WSCALE_SHIFT 20 +#define I40IWQPC_WSCALE_MASK (1UL << I40IWQPC_WSCALE_SHIFT) + +#define I40IWQPC_KEEPALIVE_SHIFT 21 +#define I40IWQPC_KEEPALIVE_MASK (1UL << I40IWQPC_KEEPALIVE_SHIFT) + +#define I40IWQPC_IGNORE_TCP_OPT_SHIFT 22 +#define I40IWQPC_IGNORE_TCP_OPT_MASK (1UL << I40IWQPC_IGNORE_TCP_OPT_SHIFT) + +#define I40IWQPC_IGNORE_TCP_UNS_OPT_SHIFT 23 +#define I40IWQPC_IGNORE_TCP_UNS_OPT_MASK \ + (1UL << I40IWQPC_IGNORE_TCP_UNS_OPT_SHIFT) + +#define I40IWQPC_TCPSTATE_SHIFT 28 +#define I40IWQPC_TCPSTATE_MASK (0xfUL << I40IWQPC_TCPSTATE_SHIFT) + +#define I40IWQPC_RCVSCALE_SHIFT 32 +#define I40IWQPC_RCVSCALE_MASK (0xfULL << I40IWQPC_RCVSCALE_SHIFT) + +#define I40IWQPC_SNDSCALE_SHIFT 40 +#define I40IWQPC_SNDSCALE_MASK (0xfULL << I40IWQPC_SNDSCALE_SHIFT) + +#define I40IWQPC_PDIDX_SHIFT 48 +#define I40IWQPC_PDIDX_MASK (0x7fffULL << I40IWQPC_PDIDX_SHIFT) + +#define I40IWQPC_KALIVE_TIMER_MAX_PROBES_SHIFT 16 +#define I40IWQPC_KALIVE_TIMER_MAX_PROBES_MASK \ + (0xffUL << I40IWQPC_KALIVE_TIMER_MAX_PROBES_SHIFT) + +#define I40IWQPC_KEEPALIVE_INTERVAL_SHIFT 24 +#define I40IWQPC_KEEPALIVE_INTERVAL_MASK \ + (0xffUL << I40IWQPC_KEEPALIVE_INTERVAL_SHIFT) + +#define I40IWQPC_TIMESTAMP_RECENT_SHIFT 0 +#define I40IWQPC_TIMESTAMP_RECENT_MASK \ + (0xffffffffUL << I40IWQPC_TIMESTAMP_RECENT_SHIFT) + +#define I40IWQPC_TIMESTAMP_AGE_SHIFT 32 +#define I40IWQPC_TIMESTAMP_AGE_MASK \ + (0xffffffffULL << I40IWQPC_TIMESTAMP_AGE_SHIFT) + +#define I40IWQPC_SNDNXT_SHIFT 0 +#define I40IWQPC_SNDNXT_MASK (0xffffffffUL << I40IWQPC_SNDNXT_SHIFT) + +#define I40IWQPC_SNDWND_SHIFT 32 +#define I40IWQPC_SNDWND_MASK (0xffffffffULL << I40IWQPC_SNDWND_SHIFT) + +#define I40IWQPC_RCVNXT_SHIFT 0 +#define I40IWQPC_RCVNXT_MASK (0xffffffffUL << I40IWQPC_RCVNXT_SHIFT) + +#define I40IWQPC_RCVWND_SHIFT 32 +#define I40IWQPC_RCVWND_MASK (0xffffffffULL << I40IWQPC_RCVWND_SHIFT) + +#define I40IWQPC_SNDMAX_SHIFT 0 +#define I40IWQPC_SNDMAX_MASK (0xffffffffUL << I40IWQPC_SNDMAX_SHIFT) + +#define I40IWQPC_SNDUNA_SHIFT 32 +#define I40IWQPC_SNDUNA_MASK (0xffffffffULL << I40IWQPC_SNDUNA_SHIFT) + +#define I40IWQPC_SRTT_SHIFT 0 +#define I40IWQPC_SRTT_MASK (0xffffffffUL << I40IWQPC_SRTT_SHIFT) + +#define I40IWQPC_RTTVAR_SHIFT 32 +#define I40IWQPC_RTTVAR_MASK (0xffffffffULL << I40IWQPC_RTTVAR_SHIFT) + +#define I40IWQPC_SSTHRESH_SHIFT 0 +#define I40IWQPC_SSTHRESH_MASK (0xffffffffUL << I40IWQPC_SSTHRESH_SHIFT) + +#define I40IWQPC_CWND_SHIFT 32 +#define I40IWQPC_CWND_MASK (0xffffffffULL << I40IWQPC_CWND_SHIFT) + +#define I40IWQPC_SNDWL1_SHIFT 0 +#define I40IWQPC_SNDWL1_MASK (0xffffffffUL << I40IWQPC_SNDWL1_SHIFT) + +#define I40IWQPC_SNDWL2_SHIFT 32 +#define I40IWQPC_SNDWL2_MASK (0xffffffffULL << I40IWQPC_SNDWL2_SHIFT) + 
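Each of the I40IWQPC_* quadwords above packs two 32-bit TCP state variables into the low and high halves of one 64-bit QP-context word. A hedged pack/unpack sketch using the SNDNXT/SNDWND pair; the helpers are hypothetical and not part of this patch:

/* Illustrative sketch: pack snd_nxt (bits 0-31) and snd_wnd (bits 32-63)
 * into one QP-context quadword, and take it apart again.
 */
static inline u64 i40iw_example_pack_snd(u32 snd_nxt, u32 snd_wnd)
{
	return (((u64)snd_nxt << I40IWQPC_SNDNXT_SHIFT) & I40IWQPC_SNDNXT_MASK) |
	       (((u64)snd_wnd << I40IWQPC_SNDWND_SHIFT) & I40IWQPC_SNDWND_MASK);
}

static inline void i40iw_example_unpack_snd(u64 qw, u32 *snd_nxt, u32 *snd_wnd)
{
	*snd_nxt = (u32)((qw & I40IWQPC_SNDNXT_MASK) >> I40IWQPC_SNDNXT_SHIFT);
	*snd_wnd = (u32)((qw & I40IWQPC_SNDWND_MASK) >> I40IWQPC_SNDWND_SHIFT);
}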
+#define I40IWQPC_ERR_RQ_IDX_SHIFT 32 +#define I40IWQPC_ERR_RQ_IDX_MASK (0x3fffULL << I40IWQPC_ERR_RQ_IDX_SHIFT) + +#define I40IWQPC_MAXSNDWND_SHIFT 0 +#define I40IWQPC_MAXSNDWND_MASK (0xffffffffUL << I40IWQPC_MAXSNDWND_SHIFT) + +#define I40IWQPC_REXMIT_THRESH_SHIFT 48 +#define I40IWQPC_REXMIT_THRESH_MASK (0x3fULL << I40IWQPC_REXMIT_THRESH_SHIFT) + +#define I40IWQPC_TXCQNUM_SHIFT 0 +#define I40IWQPC_TXCQNUM_MASK (0x1ffffUL << I40IWQPC_TXCQNUM_SHIFT) + +#define I40IWQPC_RXCQNUM_SHIFT 32 +#define I40IWQPC_RXCQNUM_MASK (0x1ffffULL << I40IWQPC_RXCQNUM_SHIFT) + +#define I40IWQPC_Q2ADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IWQPC_Q2ADDR_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IWQPC_LASTBYTESENT_SHIFT 0 +#define I40IWQPC_LASTBYTESENT_MASK (0xffUL << I40IWQPC_LASTBYTESENT_SHIFT) + +#define I40IWQPC_SRQID_SHIFT 32 +#define I40IWQPC_SRQID_MASK (0xffULL << I40IWQPC_SRQID_SHIFT) + +#define I40IWQPC_ORDSIZE_SHIFT 0 +#define I40IWQPC_ORDSIZE_MASK (0x7fUL << I40IWQPC_ORDSIZE_SHIFT) + +#define I40IWQPC_IRDSIZE_SHIFT 16 +#define I40IWQPC_IRDSIZE_MASK (0x3UL << I40IWQPC_IRDSIZE_SHIFT) + +#define I40IWQPC_WRRDRSPOK_SHIFT 20 +#define I40IWQPC_WRRDRSPOK_MASK (1UL << I40IWQPC_WRRDRSPOK_SHIFT) + +#define I40IWQPC_RDOK_SHIFT 21 +#define I40IWQPC_RDOK_MASK (1UL << I40IWQPC_RDOK_SHIFT) + +#define I40IWQPC_SNDMARKERS_SHIFT 22 +#define I40IWQPC_SNDMARKERS_MASK (1UL << I40IWQPC_SNDMARKERS_SHIFT) + +#define I40IWQPC_BINDEN_SHIFT 23 +#define I40IWQPC_BINDEN_MASK (1UL << I40IWQPC_BINDEN_SHIFT) + +#define I40IWQPC_FASTREGEN_SHIFT 24 +#define I40IWQPC_FASTREGEN_MASK (1UL << I40IWQPC_FASTREGEN_SHIFT) + +#define I40IWQPC_PRIVEN_SHIFT 25 +#define I40IWQPC_PRIVEN_MASK (1UL << I40IWQPC_PRIVEN_SHIFT) + +#define I40IWQPC_LSMMPRESENT_SHIFT 26 +#define I40IWQPC_LSMMPRESENT_MASK (1UL << I40IWQPC_LSMMPRESENT_SHIFT) + +#define I40IWQPC_ADJUSTFORLSMM_SHIFT 27 +#define I40IWQPC_ADJUSTFORLSMM_MASK (1UL << I40IWQPC_ADJUSTFORLSMM_SHIFT) + +#define I40IWQPC_IWARPMODE_SHIFT 28 +#define I40IWQPC_IWARPMODE_MASK (1UL << I40IWQPC_IWARPMODE_SHIFT) + +#define I40IWQPC_RCVMARKERS_SHIFT 29 +#define I40IWQPC_RCVMARKERS_MASK (1UL << I40IWQPC_RCVMARKERS_SHIFT) + +#define I40IWQPC_ALIGNHDRS_SHIFT 30 +#define I40IWQPC_ALIGNHDRS_MASK (1UL << I40IWQPC_ALIGNHDRS_SHIFT) + +#define I40IWQPC_RCVNOMPACRC_SHIFT 31 +#define I40IWQPC_RCVNOMPACRC_MASK (1UL << I40IWQPC_RCVNOMPACRC_SHIFT) + +#define I40IWQPC_RCVMARKOFFSET_SHIFT 33 +#define I40IWQPC_RCVMARKOFFSET_MASK (0x1ffULL << I40IWQPC_RCVMARKOFFSET_SHIFT) + +#define I40IWQPC_SNDMARKOFFSET_SHIFT 48 +#define I40IWQPC_SNDMARKOFFSET_MASK (0x1ffULL << I40IWQPC_SNDMARKOFFSET_SHIFT) + +#define I40IWQPC_QPCOMPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IWQPC_QPCOMPCTX_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IWQPC_SQTPHVAL_SHIFT 0 +#define I40IWQPC_SQTPHVAL_MASK (0xffUL << I40IWQPC_SQTPHVAL_SHIFT) + +#define I40IWQPC_RQTPHVAL_SHIFT 8 +#define I40IWQPC_RQTPHVAL_MASK (0xffUL << I40IWQPC_RQTPHVAL_SHIFT) + +#define I40IWQPC_QSHANDLE_SHIFT 16 +#define I40IWQPC_QSHANDLE_MASK (0x3ffUL << I40IWQPC_QSHANDLE_SHIFT) + +#define I40IWQPC_EXCEPTION_LAN_QUEUE_SHIFT 32 +#define I40IWQPC_EXCEPTION_LAN_QUEUE_MASK (0xfffULL << \ + I40IWQPC_EXCEPTION_LAN_QUEUE_SHIFT) + +#define I40IWQPC_LOCAL_IPADDR3_SHIFT 0 +#define I40IWQPC_LOCAL_IPADDR3_MASK \ + (0xffffffffUL << I40IWQPC_LOCAL_IPADDR3_SHIFT) + +#define I40IWQPC_LOCAL_IPADDR2_SHIFT 32 +#define I40IWQPC_LOCAL_IPADDR2_MASK \ + (0xffffffffULL << I40IWQPC_LOCAL_IPADDR2_SHIFT) + +#define I40IWQPC_LOCAL_IPADDR1_SHIFT 0 +#define I40IWQPC_LOCAL_IPADDR1_MASK \ + (0xffffffffUL << 
I40IWQPC_LOCAL_IPADDR1_SHIFT) + +#define I40IWQPC_LOCAL_IPADDR0_SHIFT 32 +#define I40IWQPC_LOCAL_IPADDR0_MASK \ + (0xffffffffULL << I40IWQPC_LOCAL_IPADDR0_SHIFT) + +/* wqe size considering 32 bytes per wqe*/ +#define I40IWQP_SW_MIN_WQSIZE 4 /* 128 bytes */ +#define I40IWQP_SW_MAX_WQSIZE 16384 /* 524288 bytes */ + +#define I40IWQP_OP_RDMA_WRITE 0 +#define I40IWQP_OP_RDMA_READ 1 +#define I40IWQP_OP_RDMA_SEND 3 +#define I40IWQP_OP_RDMA_SEND_INV 4 +#define I40IWQP_OP_RDMA_SEND_SOL_EVENT 5 +#define I40IWQP_OP_RDMA_SEND_SOL_EVENT_INV 6 +#define I40IWQP_OP_BIND_MW 8 +#define I40IWQP_OP_FAST_REGISTER 9 +#define I40IWQP_OP_LOCAL_INVALIDATE 10 +#define I40IWQP_OP_RDMA_READ_LOC_INV 11 +#define I40IWQP_OP_NOP 12 + +#define I40IW_RSVD_SHIFT 41 +#define I40IW_RSVD_MASK (0x7fffULL << I40IW_RSVD_SHIFT) + +/* iwarp QP SQ WQE common fields */ +#define I40IWQPSQ_OPCODE_SHIFT 32 +#define I40IWQPSQ_OPCODE_MASK (0x3fULL << I40IWQPSQ_OPCODE_SHIFT) + +#define I40IWQPSQ_ADDFRAGCNT_SHIFT 38 +#define I40IWQPSQ_ADDFRAGCNT_MASK (0x7ULL << I40IWQPSQ_ADDFRAGCNT_SHIFT) + +#define I40IWQPSQ_PUSHWQE_SHIFT 56 +#define I40IWQPSQ_PUSHWQE_MASK (1ULL << I40IWQPSQ_PUSHWQE_SHIFT) + +#define I40IWQPSQ_STREAMMODE_SHIFT 58 +#define I40IWQPSQ_STREAMMODE_MASK (1ULL << I40IWQPSQ_STREAMMODE_SHIFT) + +#define I40IWQPSQ_WAITFORRCVPDU_SHIFT 59 +#define I40IWQPSQ_WAITFORRCVPDU_MASK (1ULL << I40IWQPSQ_WAITFORRCVPDU_SHIFT) + +#define I40IWQPSQ_READFENCE_SHIFT 60 +#define I40IWQPSQ_READFENCE_MASK (1ULL << I40IWQPSQ_READFENCE_SHIFT) + +#define I40IWQPSQ_LOCALFENCE_SHIFT 61 +#define I40IWQPSQ_LOCALFENCE_MASK (1ULL << I40IWQPSQ_LOCALFENCE_SHIFT) + +#define I40IWQPSQ_SIGCOMPL_SHIFT 62 +#define I40IWQPSQ_SIGCOMPL_MASK (1ULL << I40IWQPSQ_SIGCOMPL_SHIFT) + +#define I40IWQPSQ_VALID_SHIFT 63 +#define I40IWQPSQ_VALID_MASK (1ULL << I40IWQPSQ_VALID_SHIFT) + +#define I40IWQPSQ_FRAG_TO_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IWQPSQ_FRAG_TO_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IWQPSQ_FRAG_LEN_SHIFT 0 +#define I40IWQPSQ_FRAG_LEN_MASK (0xffffffffUL << I40IWQPSQ_FRAG_LEN_SHIFT) + +#define I40IWQPSQ_FRAG_STAG_SHIFT 32 +#define I40IWQPSQ_FRAG_STAG_MASK (0xffffffffULL << I40IWQPSQ_FRAG_STAG_SHIFT) + +#define I40IWQPSQ_REMSTAGINV_SHIFT 0 +#define I40IWQPSQ_REMSTAGINV_MASK (0xffffffffUL << I40IWQPSQ_REMSTAGINV_SHIFT) + +#define I40IWQPSQ_INLINEDATAFLAG_SHIFT 57 +#define I40IWQPSQ_INLINEDATAFLAG_MASK (1ULL << I40IWQPSQ_INLINEDATAFLAG_SHIFT) + +#define I40IWQPSQ_INLINEDATALEN_SHIFT 48 +#define I40IWQPSQ_INLINEDATALEN_MASK \ + (0x7fULL << I40IWQPSQ_INLINEDATALEN_SHIFT) + +/* iwarp send with push mode */ +#define I40IWQPSQ_WQDESCIDX_SHIFT 0 +#define I40IWQPSQ_WQDESCIDX_MASK (0x3fffUL << I40IWQPSQ_WQDESCIDX_SHIFT) + +/* rdma write */ +#define I40IWQPSQ_REMSTAG_SHIFT 0 +#define I40IWQPSQ_REMSTAG_MASK (0xffffffffUL << I40IWQPSQ_REMSTAG_SHIFT) + +#define I40IWQPSQ_REMTO_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IWQPSQ_REMTO_MASK I40IW_CQPHC_QPCTX_MASK + +/* memory window */ +#define I40IWQPSQ_STAGRIGHTS_SHIFT 48 +#define I40IWQPSQ_STAGRIGHTS_MASK (0x1fULL << I40IWQPSQ_STAGRIGHTS_SHIFT) + +#define I40IWQPSQ_VABASEDTO_SHIFT 53 +#define I40IWQPSQ_VABASEDTO_MASK (1ULL << I40IWQPSQ_VABASEDTO_SHIFT) + +#define I40IWQPSQ_MWLEN_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IWQPSQ_MWLEN_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IWQPSQ_PARENTMRSTAG_SHIFT 0 +#define I40IWQPSQ_PARENTMRSTAG_MASK \ + (0xffffffffUL << I40IWQPSQ_PARENTMRSTAG_SHIFT) + +#define I40IWQPSQ_MWSTAG_SHIFT 32 +#define I40IWQPSQ_MWSTAG_MASK (0xffffffffULL << I40IWQPSQ_MWSTAG_SHIFT) + +#define 
I40IWQPSQ_BASEVA_TO_FBO_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IWQPSQ_BASEVA_TO_FBO_MASK I40IW_CQPHC_QPCTX_MASK + +/* Local Invalidate */ +#define I40IWQPSQ_LOCSTAG_SHIFT 32 +#define I40IWQPSQ_LOCSTAG_MASK (0xffffffffULL << I40IWQPSQ_LOCSTAG_SHIFT) + +/* Fast Register */ +#define I40IWQPSQ_STAGKEY_SHIFT 0 +#define I40IWQPSQ_STAGKEY_MASK (0xffUL << I40IWQPSQ_STAGKEY_SHIFT) + +#define I40IWQPSQ_STAGINDEX_SHIFT 8 +#define I40IWQPSQ_STAGINDEX_MASK (0xffffffUL << I40IWQPSQ_STAGINDEX_SHIFT) + +#define I40IWQPSQ_COPYHOSTPBLS_SHIFT 43 +#define I40IWQPSQ_COPYHOSTPBLS_MASK (1ULL << I40IWQPSQ_COPYHOSTPBLS_SHIFT) + +#define I40IWQPSQ_LPBLSIZE_SHIFT 44 +#define I40IWQPSQ_LPBLSIZE_MASK (3ULL << I40IWQPSQ_LPBLSIZE_SHIFT) + +#define I40IWQPSQ_HPAGESIZE_SHIFT 46 +#define I40IWQPSQ_HPAGESIZE_MASK (3ULL << I40IWQPSQ_HPAGESIZE_SHIFT) + +#define I40IWQPSQ_STAGLEN_SHIFT 0 +#define I40IWQPSQ_STAGLEN_MASK (0x1ffffffffffULL << I40IWQPSQ_STAGLEN_SHIFT) + +#define I40IWQPSQ_FIRSTPMPBLIDXLO_SHIFT 48 +#define I40IWQPSQ_FIRSTPMPBLIDXLO_MASK \ + (0xffffULL << I40IWQPSQ_FIRSTPMPBLIDXLO_SHIFT) + +#define I40IWQPSQ_FIRSTPMPBLIDXHI_SHIFT 0 +#define I40IWQPSQ_FIRSTPMPBLIDXHI_MASK \ + (0xfffUL << I40IWQPSQ_FIRSTPMPBLIDXHI_SHIFT) + +#define I40IWQPSQ_PBLADDR_SHIFT 12 +#define I40IWQPSQ_PBLADDR_MASK (0xfffffffffffffULL << I40IWQPSQ_PBLADDR_SHIFT) + +/* iwarp QP RQ WQE common fields */ +#define I40IWQPRQ_ADDFRAGCNT_SHIFT I40IWQPSQ_ADDFRAGCNT_SHIFT +#define I40IWQPRQ_ADDFRAGCNT_MASK I40IWQPSQ_ADDFRAGCNT_MASK + +#define I40IWQPRQ_VALID_SHIFT I40IWQPSQ_VALID_SHIFT +#define I40IWQPRQ_VALID_MASK I40IWQPSQ_VALID_MASK + +#define I40IWQPRQ_COMPLCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IWQPRQ_COMPLCTX_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IWQPRQ_FRAG_LEN_SHIFT I40IWQPSQ_FRAG_LEN_SHIFT +#define I40IWQPRQ_FRAG_LEN_MASK I40IWQPSQ_FRAG_LEN_MASK + +#define I40IWQPRQ_STAG_SHIFT I40IWQPSQ_FRAG_STAG_SHIFT +#define I40IWQPRQ_STAG_MASK I40IWQPSQ_FRAG_STAG_MASK + +#define I40IWQPRQ_TO_SHIFT I40IWQPSQ_FRAG_TO_SHIFT +#define I40IWQPRQ_TO_MASK I40IWQPSQ_FRAG_TO_MASK + +/* Query FPM CQP buf */ +#define I40IW_QUERY_FPM_MAX_QPS_SHIFT 0 +#define I40IW_QUERY_FPM_MAX_QPS_MASK \ + (0x7ffffUL << I40IW_QUERY_FPM_MAX_QPS_SHIFT) + +#define I40IW_QUERY_FPM_MAX_CQS_SHIFT 0 +#define I40IW_QUERY_FPM_MAX_CQS_MASK \ + (0x3ffffUL << I40IW_QUERY_FPM_MAX_CQS_SHIFT) + +#define I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_SHIFT 0 +#define I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_MASK \ + (0x3fffUL << I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_SHIFT) + +#define I40IW_QUERY_FPM_MAX_PE_SDS_SHIFT 32 +#define I40IW_QUERY_FPM_MAX_PE_SDS_MASK \ + (0x3fffULL << I40IW_QUERY_FPM_MAX_PE_SDS_SHIFT) + +#define I40IW_QUERY_FPM_MAX_CEQS_SHIFT 0 +#define I40IW_QUERY_FPM_MAX_CEQS_MASK \ + (0xffUL << I40IW_QUERY_FPM_MAX_CEQS_SHIFT) + +#define I40IW_QUERY_FPM_XFBLOCKSIZE_SHIFT 32 +#define I40IW_QUERY_FPM_XFBLOCKSIZE_MASK \ + (0xffffffffULL << I40IW_QUERY_FPM_XFBLOCKSIZE_SHIFT) + +#define I40IW_QUERY_FPM_Q1BLOCKSIZE_SHIFT 32 +#define I40IW_QUERY_FPM_Q1BLOCKSIZE_MASK \ + (0xffffffffULL << I40IW_QUERY_FPM_Q1BLOCKSIZE_SHIFT) + +#define I40IW_QUERY_FPM_HTMULTIPLIER_SHIFT 16 +#define I40IW_QUERY_FPM_HTMULTIPLIER_MASK \ + (0xfUL << I40IW_QUERY_FPM_HTMULTIPLIER_SHIFT) + +#define I40IW_QUERY_FPM_TIMERBUCKET_SHIFT 32 +#define I40IW_QUERY_FPM_TIMERBUCKET_MASK \
+ (0xffFFULL << I40IW_QUERY_FPM_TIMERBUCKET_SHIFT) + +/* Static HMC pages allocated buf */ +#define I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_SHIFT 0 +#define I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_MASK \ + (0x3fUL << I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_SHIFT) + +#define I40IW_HW_PAGE_SIZE 4096 +#define I40IW_DONE_COUNT 1000 +#define I40IW_SLEEP_COUNT 10 + +enum { + I40IW_QUEUES_ALIGNMENT_MASK = (128 - 1), + I40IW_AEQ_ALIGNMENT_MASK = (256 - 1), + I40IW_Q2_ALIGNMENT_MASK = (256 - 1), + I40IW_CEQ_ALIGNMENT_MASK = (256 - 1), + I40IW_CQ0_ALIGNMENT_MASK = (256 - 1), + I40IW_HOST_CTX_ALIGNMENT_MASK = (4 - 1), + I40IW_SHADOWAREA_MASK = (128 - 1), + I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = 0, + I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = 0 +}; + +enum i40iw_alignment { + I40IW_CQP_ALIGNMENT = 0x200, + I40IW_AEQ_ALIGNMENT = 0x100, + I40IW_CEQ_ALIGNMENT = 0x100, + I40IW_CQ0_ALIGNMENT = 0x100, + I40IW_SD_BUF_ALIGNMENT = 0x100 +}; + +#define I40IW_QP_WQE_MIN_SIZE 32 +#define I40IW_QP_WQE_MAX_SIZE 128 + +#define I40IW_CQE_QTYPE_RQ 0 +#define I40IW_CQE_QTYPE_SQ 1 + +#define I40IW_RING_INIT(_ring, _size) \ + { \ + (_ring).head = 0; \ + (_ring).tail = 0; \ + (_ring).size = (_size); \ + } +#define I40IW_RING_GETSIZE(_ring) ((_ring).size) +#define I40IW_RING_GETCURRENT_HEAD(_ring) ((_ring).head) +#define I40IW_RING_GETCURRENT_TAIL(_ring) ((_ring).tail) + +#define I40IW_RING_MOVE_HEAD(_ring, _retcode) \ + { \ + register u32 size; \ + size = (_ring).size; \ + if (!I40IW_RING_FULL_ERR(_ring)) { \ + (_ring).head = ((_ring).head + 1) % size; \ + (_retcode) = 0; \ + } else { \ + (_retcode) = I40IW_ERR_RING_FULL; \ + } \ + } + +#define I40IW_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \ + { \ + register u32 size; \ + size = (_ring).size; \ + if ((I40IW_RING_WORK_AVAILABLE(_ring) + (_count)) < size) { \ + (_ring).head = ((_ring).head + (_count)) % size; \ + (_retcode) = 0; \ + } else { \ + (_retcode) = I40IW_ERR_RING_FULL; \ + } \ + } + +#define I40IW_RING_MOVE_TAIL(_ring) \ + (_ring).tail = ((_ring).tail + 1) % (_ring).size + +#define I40IW_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \ + (_ring).tail = ((_ring).tail + (_count)) % (_ring).size + +#define I40IW_RING_SET_TAIL(_ring, _pos) \ + (_ring).tail = (_pos) % (_ring).size + +#define I40IW_RING_FULL_ERR(_ring) \ + ( \ + (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 1)) \ + ) + +#define I40IW_ERR_RING_FULL2(_ring) \ + ( \ + (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 2)) \ + ) + +#define I40IW_ERR_RING_FULL3(_ring) \ + ( \ + (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 3)) \ + ) + +#define I40IW_RING_MORE_WORK(_ring) \ + ( \ + (I40IW_RING_WORK_AVAILABLE(_ring) != 0) \ + ) + +#define I40IW_RING_WORK_AVAILABLE(_ring) \ + ( \ + (((_ring).head + (_ring).size - (_ring).tail) % (_ring).size) \ + ) + +#define I40IW_RING_GET_WQES_AVAILABLE(_ring) \ + ( \ + ((_ring).size - I40IW_RING_WORK_AVAILABLE(_ring) - 1) \ + ) + +#define I40IW_ATOMIC_RING_MOVE_HEAD(_ring, index, _retcode) \ + { \ + index = I40IW_RING_GETCURRENT_HEAD(_ring); \ + I40IW_RING_MOVE_HEAD(_ring, _retcode); \ + } + +/* Async Events codes */ +#define I40IW_AE_AMP_UNALLOCATED_STAG 0x0102 +#define I40IW_AE_AMP_INVALID_STAG 0x0103 +#define I40IW_AE_AMP_BAD_QP 0x0104 +#define I40IW_AE_AMP_BAD_PD 0x0105 +#define I40IW_AE_AMP_BAD_STAG_KEY 0x0106 +#define I40IW_AE_AMP_BAD_STAG_INDEX 0x0107 +#define I40IW_AE_AMP_BOUNDS_VIOLATION 0x0108 +#define I40IW_AE_AMP_RIGHTS_VIOLATION 0x0109 +#define I40IW_AE_AMP_TO_WRAP 0x010a +#define I40IW_AE_AMP_FASTREG_SHARED 0x010b +#define I40IW_AE_AMP_FASTREG_VALID_STAG 0x010c 
+#define I40IW_AE_AMP_FASTREG_MW_STAG 0x010d +#define I40IW_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e +#define I40IW_AE_AMP_FASTREG_PBL_TABLE_OVERFLOW 0x010f +#define I40IW_AE_AMP_FASTREG_INVALID_LENGTH 0x0110 +#define I40IW_AE_AMP_INVALIDATE_SHARED 0x0111 +#define I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112 +#define I40IW_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113 +#define I40IW_AE_AMP_MWBIND_VALID_STAG 0x0114 +#define I40IW_AE_AMP_MWBIND_OF_MR_STAG 0x0115 +#define I40IW_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116 +#define I40IW_AE_AMP_MWBIND_TO_MW_STAG 0x0117 +#define I40IW_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118 +#define I40IW_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119 +#define I40IW_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a +#define I40IW_AE_AMP_MWBIND_BIND_DISABLED 0x011b +#define I40IW_AE_AMP_WQE_INVALID_PARAMETER 0x0130 +#define I40IW_AE_BAD_CLOSE 0x0201 +#define I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202 +#define I40IW_AE_CQ_OPERATION_ERROR 0x0203 +#define I40IW_AE_PRIV_OPERATION_DENIED 0x011c +#define I40IW_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205 +#define I40IW_AE_STAG_ZERO_INVALID 0x0206 +#define I40IW_AE_IB_RREQ_AND_Q1_FULL 0x0207 +#define I40IW_AE_SRQ_LIMIT 0x0209 +#define I40IW_AE_WQE_UNEXPECTED_OPCODE 0x020a +#define I40IW_AE_WQE_INVALID_PARAMETER 0x020b +#define I40IW_AE_WQE_LSMM_TOO_LONG 0x0220 +#define I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301 +#define I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID 0x0302 +#define I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303 +#define I40IW_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304 +#define I40IW_AE_DDP_UBE_INVALID_MO 0x0305 +#define I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306 +#define I40IW_AE_DDP_UBE_INVALID_QN 0x0307 +#define I40IW_AE_DDP_NO_L_BIT 0x0308 +#define I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311 +#define I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312 +#define I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313 +#define I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314 +#define I40IW_AE_INVALID_ARP_ENTRY 0x0401 +#define I40IW_AE_INVALID_TCP_OPTION_RCVD 0x0402 +#define I40IW_AE_STALE_ARP_ENTRY 0x0403 +#define I40IW_AE_INVALID_WQE_LENGTH 0x0404 +#define I40IW_AE_INVALID_MAC_ENTRY 0x0405 +#define I40IW_AE_LLP_CLOSE_COMPLETE 0x0501 +#define I40IW_AE_LLP_CONNECTION_RESET 0x0502 +#define I40IW_AE_LLP_FIN_RECEIVED 0x0503 +#define I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504 +#define I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505 +#define I40IW_AE_LLP_SEGMENT_TOO_LARGE 0x0506 +#define I40IW_AE_LLP_SEGMENT_TOO_SMALL 0x0507 +#define I40IW_AE_LLP_SYN_RECEIVED 0x0508 +#define I40IW_AE_LLP_TERMINATE_RECEIVED 0x0509 +#define I40IW_AE_LLP_TOO_MANY_RETRIES 0x050a +#define I40IW_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b +#define I40IW_AE_LLP_DOUBT_REACHABILITY 0x050c +#define I40IW_AE_LLP_RX_VLAN_MISMATCH 0x050d +#define I40IW_AE_RESOURCE_EXHAUSTION 0x0520 +#define I40IW_AE_RESET_SENT 0x0601 +#define I40IW_AE_TERMINATE_SENT 0x0602 +#define I40IW_AE_RESET_NOT_SENT 0x0603 +#define I40IW_AE_LCE_QP_CATASTROPHIC 0x0700 +#define I40IW_AE_LCE_FUNCTION_CATASTROPHIC 0x0701 +#define I40IW_AE_LCE_CQ_CATASTROPHIC 0x0702 +#define I40IW_AE_UDA_XMIT_FRAG_SEQ 0x0800 +#define I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0801 +#define I40IW_AE_UDA_XMIT_IPADDR_MISMATCH 0x0802 +#define I40IW_AE_QP_SUSPEND_COMPLETE 0x0900 + +#define OP_DELETE_LOCAL_MAC_IPADDR_ENTRY 1 +#define OP_CEQ_DESTROY 2 +#define OP_AEQ_DESTROY 3 +#define OP_DELETE_ARP_CACHE_ENTRY 4 +#define OP_MANAGE_APBVT_ENTRY 5 +#define OP_CEQ_CREATE 6 +#define OP_AEQ_CREATE 7 +#define 
OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY 8 +#define OP_ADD_LOCAL_MAC_IPADDR_ENTRY 9 +#define OP_MANAGE_QHASH_TABLE_ENTRY 10 +#define OP_QP_MODIFY 11 +#define OP_QP_UPLOAD_CONTEXT 12 +#define OP_CQ_CREATE 13 +#define OP_CQ_DESTROY 14 +#define OP_QP_CREATE 15 +#define OP_QP_DESTROY 16 +#define OP_ALLOC_STAG 17 +#define OP_MR_REG_NON_SHARED 18 +#define OP_DEALLOC_STAG 19 +#define OP_MW_ALLOC 20 +#define OP_QP_FLUSH_WQES 21 +#define OP_ADD_ARP_CACHE_ENTRY 22 +#define OP_MANAGE_PUSH_PAGE 23 +#define OP_UPDATE_PE_SDS 24 +#define OP_MANAGE_HMC_PM_FUNC_TABLE 25 +#define OP_SUSPEND 26 +#define OP_RESUME 27 +#define OP_MANAGE_VF_PBLE_BP 28 +#define OP_QUERY_FPM_VALUES 29 +#define OP_COMMIT_FPM_VALUES 30 +#define OP_SIZE_CQP_STAT_ARRAY 31 + +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.c b/drivers/infiniband/hw/i40iw/i40iw_hmc.c new file mode 100644 index 000000000..5484cbf55 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_hmc.c @@ -0,0 +1,821 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include "i40iw_osdep.h" +#include "i40iw_register.h" +#include "i40iw_status.h" +#include "i40iw_hmc.h" +#include "i40iw_d.h" +#include "i40iw_type.h" +#include "i40iw_p.h" +#include "i40iw_vf.h" +#include "i40iw_virtchnl.h" + +/** + * i40iw_find_sd_index_limit - finds segment descriptor index limit + * @hmc_info: pointer to the HMC configuration information structure + * @type: type of HMC resources we're searching + * @index: starting index for the object + * @cnt: number of objects we're trying to create + * @sd_idx: pointer to return index of the segment descriptor in question + * @sd_limit: pointer to return the maximum number of segment descriptors + * + * This function calculates the segment descriptor index and index limit + * for the resource defined by i40iw_hmc_rsrc_type. 
+ */ + +static inline void i40iw_find_sd_index_limit(struct i40iw_hmc_info *hmc_info, + u32 type, + u32 idx, + u32 cnt, + u32 *sd_idx, + u32 *sd_limit) +{ + u64 fpm_addr, fpm_limit; + + fpm_addr = hmc_info->hmc_obj[(type)].base + + hmc_info->hmc_obj[type].size * idx; + fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt; + *sd_idx = (u32)(fpm_addr / I40IW_HMC_DIRECT_BP_SIZE); + *sd_limit = (u32)((fpm_limit - 1) / I40IW_HMC_DIRECT_BP_SIZE); + *sd_limit += 1; +} + +/** + * i40iw_find_pd_index_limit - finds page descriptor index limit + * @hmc_info: pointer to the HMC configuration information struct + * @type: HMC resource type we're examining + * @idx: starting index for the object + * @cnt: number of objects we're trying to create + * @pd_index: pointer to return page descriptor index + * @pd_limit: pointer to return page descriptor index limit + * + * Calculates the page descriptor index and index limit for the resource + * defined by i40iw_hmc_rsrc_type. + */ + +static inline void i40iw_find_pd_index_limit(struct i40iw_hmc_info *hmc_info, + u32 type, + u32 idx, + u32 cnt, + u32 *pd_idx, + u32 *pd_limit) +{ + u64 fpm_adr, fpm_limit; + + fpm_adr = hmc_info->hmc_obj[type].base + + hmc_info->hmc_obj[type].size * idx; + fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); + *(pd_idx) = (u32)(fpm_adr / I40IW_HMC_PAGED_BP_SIZE); + *(pd_limit) = (u32)((fpm_limit - 1) / I40IW_HMC_PAGED_BP_SIZE); + *(pd_limit) += 1; +} + +/** + * i40iw_set_sd_entry - setup entry for sd programming + * @pa: physical addr + * @idx: sd index + * @type: paged or direct sd + * @entry: sd entry ptr + */ +static inline void i40iw_set_sd_entry(u64 pa, + u32 idx, + enum i40iw_sd_entry_type type, + struct update_sd_entry *entry) +{ + entry->data = pa | (I40IW_HMC_MAX_BP_COUNT << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | + (((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) << + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | + (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); + entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15)); +} + +/** + * i40iw_clr_sd_entry - setup entry for sd clear + * @idx: sd index + * @type: paged or direct sd + * @entry: sd entry ptr + */ +static inline void i40iw_clr_sd_entry(u32 idx, enum i40iw_sd_entry_type type, + struct update_sd_entry *entry) +{ + entry->data = (I40IW_HMC_MAX_BP_COUNT << + I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | + (((type == I40IW_SD_TYPE_PAGED) ? 
0 : 1) << + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); + entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15)); +} + +/** + * i40iw_hmc_sd_one - setup 1 sd entry for cqp + * @dev: pointer to the device structure + * @hmc_fn_id: hmc's function id + * @pa: physical addr + * @sd_idx: sd index + * @type: paged or direct sd + * @setsd: flag to set or clear sd + */ +enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev, + u8 hmc_fn_id, + u64 pa, u32 sd_idx, + enum i40iw_sd_entry_type type, + bool setsd) +{ + struct i40iw_update_sds_info sdinfo; + + sdinfo.cnt = 1; + sdinfo.hmc_fn_id = hmc_fn_id; + if (setsd) + i40iw_set_sd_entry(pa, sd_idx, type, sdinfo.entry); + else + i40iw_clr_sd_entry(sd_idx, type, sdinfo.entry); + + return dev->cqp->process_cqp_sds(dev, &sdinfo); +} + +/** + * i40iw_hmc_sd_grp - setup group od sd entries for cqp + * @dev: pointer to the device structure + * @hmc_info: pointer to the HMC configuration information struct + * @sd_index: sd index + * @sd_cnt: number of sd entries + * @setsd: flag to set or clear sd + */ +static enum i40iw_status_code i40iw_hmc_sd_grp(struct i40iw_sc_dev *dev, + struct i40iw_hmc_info *hmc_info, + u32 sd_index, + u32 sd_cnt, + bool setsd) +{ + struct i40iw_hmc_sd_entry *sd_entry; + struct i40iw_update_sds_info sdinfo; + u64 pa; + u32 i; + enum i40iw_status_code ret_code = 0; + + memset(&sdinfo, 0, sizeof(sdinfo)); + sdinfo.hmc_fn_id = hmc_info->hmc_fn_id; + for (i = sd_index; i < sd_index + sd_cnt; i++) { + sd_entry = &hmc_info->sd_table.sd_entry[i]; + if (!sd_entry || + (!sd_entry->valid && setsd) || + (sd_entry->valid && !setsd)) + continue; + if (setsd) { + pa = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ? + sd_entry->u.pd_table.pd_page_addr.pa : + sd_entry->u.bp.addr.pa; + i40iw_set_sd_entry(pa, i, sd_entry->entry_type, + &sdinfo.entry[sdinfo.cnt]); + } else { + i40iw_clr_sd_entry(i, sd_entry->entry_type, + &sdinfo.entry[sdinfo.cnt]); + } + sdinfo.cnt++; + if (sdinfo.cnt == I40IW_MAX_SD_ENTRIES) { + ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo); + if (ret_code) { + i40iw_debug(dev, I40IW_DEBUG_HMC, + "i40iw_hmc_sd_grp: sd_programming failed err=%d\n", + ret_code); + return ret_code; + } + sdinfo.cnt = 0; + } + } + if (sdinfo.cnt) + ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo); + + return ret_code; +} + +/** + * i40iw_vfdev_from_fpm - return vf dev ptr for hmc function id + * @dev: pointer to the device structure + * @hmc_fn_id: hmc's function id + */ +struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id) +{ + struct i40iw_vfdev *vf_dev = NULL; + u16 idx; + + for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) { + if (dev->vf_dev[idx] && + ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) { + vf_dev = dev->vf_dev[idx]; + break; + } + } + return vf_dev; +} + +/** + * i40iw_vf_hmcinfo_from_fpm - get ptr to hmc for func_id + * @dev: pointer to the device structure + * @hmc_fn_id: hmc's function id + */ +struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev, + u8 hmc_fn_id) +{ + struct i40iw_hmc_info *hmc_info = NULL; + u16 idx; + + for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) { + if (dev->vf_dev[idx] && + ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) { + hmc_info = &dev->vf_dev[idx]->hmc_info; + break; + } + } + return hmc_info; +} + +/** + * i40iw_hmc_finish_add_sd_reg - program sd entries for objects + * @dev: pointer to the device structure + * @info: create obj info + */ +static enum i40iw_status_code i40iw_hmc_finish_add_sd_reg(struct 
i40iw_sc_dev *dev, + struct i40iw_hmc_create_obj_info *info) +{ + if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) + return I40IW_ERR_INVALID_HMC_OBJ_INDEX; + + if ((info->start_idx + info->count) > + info->hmc_info->hmc_obj[info->rsrc_type].cnt) + return I40IW_ERR_INVALID_HMC_OBJ_COUNT; + + if (!info->add_sd_cnt) + return 0; + + return i40iw_hmc_sd_grp(dev, info->hmc_info, + info->hmc_info->sd_indexes[0], + info->add_sd_cnt, true); +} + +/** + * i40iw_create_iw_hmc_obj - allocate backing store for hmc objects + * @dev: pointer to the device structure + * @info: pointer to i40iw_hmc_iw_create_obj_info struct + * + * This will allocate memory for PDs and backing pages and populate + * the sd and pd entries. + */ +enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev, + struct i40iw_hmc_create_obj_info *info) +{ + struct i40iw_hmc_sd_entry *sd_entry; + u32 sd_idx, sd_lmt; + u32 pd_idx = 0, pd_lmt = 0; + u32 pd_idx1 = 0, pd_lmt1 = 0; + u32 i, j; + bool pd_error = false; + enum i40iw_status_code ret_code = 0; + + if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) + return I40IW_ERR_INVALID_HMC_OBJ_INDEX; + + if ((info->start_idx + info->count) > + info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + i40iw_debug(dev, I40IW_DEBUG_HMC, + "%s: error type %u, start = %u, req cnt %u, cnt = %u\n", + __func__, info->rsrc_type, info->start_idx, info->count, + info->hmc_info->hmc_obj[info->rsrc_type].cnt); + return I40IW_ERR_INVALID_HMC_OBJ_COUNT; + } + + if (!dev->is_pf) + return i40iw_vchnl_vf_add_hmc_objs(dev, info->rsrc_type, 0, info->count); + + i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, + &sd_idx, &sd_lmt); + if (sd_idx >= info->hmc_info->sd_table.sd_cnt || + sd_lmt > info->hmc_info->sd_table.sd_cnt) { + return I40IW_ERR_INVALID_SD_INDEX; + } + i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, &pd_idx, &pd_lmt); + + for (j = sd_idx; j < sd_lmt; j++) { + ret_code = i40iw_add_sd_table_entry(dev->hw, info->hmc_info, + j, + info->entry_type, + I40IW_HMC_DIRECT_BP_SIZE); + if (ret_code) + goto exit_sd_error; + sd_entry = &info->hmc_info->sd_table.sd_entry[j]; + + if ((sd_entry->entry_type == I40IW_SD_TYPE_PAGED) && + ((dev->hmc_info == info->hmc_info) && + (info->rsrc_type != I40IW_HMC_IW_PBLE))) { + pd_idx1 = max(pd_idx, (j * I40IW_HMC_MAX_BP_COUNT)); + pd_lmt1 = min(pd_lmt, + (j + 1) * I40IW_HMC_MAX_BP_COUNT); + for (i = pd_idx1; i < pd_lmt1; i++) { + /* update the pd table entry */ + ret_code = i40iw_add_pd_table_entry(dev->hw, info->hmc_info, + i, NULL); + if (ret_code) { + pd_error = true; + break; + } + } + if (pd_error) { + while (i && (i > pd_idx1)) { + i40iw_remove_pd_bp(dev->hw, info->hmc_info, (i - 1), + info->is_pf); + i--; + } + } + } + if (sd_entry->valid) + continue; + + info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j; + info->add_sd_cnt++; + sd_entry->valid = true; + } + return i40iw_hmc_finish_add_sd_reg(dev, info); + +exit_sd_error: + while (j && (j > sd_idx)) { + sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1]; + switch (sd_entry->entry_type) { + case I40IW_SD_TYPE_PAGED: + pd_idx1 = max(pd_idx, + (j - 1) * I40IW_HMC_MAX_BP_COUNT); + pd_lmt1 = min(pd_lmt, (j * I40IW_HMC_MAX_BP_COUNT)); + for (i = pd_idx1; i < pd_lmt1; i++) + i40iw_prep_remove_pd_page(info->hmc_info, i); + break; + case I40IW_SD_TYPE_DIRECT: + i40iw_prep_remove_pd_page(info->hmc_info, (j - 1)); + break; + default: + ret_code = I40IW_ERR_INVALID_SD_TYPE; + break; + } + 
j--; + } + + return ret_code; +} + +/** + * i40iw_finish_del_sd_reg - delete sd entries for objects + * @dev: pointer to the device structure + * @info: dele obj info + * @reset: true if called before reset + */ +static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev, + struct i40iw_hmc_del_obj_info *info, + bool reset) +{ + struct i40iw_hmc_sd_entry *sd_entry; + enum i40iw_status_code ret_code = 0; + u32 i, sd_idx; + struct i40iw_dma_mem *mem; + + if (dev->is_pf && !reset) + ret_code = i40iw_hmc_sd_grp(dev, info->hmc_info, + info->hmc_info->sd_indexes[0], + info->del_sd_cnt, false); + + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd sd_grp\n", __func__); + + for (i = 0; i < info->del_sd_cnt; i++) { + sd_idx = info->hmc_info->sd_indexes[i]; + sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx]; + if (!sd_entry) + continue; + mem = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ? + &sd_entry->u.pd_table.pd_page_addr : + &sd_entry->u.bp.addr; + + if (!mem || !mem->va) + i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd mem\n", __func__); + else + i40iw_free_dma_mem(dev->hw, mem); + } + return ret_code; +} + +/** + * i40iw_del_iw_hmc_obj - remove pe hmc objects + * @dev: pointer to the device structure + * @info: pointer to i40iw_hmc_del_obj_info struct + * @reset: true if called before reset + * + * This will de-populate the SDs and PDs. It frees + * the memory for PDS and backing storage. After this function is returned, + * caller should deallocate memory allocated previously for + * book-keeping information about PDs and backing storage. + */ +enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev, + struct i40iw_hmc_del_obj_info *info, + bool reset) +{ + struct i40iw_hmc_pd_table *pd_table; + u32 sd_idx, sd_lmt; + u32 pd_idx, pd_lmt, rel_pd_idx; + u32 i, j; + enum i40iw_status_code ret_code = 0; + + if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + i40iw_debug(dev, I40IW_DEBUG_HMC, + "%s: error start_idx[%04d] >= [type %04d].cnt[%04d]\n", + __func__, info->start_idx, info->rsrc_type, + info->hmc_info->hmc_obj[info->rsrc_type].cnt); + return I40IW_ERR_INVALID_HMC_OBJ_INDEX; + } + + if ((info->start_idx + info->count) > + info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + i40iw_debug(dev, I40IW_DEBUG_HMC, + "%s: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n", + __func__, info->start_idx, info->count, + info->rsrc_type, + info->hmc_info->hmc_obj[info->rsrc_type].cnt); + return I40IW_ERR_INVALID_HMC_OBJ_COUNT; + } + if (!dev->is_pf) { + ret_code = i40iw_vchnl_vf_del_hmc_obj(dev, info->rsrc_type, 0, + info->count); + if (info->rsrc_type != I40IW_HMC_IW_PBLE) + return ret_code; + } + + i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, &pd_idx, &pd_lmt); + + for (j = pd_idx; j < pd_lmt; j++) { + sd_idx = j / I40IW_HMC_PD_CNT_IN_SD; + + if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type != + I40IW_SD_TYPE_PAGED) + continue; + + rel_pd_idx = j % I40IW_HMC_PD_CNT_IN_SD; + pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + if (pd_table->pd_entry[rel_pd_idx].valid) { + ret_code = i40iw_remove_pd_bp(dev->hw, info->hmc_info, j, + info->is_pf); + if (ret_code) { + i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error\n", __func__); + return ret_code; + } + } + } + + i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, &sd_idx, &sd_lmt); + if (sd_idx >= info->hmc_info->sd_table.sd_cnt || + sd_lmt > 
info->hmc_info->sd_table.sd_cnt) { + i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error invalid sd_idx\n", __func__); + return I40IW_ERR_INVALID_SD_INDEX; + } + + for (i = sd_idx; i < sd_lmt; i++) { + if (!info->hmc_info->sd_table.sd_entry[i].valid) + continue; + switch (info->hmc_info->sd_table.sd_entry[i].entry_type) { + case I40IW_SD_TYPE_DIRECT: + ret_code = i40iw_prep_remove_sd_bp(info->hmc_info, i); + if (!ret_code) { + info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i; + info->del_sd_cnt++; + } + break; + case I40IW_SD_TYPE_PAGED: + ret_code = i40iw_prep_remove_pd_page(info->hmc_info, i); + if (!ret_code) { + info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i; + info->del_sd_cnt++; + } + break; + default: + break; + } + } + return i40iw_finish_del_sd_reg(dev, info, reset); +} + +/** + * i40iw_add_sd_table_entry - Adds a segment descriptor to the table + * @hw: pointer to our hw struct + * @hmc_info: pointer to the HMC configuration information struct + * @sd_index: segment descriptor index to manipulate + * @type: what type of segment descriptor we're manipulating + * @direct_mode_sz: size to alloc in direct mode + */ +enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw, + struct i40iw_hmc_info *hmc_info, + u32 sd_index, + enum i40iw_sd_entry_type type, + u64 direct_mode_sz) +{ + enum i40iw_status_code ret_code = 0; + struct i40iw_hmc_sd_entry *sd_entry; + bool dma_mem_alloc_done = false; + struct i40iw_dma_mem mem; + u64 alloc_len; + + sd_entry = &hmc_info->sd_table.sd_entry[sd_index]; + if (!sd_entry->valid) { + if (type == I40IW_SD_TYPE_PAGED) + alloc_len = I40IW_HMC_PAGED_BP_SIZE; + else + alloc_len = direct_mode_sz; + + /* allocate a 4K pd page or 2M backing page */ + ret_code = i40iw_allocate_dma_mem(hw, &mem, alloc_len, + I40IW_HMC_PD_BP_BUF_ALIGNMENT); + if (ret_code) + goto exit; + dma_mem_alloc_done = true; + if (type == I40IW_SD_TYPE_PAGED) { + ret_code = i40iw_allocate_virt_mem(hw, + &sd_entry->u.pd_table.pd_entry_virt_mem, + sizeof(struct i40iw_hmc_pd_entry) * 512); + if (ret_code) + goto exit; + sd_entry->u.pd_table.pd_entry = (struct i40iw_hmc_pd_entry *) + sd_entry->u.pd_table.pd_entry_virt_mem.va; + + memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, sizeof(struct i40iw_dma_mem)); + } else { + memcpy(&sd_entry->u.bp.addr, &mem, sizeof(struct i40iw_dma_mem)); + sd_entry->u.bp.sd_pd_index = sd_index; + } + + hmc_info->sd_table.sd_entry[sd_index].entry_type = type; + + I40IW_INC_SD_REFCNT(&hmc_info->sd_table); + } + if (sd_entry->entry_type == I40IW_SD_TYPE_DIRECT) + I40IW_INC_BP_REFCNT(&sd_entry->u.bp); +exit: + if (ret_code) + if (dma_mem_alloc_done) + i40iw_free_dma_mem(hw, &mem); + + return ret_code; +} + +/** + * i40iw_add_pd_table_entry - Adds page descriptor to the specified table + * @hw: pointer to our HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @pd_index: which page descriptor index to manipulate + * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one. + * + * This function: + * 1. Initializes the pd entry + * 2. Adds pd_entry in the pd_table + * 3. Mark the entry valid in i40iw_hmc_pd_entry structure + * 4. Initializes the pd_entry's ref count to 1 + * assumptions: + * 1. The memory for pd should be pinned down, physically contiguous and + * aligned on 4K boundary and zeroed memory. + * 2. It should be 4K in size. 
+ */ +enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw, + struct i40iw_hmc_info *hmc_info, + u32 pd_index, + struct i40iw_dma_mem *rsrc_pg) +{ + enum i40iw_status_code ret_code = 0; + struct i40iw_hmc_pd_table *pd_table; + struct i40iw_hmc_pd_entry *pd_entry; + struct i40iw_dma_mem mem; + struct i40iw_dma_mem *page = &mem; + u32 sd_idx, rel_pd_idx; + u64 *pd_addr; + u64 page_desc; + + if (pd_index / I40IW_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) + return I40IW_ERR_INVALID_PAGE_DESC_INDEX; + + sd_idx = (pd_index / I40IW_HMC_PD_CNT_IN_SD); + if (hmc_info->sd_table.sd_entry[sd_idx].entry_type != I40IW_SD_TYPE_PAGED) + return 0; + + rel_pd_idx = (pd_index % I40IW_HMC_PD_CNT_IN_SD); + pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + pd_entry = &pd_table->pd_entry[rel_pd_idx]; + if (!pd_entry->valid) { + if (rsrc_pg) { + pd_entry->rsrc_pg = true; + page = rsrc_pg; + } else { + ret_code = i40iw_allocate_dma_mem(hw, page, + I40IW_HMC_PAGED_BP_SIZE, + I40IW_HMC_PD_BP_BUF_ALIGNMENT); + if (ret_code) + return ret_code; + pd_entry->rsrc_pg = false; + } + + memcpy(&pd_entry->bp.addr, page, sizeof(struct i40iw_dma_mem)); + pd_entry->bp.sd_pd_index = pd_index; + pd_entry->bp.entry_type = I40IW_SD_TYPE_PAGED; + page_desc = page->pa | 0x1; + + pd_addr = (u64 *)pd_table->pd_page_addr.va; + pd_addr += rel_pd_idx; + + memcpy(pd_addr, &page_desc, sizeof(*pd_addr)); + + pd_entry->sd_index = sd_idx; + pd_entry->valid = true; + I40IW_INC_PD_REFCNT(pd_table); + if (hmc_info->hmc_fn_id < I40IW_FIRST_VF_FPM_ID) + I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, rel_pd_idx); + else if (hw->hmc.hmc_fn_id != hmc_info->hmc_fn_id) + I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, rel_pd_idx, + hmc_info->hmc_fn_id); + } + I40IW_INC_BP_REFCNT(&pd_entry->bp); + + return 0; +} + +/** + * i40iw_remove_pd_bp - remove a backing page from a page descriptor + * @hw: pointer to our HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + * @is_pf: distinguishes a VF from a PF + * + * This function: + * 1. Marks the entry in pd table (for paged address mode) or in sd table + * (for direct address mode) invalid. + * 2. Write to register PMPDINV to invalidate the backing page in FV cache + * 3. Decrement the ref count for the pd _entry + * assumptions: + * 1. Caller can deallocate the memory used by backing storage after this + * function returns. 
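+ * 2. Backing pages that were supplied through rsrc_pg in
+ *    i40iw_add_pd_table_entry are not freed here; only pages allocated
+ *    by this driver are released.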
+ */ +enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw, + struct i40iw_hmc_info *hmc_info, + u32 idx, + bool is_pf) +{ + struct i40iw_hmc_pd_entry *pd_entry; + struct i40iw_hmc_pd_table *pd_table; + struct i40iw_hmc_sd_entry *sd_entry; + u32 sd_idx, rel_pd_idx; + struct i40iw_dma_mem *mem; + u64 *pd_addr; + + sd_idx = idx / I40IW_HMC_PD_CNT_IN_SD; + rel_pd_idx = idx % I40IW_HMC_PD_CNT_IN_SD; + if (sd_idx >= hmc_info->sd_table.sd_cnt) + return I40IW_ERR_INVALID_PAGE_DESC_INDEX; + + sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; + if (sd_entry->entry_type != I40IW_SD_TYPE_PAGED) + return I40IW_ERR_INVALID_SD_TYPE; + + pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + pd_entry = &pd_table->pd_entry[rel_pd_idx]; + I40IW_DEC_BP_REFCNT(&pd_entry->bp); + if (pd_entry->bp.ref_cnt) + return 0; + + pd_entry->valid = false; + I40IW_DEC_PD_REFCNT(pd_table); + pd_addr = (u64 *)pd_table->pd_page_addr.va; + pd_addr += rel_pd_idx; + memset(pd_addr, 0, sizeof(u64)); + if (is_pf) + I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx); + else + I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, + hmc_info->hmc_fn_id); + + if (!pd_entry->rsrc_pg) { + mem = &pd_entry->bp.addr; + if (!mem || !mem->va) + return I40IW_ERR_PARAM; + i40iw_free_dma_mem(hw, mem); + } + if (!pd_table->ref_cnt) + i40iw_free_virt_mem(hw, &pd_table->pd_entry_virt_mem); + + return 0; +} + +/** + * i40iw_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + */ +enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx) +{ + struct i40iw_hmc_sd_entry *sd_entry; + + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + I40IW_DEC_BP_REFCNT(&sd_entry->u.bp); + if (sd_entry->u.bp.ref_cnt) + return I40IW_ERR_NOT_READY; + + I40IW_DEC_SD_REFCNT(&hmc_info->sd_table); + sd_entry->valid = false; + + return 0; +} + +/** + * i40iw_prep_remove_pd_page - Prepares to remove a PD page from sd entry. + * @hmc_info: pointer to the HMC configuration information structure + * @idx: segment descriptor index to find the relevant page descriptor + */ +enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info, + u32 idx) +{ + struct i40iw_hmc_sd_entry *sd_entry; + + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + + if (sd_entry->u.pd_table.ref_cnt) + return I40IW_ERR_NOT_READY; + + sd_entry->valid = false; + I40IW_DEC_SD_REFCNT(&hmc_info->sd_table); + + return 0; +} + +/** + * i40iw_pf_init_vfhmc - + * @vf_cnt_array: array of cnt values of iwarp hmc objects + * @vf_hmc_fn_id: hmc function id ofr vf driver + * @dev: pointer to i40iw_dev struct + * + * Called by pf driver to initialize hmc_info for vf driver instance. 
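+ * If vf_cnt_array is NULL, each object count defaults to that object's
+ * maximum count.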
+ */ +enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, + u8 vf_hmc_fn_id, + u32 *vf_cnt_array) +{ + struct i40iw_hmc_info *hmc_info; + enum i40iw_status_code ret_code = 0; + u32 i; + + if ((vf_hmc_fn_id < I40IW_FIRST_VF_FPM_ID) || + (vf_hmc_fn_id >= I40IW_FIRST_VF_FPM_ID + + I40IW_MAX_PE_ENABLED_VF_COUNT)) { + i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: invalid vf_hmc_fn_id 0x%x\n", + __func__, vf_hmc_fn_id); + return I40IW_ERR_INVALID_HMCFN_ID; + } + + ret_code = i40iw_sc_init_iw_hmc(dev, vf_hmc_fn_id); + if (ret_code) + return ret_code; + + hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, vf_hmc_fn_id); + + for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) + if (vf_cnt_array) + hmc_info->hmc_obj[i].cnt = + vf_cnt_array[i - I40IW_HMC_IW_QP]; + else + hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt; + + return 0; +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.h b/drivers/infiniband/hw/i40iw/i40iw_hmc.h new file mode 100644 index 000000000..4c3fdd875 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_hmc.h @@ -0,0 +1,241 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#ifndef I40IW_HMC_H +#define I40IW_HMC_H + +#include "i40iw_d.h" + +struct i40iw_hw; +enum i40iw_status_code; + +#define I40IW_HMC_MAX_BP_COUNT 512 +#define I40IW_MAX_SD_ENTRIES 11 +#define I40IW_HW_DBG_HMC_INVALID_BP_MARK 0xCA + +#define I40IW_HMC_INFO_SIGNATURE 0x484D5347 +#define I40IW_HMC_PD_CNT_IN_SD 512 +#define I40IW_HMC_DIRECT_BP_SIZE 0x200000 +#define I40IW_HMC_MAX_SD_COUNT 4096 +#define I40IW_HMC_PAGED_BP_SIZE 4096 +#define I40IW_HMC_PD_BP_BUF_ALIGNMENT 4096 +#define I40IW_FIRST_VF_FPM_ID 16 +#define FPM_MULTIPLIER 1024 + +#define I40IW_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++) +#define I40IW_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++) +#define I40IW_INC_BP_REFCNT(bp) ((bp)->ref_cnt++) + +#define I40IW_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--) +#define I40IW_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--) +#define I40IW_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--) + +/** + * I40IW_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware + * @hw: pointer to our hw struct + * @sd_idx: segment descriptor index + * @pd_idx: page descriptor index + */ +#define I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \ + i40iw_wr32((hw), I40E_PFHMC_PDINV, \ + (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ + (0x1 << I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) | \ + ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) + +/** + * I40IW_INVALIDATE_VF_HMC_PD - Invalidates the pd cache in the hardware + * @hw: pointer to our hw struct + * @sd_idx: segment descriptor index + * @pd_idx: page descriptor index + * @hmc_fn_id: VF's function id + */ +#define I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \ + i40iw_wr32(hw, I40E_GLHMC_VFPDINV(hmc_fn_id - I40IW_FIRST_VF_FPM_ID), \ + ((sd_idx << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ + (pd_idx << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) + +struct i40iw_hmc_obj_info { + u64 base; + u32 max_cnt; + u32 cnt; + u64 size; +}; + +enum i40iw_sd_entry_type { + I40IW_SD_TYPE_INVALID = 0, + I40IW_SD_TYPE_PAGED = 1, + I40IW_SD_TYPE_DIRECT = 2 +}; + +struct i40iw_hmc_bp { + enum i40iw_sd_entry_type entry_type; + struct i40iw_dma_mem addr; + u32 sd_pd_index; + u32 ref_cnt; +}; + +struct i40iw_hmc_pd_entry { + struct i40iw_hmc_bp bp; + u32 sd_index; + bool rsrc_pg; + bool valid; +}; + +struct i40iw_hmc_pd_table { + struct i40iw_dma_mem pd_page_addr; + struct i40iw_hmc_pd_entry *pd_entry; + struct i40iw_virt_mem pd_entry_virt_mem; + u32 ref_cnt; + u32 sd_index; +}; + +struct i40iw_hmc_sd_entry { + enum i40iw_sd_entry_type entry_type; + bool valid; + + union { + struct i40iw_hmc_pd_table pd_table; + struct i40iw_hmc_bp bp; + } u; +}; + +struct i40iw_hmc_sd_table { + struct i40iw_virt_mem addr; + u32 sd_cnt; + u32 ref_cnt; + struct i40iw_hmc_sd_entry *sd_entry; +}; + +struct i40iw_hmc_info { + u32 signature; + u8 hmc_fn_id; + u16 first_sd_index; + + struct i40iw_hmc_obj_info *hmc_obj; + struct i40iw_virt_mem hmc_obj_virt_mem; + struct i40iw_hmc_sd_table sd_table; + u16 sd_indexes[I40IW_HMC_MAX_SD_COUNT]; +}; + +struct update_sd_entry { + u64 cmd; + u64 data; +}; + +struct i40iw_update_sds_info { + u32 cnt; + u8 hmc_fn_id; + struct update_sd_entry entry[I40IW_MAX_SD_ENTRIES]; +}; + +struct i40iw_ccq_cqe_info; +struct i40iw_hmc_fcn_info { + void (*callback_fcn)(struct i40iw_sc_dev *, void *, + struct i40iw_ccq_cqe_info *); + void *cqp_callback_param; + u32 vf_id; + u16 iw_vf_idx; + bool free_fcn; +}; + +enum i40iw_hmc_rsrc_type { + I40IW_HMC_IW_QP = 0, + I40IW_HMC_IW_CQ = 1, + 
I40IW_HMC_IW_SRQ = 2, + I40IW_HMC_IW_HTE = 3, + I40IW_HMC_IW_ARP = 4, + I40IW_HMC_IW_APBVT_ENTRY = 5, + I40IW_HMC_IW_MR = 6, + I40IW_HMC_IW_XF = 7, + I40IW_HMC_IW_XFFL = 8, + I40IW_HMC_IW_Q1 = 9, + I40IW_HMC_IW_Q1FL = 10, + I40IW_HMC_IW_TIMER = 11, + I40IW_HMC_IW_FSIMC = 12, + I40IW_HMC_IW_FSIAV = 13, + I40IW_HMC_IW_PBLE = 14, + I40IW_HMC_IW_MAX = 15, +}; + +struct i40iw_hmc_create_obj_info { + struct i40iw_hmc_info *hmc_info; + struct i40iw_virt_mem add_sd_virt_mem; + u32 rsrc_type; + u32 start_idx; + u32 count; + u32 add_sd_cnt; + enum i40iw_sd_entry_type entry_type; + bool is_pf; +}; + +struct i40iw_hmc_del_obj_info { + struct i40iw_hmc_info *hmc_info; + struct i40iw_virt_mem del_sd_virt_mem; + u32 rsrc_type; + u32 start_idx; + u32 count; + u32 del_sd_cnt; + bool is_pf; +}; + +enum i40iw_status_code i40iw_copy_dma_mem(struct i40iw_hw *hw, void *dest_buf, + struct i40iw_dma_mem *src_mem, u64 src_offset, u64 size); +enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev, + struct i40iw_hmc_create_obj_info *info); +enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev, + struct i40iw_hmc_del_obj_info *info, + bool reset); +enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev, u8 hmc_fn_id, + u64 pa, u32 sd_idx, enum i40iw_sd_entry_type type, + bool setsd); +enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev, + struct i40iw_update_sds_info *info); +struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id); +struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev, + u8 hmc_fn_id); +enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw, + struct i40iw_hmc_info *hmc_info, u32 sd_index, + enum i40iw_sd_entry_type type, u64 direct_mode_sz); +enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw, + struct i40iw_hmc_info *hmc_info, u32 pd_index, + struct i40iw_dma_mem *rsrc_pg); +enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw, + struct i40iw_hmc_info *hmc_info, u32 idx, bool is_pf); +enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx); +enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info, u32 idx); + +#define ENTER_SHARED_FUNCTION() +#define EXIT_SHARED_FUNCTION() + +#endif /* I40IW_HMC_H */ diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c new file mode 100644 index 000000000..9fd302425 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c @@ -0,0 +1,730 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/if_vlan.h> + +#include "i40iw.h" + +/** + * i40iw_initialize_hw_resources - initialize hw resource during open + * @iwdev: iwarp device + */ +u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev) +{ + unsigned long num_pds; + u32 resources_size; + u32 max_mr; + u32 max_qp; + u32 max_cq; + u32 arp_table_size; + u32 mrdrvbits; + void *resource_ptr; + + max_qp = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt; + max_cq = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt; + max_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt; + arp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt; + iwdev->max_cqe = 0xFFFFF; + num_pds = max_qp * 4; + resources_size = sizeof(struct i40iw_arp_entry) * arp_table_size; + resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp); + resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr); + resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq); + resources_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds); + resources_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size); + resources_size += sizeof(struct i40iw_qp **) * max_qp; + iwdev->mem_resources = kzalloc(resources_size, GFP_KERNEL); + + if (!iwdev->mem_resources) + return -ENOMEM; + + iwdev->max_qp = max_qp; + iwdev->max_mr = max_mr; + iwdev->max_cq = max_cq; + iwdev->max_pd = num_pds; + iwdev->arp_table_size = arp_table_size; + iwdev->arp_table = (struct i40iw_arp_entry *)iwdev->mem_resources; + resource_ptr = iwdev->mem_resources + (sizeof(struct i40iw_arp_entry) * arp_table_size); + + iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | + IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_MGT_EXTENSIONS; + + iwdev->allocated_qps = resource_ptr; + iwdev->allocated_cqs = &iwdev->allocated_qps[BITS_TO_LONGS(max_qp)]; + iwdev->allocated_mrs = &iwdev->allocated_cqs[BITS_TO_LONGS(max_cq)]; + iwdev->allocated_pds = &iwdev->allocated_mrs[BITS_TO_LONGS(max_mr)]; + iwdev->allocated_arps = &iwdev->allocated_pds[BITS_TO_LONGS(num_pds)]; + iwdev->qp_table = (struct i40iw_qp **)(&iwdev->allocated_arps[BITS_TO_LONGS(arp_table_size)]); + set_bit(0, iwdev->allocated_mrs); + set_bit(0, iwdev->allocated_qps); + set_bit(0, iwdev->allocated_cqs); + set_bit(0, iwdev->allocated_pds); + set_bit(0, iwdev->allocated_arps); + + /* Following for ILQ/IEQ */ + set_bit(1, iwdev->allocated_qps); + set_bit(1, iwdev->allocated_cqs); + set_bit(1, iwdev->allocated_pds); + set_bit(2, iwdev->allocated_cqs); + set_bit(2, iwdev->allocated_pds); + + spin_lock_init(&iwdev->resource_lock); + mrdrvbits = 24 - get_count_order(iwdev->max_mr); + iwdev->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits)); + return 0; +} + +/** + * i40iw_cqp_ce_handler - handle cqp completions + * @iwdev: iwarp device + * @arm: flag to arm after 
completions + * @cq: cq for cqp completions + */ +static void i40iw_cqp_ce_handler(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq, bool arm) +{ + struct i40iw_cqp_request *cqp_request; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + u32 cqe_count = 0; + struct i40iw_ccq_cqe_info info; + int ret; + + do { + memset(&info, 0, sizeof(info)); + ret = dev->ccq_ops->ccq_get_cqe_info(cq, &info); + if (ret) + break; + cqp_request = (struct i40iw_cqp_request *)(unsigned long)info.scratch; + if (info.error) + i40iw_pr_err("opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n", + info.op_code, info.maj_err_code, info.min_err_code); + if (cqp_request) { + cqp_request->compl_info.maj_err_code = info.maj_err_code; + cqp_request->compl_info.min_err_code = info.min_err_code; + cqp_request->compl_info.op_ret_val = info.op_ret_val; + cqp_request->compl_info.error = info.error; + + if (cqp_request->waiting) { + cqp_request->request_done = true; + wake_up(&cqp_request->waitq); + i40iw_put_cqp_request(&iwdev->cqp, cqp_request); + } else { + if (cqp_request->callback_fcn) + cqp_request->callback_fcn(cqp_request, 1); + i40iw_put_cqp_request(&iwdev->cqp, cqp_request); + } + } + + cqe_count++; + } while (1); + + if (arm && cqe_count) { + i40iw_process_bh(dev); + dev->ccq_ops->ccq_arm(cq); + } +} + +/** + * i40iw_iwarp_ce_handler - handle iwarp completions + * @iwdev: iwarp device + * @iwcp: iwarp cq receiving event + */ +static void i40iw_iwarp_ce_handler(struct i40iw_device *iwdev, + struct i40iw_sc_cq *iwcq) +{ + struct i40iw_cq *i40iwcq = iwcq->back_cq; + + if (i40iwcq->ibcq.comp_handler) + i40iwcq->ibcq.comp_handler(&i40iwcq->ibcq, + i40iwcq->ibcq.cq_context); +} + +/** + * i40iw_puda_ce_handler - handle puda completion events + * @iwdev: iwarp device + * @cq: puda completion q for event + */ +static void i40iw_puda_ce_handler(struct i40iw_device *iwdev, + struct i40iw_sc_cq *cq) +{ + struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)&iwdev->sc_dev; + enum i40iw_status_code status; + u32 compl_error; + + do { + status = i40iw_puda_poll_completion(dev, cq, &compl_error); + if (status == I40IW_ERR_QUEUE_EMPTY) + break; + if (status) { + i40iw_pr_err("puda status = %d\n", status); + break; + } + if (compl_error) { + i40iw_pr_err("puda compl_err =0x%x\n", compl_error); + break; + } + } while (1); + + dev->ccq_ops->ccq_arm(cq); +} + +/** + * i40iw_process_ceq - handle ceq for completions + * @iwdev: iwarp device + * @ceq: ceq having cq for completion + */ +void i40iw_process_ceq(struct i40iw_device *iwdev, struct i40iw_ceq *ceq) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_sc_ceq *sc_ceq; + struct i40iw_sc_cq *cq; + bool arm = true; + + sc_ceq = &ceq->sc_ceq; + do { + cq = dev->ceq_ops->process_ceq(dev, sc_ceq); + if (!cq) + break; + + if (cq->cq_type == I40IW_CQ_TYPE_CQP) + i40iw_cqp_ce_handler(iwdev, cq, arm); + else if (cq->cq_type == I40IW_CQ_TYPE_IWARP) + i40iw_iwarp_ce_handler(iwdev, cq); + else if ((cq->cq_type == I40IW_CQ_TYPE_ILQ) || + (cq->cq_type == I40IW_CQ_TYPE_IEQ)) + i40iw_puda_ce_handler(iwdev, cq); + } while (1); +} + +/** + * i40iw_next_iw_state - modify qp state + * @iwqp: iwarp qp to modify + * @state: next state for qp + * @del_hash: del hash + * @term: term message + * @termlen: length of term message + */ +void i40iw_next_iw_state(struct i40iw_qp *iwqp, + u8 state, + u8 del_hash, + u8 term, + u8 termlen) +{ + struct i40iw_modify_qp_info info; + + memset(&info, 0, sizeof(info)); + info.next_iwarp_state = state; + info.remove_hash_idx = del_hash; + info.cq_num_valid = 
true; + info.arp_cache_idx_valid = true; + info.dont_send_term = true; + info.dont_send_fin = true; + info.termlen = termlen; + + if (term & I40IWQP_TERM_SEND_TERM_ONLY) + info.dont_send_term = false; + if (term & I40IWQP_TERM_SEND_FIN_ONLY) + info.dont_send_fin = false; + if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR)) + info.reset_tcp_conn = true; + i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0); +} + +/** + * i40iw_process_aeq - handle aeq events + * @iwdev: iwarp device + */ +void i40iw_process_aeq(struct i40iw_device *iwdev) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_aeq *aeq = &iwdev->aeq; + struct i40iw_sc_aeq *sc_aeq = &aeq->sc_aeq; + struct i40iw_aeqe_info aeinfo; + struct i40iw_aeqe_info *info = &aeinfo; + int ret; + struct i40iw_qp *iwqp = NULL; + struct i40iw_sc_cq *cq = NULL; + struct i40iw_cq *iwcq = NULL; + struct i40iw_sc_qp *qp = NULL; + struct i40iw_qp_host_ctx_info *ctx_info = NULL; + unsigned long flags; + + u32 aeqcnt = 0; + + if (!sc_aeq->size) + return; + + do { + memset(info, 0, sizeof(*info)); + ret = dev->aeq_ops->get_next_aeqe(sc_aeq, info); + if (ret) + break; + + aeqcnt++; + i40iw_debug(dev, I40IW_DEBUG_AEQ, + "%s ae_id = 0x%x bool qp=%d qp_id = %d\n", + __func__, info->ae_id, info->qp, info->qp_cq_id); + if (info->qp) { + iwqp = iwdev->qp_table[info->qp_cq_id]; + if (!iwqp) { + i40iw_pr_err("qp_id %d is already freed\n", info->qp_cq_id); + continue; + } + qp = &iwqp->sc_qp; + spin_lock_irqsave(&iwqp->lock, flags); + iwqp->hw_tcp_state = info->tcp_state; + iwqp->hw_iwarp_state = info->iwarp_state; + iwqp->last_aeq = info->ae_id; + spin_unlock_irqrestore(&iwqp->lock, flags); + ctx_info = &iwqp->ctx_info; + ctx_info->err_rq_idx_valid = true; + } else { + if (info->ae_id != I40IW_AE_CQ_OPERATION_ERROR) + continue; + } + + switch (info->ae_id) { + case I40IW_AE_LLP_FIN_RECEIVED: + if (qp->term_flags) + continue; + if (atomic_inc_return(&iwqp->close_timer_started) == 1) { + iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT; + if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) && + (iwqp->ibqp_state == IB_QPS_RTS)) { + i40iw_next_iw_state(iwqp, + I40IW_QP_STATE_CLOSING, 0, 0, 0); + i40iw_cm_disconn(iwqp); + } + iwqp->cm_id->add_ref(iwqp->cm_id); + i40iw_schedule_cm_timer(iwqp->cm_node, + (struct i40iw_puda_buf *)iwqp, + I40IW_TIMER_TYPE_CLOSE, 1, 0); + } + break; + case I40IW_AE_LLP_CLOSE_COMPLETE: + if (qp->term_flags) + i40iw_terminate_done(qp, 0); + else + i40iw_cm_disconn(iwqp); + break; + case I40IW_AE_RESET_SENT: + i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0); + i40iw_cm_disconn(iwqp); + break; + case I40IW_AE_LLP_CONNECTION_RESET: + if (atomic_read(&iwqp->close_timer_started)) + continue; + i40iw_cm_disconn(iwqp); + break; + case I40IW_AE_TERMINATE_SENT: + i40iw_terminate_send_fin(qp); + break; + case I40IW_AE_LLP_TERMINATE_RECEIVED: + i40iw_terminate_received(qp, info); + break; + case I40IW_AE_CQ_OPERATION_ERROR: + i40iw_pr_err("Processing an iWARP related AE for CQ misc = 0x%04X\n", + info->ae_id); + cq = (struct i40iw_sc_cq *)(unsigned long)info->compl_ctx; + iwcq = (struct i40iw_cq *)cq->back_cq; + + if (iwcq->ibcq.event_handler) { + struct ib_event ibevent; + + ibevent.device = iwcq->ibcq.device; + ibevent.event = IB_EVENT_CQ_ERR; + ibevent.element.cq = &iwcq->ibcq; + iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context); + } + break; + case I40IW_AE_PRIV_OPERATION_DENIED: + case I40IW_AE_STAG_ZERO_INVALID: + case I40IW_AE_IB_RREQ_AND_Q1_FULL: + case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION: + case 
I40IW_AE_DDP_UBE_INVALID_MO: + case I40IW_AE_DDP_UBE_INVALID_QN: + case I40IW_AE_DDP_NO_L_BIT: + case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION: + case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE: + case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST: + case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: + case I40IW_AE_INVALID_ARP_ENTRY: + case I40IW_AE_INVALID_TCP_OPTION_RCVD: + case I40IW_AE_STALE_ARP_ENTRY: + case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR: + case I40IW_AE_LLP_SEGMENT_TOO_SMALL: + case I40IW_AE_LLP_SYN_RECEIVED: + case I40IW_AE_LLP_TOO_MANY_RETRIES: + case I40IW_AE_LLP_DOUBT_REACHABILITY: + case I40IW_AE_LCE_QP_CATASTROPHIC: + case I40IW_AE_LCE_FUNCTION_CATASTROPHIC: + case I40IW_AE_LCE_CQ_CATASTROPHIC: + case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG: + case I40IW_AE_UDA_XMIT_IPADDR_MISMATCH: + case I40IW_AE_QP_SUSPEND_COMPLETE: + ctx_info->err_rq_idx_valid = false; + default: + if (!info->sq && ctx_info->err_rq_idx_valid) { + ctx_info->err_rq_idx = info->wqe_idx; + ctx_info->tcp_info_valid = false; + ctx_info->iwarp_info_valid = false; + ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, + iwqp->host_ctx.va, + ctx_info); + } + i40iw_terminate_connection(qp, info); + break; + } + } while (1); + + if (aeqcnt) + dev->aeq_ops->repost_aeq_entries(dev, aeqcnt); +} + +/** + * i40iw_manage_apbvt - add or delete tcp port + * @iwdev: iwarp device + * @accel_local_port: port for apbvt + * @add_port: add or delete port + */ +int i40iw_manage_apbvt(struct i40iw_device *iwdev, u16 accel_local_port, bool add_port) +{ + struct i40iw_apbvt_info *info; + enum i40iw_status_code status; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + info = &cqp_info->in.u.manage_apbvt_entry.info; + + memset(info, 0, sizeof(*info)); + info->add = add_port; + info->port = cpu_to_le16(accel_local_port); + + cqp_info->cqp_cmd = OP_MANAGE_APBVT_ENTRY; + cqp_info->post_sq = 1; + cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->cqp.sc_cqp; + cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Manage APBVT entry fail"); + return status; +} + +/** + * i40iw_manage_arp_cache - manage hw arp cache + * @iwdev: iwarp device + * @mac_addr: mac address ptr + * @ip_addr: ip addr for arp cache + * @action: add, delete or modify + */ +void i40iw_manage_arp_cache(struct i40iw_device *iwdev, + unsigned char *mac_addr, + __be32 *ip_addr, + bool ipv4, + u32 action) +{ + struct i40iw_add_arp_cache_entry_info *info; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + int arp_index; + + arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action); + if (arp_index == -1) + return; + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false); + if (!cqp_request) + return; + + cqp_info = &cqp_request->info; + if (action == I40IW_ARP_ADD) { + cqp_info->cqp_cmd = OP_ADD_ARP_CACHE_ENTRY; + info = &cqp_info->in.u.add_arp_cache_entry.info; + memset(info, 0, sizeof(*info)); + info->arp_index = cpu_to_le32(arp_index); + info->permanent = true; + ether_addr_copy(info->mac_addr, mac_addr); + cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request; + cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp; + } else { + cqp_info->cqp_cmd = OP_DELETE_ARP_CACHE_ENTRY; + cqp_info->in.u.del_arp_cache_entry.scratch = (uintptr_t)cqp_request; + 
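+ /* for a delete, only the cqp handle and the entry's arp_index are needed */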
cqp_info->in.u.del_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp; + cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index; + } + + cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp; + cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request; + cqp_info->post_sq = 1; + if (i40iw_handle_cqp_op(iwdev, cqp_request)) + i40iw_pr_err("CQP-OP Add/Del Arp Cache entry fail"); +} + +/** + * i40iw_send_syn_cqp_callback - do syn/ack after qhash + * @cqp_request: qhash cqp completion + * @send_ack: flag send ack + */ +static void i40iw_send_syn_cqp_callback(struct i40iw_cqp_request *cqp_request, u32 send_ack) +{ + i40iw_send_syn(cqp_request->param, send_ack); +} + +/** + * i40iw_manage_qhash - add or modify qhash + * @iwdev: iwarp device + * @cminfo: cm info for qhash + * @etype: type (syn or quad) + * @mtype: type of qhash + * @cmnode: cmnode associated with connection + * @wait: wait for completion + * @user_pri:user pri of the connection + */ +enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev, + struct i40iw_cm_info *cminfo, + enum i40iw_quad_entry_type etype, + enum i40iw_quad_hash_manage_type mtype, + void *cmnode, + bool wait) +{ + struct i40iw_qhash_table_info *info; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + enum i40iw_status_code status; + struct i40iw_cqp *iwcqp = &iwdev->cqp; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + + cqp_request = i40iw_get_cqp_request(iwcqp, wait); + if (!cqp_request) + return I40IW_ERR_NO_MEMORY; + cqp_info = &cqp_request->info; + info = &cqp_info->in.u.manage_qhash_table_entry.info; + memset(info, 0, sizeof(*info)); + + info->manage = mtype; + info->entry_type = etype; + if (cminfo->vlan_id != 0xFFFF) { + info->vlan_valid = true; + info->vlan_id = cpu_to_le16(cminfo->vlan_id); + } else { + info->vlan_valid = false; + } + + info->ipv4_valid = cminfo->ipv4; + ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr); + info->qp_num = cpu_to_le32(dev->ilq->qp_id); + info->dest_port = cpu_to_le16(cminfo->loc_port); + info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]); + info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]); + info->dest_ip[2] = cpu_to_le32(cminfo->loc_addr[2]); + info->dest_ip[3] = cpu_to_le32(cminfo->loc_addr[3]); + if (etype == I40IW_QHASH_TYPE_TCP_ESTABLISHED) { + info->src_port = cpu_to_le16(cminfo->rem_port); + info->src_ip[0] = cpu_to_le32(cminfo->rem_addr[0]); + info->src_ip[1] = cpu_to_le32(cminfo->rem_addr[1]); + info->src_ip[2] = cpu_to_le32(cminfo->rem_addr[2]); + info->src_ip[3] = cpu_to_le32(cminfo->rem_addr[3]); + } + if (cmnode) { + cqp_request->callback_fcn = i40iw_send_syn_cqp_callback; + cqp_request->param = (void *)cmnode; + } + + if (info->ipv4_valid) + i40iw_debug(dev, I40IW_DEBUG_CM, + "%s:%s IP=%pI4, port=%d, mac=%pM, vlan_id=%d\n", + __func__, (!mtype) ? "DELETE" : "ADD", + info->dest_ip, + info->dest_port, info->mac_addr, cminfo->vlan_id); + else + i40iw_debug(dev, I40IW_DEBUG_CM, + "%s:%s IP=%pI6, port=%d, mac=%pM, vlan_id=%d\n", + __func__, (!mtype) ? 
"DELETE" : "ADD", + info->dest_ip, + info->dest_port, info->mac_addr, cminfo->vlan_id); + cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->cqp.sc_cqp; + cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request; + cqp_info->cqp_cmd = OP_MANAGE_QHASH_TABLE_ENTRY; + cqp_info->post_sq = 1; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Manage Qhash Entry fail"); + return status; +} + +/** + * i40iw_hw_flush_wqes - flush qp's wqe + * @iwdev: iwarp device + * @qp: hardware control qp + * @info: info for flush + * @wait: flag wait for completion + */ +enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev, + struct i40iw_sc_qp *qp, + struct i40iw_qp_flush_info *info, + bool wait) +{ + enum i40iw_status_code status; + struct i40iw_qp_flush_info *hw_info; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait); + if (!cqp_request) + return I40IW_ERR_NO_MEMORY; + + cqp_info = &cqp_request->info; + hw_info = &cqp_request->info.in.u.qp_flush_wqes.info; + memcpy(hw_info, info, sizeof(*hw_info)); + + cqp_info->cqp_cmd = OP_QP_FLUSH_WQES; + cqp_info->post_sq = 1; + cqp_info->in.u.qp_flush_wqes.qp = qp; + cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Flush WQE's fail"); + return status; +} + +/** + * i40iw_hw_manage_vf_pble_bp - manage vf pbles + * @iwdev: iwarp device + * @info: info for managing pble + * @wait: flag wait for completion + */ +enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev, + struct i40iw_manage_vf_pble_info *info, + bool wait) +{ + enum i40iw_status_code status; + struct i40iw_manage_vf_pble_info *hw_info; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + + if ((iwdev->init_state < CCQ_CREATED) && wait) + wait = false; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait); + if (!cqp_request) + return I40IW_ERR_NO_MEMORY; + + cqp_info = &cqp_request->info; + hw_info = &cqp_request->info.in.u.manage_vf_pble_bp.info; + memcpy(hw_info, info, sizeof(*hw_info)); + + cqp_info->cqp_cmd = OP_MANAGE_VF_PBLE_BP; + cqp_info->post_sq = 1; + cqp_info->in.u.manage_vf_pble_bp.cqp = &iwdev->cqp.sc_cqp; + cqp_info->in.u.manage_vf_pble_bp.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Manage VF pble_bp fail"); + return status; +} + +/** + * i40iw_get_ib_wc - return change flush code to IB's + * @opcode: iwarp flush code + */ +static enum ib_wc_status i40iw_get_ib_wc(enum i40iw_flush_opcode opcode) +{ + switch (opcode) { + case FLUSH_PROT_ERR: + return IB_WC_LOC_PROT_ERR; + case FLUSH_REM_ACCESS_ERR: + return IB_WC_REM_ACCESS_ERR; + case FLUSH_LOC_QP_OP_ERR: + return IB_WC_LOC_QP_OP_ERR; + case FLUSH_REM_OP_ERR: + return IB_WC_REM_OP_ERR; + case FLUSH_LOC_LEN_ERR: + return IB_WC_LOC_LEN_ERR; + case FLUSH_GENERAL_ERR: + return IB_WC_GENERAL_ERR; + case FLUSH_FATAL_ERR: + default: + return IB_WC_FATAL_ERR; + } +} + +/** + * i40iw_set_flush_info - set flush info + * @pinfo: set flush info + * @min: minor err + * @maj: major err + * @opcode: flush error code + */ +static void i40iw_set_flush_info(struct i40iw_qp_flush_info *pinfo, + u16 *min, + u16 *maj, + enum i40iw_flush_opcode opcode) +{ + *min = (u16)i40iw_get_ib_wc(opcode); + *maj = CQE_MAJOR_DRV; + pinfo->userflushcode = true; +} + +/** + * 
i40iw_flush_wqes - flush wqe for qp + * @iwdev: iwarp device + * @iwqp: qp to flush wqes + */ +void i40iw_flush_wqes(struct i40iw_device *iwdev, struct i40iw_qp *iwqp) +{ + struct i40iw_qp_flush_info info; + struct i40iw_qp_flush_info *pinfo = &info; + + struct i40iw_sc_qp *qp = &iwqp->sc_qp; + + memset(pinfo, 0, sizeof(*pinfo)); + info.sq = true; + info.rq = true; + if (qp->term_flags) { + i40iw_set_flush_info(pinfo, &pinfo->sq_minor_code, + &pinfo->sq_major_code, qp->flush_code); + i40iw_set_flush_info(pinfo, &pinfo->rq_minor_code, + &pinfo->rq_major_code, qp->flush_code); + } + (void)i40iw_hw_flush_wqes(iwdev, &iwqp->sc_qp, &info, true); +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c new file mode 100644 index 000000000..90e5af217 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -0,0 +1,1910 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/if_vlan.h> +#include <net/addrconf.h> + +#include "i40iw.h" +#include "i40iw_register.h" +#include <net/netevent.h> +#define CLIENT_IW_INTERFACE_VERSION_MAJOR 0 +#define CLIENT_IW_INTERFACE_VERSION_MINOR 01 +#define CLIENT_IW_INTERFACE_VERSION_BUILD 00 + +#define DRV_VERSION_MAJOR 0 +#define DRV_VERSION_MINOR 5 +#define DRV_VERSION_BUILD 123 +#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ + __stringify(DRV_VERSION_MINOR) "." 
__stringify(DRV_VERSION_BUILD) + +static int push_mode; +module_param(push_mode, int, 0644); +MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled)"); + +static int debug; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all"); + +static int resource_profile; +module_param(resource_profile, int, 0644); +MODULE_PARM_DESC(resource_profile, + "Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution"); + +static int max_rdma_vfs = 32; +module_param(max_rdma_vfs, int, 0644); +MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32 32=default"); +static int mpa_version = 2; +module_param(mpa_version, int, 0644); +MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2"); + +MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>"); +MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_VERSION(DRV_VERSION); + +static struct i40e_client i40iw_client; +static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw"; + +static LIST_HEAD(i40iw_handlers); +static spinlock_t i40iw_handler_lock; + +static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev, + u32 vf_id, u8 *msg, u16 len); + +static struct notifier_block i40iw_inetaddr_notifier = { + .notifier_call = i40iw_inetaddr_event +}; + +static struct notifier_block i40iw_inetaddr6_notifier = { + .notifier_call = i40iw_inet6addr_event +}; + +static struct notifier_block i40iw_net_notifier = { + .notifier_call = i40iw_net_event +}; + +static int i40iw_notifiers_registered; + +/** + * i40iw_find_i40e_handler - find a handler given a client info + * @ldev: pointer to a client info + */ +static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev) +{ + struct i40iw_handler *hdl; + unsigned long flags; + + spin_lock_irqsave(&i40iw_handler_lock, flags); + list_for_each_entry(hdl, &i40iw_handlers, list) { + if (hdl->ldev.netdev == ldev->netdev) { + spin_unlock_irqrestore(&i40iw_handler_lock, flags); + return hdl; + } + } + spin_unlock_irqrestore(&i40iw_handler_lock, flags); + return NULL; +} + +/** + * i40iw_find_netdev - find a handler given a netdev + * @netdev: pointer to net_device + */ +struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev) +{ + struct i40iw_handler *hdl; + unsigned long flags; + + spin_lock_irqsave(&i40iw_handler_lock, flags); + list_for_each_entry(hdl, &i40iw_handlers, list) { + if (hdl->ldev.netdev == netdev) { + spin_unlock_irqrestore(&i40iw_handler_lock, flags); + return hdl; + } + } + spin_unlock_irqrestore(&i40iw_handler_lock, flags); + return NULL; +} + +/** + * i40iw_add_handler - add a handler to the list + * @hdl: handler to be added to the handler list + */ +static void i40iw_add_handler(struct i40iw_handler *hdl) +{ + unsigned long flags; + + spin_lock_irqsave(&i40iw_handler_lock, flags); + list_add(&hdl->list, &i40iw_handlers); + spin_unlock_irqrestore(&i40iw_handler_lock, flags); +} + +/** + * i40iw_del_handler - delete a handler from the list + * @hdl: handler to be deleted from the handler list + */ +static int i40iw_del_handler(struct i40iw_handler *hdl) +{ + unsigned long flags; + + spin_lock_irqsave(&i40iw_handler_lock, flags); + list_del(&hdl->list); + spin_unlock_irqrestore(&i40iw_handler_lock, flags); + return 0; +} + +/** + * i40iw_enable_intr - set up device interrupts + * @dev: hardware control device structure + * @msix_id: id of the interrupt to be enabled + 
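+ *
+ * Sets INTENA and CLEARPBA in the dynamic interrupt control register
+ * (PFINT_DYN_CTLN for the PF, VFINT_DYN_CTLN1 for a VF) for this vector.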
*/ +static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id) +{ + u32 val; + + val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + (3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); + if (dev->is_pf) + i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val); + else + i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val); +} + +/** + * i40iw_dpc - tasklet for aeq and ceq 0 + * @data: iwarp device + */ +static void i40iw_dpc(unsigned long data) +{ + struct i40iw_device *iwdev = (struct i40iw_device *)data; + + if (iwdev->msix_shared) + i40iw_process_ceq(iwdev, iwdev->ceqlist); + i40iw_process_aeq(iwdev); + i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx); +} + +/** + * i40iw_ceq_dpc - dpc handler for CEQ + * @data: data points to CEQ + */ +static void i40iw_ceq_dpc(unsigned long data) +{ + struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data; + struct i40iw_device *iwdev = iwceq->iwdev; + + i40iw_process_ceq(iwdev, iwceq); + i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx); +} + +/** + * i40iw_irq_handler - interrupt handler for aeq and ceq0 + * @irq: Interrupt request number + * @data: iwarp device + */ +static irqreturn_t i40iw_irq_handler(int irq, void *data) +{ + struct i40iw_device *iwdev = (struct i40iw_device *)data; + + tasklet_schedule(&iwdev->dpc_tasklet); + return IRQ_HANDLED; +} + +/** + * i40iw_destroy_cqp - destroy control qp + * @iwdev: iwarp device + * @create_done: 1 if cqp create poll was success + * + * Issue destroy cqp request and + * free the resources associated with the cqp + */ +static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp) +{ + enum i40iw_status_code status = 0; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_cqp *cqp = &iwdev->cqp; + + if (free_hwcqp && dev->cqp_ops->cqp_destroy) + status = dev->cqp_ops->cqp_destroy(dev->cqp); + if (status) + i40iw_pr_err("destroy cqp failed"); + + i40iw_free_dma_mem(dev->hw, &cqp->sq); + kfree(cqp->scratch_array); + iwdev->cqp.scratch_array = NULL; + + kfree(cqp->cqp_requests); + cqp->cqp_requests = NULL; +} + +/** + * i40iw_disable_irqs - disable device interrupts + * @dev: hardware control device structure + * @msic_vec: msix vector to disable irq + * @dev_id: parameter to pass to free_irq (used during irq setup) + * + * The function is called when destroying aeq/ceq + */ +static void i40iw_disable_irq(struct i40iw_sc_dev *dev, + struct i40iw_msix_vector *msix_vec, + void *dev_id) +{ + if (dev->is_pf) + i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0); + else + i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0); + synchronize_irq(msix_vec->irq); + free_irq(msix_vec->irq, dev_id); +} + +/** + * i40iw_destroy_aeq - destroy aeq + * @iwdev: iwarp device + * @reset: true if called before reset + * + * Issue a destroy aeq request and + * free the resources associated with the aeq + * The function is called during driver unload + */ +static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset) +{ + enum i40iw_status_code status = I40IW_ERR_NOT_READY; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_aeq *aeq = &iwdev->aeq; + + if (!iwdev->msix_shared) + i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev); + if (reset) + goto exit; + + if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1)) + status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq); + if (status) + i40iw_pr_err("destroy aeq failed %d\n", status); + +exit: + i40iw_free_dma_mem(dev->hw, &aeq->mem); +} + +/** + * i40iw_destroy_ceq - 
destroy ceq + * @iwdev: iwarp device + * @iwceq: ceq to be destroyed + * @reset: true if called before reset + * + * Issue a destroy ceq request and + * free the resources associated with the ceq + */ +static void i40iw_destroy_ceq(struct i40iw_device *iwdev, + struct i40iw_ceq *iwceq, + bool reset) +{ + enum i40iw_status_code status; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + + if (reset) + goto exit; + + status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1); + if (status) { + i40iw_pr_err("ceq destroy command failed %d\n", status); + goto exit; + } + + status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq); + if (status) + i40iw_pr_err("ceq destroy completion failed %d\n", status); +exit: + i40iw_free_dma_mem(dev->hw, &iwceq->mem); +} + +/** + * i40iw_dele_ceqs - destroy all ceq's + * @iwdev: iwarp device + * @reset: true if called before reset + * + * Go through all of the device ceq's and for each ceq + * disable the ceq interrupt and destroy the ceq + */ +static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset) +{ + u32 i = 0; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_ceq *iwceq = iwdev->ceqlist; + struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl; + + if (iwdev->msix_shared) { + i40iw_disable_irq(dev, msix_vec, (void *)iwdev); + i40iw_destroy_ceq(iwdev, iwceq, reset); + iwceq++; + i++; + } + + for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) { + i40iw_disable_irq(dev, msix_vec, (void *)iwceq); + i40iw_destroy_ceq(iwdev, iwceq, reset); + } +} + +/** + * i40iw_destroy_ccq - destroy control cq + * @iwdev: iwarp device + * @reset: true if called before reset + * + * Issue destroy ccq request and + * free the resources associated with the ccq + */ +static void i40iw_destroy_ccq(struct i40iw_device *iwdev, bool reset) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_ccq *ccq = &iwdev->ccq; + enum i40iw_status_code status = 0; + + if (!reset) + status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true); + if (status) + i40iw_pr_err("ccq destroy failed %d\n", status); + i40iw_free_dma_mem(dev->hw, &ccq->mem_cq); +} + +/* types of hmc objects */ +static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = { + I40IW_HMC_IW_QP, + I40IW_HMC_IW_CQ, + I40IW_HMC_IW_HTE, + I40IW_HMC_IW_ARP, + I40IW_HMC_IW_APBVT_ENTRY, + I40IW_HMC_IW_MR, + I40IW_HMC_IW_XF, + I40IW_HMC_IW_XFFL, + I40IW_HMC_IW_Q1, + I40IW_HMC_IW_Q1FL, + I40IW_HMC_IW_TIMER, +}; + +/** + * i40iw_close_hmc_objects_type - delete hmc objects of a given type + * @iwdev: iwarp device + * @obj_type: the hmc object type to be deleted + * @is_pf: true if the function is PF otherwise false + * @reset: true if called before reset + */ +static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev, + enum i40iw_hmc_rsrc_type obj_type, + struct i40iw_hmc_info *hmc_info, + bool is_pf, + bool reset) +{ + struct i40iw_hmc_del_obj_info info; + + memset(&info, 0, sizeof(info)); + info.hmc_info = hmc_info; + info.rsrc_type = obj_type; + info.count = hmc_info->hmc_obj[obj_type].cnt; + info.is_pf = is_pf; + if (dev->hmc_ops->del_hmc_object(dev, &info, reset)) + i40iw_pr_err("del obj of type %d failed\n", obj_type); +} + +/** + * i40iw_del_hmc_objects - remove all device hmc objects + * @dev: iwarp device + * @hmc_info: hmc_info to free + * @is_pf: true if hmc_info belongs to PF, not vf nor allocated + * by PF on behalf of VF + * @reset: true if called before reset + */ +static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev, + struct i40iw_hmc_info *hmc_info, + bool is_pf, + bool reset) 
+{ + unsigned int i; + + for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) + i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset); +} + +/** + * i40iw_ceq_handler - interrupt handler for ceq + * @data: ceq pointer + */ +static irqreturn_t i40iw_ceq_handler(int irq, void *data) +{ + struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data; + + if (iwceq->irq != irq) + i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq); + tasklet_schedule(&iwceq->dpc_tasklet); + return IRQ_HANDLED; +} + +/** + * i40iw_create_hmc_obj_type - create hmc object of a given type + * @dev: hardware control device structure + * @info: information for the hmc object to create + */ +static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev, + struct i40iw_hmc_create_obj_info *info) +{ + return dev->hmc_ops->create_hmc_object(dev, info); +} + +/** + * i40iw_create_hmc_objs - create all hmc objects for the device + * @iwdev: iwarp device + * @is_pf: true if the function is PF otherwise false + * + * Create the device hmc objects and allocate hmc pages + * Return 0 if successful, otherwise clean up and return error + */ +static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev, + bool is_pf) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_hmc_create_obj_info info; + enum i40iw_status_code status; + int i; + + memset(&info, 0, sizeof(info)); + info.hmc_info = dev->hmc_info; + info.is_pf = is_pf; + info.entry_type = iwdev->sd_type; + for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) { + info.rsrc_type = iw_hmc_obj_types[i]; + info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt; + status = i40iw_create_hmc_obj_type(dev, &info); + if (status) { + i40iw_pr_err("create obj type %d status = %d\n", + iw_hmc_obj_types[i], status); + break; + } + } + if (!status) + return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0, + dev->hmc_fn_id, + true, true)); + + while (i) { + i--; + /* destroy the hmc objects of a given type */ + i40iw_close_hmc_objects_type(dev, + iw_hmc_obj_types[i], + dev->hmc_info, + is_pf, + false); + } + return status; +} + +/** + * i40iw_obj_aligned_mem - get aligned memory from device allocated memory + * @iwdev: iwarp device + * @memptr: points to the memory addresses + * @size: size of memory needed + * @mask: mask for the aligned memory + * + * Get aligned memory of the requested size and + * update the memptr to point to the new aligned memory + * Return 0 if successful, otherwise return no memory error + */ +enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev, + struct i40iw_dma_mem *memptr, + u32 size, + u32 mask) +{ + unsigned long va, newva; + unsigned long extra; + + va = (unsigned long)iwdev->obj_next.va; + newva = va; + if (mask) + newva = ALIGN(va, (mask + 1)); + extra = newva - va; + memptr->va = (u8 *)va + extra; + memptr->pa = iwdev->obj_next.pa + extra; + memptr->size = size; + if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size)) + return I40IW_ERR_NO_MEMORY; + + iwdev->obj_next.va = memptr->va + size; + iwdev->obj_next.pa = memptr->pa + size; + return 0; +} + +/** + * i40iw_create_cqp - create control qp + * @iwdev: iwarp device + * + * Return 0, if the cqp and all the resources associated with it + * are successfully created, otherwise return error + */ +static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev) +{ + enum i40iw_status_code status; + u32 sqsize = I40IW_CQP_SW_SQSIZE_2048; + struct i40iw_dma_mem mem; + struct i40iw_sc_dev *dev = 
&iwdev->sc_dev; + struct i40iw_cqp_init_info cqp_init_info; + struct i40iw_cqp *cqp = &iwdev->cqp; + u16 maj_err, min_err; + int i; + + cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL); + if (!cqp->cqp_requests) + return I40IW_ERR_NO_MEMORY; + cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL); + if (!cqp->scratch_array) { + kfree(cqp->cqp_requests); + return I40IW_ERR_NO_MEMORY; + } + dev->cqp = &cqp->sc_cqp; + dev->cqp->dev = dev; + memset(&cqp_init_info, 0, sizeof(cqp_init_info)); + status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq, + (sizeof(struct i40iw_cqp_sq_wqe) * sqsize), + I40IW_CQP_ALIGNMENT); + if (status) + goto exit; + status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx), + I40IW_HOST_CTX_ALIGNMENT_MASK); + if (status) + goto exit; + dev->cqp->host_ctx_pa = mem.pa; + dev->cqp->host_ctx = mem.va; + /* populate the cqp init info */ + cqp_init_info.dev = dev; + cqp_init_info.sq_size = sqsize; + cqp_init_info.sq = cqp->sq.va; + cqp_init_info.sq_pa = cqp->sq.pa; + cqp_init_info.host_ctx_pa = mem.pa; + cqp_init_info.host_ctx = mem.va; + cqp_init_info.hmc_profile = iwdev->resource_profile; + cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs; + cqp_init_info.scratch_array = cqp->scratch_array; + status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info); + if (status) { + i40iw_pr_err("cqp init status %d maj_err %d min_err %d\n", + status, maj_err, min_err); + goto exit; + } + status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err); + if (status) { + i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n", + status, maj_err, min_err); + goto exit; + } + spin_lock_init(&cqp->req_lock); + INIT_LIST_HEAD(&cqp->cqp_avail_reqs); + INIT_LIST_HEAD(&cqp->cqp_pending_reqs); + /* init the waitq of the cqp_requests and add them to the list */ + for (i = 0; i < I40IW_CQP_SW_SQSIZE_2048; i++) { + init_waitqueue_head(&cqp->cqp_requests[i].waitq); + list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs); + } + return 0; +exit: + /* clean up the created resources */ + i40iw_destroy_cqp(iwdev, false); + return status; +} + +/** + * i40iw_create_ccq - create control cq + * @iwdev: iwarp device + * + * Return 0, if the ccq and the resources associated with it + * are successfully created, otherwise return error + */ +static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_dma_mem mem; + enum i40iw_status_code status; + struct i40iw_ccq_init_info info; + struct i40iw_ccq *ccq = &iwdev->ccq; + + memset(&info, 0, sizeof(info)); + dev->ccq = &ccq->sc_cq; + dev->ccq->dev = dev; + info.dev = dev; + ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area); + ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE; + status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq, + ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT); + if (status) + goto exit; + status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size, + I40IW_SHADOWAREA_MASK); + if (status) + goto exit; + ccq->sc_cq.back_cq = (void *)ccq; + /* populate the ccq init info */ + info.cq_base = ccq->mem_cq.va; + info.cq_pa = ccq->mem_cq.pa; + info.num_elem = IW_CCQ_SIZE; + info.shadow_area = mem.va; + info.shadow_area_pa = mem.pa; + info.ceqe_mask = false; + info.ceq_id_valid = true; + info.shadow_read_threshold = 16; + status = dev->ccq_ops->ccq_init(dev->ccq, &info); + if (!status) + status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true); +exit: + if (status) + 
i40iw_free_dma_mem(dev->hw, &ccq->mem_cq); + return status; +} + +/** + * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq + * @iwdev: iwarp device + * @msix_vec: interrupt vector information + * @iwceq: ceq associated with the vector + * @ceq_id: the id number of the iwceq + * + * Allocate interrupt resources and enable irq handling + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev, + struct i40iw_ceq *iwceq, + u32 ceq_id, + struct i40iw_msix_vector *msix_vec) +{ + enum i40iw_status_code status; + + if (iwdev->msix_shared && !ceq_id) { + tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev); + status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev); + } else { + tasklet_init(&iwceq->dpc_tasklet, i40iw_ceq_dpc, (unsigned long)iwceq); + status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq); + } + + if (status) { + i40iw_pr_err("ceq irq config fail\n"); + return I40IW_ERR_CONFIG; + } + msix_vec->ceq_id = ceq_id; + msix_vec->cpu_affinity = 0; + + return 0; +} + +/** + * i40iw_create_ceq - create completion event queue + * @iwdev: iwarp device + * @iwceq: pointer to the ceq resources to be created + * @ceq_id: the id number of the iwceq + * + * Return 0, if the ceq and the resources associated with it + * are successfully created, otherwise return error + */ +static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev, + struct i40iw_ceq *iwceq, + u32 ceq_id) +{ + enum i40iw_status_code status; + struct i40iw_ceq_init_info info; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + u64 scratch; + + memset(&info, 0, sizeof(info)); + info.ceq_id = ceq_id; + iwceq->iwdev = iwdev; + iwceq->mem.size = sizeof(struct i40iw_ceqe) * + iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt; + status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size, + I40IW_CEQ_ALIGNMENT); + if (status) + goto exit; + info.ceq_id = ceq_id; + info.ceqe_base = iwceq->mem.va; + info.ceqe_pa = iwceq->mem.pa; + + info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt; + iwceq->sc_ceq.ceq_id = ceq_id; + info.dev = dev; + scratch = (uintptr_t)&iwdev->cqp.sc_cqp; + status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info); + if (!status) + status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch); + +exit: + if (status) + i40iw_free_dma_mem(dev->hw, &iwceq->mem); + return status; +} + +void i40iw_request_reset(struct i40iw_device *iwdev) +{ + struct i40e_info *ldev = iwdev->ldev; + + ldev->ops->request_reset(ldev, iwdev->client, 1); +} + +/** + * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources + * @iwdev: iwarp device + * @ldev: i40e lan device + * + * Allocate a list for all device completion event queues + * Create the ceq's and configure their msix interrupt vectors + * Return 0, if at least one ceq is successfully set up, otherwise return error + */ +static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev, + struct i40e_info *ldev) +{ + u32 i; + u32 ceq_id; + struct i40iw_ceq *iwceq; + struct i40iw_msix_vector *msix_vec; + enum i40iw_status_code status = 0; + u32 num_ceqs; + + if (ldev && ldev->ops && ldev->ops->setup_qvlist) { + status = ldev->ops->setup_qvlist(ldev, &i40iw_client, + iwdev->iw_qvlist); + if (status) + goto exit; + } else { + status = I40IW_ERR_BAD_PTR; + goto exit; + } + + num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs); + iwdev->ceqlist = 
kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL); + if (!iwdev->ceqlist) { + status = I40IW_ERR_NO_MEMORY; + goto exit; + } + i = (iwdev->msix_shared) ? 0 : 1; + for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) { + iwceq = &iwdev->ceqlist[ceq_id]; + status = i40iw_create_ceq(iwdev, iwceq, ceq_id); + if (status) { + i40iw_pr_err("create ceq status = %d\n", status); + break; + } + + msix_vec = &iwdev->iw_msixtbl[i]; + iwceq->irq = msix_vec->irq; + iwceq->msix_idx = msix_vec->idx; + status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec); + if (status) { + i40iw_destroy_ceq(iwdev, iwceq, false); + break; + } + i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx); + iwdev->ceqs_count++; + } + +exit: + if (status) { + if (!iwdev->ceqs_count) { + kfree(iwdev->ceqlist); + iwdev->ceqlist = NULL; + } else { + status = 0; + } + } + return status; +} + +/** + * i40iw_configure_aeq_vector - set up the msix vector for aeq + * @iwdev: iwarp device + * + * Allocate interrupt resources and enable irq handling + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev) +{ + struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl; + u32 ret = 0; + + if (!iwdev->msix_shared) { + tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev); + ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev); + } + if (ret) { + i40iw_pr_err("aeq irq config fail\n"); + return I40IW_ERR_CONFIG; + } + + return 0; +} + +/** + * i40iw_create_aeq - create async event queue + * @iwdev: iwarp device + * + * Return 0, if the aeq and the resources associated with it + * are successfully created, otherwise return error + */ +static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev) +{ + enum i40iw_status_code status; + struct i40iw_aeq_init_info info; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_aeq *aeq = &iwdev->aeq; + u64 scratch = 0; + u32 aeq_size; + + aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt + + iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt; + memset(&info, 0, sizeof(info)); + aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size; + status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size, + I40IW_AEQ_ALIGNMENT); + if (status) + goto exit; + + info.aeqe_base = aeq->mem.va; + info.aeq_elem_pa = aeq->mem.pa; + info.elem_cnt = aeq_size; + info.dev = dev; + status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info); + if (status) + goto exit; + status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1); + if (!status) + status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq); +exit: + if (status) + i40iw_free_dma_mem(dev->hw, &aeq->mem); + return status; +} + +/** + * i40iw_setup_aeq - set up the device aeq + * @iwdev: iwarp device + * + * Create the aeq and configure its msix interrupt vector + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + enum i40iw_status_code status; + + status = i40iw_create_aeq(iwdev); + if (status) + return status; + + status = i40iw_configure_aeq_vector(iwdev); + if (status) { + i40iw_destroy_aeq(iwdev, false); + return status; + } + + if (!iwdev->msix_shared) + i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx); + return 0; +} + +/** + * i40iw_initialize_ilq - create iwarp local queue for cm + * @iwdev: iwarp device + * + * Return 0 if successful, otherwise return error + */ +static enum 
i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev) +{ + struct i40iw_puda_rsrc_info info; + enum i40iw_status_code status; + + info.type = I40IW_PUDA_RSRC_TYPE_ILQ; + info.cq_id = 1; + info.qp_id = 0; + info.count = 1; + info.pd_id = 1; + info.sq_size = 8192; + info.rq_size = 8192; + info.buf_size = 1024; + info.tx_buf_cnt = 16384; + info.mss = iwdev->mss; + info.receive = i40iw_receive_ilq; + info.xmit_complete = i40iw_free_sqbuf; + status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info); + if (status) + i40iw_pr_err("ilq create fail\n"); + return status; +} + +/** + * i40iw_initialize_ieq - create iwarp exception queue + * @iwdev: iwarp device + * + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev) +{ + struct i40iw_puda_rsrc_info info; + enum i40iw_status_code status; + + info.type = I40IW_PUDA_RSRC_TYPE_IEQ; + info.cq_id = 2; + info.qp_id = iwdev->sc_dev.exception_lan_queue; + info.count = 1; + info.pd_id = 2; + info.sq_size = 8192; + info.rq_size = 8192; + info.buf_size = 2048; + info.mss = iwdev->mss; + info.tx_buf_cnt = 16384; + status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info); + if (status) + i40iw_pr_err("ieq create fail\n"); + return status; +} + +/** + * i40iw_hmc_setup - create hmc objects for the device + * @iwdev: iwarp device + * + * Set up the device private memory space for the number and size of + * the hmc objects and create the objects + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev) +{ + enum i40iw_status_code status; + + iwdev->sd_type = I40IW_SD_TYPE_DIRECT; + status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT); + if (status) + goto exit; + status = i40iw_create_hmc_objs(iwdev, true); + if (status) + goto exit; + iwdev->init_state = HMC_OBJS_CREATED; +exit: + return status; +} + +/** + * i40iw_del_init_mem - deallocate memory resources + * @iwdev: iwarp device + */ +static void i40iw_del_init_mem(struct i40iw_device *iwdev) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + + i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem); + kfree(dev->hmc_info->sd_table.sd_entry); + dev->hmc_info->sd_table.sd_entry = NULL; + kfree(iwdev->mem_resources); + iwdev->mem_resources = NULL; + kfree(iwdev->ceqlist); + iwdev->ceqlist = NULL; + kfree(iwdev->iw_msixtbl); + iwdev->iw_msixtbl = NULL; + kfree(iwdev->hmc_info_mem); + iwdev->hmc_info_mem = NULL; +} + +/** + * i40iw_del_macip_entry - remove a mac ip address entry from the hw table + * @iwdev: iwarp device + * @idx: the index of the mac ip address to delete + */ +static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx) +{ + struct i40iw_cqp *iwcqp = &iwdev->cqp; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + enum i40iw_status_code status = 0; + + cqp_request = i40iw_get_cqp_request(iwcqp, true); + if (!cqp_request) { + i40iw_pr_err("cqp_request memory failed\n"); + return; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY; + cqp_info->post_sq = 1; + cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp; + cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request; + cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx; + cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Del MAC Ip entry fail"); +} + +/** + * 
i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table + * @iwdev: iwarp device + * @mac_addr: pointer to mac address + * @idx: the index of the mac ip address to add + */ +static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev, + u8 *mac_addr, + u8 idx) +{ + struct i40iw_local_mac_ipaddr_entry_info *info; + struct i40iw_cqp *iwcqp = &iwdev->cqp; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + enum i40iw_status_code status = 0; + + cqp_request = i40iw_get_cqp_request(iwcqp, true); + if (!cqp_request) { + i40iw_pr_err("cqp_request memory failed\n"); + return I40IW_ERR_NO_MEMORY; + } + + cqp_info = &cqp_request->info; + + cqp_info->post_sq = 1; + info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info; + ether_addr_copy(info->mac_addr, mac_addr); + info->entry_idx = idx; + cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request; + cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY; + cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp; + cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Add MAC Ip entry fail"); + return status; +} + +/** + * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry + * @iwdev: iwarp device + * @mac_ip_tbl_idx: the index of the new mac ip address + * + * Allocate a mac ip address entry and update the mac_ip_tbl_idx + * to hold the index of the newly created mac ip address + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev, + u16 *mac_ip_tbl_idx) +{ + struct i40iw_cqp *iwcqp = &iwdev->cqp; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + enum i40iw_status_code status = 0; + + cqp_request = i40iw_get_cqp_request(iwcqp, true); + if (!cqp_request) { + i40iw_pr_err("cqp_request memory failed\n"); + return I40IW_ERR_NO_MEMORY; + } + + /* increment refcount, because we need the cqp request ret value */ + atomic_inc(&cqp_request->refcount); + + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY; + cqp_info->post_sq = 1; + cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp; + cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (!status) + *mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val; + else + i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail"); + /* decrement refcount and free the cqp request, if no longer used */ + i40iw_put_cqp_request(iwcqp, cqp_request); + return status; +} + +/** + * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry + * @iwdev: iwarp device + * @macaddr: pointer to mac address + * + * Allocate a mac ip address entry and add it to the hw table + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev, + u8 *macaddr) +{ + enum i40iw_status_code status; + + status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx); + if (!status) { + status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr, + (u8)iwdev->mac_ip_table_idx); + if (!status) + status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr, + (u8)iwdev->mac_ip_table_idx); + else + i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); + } + return status; +} + +/** + * i40iw_add_ipv6_addr - add ipv6 
address to the hw arp table + * @iwdev: iwarp device + */ +static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev) +{ + struct net_device *ip_dev; + struct inet6_dev *idev; + struct inet6_ifaddr *ifp; + __be32 local_ipaddr6[4]; + + rcu_read_lock(); + for_each_netdev_rcu(&init_net, ip_dev) { + if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) && + (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) || + (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) { + idev = __in6_dev_get(ip_dev); + if (!idev) { + i40iw_pr_err("ipv6 inet device not found\n"); + break; + } + list_for_each_entry(ifp, &idev->addr_list, if_list) { + i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr, + rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr); + i40iw_copy_ip_ntohl(local_ipaddr6, + ifp->addr.in6_u.u6_addr32); + i40iw_manage_arp_cache(iwdev, + ip_dev->dev_addr, + local_ipaddr6, + false, + I40IW_ARP_ADD); + } + } + } + rcu_read_unlock(); +} + +/** + * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table + * @iwdev: iwarp device + */ +static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev) +{ + struct net_device *dev; + struct in_device *idev; + bool got_lock = true; + u32 ip_addr; + + if (!rtnl_trylock()) + got_lock = false; + + for_each_netdev(&init_net, dev) { + if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) && + (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) || + (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) { + idev = in_dev_get(dev); + for_ifa(idev) { + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, + "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address, + rdma_vlan_dev_vlan_id(dev), dev->dev_addr); + + ip_addr = ntohl(ifa->ifa_address); + i40iw_manage_arp_cache(iwdev, + dev->dev_addr, + &ip_addr, + true, + I40IW_ARP_ADD); + } + endfor_ifa(idev); + in_dev_put(idev); + } + } + if (got_lock) + rtnl_unlock(); +} + +/** + * i40iw_add_mac_ip - add mac and ip addresses + * @iwdev: iwarp device + * + * Create and add a mac ip address entry to the hw table and + * ipv4/ipv6 addresses to the arp cache + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev) +{ + struct net_device *netdev = iwdev->netdev; + enum i40iw_status_code status; + + status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr); + if (status) + return status; + i40iw_add_ipv4_addr(iwdev); + i40iw_add_ipv6_addr(iwdev); + return 0; +} + +/** + * i40iw_wait_pe_ready - Check if firmware is ready + * @hw: provides access to registers + */ +static void i40iw_wait_pe_ready(struct i40iw_hw *hw) +{ + u32 statusfw; + u32 statuscpu0; + u32 statuscpu1; + u32 statuscpu2; + u32 retrycount = 0; + + do { + statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS); + i40iw_pr_info("[%04d] fm load status[x%04X]\n", __LINE__, statusfw); + statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0); + i40iw_pr_info("[%04d] CSR_CQP status[x%04X]\n", __LINE__, statuscpu0); + statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1); + i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n", + __LINE__, statuscpu1); + statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2); + i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n", + __LINE__, statuscpu2); + if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80)) + break; /* SUCCESS */ + mdelay(1000); + retrycount++; + } while (retrycount < 14); + i40iw_wr32(hw, 0xb4040, 0x4C104C5); +} + +/** + * i40iw_initialize_dev - initialize device + * @iwdev: iwarp device + * @ldev: lan device information + * + * Allocate memory for the 
hmc objects and initialize iwdev + * Return 0 if successful, otherwise clean up the resources + * and return error + */ +static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev, + struct i40e_info *ldev) +{ + enum i40iw_status_code status; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_device_init_info info; + struct i40iw_dma_mem mem; + u32 size; + + memset(&info, 0, sizeof(info)); + size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) + + (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX); + iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL); + if (!iwdev->hmc_info_mem) { + i40iw_pr_err("memory alloc fail\n"); + return I40IW_ERR_NO_MEMORY; + } + iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem; + dev->hmc_info = &iwdev->hw.hmc; + dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1); + status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE, + I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK); + if (status) + goto exit; + info.fpm_query_buf_pa = mem.pa; + info.fpm_query_buf = mem.va; + status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE, + I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK); + if (status) + goto exit; + info.fpm_commit_buf_pa = mem.pa; + info.fpm_commit_buf = mem.va; + info.hmc_fn_id = ldev->fid; + info.is_pf = (ldev->ftype) ? false : true; + info.bar0 = ldev->hw_addr; + info.hw = &iwdev->hw; + info.debug_mask = debug; + info.qs_handle = ldev->params.qos.prio_qos[0].qs_handle; + info.exception_lan_queue = 1; + info.vchnl_send = i40iw_virtchnl_send; + status = i40iw_device_init(&iwdev->sc_dev, &info); +exit: + if (status) { + kfree(iwdev->hmc_info_mem); + iwdev->hmc_info_mem = NULL; + } + return status; +} + +/** + * i40iw_register_notifiers - register tcp ip notifiers + */ +static void i40iw_register_notifiers(void) +{ + if (!i40iw_notifiers_registered) { + register_inetaddr_notifier(&i40iw_inetaddr_notifier); + register_inet6addr_notifier(&i40iw_inetaddr6_notifier); + register_netevent_notifier(&i40iw_net_notifier); + } + i40iw_notifiers_registered++; +} + +/** + * i40iw_save_msix_info - copy msix vector information to iwarp device + * @iwdev: iwarp device + * @ldev: lan device information + * + * Allocate iwdev msix table and copy the ldev msix info to the table + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev, + struct i40e_info *ldev) +{ + struct i40e_qvlist_info *iw_qvlist; + struct i40e_qv_info *iw_qvinfo; + u32 ceq_idx; + u32 i; + u32 size; + + iwdev->msix_count = ldev->msix_count; + + size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count; + size += sizeof(struct i40e_qvlist_info); + size += sizeof(struct i40e_qv_info) * iwdev->msix_count - 1; + iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL); + + if (!iwdev->iw_msixtbl) + return I40IW_ERR_NO_MEMORY; + iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]); + iw_qvlist = iwdev->iw_qvlist; + iw_qvinfo = iw_qvlist->qv_info; + iw_qvlist->num_vectors = iwdev->msix_count; + if (iwdev->msix_count <= num_online_cpus()) + iwdev->msix_shared = true; + for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) { + iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry; + iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector; + if (i == 0) { + iw_qvinfo->aeq_idx = 0; + if (iwdev->msix_shared) + iw_qvinfo->ceq_idx = ceq_idx++; + else + iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX; + } else { + 
iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX; + iw_qvinfo->ceq_idx = ceq_idx++; + } + iw_qvinfo->itr_idx = 3; + iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx; + } + return 0; +} + +/** + * i40iw_deinit_device - clean up the device resources + * @iwdev: iwarp device + * @reset: true if called before reset + * @del_hdl: true if delete hdl entry + * + * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses, + * destroy the device queues and free the pble and the hmc objects + */ +static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del_hdl) +{ + struct i40e_info *ldev = iwdev->ldev; + + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + + i40iw_pr_info("state = %d\n", iwdev->init_state); + + switch (iwdev->init_state) { + case RDMA_DEV_REGISTERED: + iwdev->iw_status = 0; + i40iw_port_ibevent(iwdev); + i40iw_destroy_rdma_device(iwdev->iwibdev); + /* fallthrough */ + case IP_ADDR_REGISTERED: + if (!reset) + i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); + /* fallthrough */ + case INET_NOTIFIER: + if (i40iw_notifiers_registered > 0) { + i40iw_notifiers_registered--; + unregister_netevent_notifier(&i40iw_net_notifier); + unregister_inetaddr_notifier(&i40iw_inetaddr_notifier); + unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); + } + /* fallthrough */ + case CEQ_CREATED: + i40iw_dele_ceqs(iwdev, reset); + /* fallthrough */ + case AEQ_CREATED: + i40iw_destroy_aeq(iwdev, reset); + /* fallthrough */ + case IEQ_CREATED: + i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_IEQ, reset); + /* fallthrough */ + case ILQ_CREATED: + i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_ILQ, reset); + /* fallthrough */ + case CCQ_CREATED: + i40iw_destroy_ccq(iwdev, reset); + /* fallthrough */ + case PBLE_CHUNK_MEM: + i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); + /* fallthrough */ + case HMC_OBJS_CREATED: + i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset); + /* fallthrough */ + case CQP_CREATED: + i40iw_destroy_cqp(iwdev, !reset); + /* fallthrough */ + case INITIAL_STATE: + i40iw_cleanup_cm_core(&iwdev->cm_core); + if (dev->is_pf) + i40iw_hw_stats_del_timer(dev); + + i40iw_del_init_mem(iwdev); + break; + case INVALID_STATE: + /* fallthrough */ + default: + i40iw_pr_err("bad init_state = %d\n", iwdev->init_state); + break; + } + + if (del_hdl) + i40iw_del_handler(i40iw_find_i40e_handler(ldev)); + kfree(iwdev->hdl); +} + +/** + * i40iw_setup_init_state - set up the initial device struct + * @hdl: handler for iwarp device - one per instance + * @ldev: lan device information + * @client: iwarp client information, provided during registration + * + * Initialize the iwarp device and its hdl information + * using the ldev and client information + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl, + struct i40e_info *ldev, + struct i40e_client *client) +{ + struct i40iw_device *iwdev = &hdl->device; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + enum i40iw_status_code status; + + memcpy(&hdl->ldev, ldev, sizeof(*ldev)); + if (resource_profile == 1) + resource_profile = 2; + + iwdev->mpa_version = mpa_version; + iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ? + (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT : + I40IW_HMC_PROFILE_DEFAULT; + iwdev->max_rdma_vfs = + (iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0; + iwdev->netdev = ldev->netdev; + hdl->client = client; + iwdev->mss = (!ldev->params.mtu) ? 
I40IW_DEFAULT_MSS : ldev->params.mtu - I40IW_MTU_TO_MSS; + if (!ldev->ftype) + iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET; + else + iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET; + + status = i40iw_save_msix_info(iwdev, ldev); + if (status) + goto exit; + iwdev->hw.dev_context = (void *)ldev->pcidev; + iwdev->hw.hw_addr = ldev->hw_addr; + status = i40iw_allocate_dma_mem(&iwdev->hw, + &iwdev->obj_mem, 8192, 4096); + if (status) + goto exit; + iwdev->obj_next = iwdev->obj_mem; + iwdev->push_mode = push_mode; + init_waitqueue_head(&iwdev->vchnl_waitq); + status = i40iw_initialize_dev(iwdev, ldev); +exit: + if (status) { + kfree(iwdev->iw_msixtbl); + i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem); + iwdev->iw_msixtbl = NULL; + } + return status; +} + +/** + * i40iw_open - client interface operation open for iwarp/uda device + * @ldev: lan device information + * @client: iwarp client information, provided during registration + * + * Called by the lan driver during the processing of client register + * Create device resources, set up queues, pble and hmc objects and + * register the device with the ib verbs interface + * Return 0 if successful, otherwise return error + */ +static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client) +{ + struct i40iw_device *iwdev; + struct i40iw_sc_dev *dev; + enum i40iw_status_code status; + struct i40iw_handler *hdl; + + hdl = kzalloc(sizeof(*hdl), GFP_KERNEL); + if (!hdl) + return -ENOMEM; + iwdev = &hdl->device; + iwdev->hdl = hdl; + dev = &iwdev->sc_dev; + i40iw_setup_cm_core(iwdev); + + dev->back_dev = (void *)iwdev; + iwdev->ldev = &hdl->ldev; + iwdev->client = client; + mutex_init(&iwdev->pbl_mutex); + i40iw_add_handler(hdl); + + do { + status = i40iw_setup_init_state(hdl, ldev, client); + if (status) + break; + iwdev->init_state = INITIAL_STATE; + if (dev->is_pf) + i40iw_wait_pe_ready(dev->hw); + status = i40iw_create_cqp(iwdev); + if (status) + break; + iwdev->init_state = CQP_CREATED; + status = i40iw_hmc_setup(iwdev); + if (status) + break; + status = i40iw_create_ccq(iwdev); + if (status) + break; + iwdev->init_state = CCQ_CREATED; + status = i40iw_initialize_ilq(iwdev); + if (status) + break; + iwdev->init_state = ILQ_CREATED; + status = i40iw_initialize_ieq(iwdev); + if (status) + break; + iwdev->init_state = IEQ_CREATED; + status = i40iw_setup_aeq(iwdev); + if (status) + break; + iwdev->init_state = AEQ_CREATED; + status = i40iw_setup_ceqs(iwdev, ldev); + if (status) + break; + iwdev->init_state = CEQ_CREATED; + status = i40iw_initialize_hw_resources(iwdev); + if (status) + break; + dev->ccq_ops->ccq_arm(dev->ccq); + status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc); + if (status) + break; + iwdev->virtchnl_wq = create_singlethread_workqueue("iwvch"); + i40iw_register_notifiers(); + iwdev->init_state = INET_NOTIFIER; + status = i40iw_add_mac_ip(iwdev); + if (status) + break; + iwdev->init_state = IP_ADDR_REGISTERED; + if (i40iw_register_rdma_device(iwdev)) { + i40iw_pr_err("register rdma device fail\n"); + break; + }; + + iwdev->init_state = RDMA_DEV_REGISTERED; + iwdev->iw_status = 1; + i40iw_port_ibevent(iwdev); + i40iw_pr_info("i40iw_open completed\n"); + return 0; + } while (0); + + i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state); + i40iw_deinit_device(iwdev, false, false); + return -ERESTART; +} + +/** + * i40iw_l2param_change : handle qs handles for qos and mss change + * @ldev: lan device information + * @client: 
client for paramater change + * @params: new parameters from L2 + */ +static void i40iw_l2param_change(struct i40e_info *ldev, + struct i40e_client *client, + struct i40e_params *params) +{ + struct i40iw_handler *hdl; + struct i40iw_device *iwdev; + + hdl = i40iw_find_i40e_handler(ldev); + if (!hdl) + return; + + iwdev = &hdl->device; + if (params->mtu) + iwdev->mss = params->mtu - I40IW_MTU_TO_MSS; +} + +/** + * i40iw_close - client interface operation close for iwarp/uda device + * @ldev: lan device information + * @client: client to close + * + * Called by the lan driver during the processing of client unregister + * Destroy and clean up the driver resources + */ +static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset) +{ + struct i40iw_device *iwdev; + struct i40iw_handler *hdl; + + hdl = i40iw_find_i40e_handler(ldev); + if (!hdl) + return; + + iwdev = &hdl->device; + destroy_workqueue(iwdev->virtchnl_wq); + i40iw_deinit_device(iwdev, reset, true); +} + +/** + * i40iw_vf_reset - process VF reset + * @ldev: lan device information + * @client: client interface instance + * @vf_id: virtual function id + * + * Called when a VF is reset by the PF + * Destroy and clean up the VF resources + */ +static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id) +{ + struct i40iw_handler *hdl; + struct i40iw_sc_dev *dev; + struct i40iw_hmc_fcn_info hmc_fcn_info; + struct i40iw_virt_mem vf_dev_mem; + struct i40iw_vfdev *tmp_vfdev; + unsigned int i; + unsigned long flags; + + hdl = i40iw_find_i40e_handler(ldev); + if (!hdl) + return; + + dev = &hdl->device.sc_dev; + + for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) { + if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id)) + continue; + + /* free all resources allocated on behalf of vf */ + tmp_vfdev = dev->vf_dev[i]; + spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags); + dev->vf_dev[i] = NULL; + spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags); + i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false); + /* remove vf hmc function */ + memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info)); + hmc_fcn_info.vf_id = vf_id; + hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx; + hmc_fcn_info.free_fcn = true; + i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info); + /* free vf_dev */ + vf_dev_mem.va = tmp_vfdev; + vf_dev_mem.size = sizeof(struct i40iw_vfdev) + + sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX; + i40iw_free_virt_mem(dev->hw, &vf_dev_mem); + break; + } +} + +/** + * i40iw_vf_enable - enable a number of VFs + * @ldev: lan device information + * @client: client interface instance + * @num_vfs: number of VFs for the PF + * + * Called when the number of VFs changes + */ +static void i40iw_vf_enable(struct i40e_info *ldev, + struct i40e_client *client, + u32 num_vfs) +{ + struct i40iw_handler *hdl; + + hdl = i40iw_find_i40e_handler(ldev); + if (!hdl) + return; + + if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT) + hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT; + else + hdl->device.max_enabled_vfs = num_vfs; +} + +/** + * i40iw_vf_capable - check if VF capable + * @ldev: lan device information + * @client: client interface instance + * @vf_id: virtual function id + * + * Return 1 if a VF slot is available or if VF is already RDMA enabled + * Return 0 otherwise + */ +static int i40iw_vf_capable(struct i40e_info *ldev, + struct i40e_client *client, + u32 vf_id) +{ + struct i40iw_handler *hdl; + struct i40iw_sc_dev *dev; + unsigned int i; + + hdl = 
i40iw_find_i40e_handler(ldev); + if (!hdl) + return 0; + + dev = &hdl->device.sc_dev; + + for (i = 0; i < hdl->device.max_enabled_vfs; i++) { + if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id)) + return 1; + } + + return 0; +} + +/** + * i40iw_virtchnl_receive - receive a message through the virtual channel + * @ldev: lan device information + * @client: client interface instance + * @vf_id: virtual function id associated with the message + * @msg: message buffer pointer + * @len: length of the message + * + * Invoke virtual channel receive operation for the given msg + * Return 0 if successful, otherwise return error + */ +static int i40iw_virtchnl_receive(struct i40e_info *ldev, + struct i40e_client *client, + u32 vf_id, + u8 *msg, + u16 len) +{ + struct i40iw_handler *hdl; + struct i40iw_sc_dev *dev; + struct i40iw_device *iwdev; + int ret_code = I40IW_NOT_SUPPORTED; + + if (!len || !msg) + return I40IW_ERR_PARAM; + + hdl = i40iw_find_i40e_handler(ldev); + if (!hdl) + return I40IW_ERR_PARAM; + + dev = &hdl->device.sc_dev; + iwdev = dev->back_dev; + + i40iw_debug(dev, I40IW_DEBUG_VIRT, "msg %p, message length %u\n", msg, len); + + if (dev->vchnl_if.vchnl_recv) { + ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len); + if (!dev->is_pf) { + atomic_dec(&iwdev->vchnl_msgs); + wake_up(&iwdev->vchnl_waitq); + } + } + return ret_code; +} + +/** + * i40iw_virtchnl_send - send a message through the virtual channel + * @dev: iwarp device + * @vf_id: virtual function id associated with the message + * @msg: virtual channel message buffer pointer + * @len: length of the message + * + * Invoke virtual channel send operation for the given msg + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev, + u32 vf_id, + u8 *msg, + u16 len) +{ + struct i40iw_device *iwdev; + struct i40e_info *ldev; + enum i40iw_status_code ret_code = I40IW_ERR_BAD_PTR; + + if (!dev || !dev->back_dev) + return ret_code; + + iwdev = dev->back_dev; + ldev = iwdev->ldev; + + if (ldev && ldev->ops && ldev->ops->virtchnl_send) + ret_code = ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len); + + return ret_code; +} + +/* client interface functions */ +static struct i40e_client_ops i40e_ops = { + .open = i40iw_open, + .close = i40iw_close, + .l2_param_change = i40iw_l2param_change, + .virtchnl_receive = i40iw_virtchnl_receive, + .vf_reset = i40iw_vf_reset, + .vf_enable = i40iw_vf_enable, + .vf_capable = i40iw_vf_capable +}; + +/** + * i40iw_init_module - driver initialization function + * + * First function to call when the driver is loaded + * Register the driver as i40e client and port mapper client + */ +static int __init i40iw_init_module(void) +{ + int ret; + + memset(&i40iw_client, 0, sizeof(i40iw_client)); + i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR; + i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR; + i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD; + i40iw_client.ops = &i40e_ops; + memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH); + i40iw_client.type = I40E_CLIENT_IWARP; + spin_lock_init(&i40iw_handler_lock); + ret = i40e_register_client(&i40iw_client); + return ret; +} + +/** + * i40iw_exit_module - driver exit clean up function + * + * The function is called just before the driver is unloaded + * Unregister the driver as i40e client and port mapper client + */ +static void __exit i40iw_exit_module(void) +{ + i40e_unregister_client(&i40iw_client); +} + 
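The two functions above are the entire i40e client glue: i40iw_init_module() fills a struct i40e_client (interface version, client name, I40E_CLIENT_IWARP type, and the i40e_client_ops table) and hands it to i40e_register_client(), after which the LAN driver invokes .open()/.close() per capable port; i40iw_exit_module() simply unregisters. A minimal sketch of that registration pattern follows, assuming only the client interface already used in this file; every "example_*" identifier, the callback bodies, the "i40e_client.h" include path and the MODULE_LICENSE string are illustrative placeholders, not part of this patch.

/*
 * Illustrative i40e client skeleton; "example_*" names and the include path
 * are assumptions, only the i40e entry points mirror the code above.
 */
#include <linux/module.h>
#include "i40e_client.h"

static char example_name[I40E_CLIENT_STR_LENGTH] = "example";
static struct i40e_client example_client;

/* called by the i40e LAN driver for each port after registration */
static int example_open(struct i40e_info *ldev, struct i40e_client *client)
{
        /* allocate per-port state, create queues, register with ib_core */
        return 0;
}

/* called on client unregister or LAN driver teardown */
static void example_close(struct i40e_info *ldev, struct i40e_client *client,
                          bool reset)
{
        /* undo example_open() in reverse order; skip HW access on reset */
}

static struct i40e_client_ops example_ops = {
        .open = example_open,
        .close = example_close,
        /* .l2_param_change, .virtchnl_receive and .vf_* hooks omitted here */
};

static int __init example_init_module(void)
{
        example_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;
        example_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;
        example_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;
        memcpy(example_client.name, example_name, I40E_CLIENT_STR_LENGTH);
        example_client.type = I40E_CLIENT_IWARP;
        example_client.ops = &example_ops;

        return i40e_register_client(&example_client);
}

static void __exit example_exit_module(void)
{
        i40e_unregister_client(&example_client);
}

module_init(example_init_module);
module_exit(example_exit_module);
MODULE_LICENSE("Dual BSD/GPL");

In i40iw itself, .open carries the staged bring-up shown earlier (CQP, HMC objects, CCQ, ILQ/IEQ, AEQ, CEQs, HW resources, PBLE pool, notifiers, RDMA registration), and .close funnels into i40iw_deinit_device(), which unwinds exactly as far as init_state reached.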
+module_init(i40iw_init_module); +module_exit(i40iw_exit_module); diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h new file mode 100644 index 000000000..7e2049351 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h @@ -0,0 +1,215 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#ifndef I40IW_OSDEP_H +#define I40IW_OSDEP_H + +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/bitops.h> +#include <net/tcp.h> +#include <crypto/hash.h> +/* get readq/writeq support for 32 bit kernels, use the low-first version */ +#include <linux/io-64-nonatomic-lo-hi.h> + +#define STATS_TIMER_DELAY 1000 + +static inline void set_64bit_val(u64 *wqe_words, u32 byte_index, u64 value) +{ + wqe_words[byte_index >> 3] = value; +} + +/** + * set_32bit_val - set 32 value to hw wqe + * @wqe_words: wqe addr to write + * @byte_index: index in wqe + * @value: value to write + **/ +static inline void set_32bit_val(u32 *wqe_words, u32 byte_index, u32 value) +{ + wqe_words[byte_index >> 2] = value; +} + +/** + * get_64bit_val - read 64 bit value from wqe + * @wqe_words: wqe addr + * @byte_index: index to read from + * @value: read value + **/ +static inline void get_64bit_val(u64 *wqe_words, u32 byte_index, u64 *value) +{ + *value = wqe_words[byte_index >> 3]; +} + +/** + * get_32bit_val - read 32 bit value from wqe + * @wqe_words: wqe addr + * @byte_index: index to reaad from + * @value: return 32 bit value + **/ +static inline void get_32bit_val(u32 *wqe_words, u32 byte_index, u32 *value) +{ + *value = wqe_words[byte_index >> 2]; +} + +struct i40iw_dma_mem { + void *va; + dma_addr_t pa; + u32 size; +} __packed; + +struct i40iw_virt_mem { + void *va; + u32 size; +} __packed; + +#define i40iw_debug(h, m, s, ...) 
\ +do { \ + if (((m) & (h)->debug_mask)) \ + pr_info("i40iw " s, ##__VA_ARGS__); \ +} while (0) + +#define i40iw_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT) + +#define I40E_GLHMC_VFSDCMD(_i) (0x000C8000 + ((_i) * 4)) \ + /* _i=0...31 */ +#define I40E_GLHMC_VFSDCMD_MAX_INDEX 31 +#define I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT 0 +#define I40E_GLHMC_VFSDCMD_PMSDIDX_MASK (0xFFF \ + << I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT) +#define I40E_GLHMC_VFSDCMD_PF_SHIFT 16 +#define I40E_GLHMC_VFSDCMD_PF_MASK (0xF << I40E_GLHMC_VFSDCMD_PF_SHIFT) +#define I40E_GLHMC_VFSDCMD_VF_SHIFT 20 +#define I40E_GLHMC_VFSDCMD_VF_MASK (0x1FF << I40E_GLHMC_VFSDCMD_VF_SHIFT) +#define I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT 29 +#define I40E_GLHMC_VFSDCMD_PMF_TYPE_MASK (0x3 \ + << I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT) +#define I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT 31 +#define I40E_GLHMC_VFSDCMD_PMSDWR_MASK (0x1 << I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT) + +#define I40E_GLHMC_VFSDDATAHIGH(_i) (0x000C8200 + ((_i) * 4)) \ + /* _i=0...31 */ +#define I40E_GLHMC_VFSDDATAHIGH_MAX_INDEX 31 +#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT 0 +#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF \ + << I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT) + +#define I40E_GLHMC_VFSDDATALOW(_i) (0x000C8100 + ((_i) * 4)) \ + /* _i=0...31 */ +#define I40E_GLHMC_VFSDDATALOW_MAX_INDEX 31 +#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT 0 +#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_MASK (0x1 \ + << I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT) +#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT 1 +#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_MASK (0x1 \ + << I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT) +#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT 2 +#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_MASK (0x3FF \ + << I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT) +#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT 12 +#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_MASK (0xFFFFF \ + << I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT) + +#define I40E_GLPE_FWLDSTATUS 0x0000D200 +#define I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT 0 +#define I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_MASK (0x1 \ + << I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT) +#define I40E_GLPE_FWLDSTATUS_DONE_SHIFT 1 +#define I40E_GLPE_FWLDSTATUS_DONE_MASK (0x1 << I40E_GLPE_FWLDSTATUS_DONE_SHIFT) +#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT 2 +#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_MASK (0x1 \ + << I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT) +#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT 3 +#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_MASK (0x1 \ + << I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT) +#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT 4 +#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_MASK (0x1 \ + << I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT) + +struct i40iw_sc_dev; +struct i40iw_sc_qp; +struct i40iw_puda_buf; +struct i40iw_puda_completion_info; +struct i40iw_update_sds_info; +struct i40iw_hmc_fcn_info; +struct i40iw_virtchnl_work_info; +struct i40iw_manage_vf_pble_info; +struct i40iw_device; +struct i40iw_hmc_info; +struct i40iw_hw; + +u8 __iomem *i40iw_get_hw_addr(void *dev); +void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp); +enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev); +enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc, void *addr, + u32 length, u32 value); +struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *buf); +void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum); +void i40iw_free_hash_desc(struct shash_desc *); +enum i40iw_status_code 
i40iw_init_hash_desc(struct shash_desc **); +enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info, + struct i40iw_puda_buf *buf); +enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev, + struct i40iw_update_sds_info *info); +enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev, + struct i40iw_hmc_fcn_info *hmcfcninfo); +enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev, + struct i40iw_dma_mem *values_mem, + u8 hmc_fn_id); +enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev, + struct i40iw_dma_mem *values_mem, + u8 hmc_fn_id); +enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev, + struct i40iw_dma_mem *mem); +enum i40iw_status_code i40iw_cqp_manage_vf_pble_bp(struct i40iw_sc_dev *dev, + struct i40iw_manage_vf_pble_info *info); +void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev, + struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx); +void *i40iw_remove_head(struct list_head *list); + +void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len); +void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred); +void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp); +void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp); + +enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev, + struct i40iw_manage_vf_pble_info *info, + bool wait); +struct i40iw_dev_pestat; +void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *); +void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *); +#define i40iw_mmiowb() mmiowb() +void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value); +u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg); +#endif /* _I40IW_OSDEP_H_ */ diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h new file mode 100644 index 000000000..a0b8ca10d --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_p.h @@ -0,0 +1,106 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#ifndef I40IW_P_H +#define I40IW_P_H + +#define PAUSE_TIMER_VALUE 0xFFFF +#define REFRESH_THRESHOLD 0x7FFF +#define HIGH_THRESHOLD 0x800 +#define LOW_THRESHOLD 0x200 +#define ALL_TC2PFC 0xFF + +void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask, + char *desc, u64 *buf, u32 size); +/* init operations */ +enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev, + struct i40iw_device_init_info *info); + +enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *); + +void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp); + +u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch); + +enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp, + struct i40iw_fast_reg_stag_info *info, + bool post_sq); + +/* HMC/FPM functions */ +enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, + u8 hmc_fn_id); + +enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, u8 vf_hmc_fn_id, + u32 *vf_cnt_array); + +/* cqp misc functions */ + +void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp); + +void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info); + +void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info); + +enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp, + struct i40iw_sc_qp *qp, u64 scratch); + +enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp, + struct i40iw_sc_qp *qp, u64 scratch); + +enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(struct i40iw_sc_cqp *cqp, + u64 scratch, u8 hmc_fn_id, + bool post_sq, + bool poll_registers); + +enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count); + +void free_sd_mem(struct i40iw_sc_dev *dev); + +enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev, + struct cqp_commands_info *pcmdinfo); + +enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev); + +/* prototype for functions used for dynamic memory allocation */ +enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw, + struct i40iw_dma_mem *mem, u64 size, + u32 alignment); +void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem); +enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw, + struct i40iw_virt_mem *mem, u32 size); +enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw, + struct i40iw_virt_mem *mem); +u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq); + +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c new file mode 100644 index 000000000..ded853d2f --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c @@ -0,0 +1,618 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. 
You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include "i40iw_status.h" +#include "i40iw_osdep.h" +#include "i40iw_register.h" +#include "i40iw_hmc.h" + +#include "i40iw_d.h" +#include "i40iw_type.h" +#include "i40iw_p.h" + +#include <linux/pci.h> +#include <linux/genalloc.h> +#include <linux/vmalloc.h> +#include "i40iw_pble.h" +#include "i40iw.h" + +struct i40iw_device; +static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc); +static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk); + +/** + * i40iw_destroy_pble_pool - destroy pool during module unload + * @pble_rsrc: pble resources + */ +void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc) +{ + struct list_head *clist; + struct list_head *tlist; + struct i40iw_chunk *chunk; + struct i40iw_pble_pool *pinfo = &pble_rsrc->pinfo; + + if (pinfo->pool) { + list_for_each_safe(clist, tlist, &pinfo->clist) { + chunk = list_entry(clist, struct i40iw_chunk, list); + if (chunk->type == I40IW_VMALLOC) + i40iw_free_vmalloc_mem(dev->hw, chunk); + kfree(chunk); + } + gen_pool_destroy(pinfo->pool); + } +} + +/** + * i40iw_hmc_init_pble - Initialize pble resources during module load + * @dev: i40iw_sc_dev struct + * @pble_rsrc: pble resources + */ +enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc) +{ + struct i40iw_hmc_info *hmc_info; + u32 fpm_idx = 0; + + hmc_info = dev->hmc_info; + pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].base; + /* Now start the pble' on 4k boundary */ + if (pble_rsrc->fpm_base_addr & 0xfff) + fpm_idx = (PAGE_SIZE - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3; + + pble_rsrc->unallocated_pble = + hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt - fpm_idx; + pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3); + + pble_rsrc->pinfo.pool_shift = POOL_SHIFT; + pble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1); + INIT_LIST_HEAD(&pble_rsrc->pinfo.clist); + if (!pble_rsrc->pinfo.pool) + goto error; + + if (add_pble_pool(dev, pble_rsrc)) + goto error; + + return 0; + + error:i40iw_destroy_pble_pool(dev, pble_rsrc); + return I40IW_ERR_NO_MEMORY; +} + +/** + * 
get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address + * @ pble_rsrc: structure containing fpm address + * @ idx: where to return indexes + */ +static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct sd_pd_idx *idx) +{ + idx->sd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_DIRECT_BP_SIZE; + idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE; + idx->rel_pd_idx = (idx->pd_idx % I40IW_HMC_PD_CNT_IN_SD); +} + +/** + * add_sd_direct - add sd direct for pble + * @dev: hardware control device structure + * @pble_rsrc: pble resource ptr + * @info: page info for sd + */ +static enum i40iw_status_code add_sd_direct(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct i40iw_add_page_info *info) +{ + enum i40iw_status_code ret_code = 0; + struct sd_pd_idx *idx = &info->idx; + struct i40iw_chunk *chunk = info->chunk; + struct i40iw_hmc_info *hmc_info = info->hmc_info; + struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry; + u32 offset = 0; + + if (!sd_entry->valid) { + if (dev->is_pf) { + ret_code = i40iw_add_sd_table_entry(dev->hw, hmc_info, + info->idx.sd_idx, + I40IW_SD_TYPE_DIRECT, + I40IW_HMC_DIRECT_BP_SIZE); + if (ret_code) + return ret_code; + chunk->type = I40IW_DMA_COHERENT; + } + } + offset = idx->rel_pd_idx << I40IW_HMC_PAGED_BP_SHIFT; + chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT; + chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset); + chunk->fpm_addr = pble_rsrc->next_fpm_addr; + i40iw_debug(dev, I40IW_DEBUG_PBLE, "chunk_size[%d] = 0x%x vaddr=%p fpm_addr = %llx\n", + chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr); + return 0; +} + +/** + * i40iw_free_vmalloc_mem - free vmalloc during close + * @hw: hw struct + * @chunk: chunk information for vmalloc + */ +static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk) +{ + struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context; + int i; + + if (!chunk->pg_cnt) + goto done; + for (i = 0; i < chunk->pg_cnt; i++) + dma_unmap_page(&pcidev->dev, chunk->dmaaddrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL); + + done: + kfree(chunk->dmaaddrs); + chunk->dmaaddrs = NULL; + vfree(chunk->vaddr); + chunk->vaddr = NULL; + chunk->type = 0; +} + +/** + * i40iw_get_vmalloc_mem - get 2M page for sd + * @hw: hardware address + * @chunk: chunk to adf + * @pg_cnt: #of 4 K pages + */ +static enum i40iw_status_code i40iw_get_vmalloc_mem(struct i40iw_hw *hw, + struct i40iw_chunk *chunk, + int pg_cnt) +{ + struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context; + struct page *page; + u8 *addr; + u32 size; + int i; + + chunk->dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL); + if (!chunk->dmaaddrs) + return I40IW_ERR_NO_MEMORY; + size = PAGE_SIZE * pg_cnt; + chunk->vaddr = vmalloc(size); + if (!chunk->vaddr) { + kfree(chunk->dmaaddrs); + chunk->dmaaddrs = NULL; + return I40IW_ERR_NO_MEMORY; + } + chunk->size = size; + addr = (u8 *)chunk->vaddr; + for (i = 0; i < pg_cnt; i++) { + page = vmalloc_to_page((void *)addr); + if (!page) + break; + chunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0, + PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&pcidev->dev, chunk->dmaaddrs[i])) + break; + addr += PAGE_SIZE; + } + + chunk->pg_cnt = i; + chunk->type = I40IW_VMALLOC; + if (i == pg_cnt) + return 0; + + i40iw_free_vmalloc_mem(hw, chunk); + return I40IW_ERR_NO_MEMORY; +} + +/** + * fpm_to_idx - given fpm address, get pble index + * @pble_rsrc: pble resource management + * @addr: fpm address for index + */ +static inline u32 
fpm_to_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, u64 addr) +{ + return (addr - (pble_rsrc->fpm_base_addr)) >> 3; +} + +/** + * add_bp_pages - add backing pages for sd + * @dev: hardware control device structure + * @pble_rsrc: pble resource management + * @info: page info for sd + */ +static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct i40iw_add_page_info *info) +{ + u8 *addr; + struct i40iw_dma_mem mem; + struct i40iw_hmc_pd_entry *pd_entry; + struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry; + struct i40iw_hmc_info *hmc_info = info->hmc_info; + struct i40iw_chunk *chunk = info->chunk; + struct i40iw_manage_vf_pble_info vf_pble_info; + enum i40iw_status_code status = 0; + u32 rel_pd_idx = info->idx.rel_pd_idx; + u32 pd_idx = info->idx.pd_idx; + u32 i; + + status = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages); + if (status) + return I40IW_ERR_NO_MEMORY; + status = i40iw_add_sd_table_entry(dev->hw, hmc_info, + info->idx.sd_idx, I40IW_SD_TYPE_PAGED, + I40IW_HMC_DIRECT_BP_SIZE); + if (status) { + i40iw_free_vmalloc_mem(dev->hw, chunk); + return status; + } + if (!dev->is_pf) { + status = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE, + fpm_to_idx(pble_rsrc, + pble_rsrc->next_fpm_addr), + (info->pages << PBLE_512_SHIFT)); + if (status) { + i40iw_pr_err("allocate PBLEs in the PF. Error %i\n", status); + i40iw_free_vmalloc_mem(dev->hw, chunk); + return status; + } + } + addr = chunk->vaddr; + for (i = 0; i < info->pages; i++) { + mem.pa = chunk->dmaaddrs[i]; + mem.size = PAGE_SIZE; + mem.va = (void *)(addr); + pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++]; + if (!pd_entry->valid) { + status = i40iw_add_pd_table_entry(dev->hw, hmc_info, pd_idx++, &mem); + if (status) + goto error; + addr += PAGE_SIZE; + } else { + i40iw_pr_err("pd entry is valid expecting to be invalid\n"); + } + } + if (!dev->is_pf) { + vf_pble_info.first_pd_index = info->idx.rel_pd_idx; + vf_pble_info.inv_pd_ent = false; + vf_pble_info.pd_entry_cnt = PBLE_PER_PAGE; + vf_pble_info.pd_pl_pba = sd_entry->u.pd_table.pd_page_addr.pa; + vf_pble_info.sd_index = info->idx.sd_idx; + status = i40iw_hw_manage_vf_pble_bp(dev->back_dev, + &vf_pble_info, true); + if (status) { + i40iw_pr_err("CQP manage VF PBLE BP failed. %i\n", status); + goto error; + } + } + chunk->fpm_addr = pble_rsrc->next_fpm_addr; + return 0; +error: + i40iw_free_vmalloc_mem(dev->hw, chunk); + return status; +} + +/** + * add_pble_pool - add a sd entry for pble resoure + * @dev: hardware control device structure + * @pble_rsrc: pble resource management + */ +static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc) +{ + struct i40iw_hmc_sd_entry *sd_entry; + struct i40iw_hmc_info *hmc_info; + struct i40iw_chunk *chunk; + struct i40iw_add_page_info info; + struct sd_pd_idx *idx = &info.idx; + enum i40iw_status_code ret_code = 0; + enum i40iw_sd_entry_type sd_entry_type; + u64 sd_reg_val = 0; + u32 pages; + + if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE) + return I40IW_ERR_NO_MEMORY; + if (pble_rsrc->next_fpm_addr & 0xfff) { + i40iw_pr_err("next fpm_addr %llx\n", pble_rsrc->next_fpm_addr); + return I40IW_ERR_INVALID_PAGE_DESC_INDEX; + } + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) + return I40IW_ERR_NO_MEMORY; + hmc_info = dev->hmc_info; + chunk->fpm_addr = pble_rsrc->next_fpm_addr; + get_sd_pd_idx(pble_rsrc, idx); + sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx]; + pages = (idx->rel_pd_idx) ? 
(I40IW_HMC_PD_CNT_IN_SD - + idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD; + pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT); + if (!pages) { + ret_code = I40IW_ERR_NO_PBLCHUNKS_AVAILABLE; + goto error; + } + info.chunk = chunk; + info.hmc_info = hmc_info; + info.pages = pages; + info.sd_entry = sd_entry; + if (!sd_entry->valid) { + sd_entry_type = (!idx->rel_pd_idx && + (pages == I40IW_HMC_PD_CNT_IN_SD) && + dev->is_pf) ? I40IW_SD_TYPE_DIRECT : I40IW_SD_TYPE_PAGED; + } else { + sd_entry_type = sd_entry->entry_type; + } + i40iw_debug(dev, I40IW_DEBUG_PBLE, + "pages = %d, unallocated_pble[%u] current_fpm_addr = %llx\n", + pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr); + i40iw_debug(dev, I40IW_DEBUG_PBLE, "sd_entry_type = %d sd_entry valid = %d\n", + sd_entry_type, sd_entry->valid); + + if (sd_entry_type == I40IW_SD_TYPE_DIRECT) + ret_code = add_sd_direct(dev, pble_rsrc, &info); + if (ret_code) + sd_entry_type = I40IW_SD_TYPE_PAGED; + else + pble_rsrc->stats_direct_sds++; + + if (sd_entry_type == I40IW_SD_TYPE_PAGED) { + ret_code = add_bp_pages(dev, pble_rsrc, &info); + if (ret_code) + goto error; + else + pble_rsrc->stats_paged_sds++; + } + + if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr, + (phys_addr_t)chunk->fpm_addr, chunk->size, -1)) { + i40iw_pr_err("could not allocate memory by gen_pool_addr_virt()\n"); + ret_code = I40IW_ERR_NO_MEMORY; + goto error; + } + pble_rsrc->next_fpm_addr += chunk->size; + i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n", + pble_rsrc->next_fpm_addr, chunk->size, chunk->size); + pble_rsrc->unallocated_pble -= (chunk->size >> 3); + list_add(&chunk->list, &pble_rsrc->pinfo.clist); + sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ? + sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa; + if (sd_entry->valid) + return 0; + if (dev->is_pf) + ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id, + sd_reg_val, idx->sd_idx, + sd_entry->entry_type, true); + if (ret_code) { + i40iw_pr_err("cqp cmd failed for sd (pbles)\n"); + goto error; + } + + sd_entry->valid = true; + return 0; + error: + kfree(chunk); + return ret_code; +} + +/** + * free_lvl2 - fee level 2 pble + * @pble_rsrc: pble resource management + * @palloc: level 2 pble allocation + */ +static void free_lvl2(struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct i40iw_pble_alloc *palloc) +{ + u32 i; + struct gen_pool *pool; + struct i40iw_pble_level2 *lvl2 = &palloc->level2; + struct i40iw_pble_info *root = &lvl2->root; + struct i40iw_pble_info *leaf = lvl2->leaf; + + pool = pble_rsrc->pinfo.pool; + + for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) { + if (leaf->addr) + gen_pool_free(pool, leaf->addr, (leaf->cnt << 3)); + else + break; + } + + if (root->addr) + gen_pool_free(pool, root->addr, (root->cnt << 3)); + + kfree(lvl2->leaf); + lvl2->leaf = NULL; +} + +/** + * get_lvl2_pble - get level 2 pble resource + * @pble_rsrc: pble resource management + * @palloc: level 2 pble allocation + * @pool: pool pointer + */ +static enum i40iw_status_code get_lvl2_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct i40iw_pble_alloc *palloc, + struct gen_pool *pool) +{ + u32 lf4k, lflast, total, i; + u32 pblcnt = PBLE_PER_PAGE; + u64 *addr; + struct i40iw_pble_level2 *lvl2 = &palloc->level2; + struct i40iw_pble_info *root = &lvl2->root; + struct i40iw_pble_info *leaf; + + /* number of full 512 (4K) leafs) */ + lf4k = palloc->total_cnt >> 9; + lflast = palloc->total_cnt % PBLE_PER_PAGE; + total = (lflast == 0) ? 
lf4k : lf4k + 1; + lvl2->leaf_cnt = total; + + leaf = kzalloc((sizeof(*leaf) * total), GFP_ATOMIC); + if (!leaf) + return I40IW_ERR_NO_MEMORY; + lvl2->leaf = leaf; + /* allocate pbles for the root */ + root->addr = gen_pool_alloc(pool, (total << 3)); + if (!root->addr) { + kfree(lvl2->leaf); + lvl2->leaf = NULL; + return I40IW_ERR_NO_MEMORY; + } + root->idx = fpm_to_idx(pble_rsrc, + (u64)gen_pool_virt_to_phys(pool, root->addr)); + root->cnt = total; + addr = (u64 *)root->addr; + for (i = 0; i < total; i++, leaf++) { + pblcnt = (lflast && ((i + 1) == total)) ? lflast : PBLE_PER_PAGE; + leaf->addr = gen_pool_alloc(pool, (pblcnt << 3)); + if (!leaf->addr) + goto error; + leaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr)); + + leaf->cnt = pblcnt; + *addr = (u64)leaf->idx; + addr++; + } + palloc->level = I40IW_LEVEL_2; + pble_rsrc->stats_lvl2++; + return 0; + error: + free_lvl2(pble_rsrc, palloc); + return I40IW_ERR_NO_MEMORY; +} + +/** + * get_lvl1_pble - get level 1 pble resource + * @dev: hardware control device structure + * @pble_rsrc: pble resource management + * @palloc: level 1 pble allocation + */ +static enum i40iw_status_code get_lvl1_pble(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct i40iw_pble_alloc *palloc) +{ + u64 *addr; + struct gen_pool *pool; + struct i40iw_pble_info *lvl1 = &palloc->level1; + + pool = pble_rsrc->pinfo.pool; + addr = (u64 *)gen_pool_alloc(pool, (palloc->total_cnt << 3)); + + if (!addr) + return I40IW_ERR_NO_MEMORY; + + palloc->level = I40IW_LEVEL_1; + lvl1->addr = (unsigned long)addr; + lvl1->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, + (unsigned long)addr)); + lvl1->cnt = palloc->total_cnt; + pble_rsrc->stats_lvl1++; + return 0; +} + +/** + * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine + * @dev: i40iw_sc_dev struct + * @pble_rsrc: pble resources + * @palloc: contains all inforamtion regarding pble (idx + pble addr) + * @pool: pointer to general purpose special memory pool descriptor + */ +static inline enum i40iw_status_code get_lvl1_lvl2_pble(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct i40iw_pble_alloc *palloc, + struct gen_pool *pool) +{ + enum i40iw_status_code status = 0; + + status = get_lvl1_pble(dev, pble_rsrc, palloc); + if (status && (palloc->total_cnt > PBLE_PER_PAGE)) + status = get_lvl2_pble(pble_rsrc, palloc, pool); + return status; +} + +/** + * i40iw_get_pble - allocate pbles from the pool + * @dev: i40iw_sc_dev struct + * @pble_rsrc: pble resources + * @palloc: contains all inforamtion regarding pble (idx + pble addr) + * @pble_cnt: #of pbles requested + */ +enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct i40iw_pble_alloc *palloc, + u32 pble_cnt) +{ + struct gen_pool *pool; + enum i40iw_status_code status = 0; + u32 max_sds = 0; + int i; + + pool = pble_rsrc->pinfo.pool; + palloc->total_cnt = pble_cnt; + palloc->level = I40IW_LEVEL_0; + /*check first to see if we can get pble's without acquiring additional sd's */ + status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool); + if (!status) + goto exit; + max_sds = (palloc->total_cnt >> 18) + 1; + for (i = 0; i < max_sds; i++) { + status = add_pble_pool(dev, pble_rsrc); + if (status) + break; + status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool); + if (!status) + break; + } +exit: + if (!status) + pble_rsrc->stats_alloc_ok++; + else + pble_rsrc->stats_alloc_fail++; + + return status; +} + +/** + * 
i40iw_free_pble - put pbles back into pool + * @pble_rsrc: pble resources + * @palloc: contains all inforamtion regarding pble resource being freed + */ +void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct i40iw_pble_alloc *palloc) +{ + struct gen_pool *pool; + + pool = pble_rsrc->pinfo.pool; + if (palloc->level == I40IW_LEVEL_2) + free_lvl2(pble_rsrc, palloc); + else + gen_pool_free(pool, palloc->level1.addr, + (palloc->level1.cnt << 3)); + pble_rsrc->stats_alloc_freed++; +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.h b/drivers/infiniband/hw/i40iw/i40iw_pble.h new file mode 100644 index 000000000..7b1851d21 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_pble.h @@ -0,0 +1,131 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#ifndef I40IW_PBLE_H +#define I40IW_PBLE_H + +#define POOL_SHIFT 6 +#define PBLE_PER_PAGE 512 +#define I40IW_HMC_PAGED_BP_SHIFT 12 +#define PBLE_512_SHIFT 9 + +enum i40iw_pble_level { + I40IW_LEVEL_0 = 0, + I40IW_LEVEL_1 = 1, + I40IW_LEVEL_2 = 2 +}; + +enum i40iw_alloc_type { + I40IW_NO_ALLOC = 0, + I40IW_DMA_COHERENT = 1, + I40IW_VMALLOC = 2 +}; + +struct i40iw_pble_info { + unsigned long addr; + u32 idx; + u32 cnt; +}; + +struct i40iw_pble_level2 { + struct i40iw_pble_info root; + struct i40iw_pble_info *leaf; + u32 leaf_cnt; +}; + +struct i40iw_pble_alloc { + u32 total_cnt; + enum i40iw_pble_level level; + union { + struct i40iw_pble_info level1; + struct i40iw_pble_level2 level2; + }; +}; + +struct sd_pd_idx { + u32 sd_idx; + u32 pd_idx; + u32 rel_pd_idx; +}; + +struct i40iw_add_page_info { + struct i40iw_chunk *chunk; + struct i40iw_hmc_sd_entry *sd_entry; + struct i40iw_hmc_info *hmc_info; + struct sd_pd_idx idx; + u32 pages; +}; + +struct i40iw_chunk { + struct list_head list; + u32 size; + void *vaddr; + u64 fpm_addr; + u32 pg_cnt; + dma_addr_t *dmaaddrs; + enum i40iw_alloc_type type; +}; + +struct i40iw_pble_pool { + struct gen_pool *pool; + struct list_head clist; + u32 total_pble_alloc; + u32 free_pble_cnt; + u32 pool_shift; +}; + +struct i40iw_hmc_pble_rsrc { + u32 unallocated_pble; + u64 fpm_base_addr; + u64 next_fpm_addr; + struct i40iw_pble_pool pinfo; + + u32 stats_direct_sds; + u32 stats_paged_sds; + u64 stats_alloc_ok; + u64 stats_alloc_fail; + u64 stats_alloc_freed; + u64 stats_lvl1; + u64 stats_lvl2; +}; + +void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc); +enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc); +void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc, struct i40iw_pble_alloc *palloc); +enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct i40iw_pble_alloc *palloc, + u32 pble_cnt); +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c new file mode 100644 index 000000000..8eb400d8a --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c @@ -0,0 +1,1436 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include "i40iw_osdep.h" +#include "i40iw_register.h" +#include "i40iw_status.h" +#include "i40iw_hmc.h" + +#include "i40iw_d.h" +#include "i40iw_type.h" +#include "i40iw_p.h" +#include "i40iw_puda.h" + +static void i40iw_ieq_receive(struct i40iw_sc_dev *dev, + struct i40iw_puda_buf *buf); +static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid); +static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx); +static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc + *rsrc, bool initial); +/** + * i40iw_puda_get_listbuf - get buffer from puda list + * @list: list to use for buffers (ILQ or IEQ) + */ +static struct i40iw_puda_buf *i40iw_puda_get_listbuf(struct list_head *list) +{ + struct i40iw_puda_buf *buf = NULL; + + if (!list_empty(list)) { + buf = (struct i40iw_puda_buf *)list->next; + list_del((struct list_head *)&buf->list); + } + return buf; +} + +/** + * i40iw_puda_get_bufpool - return buffer from resource + * @rsrc: resource to use for buffer + */ +struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc) +{ + struct i40iw_puda_buf *buf = NULL; + struct list_head *list = &rsrc->bufpool; + unsigned long flags; + + spin_lock_irqsave(&rsrc->bufpool_lock, flags); + buf = i40iw_puda_get_listbuf(list); + if (buf) + rsrc->avail_buf_count--; + else + rsrc->stats_buf_alloc_fail++; + spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); + return buf; +} + +/** + * i40iw_puda_ret_bufpool - return buffer to rsrc list + * @rsrc: resource to use for buffer + * @buf: buffe to return to resouce + */ +void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc, + struct i40iw_puda_buf *buf) +{ + unsigned long flags; + + spin_lock_irqsave(&rsrc->bufpool_lock, flags); + list_add(&buf->list, &rsrc->bufpool); + spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); + rsrc->avail_buf_count++; +} + +/** + * i40iw_puda_post_recvbuf - set wqe for rcv buffer + * @rsrc: resource ptr + * @wqe_idx: wqe index to use + * @buf: puda buffer for rcv q + * @initial: flag if during init time + */ +static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx, + struct i40iw_puda_buf *buf, bool initial) +{ + u64 *wqe; + struct i40iw_sc_qp *qp = &rsrc->qp; + u64 offset24 = 0; + + qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf; + wqe = qp->qp_uk.rq_base[wqe_idx].elem; + i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, + "%s: wqe_idx= %d buf = %p wqe = %p\n", __func__, + wqe_idx, buf, wqe); + if (!initial) + get_64bit_val(wqe, 24, &offset24); + + offset24 = (offset24) ? 
0 : LS_64(1, I40IWQPSQ_VALID); + set_64bit_val(wqe, 24, offset24); + + set_64bit_val(wqe, 0, buf->mem.pa); + set_64bit_val(wqe, 8, + LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN)); + set_64bit_val(wqe, 24, offset24); +} + +/** + * i40iw_puda_replenish_rq - post rcv buffers + * @rsrc: resource to use for buffer + * @initial: flag if during init time + */ +static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc, + bool initial) +{ + u32 i; + u32 invalid_cnt = rsrc->rxq_invalid_cnt; + struct i40iw_puda_buf *buf = NULL; + + for (i = 0; i < invalid_cnt; i++) { + buf = i40iw_puda_get_bufpool(rsrc); + if (!buf) + return I40IW_ERR_list_empty; + i40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, + initial); + rsrc->rx_wqe_idx = + ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size); + rsrc->rxq_invalid_cnt--; + } + return 0; +} + +/** + * i40iw_puda_alloc_buf - allocate mem for buffer + * @dev: iwarp device + * @length: length of buffer + */ +static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev, + u32 length) +{ + struct i40iw_puda_buf *buf = NULL; + struct i40iw_virt_mem buf_mem; + enum i40iw_status_code ret; + + ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem, + sizeof(struct i40iw_puda_buf)); + if (ret) { + i40iw_debug(dev, I40IW_DEBUG_PUDA, + "%s: error mem for buf\n", __func__); + return NULL; + } + buf = (struct i40iw_puda_buf *)buf_mem.va; + ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1); + if (ret) { + i40iw_debug(dev, I40IW_DEBUG_PUDA, + "%s: error dma mem for buf\n", __func__); + i40iw_free_virt_mem(dev->hw, &buf_mem); + return NULL; + } + buf->buf_mem.va = buf_mem.va; + buf->buf_mem.size = buf_mem.size; + return buf; +} + +/** + * i40iw_puda_dele_buf - delete buffer back to system + * @dev: iwarp device + * @buf: buffer to free + */ +static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev, + struct i40iw_puda_buf *buf) +{ + i40iw_free_dma_mem(dev->hw, &buf->mem); + i40iw_free_virt_mem(dev->hw, &buf->buf_mem); +} + +/** + * i40iw_puda_get_next_send_wqe - return next wqe for processing + * @qp: puda qp for wqe + * @wqe_idx: wqe index for caller + */ +static u64 *i40iw_puda_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx) +{ + u64 *wqe = NULL; + enum i40iw_status_code ret_code = 0; + + *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); + if (!*wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code); + if (ret_code) + return wqe; + wqe = qp->sq_base[*wqe_idx].elem; + + return wqe; +} + +/** + * i40iw_puda_poll_info - poll cq for completion + * @cq: cq for poll + * @info: info return for successful completion + */ +static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq, + struct i40iw_puda_completion_info *info) +{ + u64 qword0, qword2, qword3; + u64 *cqe; + u64 comp_ctx; + bool valid_bit; + u32 major_err, minor_err; + bool error; + + cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk); + get_64bit_val(cqe, 24, &qword3); + valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID); + + if (valid_bit != cq->cq_uk.polarity) + return I40IW_ERR_QUEUE_EMPTY; + + i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32); + error = (bool)RS_64(qword3, I40IW_CQ_ERROR); + if (error) { + i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__); + major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR)); + minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR)); + info->compl_error = major_err << 16 | minor_err; + return I40IW_ERR_CQ_COMPL_ERROR; + } + + get_64bit_val(cqe, 0, &qword0); + 
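+	/* qword0 carries the payload length, qword2 the QP id and the
+	 * L3/L4 protocol, and qword3 the queue type, WQE index and VLAN
+	 * flag that are decoded below
+	 */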
get_64bit_val(cqe, 16, &qword2); + + info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ); + info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID); + + get_64bit_val(cqe, 8, &comp_ctx); + info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx; + info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX); + + if (info->q_type == I40IW_CQE_QTYPE_RQ) { + info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID); + info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO); + info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO); + info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN); + } + + return 0; +} + +/** + * i40iw_puda_poll_completion - processes completion for cq + * @dev: iwarp device + * @cq: cq getting interrupt + * @compl_err: return any completion err + */ +enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev, + struct i40iw_sc_cq *cq, u32 *compl_err) +{ + struct i40iw_qp_uk *qp; + struct i40iw_cq_uk *cq_uk = &cq->cq_uk; + struct i40iw_puda_completion_info info; + enum i40iw_status_code ret = 0; + struct i40iw_puda_buf *buf; + struct i40iw_puda_rsrc *rsrc; + void *sqwrid; + u8 cq_type = cq->cq_type; + unsigned long flags; + + if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) { + rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? dev->ilq : dev->ieq; + } else { + i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__); + return I40IW_ERR_BAD_PTR; + } + memset(&info, 0, sizeof(info)); + ret = i40iw_puda_poll_info(cq, &info); + *compl_err = info.compl_error; + if (ret == I40IW_ERR_QUEUE_EMPTY) + return ret; + if (ret) + goto done; + + qp = info.qp; + if (!qp || !rsrc) { + ret = I40IW_ERR_BAD_PTR; + goto done; + } + + if (qp->qp_id != rsrc->qp_id) { + ret = I40IW_ERR_BAD_PTR; + goto done; + } + + if (info.q_type == I40IW_CQE_QTYPE_RQ) { + buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx]; + /* Get all the tcpip information in the buf header */ + ret = i40iw_puda_get_tcpip_info(&info, buf); + if (ret) { + rsrc->stats_rcvd_pkt_err++; + if (cq_type == I40IW_CQ_TYPE_ILQ) { + i40iw_ilq_putback_rcvbuf(&rsrc->qp, + info.wqe_idx); + } else { + i40iw_puda_ret_bufpool(rsrc, buf); + i40iw_puda_replenish_rq(rsrc, false); + } + goto done; + } + + rsrc->stats_pkt_rcvd++; + rsrc->compl_rxwqe_idx = info.wqe_idx; + i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__); + rsrc->receive(rsrc->dev, buf); + if (cq_type == I40IW_CQ_TYPE_ILQ) + i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx); + else + i40iw_puda_replenish_rq(rsrc, false); + + } else { + i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__); + sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid; + I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx); + rsrc->xmit_complete(rsrc->dev, sqwrid); + spin_lock_irqsave(&rsrc->bufpool_lock, flags); + rsrc->tx_wqe_avail_cnt++; + spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); + if (!list_empty(&dev->ilq->txpend)) + i40iw_puda_send_buf(dev->ilq, NULL); + } + +done: + I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret); + if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0) + cq_uk->polarity = !cq_uk->polarity; + /* update cq tail in cq shadow memory also */ + I40IW_RING_MOVE_TAIL(cq_uk->cq_ring); + set_64bit_val(cq_uk->shadow_area, 0, + I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring)); + return 0; +} + +/** + * i40iw_puda_send - complete send wqe for transmit + * @qp: puda qp for send + * @info: buffer information for transmit + */ +enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp, + struct i40iw_puda_send_info 
*info) +{ + u64 *wqe; + u32 iplen, l4len; + u64 header[2]; + u32 wqe_idx; + u8 iipt; + + /* number of 32 bits DWORDS in header */ + l4len = info->tcplen >> 2; + if (info->ipv4) { + iipt = 3; + iplen = 5; + } else { + iipt = 1; + iplen = 10; + } + + wqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch; + /* Third line of WQE descriptor */ + /* maclen is in words */ + header[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) | + LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) | LS_64(1, I40IW_UDA_QPSQ_L4T) | + LS_64(iipt, I40IW_UDA_QPSQ_IIPT) | + LS_64(l4len, I40IW_UDA_QPSQ_L4LEN); + /* Forth line of WQE descriptor */ + header[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) | + LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) | + LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) | + LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID); + + set_64bit_val(wqe, 0, info->paddr); + set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN)); + set_64bit_val(wqe, 16, header[0]); + set_64bit_val(wqe, 24, header[1]); + + i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32); + i40iw_qp_post_wr(&qp->qp_uk); + return 0; +} + +/** + * i40iw_puda_send_buf - transmit puda buffer + * @rsrc: resource to use for buffer + * @buf: puda buffer to transmit + */ +void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf) +{ + struct i40iw_puda_send_info info; + enum i40iw_status_code ret = 0; + unsigned long flags; + + spin_lock_irqsave(&rsrc->bufpool_lock, flags); + /* if no wqe available or not from a completion and we have + * pending buffers, we must queue new buffer + */ + if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) { + list_add_tail(&buf->list, &rsrc->txpend); + spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); + rsrc->stats_sent_pkt_q++; + if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) + i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, + "%s: adding to txpend\n", __func__); + return; + } + rsrc->tx_wqe_avail_cnt--; + /* if we are coming from a completion and have pending buffers + * then Get one from pending list + */ + if (!buf) { + buf = i40iw_puda_get_listbuf(&rsrc->txpend); + if (!buf) + goto done; + } + + info.scratch = (void *)buf; + info.paddr = buf->mem.pa; + info.len = buf->totallen; + info.tcplen = buf->tcphlen; + info.maclen = buf->maclen; + info.ipv4 = buf->ipv4; + info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ); + + ret = i40iw_puda_send(&rsrc->qp, &info); + if (ret) { + rsrc->tx_wqe_avail_cnt++; + rsrc->stats_sent_pkt_q++; + list_add(&buf->list, &rsrc->txpend); + if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) + i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, + "%s: adding to puda_send\n", __func__); + } else { + rsrc->stats_pkt_sent++; + } +done: + spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); +} + +/** + * i40iw_puda_qp_setctx - during init, set qp's context + * @rsrc: qp's resource + */ +static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc) +{ + struct i40iw_sc_qp *qp = &rsrc->qp; + u64 *qp_ctx = qp->hw_host_ctx; + + set_64bit_val(qp_ctx, 8, qp->sq_pa); + set_64bit_val(qp_ctx, 16, qp->rq_pa); + + set_64bit_val(qp_ctx, 24, + LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) | + LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE)); + + set_64bit_val(qp_ctx, 48, LS_64(1514, I40IWQPC_SNDMSS)); + set_64bit_val(qp_ctx, 56, 0); + set_64bit_val(qp_ctx, 64, 1); + + set_64bit_val(qp_ctx, 136, + LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) | + 
LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM)); + + set_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN)); + + set_64bit_val(qp_ctx, 168, + LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX)); + + set_64bit_val(qp_ctx, 176, + LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) | + LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) | + LS_64(qp->qs_handle, I40IWQPC_QSHANDLE)); + + i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT", + qp_ctx, I40IW_QP_CTX_SIZE); +} + +/** + * i40iw_puda_qp_wqe - setup wqe for qp create + * @rsrc: resource for qp + */ +static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_puda_rsrc *rsrc) +{ + struct i40iw_sc_qp *qp = &rsrc->qp; + struct i40iw_sc_dev *dev = rsrc->dev; + struct i40iw_sc_cqp *cqp; + u64 *wqe; + u64 header; + struct i40iw_ccq_cqe_info compl_info; + enum i40iw_status_code status = 0; + + cqp = dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0); + if (!wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); + set_64bit_val(wqe, 40, qp->shadow_area_pa); + header = qp->qp_uk.qp_id | + LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) | + LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) | + LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) | + LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + set_64bit_val(wqe, 24, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32); + i40iw_sc_cqp_post_sq(cqp); + status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp, + I40IW_CQP_OP_CREATE_QP, + &compl_info); + return status; +} + +/** + * i40iw_puda_qp_create - create qp for resource + * @rsrc: resource to use for buffer + */ +static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc) +{ + struct i40iw_sc_qp *qp = &rsrc->qp; + struct i40iw_qp_uk *ukqp = &qp->qp_uk; + enum i40iw_status_code ret = 0; + u32 sq_size, rq_size, t_size; + struct i40iw_dma_mem *mem; + + sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE; + rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE; + t_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) + + I40IW_QP_CTX_SIZE); + /* Get page aligned memory */ + ret = + i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size, + I40IW_HW_PAGE_SIZE); + if (ret) { + i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__); + return ret; + } + + mem = &rsrc->qpmem; + memset(mem->va, 0, t_size); + qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false); + qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false); + qp->pd = &rsrc->sc_pd; + qp->qp_type = I40IW_QP_TYPE_UDA; + qp->dev = rsrc->dev; + qp->back_qp = (void *)rsrc; + qp->sq_pa = mem->pa; + qp->rq_pa = qp->sq_pa + sq_size; + ukqp->sq_base = mem->va; + ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size]; + ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem; + qp->shadow_area_pa = qp->rq_pa + rq_size; + qp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE; + qp->hw_host_ctx_pa = + qp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3); + ukqp->qp_id = rsrc->qp_id; + ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array; + ukqp->rq_wrid_array = rsrc->rq_wrid_array; + + ukqp->qp_id = rsrc->qp_id; + ukqp->sq_size = rsrc->sq_size; + ukqp->rq_size = rsrc->rq_size; + + I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size); + I40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size); + I40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size); + + if (qp->pd->dev->is_pf) + ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) + + I40E_PFPE_WQEALLOC); + else + ukqp->wqe_alloc_reg = (u32 __iomem 
*)(i40iw_get_hw_addr(qp->pd->dev) + + I40E_VFPE_WQEALLOC1); + + qp->qs_handle = qp->dev->qs_handle; + i40iw_puda_qp_setctx(rsrc); + ret = i40iw_puda_qp_wqe(rsrc); + if (ret) + i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem); + return ret; +} + +/** + * i40iw_puda_cq_create - create cq for resource + * @rsrc: resource for which cq to create + */ +static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc) +{ + struct i40iw_sc_dev *dev = rsrc->dev; + struct i40iw_sc_cq *cq = &rsrc->cq; + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + enum i40iw_status_code ret = 0; + u32 tsize, cqsize; + u32 shadow_read_threshold = 128; + struct i40iw_dma_mem *mem; + struct i40iw_ccq_cqe_info compl_info; + struct i40iw_cq_init_info info; + struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info; + + cq->back_cq = (void *)rsrc; + cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe)); + tsize = cqsize + sizeof(struct i40iw_cq_shadow_area); + ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize, + I40IW_CQ0_ALIGNMENT_MASK); + if (ret) + return ret; + + mem = &rsrc->cqmem; + memset(&info, 0, sizeof(info)); + info.dev = dev; + info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ? + I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ; + info.shadow_read_threshold = rsrc->cq_size >> 2; + info.ceq_id_valid = true; + info.cq_base_pa = mem->pa; + info.shadow_area_pa = mem->pa + cqsize; + init_info->cq_base = mem->va; + init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize); + init_info->cq_size = rsrc->cq_size; + init_info->cq_id = rsrc->cq_id; + ret = dev->iw_priv_cq_ops->cq_init(cq, &info); + if (ret) + goto error; + cqp = dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0); + if (!wqe) { + ret = I40IW_ERR_RING_FULL; + goto error; + } + + set_64bit_val(wqe, 0, rsrc->cq_size); + set_64bit_val(wqe, 8, RS_64_1(cq, 1)); + set_64bit_val(wqe, 16, LS_64(shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD)); + set_64bit_val(wqe, 32, cq->cq_pa); + + set_64bit_val(wqe, 40, cq->shadow_area_pa); + + header = rsrc->cq_id | + LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) | + LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) | + LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) | + LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + set_64bit_val(wqe, 24, header); + + i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + i40iw_sc_cqp_post_sq(dev->cqp); + ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp, + I40IW_CQP_OP_CREATE_CQ, + &compl_info); + +error: + if (ret) + i40iw_free_dma_mem(dev->hw, &rsrc->cqmem); + return ret; +} + +/** + * i40iw_puda_dele_resources - delete all resources during close + * @dev: iwarp device + * @type: type of resource to dele + * @reset: true if reset chip + */ +void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev, + enum puda_resource_type type, + bool reset) +{ + struct i40iw_ccq_cqe_info compl_info; + struct i40iw_puda_rsrc *rsrc; + struct i40iw_puda_buf *buf = NULL; + struct i40iw_puda_buf *nextbuf = NULL; + struct i40iw_virt_mem *vmem; + enum i40iw_status_code ret; + + switch (type) { + case I40IW_PUDA_RSRC_TYPE_ILQ: + rsrc = dev->ilq; + vmem = &dev->ilq_mem; + break; + case I40IW_PUDA_RSRC_TYPE_IEQ: + rsrc = dev->ieq; + vmem = &dev->ieq_mem; + break; + default: + i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n", + __func__, type); + return; + } + + switch (rsrc->completion) { + case PUDA_HASH_CRC_COMPLETE: + i40iw_free_hash_desc(rsrc->hash_desc); + case PUDA_QP_CREATED: + do { + if 
(reset) + break; + ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp, + 0, false, true, true); + if (ret) + i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, + "%s error ieq qp destroy\n", + __func__); + + ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp, + I40IW_CQP_OP_DESTROY_QP, + &compl_info); + if (ret) + i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, + "%s error ieq qp destroy done\n", + __func__); + } while (0); + + i40iw_free_dma_mem(dev->hw, &rsrc->qpmem); + /* fallthrough */ + case PUDA_CQ_CREATED: + do { + if (reset) + break; + ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true); + if (ret) + i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, + "%s error ieq cq destroy\n", + __func__); + + ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp, + I40IW_CQP_OP_DESTROY_CQ, + &compl_info); + if (ret) + i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, + "%s error ieq qp destroy done\n", + __func__); + } while (0); + + i40iw_free_dma_mem(dev->hw, &rsrc->cqmem); + break; + default: + i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__); + break; + } + /* Free all allocated puda buffers for both tx and rx */ + buf = rsrc->alloclist; + while (buf) { + nextbuf = buf->next; + i40iw_puda_dele_buf(dev, buf); + buf = nextbuf; + rsrc->alloc_buf_count--; + } + i40iw_free_virt_mem(dev->hw, vmem); +} + +/** + * i40iw_puda_allocbufs - allocate buffers for resource + * @rsrc: resource for buffer allocation + * @count: number of buffers to create + */ +static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc, + u32 count) +{ + u32 i; + struct i40iw_puda_buf *buf; + struct i40iw_puda_buf *nextbuf; + + for (i = 0; i < count; i++) { + buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size); + if (!buf) { + rsrc->stats_buf_alloc_fail++; + return I40IW_ERR_NO_MEMORY; + } + i40iw_puda_ret_bufpool(rsrc, buf); + rsrc->alloc_buf_count++; + if (!rsrc->alloclist) { + rsrc->alloclist = buf; + } else { + nextbuf = rsrc->alloclist; + rsrc->alloclist = buf; + buf->next = nextbuf; + } + } + rsrc->avail_buf_count = rsrc->alloc_buf_count; + return 0; +} + +/** + * i40iw_puda_create_rsrc - create resouce (ilq or ieq) + * @dev: iwarp device + * @info: resource information + */ +enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev, + struct i40iw_puda_rsrc_info *info) +{ + enum i40iw_status_code ret = 0; + struct i40iw_puda_rsrc *rsrc; + u32 pudasize; + u32 sqwridsize, rqwridsize; + struct i40iw_virt_mem *vmem; + + info->count = 1; + pudasize = sizeof(struct i40iw_puda_rsrc); + sqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info); + rqwridsize = info->rq_size * 8; + switch (info->type) { + case I40IW_PUDA_RSRC_TYPE_ILQ: + vmem = &dev->ilq_mem; + break; + case I40IW_PUDA_RSRC_TYPE_IEQ: + vmem = &dev->ieq_mem; + break; + default: + return I40IW_NOT_SUPPORTED; + } + ret = + i40iw_allocate_virt_mem(dev->hw, vmem, + pudasize + sqwridsize + rqwridsize); + if (ret) + return ret; + rsrc = (struct i40iw_puda_rsrc *)vmem->va; + spin_lock_init(&rsrc->bufpool_lock); + if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) { + dev->ilq = (struct i40iw_puda_rsrc *)vmem->va; + dev->ilq_count = info->count; + rsrc->receive = info->receive; + rsrc->xmit_complete = info->xmit_complete; + } else { + vmem = &dev->ieq_mem; + dev->ieq_count = info->count; + dev->ieq = (struct i40iw_puda_rsrc *)vmem->va; + rsrc->receive = i40iw_ieq_receive; + rsrc->xmit_complete = i40iw_ieq_tx_compl; + } + + rsrc->type = info->type; + rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize); + 
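+	/* the single virtual allocation above is carved up back to back:
+	 * the rsrc struct (pudasize), the SQ wrid tracking array
+	 * (sqwridsize) and the RQ wrid array (rqwridsize)
+	 */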
rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize); + rsrc->mss = info->mss; + /* Initialize all ieq lists */ + INIT_LIST_HEAD(&rsrc->bufpool); + INIT_LIST_HEAD(&rsrc->txpend); + + rsrc->tx_wqe_avail_cnt = info->sq_size - 1; + dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id); + rsrc->qp_id = info->qp_id; + rsrc->cq_id = info->cq_id; + rsrc->sq_size = info->sq_size; + rsrc->rq_size = info->rq_size; + rsrc->cq_size = info->rq_size + info->sq_size; + rsrc->buf_size = info->buf_size; + rsrc->dev = dev; + + ret = i40iw_puda_cq_create(rsrc); + if (!ret) { + rsrc->completion = PUDA_CQ_CREATED; + ret = i40iw_puda_qp_create(rsrc); + } + if (ret) { + i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n", __func__); + goto error; + } + rsrc->completion = PUDA_QP_CREATED; + + ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size); + if (ret) { + i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error allloc_buf\n", __func__); + goto error; + } + + rsrc->rxq_invalid_cnt = info->rq_size; + ret = i40iw_puda_replenish_rq(rsrc, true); + if (ret) + goto error; + + if (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) { + if (!i40iw_init_hash_desc(&rsrc->hash_desc)) { + rsrc->check_crc = true; + rsrc->completion = PUDA_HASH_CRC_COMPLETE; + ret = 0; + } + } + + dev->ccq_ops->ccq_arm(&rsrc->cq); + return ret; + error: + i40iw_puda_dele_resources(dev, info->type, false); + + return ret; +} + +/** + * i40iw_ilq_putback_rcvbuf - ilq buffer to put back on rq + * @qp: ilq's qp resource + * @wqe_idx: wqe index of completed rcvbuf + */ +static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx) +{ + u64 *wqe; + u64 offset24; + + wqe = qp->qp_uk.rq_base[wqe_idx].elem; + get_64bit_val(wqe, 24, &offset24); + offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID); + set_64bit_val(wqe, 24, offset24); +} + +/** + * i40iw_ieq_get_fpdu - given length return fpdu length + * @length: length if fpdu + */ +static u16 i40iw_ieq_get_fpdu_length(u16 length) +{ + u16 fpdu_len; + + fpdu_len = length + I40IW_IEQ_MPA_FRAMING; + fpdu_len = (fpdu_len + 3) & 0xfffffffc; + return fpdu_len; +} + +/** + * i40iw_ieq_copy_to_txbuf - copydata from rcv buf to tx buf + * @buf: rcv buffer with partial + * @txbuf: tx buffer for sendign back + * @buf_offset: rcv buffer offset to copy from + * @txbuf_offset: at offset in tx buf to copy + * @length: length of data to copy + */ +static void i40iw_ieq_copy_to_txbuf(struct i40iw_puda_buf *buf, + struct i40iw_puda_buf *txbuf, + u16 buf_offset, u32 txbuf_offset, + u32 length) +{ + void *mem1 = (u8 *)buf->mem.va + buf_offset; + void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset; + + memcpy(mem2, mem1, length); +} + +/** + * i40iw_ieq_setup_tx_buf - setup tx buffer for partial handling + * @buf: reeive buffer with partial + * @txbuf: buffer to prepare + */ +static void i40iw_ieq_setup_tx_buf(struct i40iw_puda_buf *buf, + struct i40iw_puda_buf *txbuf) +{ + txbuf->maclen = buf->maclen; + txbuf->tcphlen = buf->tcphlen; + txbuf->ipv4 = buf->ipv4; + txbuf->hdrlen = buf->hdrlen; + i40iw_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen); +} + +/** + * i40iw_ieq_check_first_buf - check if rcv buffer's seq is in range + * @buf: receive exception buffer + * @fps: first partial sequence number + */ +static void i40iw_ieq_check_first_buf(struct i40iw_puda_buf *buf, u32 fps) +{ + u32 offset; + + if (buf->seqnum < fps) { + offset = fps - buf->seqnum; + if (offset > buf->datalen) + return; + buf->data += offset; + buf->datalen -= (u16)offset; + buf->seqnum = fps; + } +} + +/** + * 
i40iw_ieq_compl_pfpdu - write txbuf with full fpdu + * @ieq: ieq resource + * @rxlist: ieq's received buffer list + * @pbufl: temporary list for buffers for fpddu + * @txbuf: tx buffer for fpdu + * @fpdu_len: total length of fpdu + */ +static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq, + struct list_head *rxlist, + struct list_head *pbufl, + struct i40iw_puda_buf *txbuf, + u16 fpdu_len) +{ + struct i40iw_puda_buf *buf; + u32 nextseqnum; + u16 txoffset, bufoffset; + + buf = i40iw_puda_get_listbuf(pbufl); + nextseqnum = buf->seqnum + fpdu_len; + txbuf->totallen = buf->hdrlen + fpdu_len; + txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen; + i40iw_ieq_setup_tx_buf(buf, txbuf); + + txoffset = buf->hdrlen; + bufoffset = (u16)(buf->data - (u8 *)buf->mem.va); + + do { + if (buf->datalen >= fpdu_len) { + /* copied full fpdu */ + i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, fpdu_len); + buf->datalen -= fpdu_len; + buf->data += fpdu_len; + buf->seqnum = nextseqnum; + break; + } + /* copy partial fpdu */ + i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, buf->datalen); + txoffset += buf->datalen; + fpdu_len -= buf->datalen; + i40iw_puda_ret_bufpool(ieq, buf); + buf = i40iw_puda_get_listbuf(pbufl); + bufoffset = (u16)(buf->data - (u8 *)buf->mem.va); + } while (1); + + /* last buffer on the list*/ + if (buf->datalen) + list_add(&buf->list, rxlist); + else + i40iw_puda_ret_bufpool(ieq, buf); +} + +/** + * i40iw_ieq_create_pbufl - create buffer list for single fpdu + * @rxlist: resource list for receive ieq buffes + * @pbufl: temp. list for buffers for fpddu + * @buf: first receive buffer + * @fpdu_len: total length of fpdu + */ +static enum i40iw_status_code i40iw_ieq_create_pbufl( + struct i40iw_pfpdu *pfpdu, + struct list_head *rxlist, + struct list_head *pbufl, + struct i40iw_puda_buf *buf, + u16 fpdu_len) +{ + enum i40iw_status_code status = 0; + struct i40iw_puda_buf *nextbuf; + u32 nextseqnum; + u16 plen = fpdu_len - buf->datalen; + bool done = false; + + nextseqnum = buf->seqnum + buf->datalen; + do { + nextbuf = i40iw_puda_get_listbuf(rxlist); + if (!nextbuf) { + status = I40IW_ERR_list_empty; + break; + } + list_add_tail(&nextbuf->list, pbufl); + if (nextbuf->seqnum != nextseqnum) { + pfpdu->bad_seq_num++; + status = I40IW_ERR_SEQ_NUM; + break; + } + if (nextbuf->datalen >= plen) { + done = true; + } else { + plen -= nextbuf->datalen; + nextseqnum = nextbuf->seqnum + nextbuf->datalen; + } + + } while (!done); + + return status; +} + +/** + * i40iw_ieq_handle_partial - process partial fpdu buffer + * @ieq: ieq resource + * @pfpdu: partial management per user qp + * @buf: receive buffer + * @fpdu_len: fpdu len in the buffer + */ +static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq, + struct i40iw_pfpdu *pfpdu, + struct i40iw_puda_buf *buf, + u16 fpdu_len) +{ + enum i40iw_status_code status = 0; + u8 *crcptr; + u32 mpacrc; + u32 seqnum = buf->seqnum; + struct list_head pbufl; /* partial buffer list */ + struct i40iw_puda_buf *txbuf = NULL; + struct list_head *rxlist = &pfpdu->rxlist; + + INIT_LIST_HEAD(&pbufl); + list_add(&buf->list, &pbufl); + + status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len); + if (!status) + goto error; + + txbuf = i40iw_puda_get_bufpool(ieq); + if (!txbuf) { + pfpdu->no_tx_bufs++; + status = I40IW_ERR_NO_TXBUFS; + goto error; + } + + i40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len); + i40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum); + crcptr = txbuf->data + fpdu_len - 4; + mpacrc 
= *(u32 *)crcptr; + if (ieq->check_crc) { + status = i40iw_ieq_check_mpacrc(ieq->hash_desc, txbuf->data, + (fpdu_len - 4), mpacrc); + if (status) { + i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ, + "%s: error bad crc\n", __func__); + goto error; + } + } + + i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "IEQ TX BUFFER", + txbuf->mem.va, txbuf->totallen); + i40iw_puda_send_buf(ieq, txbuf); + pfpdu->rcv_nxt = seqnum + fpdu_len; + return status; + error: + while (!list_empty(&pbufl)) { + buf = (struct i40iw_puda_buf *)(pbufl.prev); + list_del(&buf->list); + list_add(&buf->list, rxlist); + } + if (txbuf) + i40iw_puda_ret_bufpool(ieq, txbuf); + return status; +} + +/** + * i40iw_ieq_process_buf - process buffer rcvd for ieq + * @ieq: ieq resource + * @pfpdu: partial management per user qp + * @buf: receive buffer + */ +static enum i40iw_status_code i40iw_ieq_process_buf(struct i40iw_puda_rsrc *ieq, + struct i40iw_pfpdu *pfpdu, + struct i40iw_puda_buf *buf) +{ + u16 fpdu_len = 0; + u16 datalen = buf->datalen; + u8 *datap = buf->data; + u8 *crcptr; + u16 ioffset = 0; + u32 mpacrc; + u32 seqnum = buf->seqnum; + u16 length = 0; + u16 full = 0; + bool partial = false; + struct i40iw_puda_buf *txbuf; + struct list_head *rxlist = &pfpdu->rxlist; + enum i40iw_status_code ret = 0; + enum i40iw_status_code status = 0; + + ioffset = (u16)(buf->data - (u8 *)buf->mem.va); + while (datalen) { + fpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(u16 *)datap)); + if (fpdu_len > pfpdu->max_fpdu_data) { + i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ, + "%s: error bad fpdu_len\n", __func__); + status = I40IW_ERR_MPA_CRC; + list_add(&buf->list, rxlist); + return status; + } + + if (datalen < fpdu_len) { + partial = true; + break; + } + crcptr = datap + fpdu_len - 4; + mpacrc = *(u32 *)crcptr; + if (ieq->check_crc) + ret = i40iw_ieq_check_mpacrc(ieq->hash_desc, + datap, fpdu_len - 4, mpacrc); + if (ret) { + status = I40IW_ERR_MPA_CRC; + list_add(&buf->list, rxlist); + return status; + } + full++; + pfpdu->fpdu_processed++; + datap += fpdu_len; + length += fpdu_len; + datalen -= fpdu_len; + } + if (full) { + /* copy full pdu's in the txbuf and send them out */ + txbuf = i40iw_puda_get_bufpool(ieq); + if (!txbuf) { + pfpdu->no_tx_bufs++; + status = I40IW_ERR_NO_TXBUFS; + list_add(&buf->list, rxlist); + return status; + } + /* modify txbuf's buffer header */ + i40iw_ieq_setup_tx_buf(buf, txbuf); + /* copy full fpdu's to new buffer */ + i40iw_ieq_copy_to_txbuf(buf, txbuf, ioffset, buf->hdrlen, + length); + txbuf->totallen = buf->hdrlen + length; + + i40iw_ieq_update_tcpip_info(txbuf, length, buf->seqnum); + i40iw_puda_send_buf(ieq, txbuf); + + if (!datalen) { + pfpdu->rcv_nxt = buf->seqnum + length; + i40iw_puda_ret_bufpool(ieq, buf); + return status; + } + buf->data = datap; + buf->seqnum = seqnum + length; + buf->datalen = datalen; + pfpdu->rcv_nxt = buf->seqnum; + } + if (partial) + status = i40iw_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len); + + return status; +} + +/** + * i40iw_ieq_process_fpdus - process fpdu's buffers on its list + * @qp: qp for which partial fpdus + * @ieq: ieq resource + */ +static void i40iw_ieq_process_fpdus(struct i40iw_sc_qp *qp, + struct i40iw_puda_rsrc *ieq) +{ + struct i40iw_pfpdu *pfpdu = &qp->pfpdu; + struct list_head *rxlist = &pfpdu->rxlist; + struct i40iw_puda_buf *buf; + enum i40iw_status_code status; + + do { + if (list_empty(rxlist)) + break; + buf = i40iw_puda_get_listbuf(rxlist); + if (!buf) { + i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ, + "%s: error no buf\n", __func__); + break; + } + if (buf->seqnum 
!= pfpdu->rcv_nxt) { + /* This could be out of order or missing packet */ + pfpdu->out_of_order++; + list_add(&buf->list, rxlist); + break; + } + /* keep processing buffers from the head of the list */ + status = i40iw_ieq_process_buf(ieq, pfpdu, buf); + if (status == I40IW_ERR_MPA_CRC) { + pfpdu->mpa_crc_err = true; + while (!list_empty(rxlist)) { + buf = i40iw_puda_get_listbuf(rxlist); + i40iw_puda_ret_bufpool(ieq, buf); + pfpdu->crc_err++; + } + /* create CQP for AE */ + i40iw_ieq_mpa_crc_ae(ieq->dev, qp); + } + } while (!status); +} + +/** + * i40iw_ieq_handle_exception - handle qp's exception + * @ieq: ieq resource + * @qp: qp receiving excpetion + * @buf: receive buffer + */ +static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq, + struct i40iw_sc_qp *qp, + struct i40iw_puda_buf *buf) +{ + struct i40iw_puda_buf *tmpbuf = NULL; + struct i40iw_pfpdu *pfpdu = &qp->pfpdu; + u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx; + u32 rcv_wnd = hw_host_ctx[23]; + /* first partial seq # in q2 */ + u32 fps = qp->q2_buf[16]; + struct list_head *rxlist = &pfpdu->rxlist; + struct list_head *plist; + + pfpdu->total_ieq_bufs++; + + if (pfpdu->mpa_crc_err) { + pfpdu->crc_err++; + goto error; + } + if (pfpdu->mode && (fps != pfpdu->fps)) { + /* clean up qp as it is new partial sequence */ + i40iw_ieq_cleanup_qp(ieq->dev, qp); + i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ, + "%s: restarting new partial\n", __func__); + pfpdu->mode = false; + } + + if (!pfpdu->mode) { + i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "Q2 BUFFER", (u64 *)qp->q2_buf, 128); + /* First_Partial_Sequence_Number check */ + pfpdu->rcv_nxt = fps; + pfpdu->fps = fps; + pfpdu->mode = true; + pfpdu->max_fpdu_data = ieq->mss; + pfpdu->pmode_count++; + INIT_LIST_HEAD(rxlist); + i40iw_ieq_check_first_buf(buf, fps); + } + + if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) { + pfpdu->bad_seq_num++; + goto error; + } + + if (!list_empty(rxlist)) { + tmpbuf = (struct i40iw_puda_buf *)rxlist->next; + plist = &tmpbuf->list; + while ((struct list_head *)tmpbuf != rxlist) { + if ((int)(buf->seqnum - tmpbuf->seqnum) < 0) + break; + tmpbuf = (struct i40iw_puda_buf *)plist->next; + } + /* Insert buf before tmpbuf */ + list_add_tail(&buf->list, &tmpbuf->list); + } else { + list_add_tail(&buf->list, rxlist); + } + i40iw_ieq_process_fpdus(qp, ieq); + return; + error: + i40iw_puda_ret_bufpool(ieq, buf); +} + +/** + * i40iw_ieq_receive - received exception buffer + * @dev: iwarp device + * @buf: exception buffer received + */ +static void i40iw_ieq_receive(struct i40iw_sc_dev *dev, + struct i40iw_puda_buf *buf) +{ + struct i40iw_puda_rsrc *ieq = dev->ieq; + struct i40iw_sc_qp *qp = NULL; + u32 wqe_idx = ieq->compl_rxwqe_idx; + + qp = i40iw_ieq_get_qp(dev, buf); + if (!qp) { + ieq->stats_bad_qp_id++; + i40iw_puda_ret_bufpool(ieq, buf); + } else { + i40iw_ieq_handle_exception(ieq, qp, buf); + } + /* + * ieq->rx_wqe_idx is used by i40iw_puda_replenish_rq() + * on which wqe_idx to start replenish rq + */ + if (!ieq->rxq_invalid_cnt) + ieq->rx_wqe_idx = wqe_idx; + ieq->rxq_invalid_cnt++; +} + +/** + * i40iw_ieq_tx_compl - put back after sending completed exception buffer + * @dev: iwarp device + * @sqwrid: pointer to puda buffer + */ +static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid) +{ + struct i40iw_puda_rsrc *ieq = dev->ieq; + struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid; + + i40iw_puda_ret_bufpool(ieq, buf); + if (!list_empty(&ieq->txpend)) { + buf = i40iw_puda_get_listbuf(&ieq->txpend); + i40iw_puda_send_buf(ieq, buf); 
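+		/* a send completion is the point at which a buffer queued
+		 * on txpend (because no send wqe was available at the time)
+		 * gets another chance to go out
+		 */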
+ } +} + +/** + * i40iw_ieq_cleanup_qp - qp is being destroyed + * @dev: iwarp device + * @qp: all pending fpdu buffers + */ +void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp) +{ + struct i40iw_puda_buf *buf; + struct i40iw_pfpdu *pfpdu = &qp->pfpdu; + struct list_head *rxlist = &pfpdu->rxlist; + struct i40iw_puda_rsrc *ieq = dev->ieq; + + if (!pfpdu->mode) + return; + while (!list_empty(rxlist)) { + buf = i40iw_puda_get_listbuf(rxlist); + i40iw_puda_ret_bufpool(ieq, buf); + } +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h new file mode 100644 index 000000000..52bf7826c --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h @@ -0,0 +1,183 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#ifndef I40IW_PUDA_H +#define I40IW_PUDA_H + +#define I40IW_IEQ_MPA_FRAMING 6 + +struct i40iw_sc_dev; +struct i40iw_sc_qp; +struct i40iw_sc_cq; + +enum puda_resource_type { + I40IW_PUDA_RSRC_TYPE_ILQ = 1, + I40IW_PUDA_RSRC_TYPE_IEQ +}; + +enum puda_rsrc_complete { + PUDA_CQ_CREATED = 1, + PUDA_QP_CREATED, + PUDA_TX_COMPLETE, + PUDA_RX_COMPLETE, + PUDA_HASH_CRC_COMPLETE +}; + +struct i40iw_puda_completion_info { + struct i40iw_qp_uk *qp; + u8 q_type; + u8 vlan_valid; + u8 l3proto; + u8 l4proto; + u16 payload_len; + u32 compl_error; /* No_err=0, else major and minor err code */ + u32 qp_id; + u32 wqe_idx; +}; + +struct i40iw_puda_send_info { + u64 paddr; /* Physical address */ + u32 len; + u8 tcplen; + u8 maclen; + bool ipv4; + bool doloopback; + void *scratch; +}; + +struct i40iw_puda_buf { + struct list_head list; /* MUST be first entry */ + struct i40iw_dma_mem mem; /* DMA memory for the buffer */ + struct i40iw_puda_buf *next; /* for alloclist in rsrc struct */ + struct i40iw_virt_mem buf_mem; /* Buffer memory for this buffer */ + void *scratch; + u8 *iph; + u8 *tcph; + u8 *data; + u16 datalen; + u16 vlan_id; + u8 tcphlen; /* tcp length in bytes */ + u8 maclen; /* mac length in bytes */ + u32 totallen; /* machlen+iphlen+tcphlen+datalen */ + atomic_t refcount; + u8 hdrlen; + bool ipv4; + u32 seqnum; +}; + +struct i40iw_puda_rsrc_info { + enum puda_resource_type type; /* ILQ or IEQ */ + u32 count; + u16 pd_id; + u32 cq_id; + u32 qp_id; + u32 sq_size; + u32 rq_size; + u16 buf_size; + u16 mss; + u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */ + void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *); + void (*xmit_complete)(struct i40iw_sc_dev *, void *); +}; + +struct i40iw_puda_rsrc { + struct i40iw_sc_cq cq; + struct i40iw_sc_qp qp; + struct i40iw_sc_pd sc_pd; + struct i40iw_sc_dev *dev; + struct i40iw_dma_mem cqmem; + struct i40iw_dma_mem qpmem; + struct i40iw_virt_mem ilq_mem; + enum puda_rsrc_complete completion; + enum puda_resource_type type; + u16 buf_size; /*buffer must be max datalen + tcpip hdr + mac */ + u16 mss; + u32 cq_id; + u32 qp_id; + u32 sq_size; + u32 rq_size; + u32 cq_size; + struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array; + u64 *rq_wrid_array; + u32 compl_rxwqe_idx; + u32 rx_wqe_idx; + u32 rxq_invalid_cnt; + u32 tx_wqe_avail_cnt; + bool check_crc; + struct shash_desc *hash_desc; + struct list_head txpend; + struct list_head bufpool; /* free buffers pool list for recv and xmit */ + u32 alloc_buf_count; + u32 avail_buf_count; /* snapshot of currently available buffers */ + spinlock_t bufpool_lock; + struct i40iw_puda_buf *alloclist; + void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *); + void (*xmit_complete)(struct i40iw_sc_dev *, void *); + /* puda stats */ + u64 stats_buf_alloc_fail; + u64 stats_pkt_rcvd; + u64 stats_pkt_sent; + u64 stats_rcvd_pkt_err; + u64 stats_sent_pkt_q; + u64 stats_bad_qp_id; +}; + +struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc); +void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc, + struct i40iw_puda_buf *buf); +void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, + struct i40iw_puda_buf *buf); +enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp, + struct i40iw_puda_send_info *info); +enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev, + struct i40iw_puda_rsrc_info *info); +void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev, + enum 
puda_resource_type type, + bool reset); +enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev, + struct i40iw_sc_cq *cq, u32 *compl_err); +void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp); + +struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev, + struct i40iw_puda_buf *buf); +enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info, + struct i40iw_puda_buf *buf); +enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc, + void *addr, u32 length, u32 value); +enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc); +void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp); +void i40iw_free_hash_desc(struct shash_desc *desc); +void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, + u32 seqnum); +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_register.h b/drivers/infiniband/hw/i40iw/i40iw_register.h new file mode 100644 index 000000000..57768184e --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_register.h @@ -0,0 +1,1030 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#ifndef I40IW_REGISTER_H +#define I40IW_REGISTER_H + +#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */ + +#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */ +#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0 +#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) +#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16 +#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT) +#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31 +#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0 +#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1 +#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2 +#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) + +#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0 +#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT) +#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1 +#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT) +#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3 +#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) + +#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ +#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ + +#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15 +#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK (0x1 << I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) +#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ +#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4 +#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT) +#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */ +#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0 +#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK 0xFF + +#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */ +#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0 +#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT) +#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */ +#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 +#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT) +#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */ +#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0 +#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT) +#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */ +#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 +#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT) +#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4 +#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK (0x7 << I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT) +#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16 +#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK (0x3F << I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT) +#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 +#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT) +#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */ +#define I40E_PFPE_CQACK_PECQID_SHIFT 0 +#define 
I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT) +#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */ +#define I40E_PFPE_CQARM_PECQID_SHIFT 0 +#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT) +#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */ +#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0 +#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT) +#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */ +#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 +#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) +#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) +#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */ +#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0 +#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT) +#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 +#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT) +#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */ +#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */ +#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */ +#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0 +#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT) +#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 +#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) +#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */ +#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) +#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */ +#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */ +#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 +#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT) + +#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */ +#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0 +#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT) +#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 +#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) + +#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_AEQALLOC_MAX_INDEX 127 +#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0 +#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT) +#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127 +#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 +#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT) +#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ 
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127 +#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0 +#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT) +#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127 +#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 +#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT) +#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4 +#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK (0x7 << I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT) +#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16 +#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK (0x3F << I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT) +#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 +#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT) +#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQACK_MAX_INDEX 127 +#define I40E_VFPE_CQACK_PECQID_SHIFT 0 +#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT) +#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQARM_MAX_INDEX 127 +#define I40E_VFPE_CQARM_PECQID_SHIFT 0 +#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT) +#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQPDB_MAX_INDEX 127 +#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0 +#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT) +#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127 +#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 +#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) +#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) +#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQPTAIL_MAX_INDEX 127 +#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0 +#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT) +#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 +#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT) +#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127 +#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0 +#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT) +#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 +#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) +#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127 +#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) +#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127 +#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << 
I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127 +#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 +#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT) +#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_WQEALLOC_MAX_INDEX 127 +#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0 +#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT) +#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 +#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) + +#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0 +#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT) +#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0 +#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT) +#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0 +#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT) +#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0 +#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK (0xFFFF << I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT) +#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17 +#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK (0x1 << I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT) +#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18 +#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK (0x1 << I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT) +#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */ +#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0 +#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK (0x1 << I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT) +#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15 +#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT) +#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15 +#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT) +#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15 +#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0 +#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT) +#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK (0xFF << I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT) +#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK (0xFF << I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT) +#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0 +#define 
I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK (0xFF << I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26 +#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27 +#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28 +#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29 +#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30 +#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31 +#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK (0x1 << I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT) +#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK (0xFF << I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT) +#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK (0xFF << I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT) +#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0 +#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK (0x1 << I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT) +#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31 +#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT) +#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31 +#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT) +#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31 +#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0 +#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT) +#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31 +#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0 +#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT) +#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8 +#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT) +#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31 +#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31 +#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT) 
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31 +#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0 +#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1 +#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2 +#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3 +#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4 +#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT) +#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31 +#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0 +#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT) +#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31 +#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT) + +#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 +#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT) +#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << 
I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 +#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT) +#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) +#define 
I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 +#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) +#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 +#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT) +#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 
8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 +#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT) +#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define 
I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 +#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) +#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) +#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 +#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT) +#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 +#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT) +#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0 +#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT) +#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0 +#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT) +#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15 +#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0 +#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT) +#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 
+ ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0 +#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT) +#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 +#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT) +#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 +#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) +#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 +#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) +#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 +#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) +#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15 +#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 +#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT) +#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15 +#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 +#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT) +#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) +#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) +#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) +#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) +#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0 +#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK 
(0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT) +#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0 +#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT) +#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0 +#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT) +#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0 +#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT) +#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0 +#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT) +#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0 +#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT) +#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT) +#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT) +#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0 +#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT) +#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0 +#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT) +#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT) +#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT) +#define 
I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT) +#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0 +#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT) +#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0 +#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT) +#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT) +#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT) +#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT) +#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 +#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT) +#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31 +#define 
I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 +#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT) +#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 +#define 
I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 +#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) +#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 +#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT) +#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK 
(0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 +#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT) +#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << 
I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 +#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) +#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) +#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) +#define 
I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 +#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT) +#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 +#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT) +#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0 +#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT) +#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 
31 +#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0 +#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT) +#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31 +#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0 +#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT) +#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0 +#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT) +#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 +#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT) +#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 +#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) +#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 +#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) +#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 +#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) +#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31 +#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 +#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT) +#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31 +#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 +#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT) +#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) +#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) +#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) 
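Aside on the block of GLPES VF statistics registers above: each per-VF counter is exposed as a 32-bit ...LO register plus a 16-bit ...HI extension located 4 bytes above it, with an 8-byte stride per VF index (_i); the 0xFFFF masks on the ...HI defines reflect that only the low 16 bits of the HI half are implemented, so the full counter is 48 bits wide. Below is a minimal sketch of folding such a pair into one 64-bit value, assuming only the u8 __iomem *hw_addr mapping this patch later declares in struct i40iw_hw and the standard readl() accessor from <linux/io.h>; the helper name is hypothetical and this is an illustration, not code from the patch:

/* Illustrative only; not part of this patch. Assumes <linux/io.h> for readl(). */
static u64 example_read_vf_stat64(u8 __iomem *hw_addr, u32 lo_reg, u32 hi_reg)
{
        u64 lo = readl(hw_addr + lo_reg);           /* lower 32 bits of the counter */
        u64 hi = readl(hw_addr + hi_reg) & 0xFFFF;  /* upper 16 bits of the counter */

        return (hi << 32) | lo;
}

/* e.g. UDP packets received on VF 3 (illustrative use of the defines above): */
/* total = example_read_vf_stat64(hw->hw_addr, I40E_GLPES_VFUDPRXPKTSLO(3), I40E_GLPES_VFUDPRXPKTSHI(3)); */

The sketch deliberately ignores the torn-read case where the HI half increments between the two MMIO reads; a production reader would need to tolerate or re-read around that, and nothing here should be taken as the driver's actual statistics path.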
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) + +#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ +#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 +#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) +#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) +#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) +#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK (0x7 << I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK (0x3F << I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) +#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ +#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 +#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT) +#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ +#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 +#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT) +#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ +#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 +#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT) +#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) +#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ +#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 +#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) +#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ +#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 +#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) +#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) +#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK 
(0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) +#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ +#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 +#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) +#endif /* I40IW_REGISTER_H */ diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h new file mode 100644 index 000000000..b0110c15e --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_status.h @@ -0,0 +1,100 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#ifndef I40IW_STATUS_H +#define I40IW_STATUS_H + +/* Error Codes */ +enum i40iw_status_code { + I40IW_SUCCESS = 0, + I40IW_ERR_NVM = -1, + I40IW_ERR_NVM_CHECKSUM = -2, + I40IW_ERR_CONFIG = -4, + I40IW_ERR_PARAM = -5, + I40IW_ERR_DEVICE_NOT_SUPPORTED = -6, + I40IW_ERR_RESET_FAILED = -7, + I40IW_ERR_SWFW_SYNC = -8, + I40IW_ERR_NO_MEMORY = -9, + I40IW_ERR_BAD_PTR = -10, + I40IW_ERR_INVALID_PD_ID = -11, + I40IW_ERR_INVALID_QP_ID = -12, + I40IW_ERR_INVALID_CQ_ID = -13, + I40IW_ERR_INVALID_CEQ_ID = -14, + I40IW_ERR_INVALID_AEQ_ID = -15, + I40IW_ERR_INVALID_SIZE = -16, + I40IW_ERR_INVALID_ARP_INDEX = -17, + I40IW_ERR_INVALID_FPM_FUNC_ID = -18, + I40IW_ERR_QP_INVALID_MSG_SIZE = -19, + I40IW_ERR_QP_TOOMANY_WRS_POSTED = -20, + I40IW_ERR_INVALID_FRAG_COUNT = -21, + I40IW_ERR_QUEUE_EMPTY = -22, + I40IW_ERR_INVALID_ALIGNMENT = -23, + I40IW_ERR_FLUSHED_QUEUE = -24, + I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25, + I40IW_ERR_INVALID_IMM_DATA_SIZE = -26, + I40IW_ERR_TIMEOUT = -27, + I40IW_ERR_OPCODE_MISMATCH = -28, + I40IW_ERR_CQP_COMPL_ERROR = -29, + I40IW_ERR_INVALID_VF_ID = -30, + I40IW_ERR_INVALID_HMCFN_ID = -31, + I40IW_ERR_BACKING_PAGE_ERROR = -32, + I40IW_ERR_NO_PBLCHUNKS_AVAILABLE = -33, + I40IW_ERR_INVALID_PBLE_INDEX = -34, + I40IW_ERR_INVALID_SD_INDEX = -35, + I40IW_ERR_INVALID_PAGE_DESC_INDEX = -36, + I40IW_ERR_INVALID_SD_TYPE = -37, + I40IW_ERR_MEMCPY_FAILED = -38, + I40IW_ERR_INVALID_HMC_OBJ_INDEX = -39, + I40IW_ERR_INVALID_HMC_OBJ_COUNT = -40, + I40IW_ERR_INVALID_SRQ_ARM_LIMIT = -41, + I40IW_ERR_SRQ_ENABLED = -42, + I40IW_ERR_BUF_TOO_SHORT = -43, + I40IW_ERR_BAD_IWARP_CQE = -44, + I40IW_ERR_NVM_BLANK_MODE = -45, + I40IW_ERR_NOT_IMPLEMENTED = -46, + I40IW_ERR_PE_DOORBELL_NOT_ENABLED = -47, + I40IW_ERR_NOT_READY = -48, + I40IW_NOT_SUPPORTED = -49, + I40IW_ERR_FIRMWARE_API_VERSION = -50, + I40IW_ERR_RING_FULL = -51, + I40IW_ERR_MPA_CRC = -61, + I40IW_ERR_NO_TXBUFS = -62, + I40IW_ERR_SEQ_NUM = -63, + I40IW_ERR_list_empty = -64, + I40IW_ERR_INVALID_MAC_ADDR = -65, + I40IW_ERR_BAD_STAG = -66, + I40IW_ERR_CQ_COMPL_ERROR = -67, + +}; +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h new file mode 100644 index 000000000..edb3a8c82 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_type.h @@ -0,0 +1,1312 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#ifndef I40IW_TYPE_H +#define I40IW_TYPE_H +#include "i40iw_user.h" +#include "i40iw_hmc.h" +#include "i40iw_vf.h" +#include "i40iw_virtchnl.h" + +struct i40iw_cqp_sq_wqe { + u64 buf[I40IW_CQP_WQE_SIZE]; +}; + +struct i40iw_sc_aeqe { + u64 buf[I40IW_AEQE_SIZE]; +}; + +struct i40iw_ceqe { + u64 buf[I40IW_CEQE_SIZE]; +}; + +struct i40iw_cqp_ctx { + u64 buf[I40IW_CQP_CTX_SIZE]; +}; + +struct i40iw_cq_shadow_area { + u64 buf[I40IW_SHADOW_AREA_SIZE]; +}; + +struct i40iw_sc_dev; +struct i40iw_hmc_info; +struct i40iw_dev_pestat; + +struct i40iw_cqp_ops; +struct i40iw_ccq_ops; +struct i40iw_ceq_ops; +struct i40iw_aeq_ops; +struct i40iw_mr_ops; +struct i40iw_cqp_misc_ops; +struct i40iw_pd_ops; +struct i40iw_priv_qp_ops; +struct i40iw_priv_cq_ops; +struct i40iw_hmc_ops; + +enum i40iw_resource_indicator_type { + I40IW_RSRC_INDICATOR_TYPE_ADAPTER = 0, + I40IW_RSRC_INDICATOR_TYPE_CQ, + I40IW_RSRC_INDICATOR_TYPE_QP, + I40IW_RSRC_INDICATOR_TYPE_SRQ +}; + +enum i40iw_hdrct_flags { + DDP_LEN_FLAG = 0x80, + DDP_HDR_FLAG = 0x40, + RDMA_HDR_FLAG = 0x20 +}; + +enum i40iw_term_layers { + LAYER_RDMA = 0, + LAYER_DDP = 1, + LAYER_MPA = 2 +}; + +enum i40iw_term_error_types { + RDMAP_REMOTE_PROT = 1, + RDMAP_REMOTE_OP = 2, + DDP_CATASTROPHIC = 0, + DDP_TAGGED_BUFFER = 1, + DDP_UNTAGGED_BUFFER = 2, + DDP_LLP = 3 +}; + +enum i40iw_term_rdma_errors { + RDMAP_INV_STAG = 0x00, + RDMAP_INV_BOUNDS = 0x01, + RDMAP_ACCESS = 0x02, + RDMAP_UNASSOC_STAG = 0x03, + RDMAP_TO_WRAP = 0x04, + RDMAP_INV_RDMAP_VER = 0x05, + RDMAP_UNEXPECTED_OP = 0x06, + RDMAP_CATASTROPHIC_LOCAL = 0x07, + RDMAP_CATASTROPHIC_GLOBAL = 0x08, + RDMAP_CANT_INV_STAG = 0x09, + RDMAP_UNSPECIFIED = 0xff +}; + +enum i40iw_term_ddp_errors { + DDP_CATASTROPHIC_LOCAL = 0x00, + DDP_TAGGED_INV_STAG = 0x00, + DDP_TAGGED_BOUNDS = 0x01, + DDP_TAGGED_UNASSOC_STAG = 0x02, + DDP_TAGGED_TO_WRAP = 0x03, + DDP_TAGGED_INV_DDP_VER = 0x04, + DDP_UNTAGGED_INV_QN = 0x01, + DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02, + DDP_UNTAGGED_INV_MSN_RANGE = 0x03, + DDP_UNTAGGED_INV_MO = 0x04, + DDP_UNTAGGED_INV_TOO_LONG = 0x05, + DDP_UNTAGGED_INV_DDP_VER = 0x06 +}; + +enum i40iw_term_mpa_errors { + MPA_CLOSED = 0x01, + MPA_CRC = 0x02, + MPA_MARKER = 0x03, + MPA_REQ_RSP = 0x04, +}; + +enum i40iw_flush_opcode { + FLUSH_INVALID = 0, + FLUSH_PROT_ERR, + FLUSH_REM_ACCESS_ERR, + FLUSH_LOC_QP_OP_ERR, + FLUSH_REM_OP_ERR, + FLUSH_LOC_LEN_ERR, + FLUSH_GENERAL_ERR, + FLUSH_FATAL_ERR +}; + +enum i40iw_term_eventtypes { + TERM_EVENT_QP_FATAL, + TERM_EVENT_QP_ACCESS_ERR +}; + +struct i40iw_terminate_hdr { + u8 layer_etype; + u8 error_code; + u8 hdrct; + u8 rsvd; +}; + +enum i40iw_debug_flag { + I40IW_DEBUG_NONE = 0x00000000, + I40IW_DEBUG_ERR = 0x00000001, + I40IW_DEBUG_INIT = 0x00000002, + I40IW_DEBUG_DEV = 0x00000004, + I40IW_DEBUG_CM = 0x00000008, + I40IW_DEBUG_VERBS = 0x00000010, + I40IW_DEBUG_PUDA = 0x00000020, + I40IW_DEBUG_ILQ = 0x00000040, + I40IW_DEBUG_IEQ = 0x00000080, + I40IW_DEBUG_QP = 0x00000100, + I40IW_DEBUG_CQ = 0x00000200, + 
I40IW_DEBUG_MR = 0x00000400, + I40IW_DEBUG_PBLE = 0x00000800, + I40IW_DEBUG_WQE = 0x00001000, + I40IW_DEBUG_AEQ = 0x00002000, + I40IW_DEBUG_CQP = 0x00004000, + I40IW_DEBUG_HMC = 0x00008000, + I40IW_DEBUG_USER = 0x00010000, + I40IW_DEBUG_VIRT = 0x00020000, + I40IW_DEBUG_DCB = 0x00040000, + I40IW_DEBUG_CQE = 0x00800000, + I40IW_DEBUG_ALL = 0xFFFFFFFF +}; + +enum i40iw_hw_stat_index_32b { + I40IW_HW_STAT_INDEX_IP4RXDISCARD = 0, + I40IW_HW_STAT_INDEX_IP4RXTRUNC, + I40IW_HW_STAT_INDEX_IP4TXNOROUTE, + I40IW_HW_STAT_INDEX_IP6RXDISCARD, + I40IW_HW_STAT_INDEX_IP6RXTRUNC, + I40IW_HW_STAT_INDEX_IP6TXNOROUTE, + I40IW_HW_STAT_INDEX_TCPRTXSEG, + I40IW_HW_STAT_INDEX_TCPRXOPTERR, + I40IW_HW_STAT_INDEX_TCPRXPROTOERR, + I40IW_HW_STAT_INDEX_MAX_32 +}; + +enum i40iw_hw_stat_index_64b { + I40IW_HW_STAT_INDEX_IP4RXOCTS = 0, + I40IW_HW_STAT_INDEX_IP4RXPKTS, + I40IW_HW_STAT_INDEX_IP4RXFRAGS, + I40IW_HW_STAT_INDEX_IP4RXMCPKTS, + I40IW_HW_STAT_INDEX_IP4TXOCTS, + I40IW_HW_STAT_INDEX_IP4TXPKTS, + I40IW_HW_STAT_INDEX_IP4TXFRAGS, + I40IW_HW_STAT_INDEX_IP4TXMCPKTS, + I40IW_HW_STAT_INDEX_IP6RXOCTS, + I40IW_HW_STAT_INDEX_IP6RXPKTS, + I40IW_HW_STAT_INDEX_IP6RXFRAGS, + I40IW_HW_STAT_INDEX_IP6RXMCPKTS, + I40IW_HW_STAT_INDEX_IP6TXOCTS, + I40IW_HW_STAT_INDEX_IP6TXPKTS, + I40IW_HW_STAT_INDEX_IP6TXFRAGS, + I40IW_HW_STAT_INDEX_IP6TXMCPKTS, + I40IW_HW_STAT_INDEX_TCPRXSEGS, + I40IW_HW_STAT_INDEX_TCPTXSEG, + I40IW_HW_STAT_INDEX_RDMARXRDS, + I40IW_HW_STAT_INDEX_RDMARXSNDS, + I40IW_HW_STAT_INDEX_RDMARXWRS, + I40IW_HW_STAT_INDEX_RDMATXRDS, + I40IW_HW_STAT_INDEX_RDMATXSNDS, + I40IW_HW_STAT_INDEX_RDMATXWRS, + I40IW_HW_STAT_INDEX_RDMAVBND, + I40IW_HW_STAT_INDEX_RDMAVINV, + I40IW_HW_STAT_INDEX_MAX_64 +}; + +struct i40iw_dev_hw_stat_offsets { + u32 stat_offset_32[I40IW_HW_STAT_INDEX_MAX_32]; + u32 stat_offset_64[I40IW_HW_STAT_INDEX_MAX_64]; +}; + +struct i40iw_dev_hw_stats { + u64 stat_value_32[I40IW_HW_STAT_INDEX_MAX_32]; + u64 stat_value_64[I40IW_HW_STAT_INDEX_MAX_64]; +}; + +struct i40iw_device_pestat_ops { + void (*iw_hw_stat_init)(struct i40iw_dev_pestat *, u8, struct i40iw_hw *, bool); + void (*iw_hw_stat_read_32)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_32b, u64 *); + void (*iw_hw_stat_read_64)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_64b, u64 *); + void (*iw_hw_stat_read_all)(struct i40iw_dev_pestat *, struct i40iw_dev_hw_stats *); + void (*iw_hw_stat_refresh_all)(struct i40iw_dev_pestat *); +}; + +struct i40iw_dev_pestat { + struct i40iw_hw *hw; + struct i40iw_device_pestat_ops ops; + struct i40iw_dev_hw_stats hw_stats; + struct i40iw_dev_hw_stats last_read_hw_stats; + struct i40iw_dev_hw_stat_offsets hw_stat_offsets; + struct timer_list stats_timer; + spinlock_t stats_lock; /* rdma stats lock */ +}; + +struct i40iw_hw { + u8 __iomem *hw_addr; + void *dev_context; + struct i40iw_hmc_info hmc; +}; + +struct i40iw_pfpdu { + struct list_head rxlist; + u32 rcv_nxt; + u32 fps; + u32 max_fpdu_data; + bool mode; + bool mpa_crc_err; + u64 total_ieq_bufs; + u64 fpdu_processed; + u64 bad_seq_num; + u64 crc_err; + u64 no_tx_bufs; + u64 tx_err; + u64 out_of_order; + u64 pmode_count; +}; + +struct i40iw_sc_pd { + u32 size; + struct i40iw_sc_dev *dev; + u16 pd_id; +}; + +struct i40iw_cqp_quanta { + u64 elem[I40IW_CQP_WQE_SIZE]; +}; + +struct i40iw_sc_cqp { + u32 size; + u64 sq_pa; + u64 host_ctx_pa; + void *back_cqp; + struct i40iw_sc_dev *dev; + enum i40iw_status_code (*process_cqp_sds)(struct i40iw_sc_dev *, + struct i40iw_update_sds_info *); + struct i40iw_dma_mem sdbuf; + struct i40iw_ring sq_ring; + struct 
i40iw_cqp_quanta *sq_base; + u64 *host_ctx; + u64 *scratch_array; + u32 cqp_id; + u32 sq_size; + u32 hw_sq_size; + u8 struct_ver; + u8 polarity; + bool en_datacenter_tcp; + u8 hmc_profile; + u8 enabled_vf_count; + u8 timeout_count; +}; + +struct i40iw_sc_aeq { + u32 size; + u64 aeq_elem_pa; + struct i40iw_sc_dev *dev; + struct i40iw_sc_aeqe *aeqe_base; + void *pbl_list; + u32 elem_cnt; + struct i40iw_ring aeq_ring; + bool virtual_map; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; + u8 polarity; +}; + +struct i40iw_sc_ceq { + u32 size; + u64 ceq_elem_pa; + struct i40iw_sc_dev *dev; + struct i40iw_ceqe *ceqe_base; + void *pbl_list; + u32 ceq_id; + u32 elem_cnt; + struct i40iw_ring ceq_ring; + bool virtual_map; + u8 pbl_chunk_size; + bool tph_en; + u8 tph_val; + u32 first_pm_pbl_idx; + u8 polarity; +}; + +struct i40iw_sc_cq { + struct i40iw_cq_uk cq_uk; + u64 cq_pa; + u64 shadow_area_pa; + struct i40iw_sc_dev *dev; + void *pbl_list; + void *back_cq; + u32 ceq_id; + u32 shadow_read_threshold; + bool ceqe_mask; + bool virtual_map; + u8 pbl_chunk_size; + u8 cq_type; + bool ceq_id_valid; + bool tph_en; + u8 tph_val; + u32 first_pm_pbl_idx; + bool check_overflow; +}; + +struct i40iw_sc_qp { + struct i40iw_qp_uk qp_uk; + u64 sq_pa; + u64 rq_pa; + u64 hw_host_ctx_pa; + u64 shadow_area_pa; + u64 q2_pa; + struct i40iw_sc_dev *dev; + struct i40iw_sc_pd *pd; + u64 *hw_host_ctx; + void *llp_stream_handle; + void *back_qp; + struct i40iw_pfpdu pfpdu; + u8 *q2_buf; + u64 qp_compl_ctx; + u16 qs_handle; + u16 exception_lan_queue; + u16 push_idx; + u8 sq_tph_val; + u8 rq_tph_val; + u8 qp_state; + u8 qp_type; + u8 hw_sq_size; + u8 hw_rq_size; + u8 src_mac_addr_idx; + bool sq_tph_en; + bool rq_tph_en; + bool rcv_tph_en; + bool xmit_tph_en; + bool virtual_map; + bool flush_sq; + bool flush_rq; + bool sq_flush; + enum i40iw_flush_opcode flush_code; + enum i40iw_term_eventtypes eventtype; + u8 term_flags; +}; + +struct i40iw_hmc_fpm_misc { + u32 max_ceqs; + u32 max_sds; + u32 xf_block_size; + u32 q1_block_size; + u32 ht_multiplier; + u32 timer_bucket; +}; + +struct i40iw_vchnl_if { + enum i40iw_status_code (*vchnl_recv)(struct i40iw_sc_dev *, u32, u8 *, u16); + enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *dev, u32, u8 *, u16); +}; + +#define I40IW_VCHNL_MAX_VF_MSG_SIZE 512 + +struct i40iw_vchnl_vf_msg_buffer { + struct i40iw_virtchnl_op_buf vchnl_msg; + char parm_buffer[I40IW_VCHNL_MAX_VF_MSG_SIZE - 1]; +}; + +struct i40iw_vfdev { + struct i40iw_sc_dev *pf_dev; + u8 *hmc_info_mem; + struct i40iw_dev_pestat dev_pestat; + struct i40iw_hmc_pble_info *pble_info; + struct i40iw_hmc_info hmc_info; + struct i40iw_vchnl_vf_msg_buffer vf_msg_buffer; + u64 fpm_query_buf_pa; + u64 *fpm_query_buf; + u32 vf_id; + u32 msg_count; + bool pf_hmc_initialized; + u16 pmf_index; + u16 iw_vf_idx; /* VF Device table index */ + bool stats_initialized; +}; + +struct i40iw_sc_dev { + struct list_head cqp_cmd_head; /* head of the CQP command list */ + spinlock_t cqp_lock; /* cqp list sync */ + struct i40iw_dev_uk dev_uk; + struct i40iw_dev_pestat dev_pestat; + struct i40iw_dma_mem vf_fpm_query_buf[I40IW_MAX_PE_ENABLED_VF_COUNT]; + u64 fpm_query_buf_pa; + u64 fpm_commit_buf_pa; + u64 *fpm_query_buf; + u64 *fpm_commit_buf; + void *back_dev; + struct i40iw_hw *hw; + u8 __iomem *db_addr; + struct i40iw_hmc_info *hmc_info; + struct i40iw_hmc_pble_info *pble_info; + struct i40iw_vfdev *vf_dev[I40IW_MAX_PE_ENABLED_VF_COUNT]; + struct i40iw_sc_cqp *cqp; + struct i40iw_sc_aeq *aeq; + struct i40iw_sc_ceq *ceq[I40IW_CEQ_MAX_COUNT]; + 
struct i40iw_sc_cq *ccq; + struct i40iw_cqp_ops *cqp_ops; + struct i40iw_ccq_ops *ccq_ops; + struct i40iw_ceq_ops *ceq_ops; + struct i40iw_aeq_ops *aeq_ops; + struct i40iw_pd_ops *iw_pd_ops; + struct i40iw_priv_qp_ops *iw_priv_qp_ops; + struct i40iw_priv_cq_ops *iw_priv_cq_ops; + struct i40iw_mr_ops *mr_ops; + struct i40iw_cqp_misc_ops *cqp_misc_ops; + struct i40iw_hmc_ops *hmc_ops; + struct i40iw_vchnl_if vchnl_if; + u32 ilq_count; + struct i40iw_virt_mem ilq_mem; + struct i40iw_puda_rsrc *ilq; + u32 ieq_count; + struct i40iw_virt_mem ieq_mem; + struct i40iw_puda_rsrc *ieq; + + struct i40iw_vf_cqp_ops *iw_vf_cqp_ops; + + struct i40iw_hmc_fpm_misc hmc_fpm_misc; + u16 qs_handle; + u32 debug_mask; + u16 exception_lan_queue; + u8 hmc_fn_id; + bool is_pf; + bool vchnl_up; + u8 vf_id; + u64 cqp_cmd_stats[OP_SIZE_CQP_STAT_ARRAY]; + struct i40iw_vchnl_vf_msg_buffer vchnl_vf_msg_buf; + u8 hw_rev; +}; + +struct i40iw_modify_cq_info { + u64 cq_pa; + struct i40iw_cqe *cq_base; + void *pbl_list; + u32 ceq_id; + u32 cq_size; + u32 shadow_read_threshold; + bool virtual_map; + u8 pbl_chunk_size; + bool check_overflow; + bool cq_resize; + bool ceq_change; + bool check_overflow_change; + u32 first_pm_pbl_idx; + bool ceq_valid; +}; + +struct i40iw_create_qp_info { + u8 next_iwarp_state; + bool ord_valid; + bool tcp_ctx_valid; + bool cq_num_valid; + bool static_rsrc; + bool arp_cache_idx_valid; +}; + +struct i40iw_modify_qp_info { + u64 rx_win0; + u64 rx_win1; + u16 new_mss; + u8 next_iwarp_state; + u8 termlen; + bool ord_valid; + bool tcp_ctx_valid; + bool cq_num_valid; + bool static_rsrc; + bool arp_cache_idx_valid; + bool reset_tcp_conn; + bool remove_hash_idx; + bool dont_send_term; + bool dont_send_fin; + bool cached_var_valid; + bool mss_change; + bool force_loopback; +}; + +struct i40iw_ccq_cqe_info { + struct i40iw_sc_cqp *cqp; + u64 scratch; + u32 op_ret_val; + u16 maj_err_code; + u16 min_err_code; + u8 op_code; + bool error; +}; + +struct i40iw_l2params { + u16 qs_handle_list[I40IW_MAX_USER_PRIORITY]; + u16 mss; +}; + +struct i40iw_device_init_info { + u64 fpm_query_buf_pa; + u64 fpm_commit_buf_pa; + u64 *fpm_query_buf; + u64 *fpm_commit_buf; + struct i40iw_hw *hw; + void __iomem *bar0; + enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *, u32, u8 *, u16); + u16 qs_handle; + u16 exception_lan_queue; + u8 hmc_fn_id; + bool is_pf; + u32 debug_mask; +}; + +enum i40iw_cqp_hmc_profile { + I40IW_HMC_PROFILE_DEFAULT = 1, + I40IW_HMC_PROFILE_FAVOR_VF = 2, + I40IW_HMC_PROFILE_EQUAL = 3, +}; + +struct i40iw_cqp_init_info { + u64 cqp_compl_ctx; + u64 host_ctx_pa; + u64 sq_pa; + struct i40iw_sc_dev *dev; + struct i40iw_cqp_quanta *sq; + u64 *host_ctx; + u64 *scratch_array; + u32 sq_size; + u8 struct_ver; + bool en_datacenter_tcp; + u8 hmc_profile; + u8 enabled_vf_count; +}; + +struct i40iw_ceq_init_info { + u64 ceqe_pa; + struct i40iw_sc_dev *dev; + u64 *ceqe_base; + void *pbl_list; + u32 elem_cnt; + u32 ceq_id; + bool virtual_map; + u8 pbl_chunk_size; + bool tph_en; + u8 tph_val; + u32 first_pm_pbl_idx; +}; + +struct i40iw_aeq_init_info { + u64 aeq_elem_pa; + struct i40iw_sc_dev *dev; + u32 *aeqe_base; + void *pbl_list; + u32 elem_cnt; + bool virtual_map; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; +}; + +struct i40iw_ccq_init_info { + u64 cq_pa; + u64 shadow_area_pa; + struct i40iw_sc_dev *dev; + struct i40iw_cqe *cq_base; + u64 *shadow_area; + void *pbl_list; + u32 num_elem; + u32 ceq_id; + u32 shadow_read_threshold; + bool ceqe_mask; + bool ceq_id_valid; + bool tph_en; + u8 tph_val; + bool 
avoid_mem_cflct; + bool virtual_map; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; +}; + +struct i40iwarp_offload_info { + u16 rcv_mark_offset; + u16 snd_mark_offset; + u16 pd_id; + u8 ddp_ver; + u8 rdmap_ver; + u8 ord_size; + u8 ird_size; + bool wr_rdresp_en; + bool rd_enable; + bool snd_mark_en; + bool rcv_mark_en; + bool bind_en; + bool fast_reg_en; + bool priv_mode_en; + bool lsmm_present; + u8 iwarp_mode; + bool align_hdrs; + bool rcv_no_mpa_crc; + + u8 last_byte_sent; +}; + +struct i40iw_tcp_offload_info { + bool ipv4; + bool no_nagle; + bool insert_vlan_tag; + bool time_stamp; + u8 cwnd_inc_limit; + bool drop_ooo_seg; + bool dup_ack_thresh; + u8 ttl; + u8 src_mac_addr_idx; + bool avoid_stretch_ack; + u8 tos; + u16 src_port; + u16 dst_port; + u32 dest_ip_addr0; + u32 dest_ip_addr1; + u32 dest_ip_addr2; + u32 dest_ip_addr3; + u32 snd_mss; + u16 vlan_tag; + u16 arp_idx; + u32 flow_label; + bool wscale; + u8 tcp_state; + u8 snd_wscale; + u8 rcv_wscale; + u32 time_stamp_recent; + u32 time_stamp_age; + u32 snd_nxt; + u32 snd_wnd; + u32 rcv_nxt; + u32 rcv_wnd; + u32 snd_max; + u32 snd_una; + u32 srtt; + u32 rtt_var; + u32 ss_thresh; + u32 cwnd; + u32 snd_wl1; + u32 snd_wl2; + u32 max_snd_window; + u8 rexmit_thresh; + u32 local_ipaddr0; + u32 local_ipaddr1; + u32 local_ipaddr2; + u32 local_ipaddr3; + bool ignore_tcp_opt; + bool ignore_tcp_uns_opt; +}; + +struct i40iw_qp_host_ctx_info { + u64 qp_compl_ctx; + struct i40iw_tcp_offload_info *tcp_info; + struct i40iwarp_offload_info *iwarp_info; + u32 send_cq_num; + u32 rcv_cq_num; + u16 push_idx; + bool push_mode_en; + bool tcp_info_valid; + bool iwarp_info_valid; + bool err_rq_idx_valid; + u16 err_rq_idx; +}; + +struct i40iw_aeqe_info { + u64 compl_ctx; + u32 qp_cq_id; + u16 ae_id; + u16 wqe_idx; + u8 tcp_state; + u8 iwarp_state; + bool qp; + bool cq; + bool sq; + bool in_rdrsp_wr; + bool out_rdrsp; + u8 q2_data_written; + bool aeqe_overflow; +}; + +struct i40iw_allocate_stag_info { + u64 total_len; + u32 chunk_size; + u32 stag_idx; + u32 page_size; + u16 pd_id; + u16 access_rights; + bool remote_access; + bool use_hmc_fcn_index; + u8 hmc_fcn_index; + bool use_pf_rid; +}; + +struct i40iw_reg_ns_stag_info { + u64 reg_addr_pa; + u64 fbo; + void *va; + u64 total_len; + u32 page_size; + u32 chunk_size; + u32 first_pm_pbl_index; + enum i40iw_addressing_type addr_type; + i40iw_stag_index stag_idx; + u16 access_rights; + u16 pd_id; + i40iw_stag_key stag_key; + bool use_hmc_fcn_index; + u8 hmc_fcn_index; + bool use_pf_rid; +}; + +struct i40iw_fast_reg_stag_info { + u64 wr_id; + u64 reg_addr_pa; + u64 fbo; + void *va; + u64 total_len; + u32 page_size; + u32 chunk_size; + u32 first_pm_pbl_index; + enum i40iw_addressing_type addr_type; + i40iw_stag_index stag_idx; + u16 access_rights; + u16 pd_id; + i40iw_stag_key stag_key; + bool local_fence; + bool read_fence; + bool signaled; + bool use_hmc_fcn_index; + u8 hmc_fcn_index; + bool use_pf_rid; + bool defer_flag; +}; + +struct i40iw_dealloc_stag_info { + u32 stag_idx; + u16 pd_id; + bool mr; + bool dealloc_pbl; +}; + +struct i40iw_register_shared_stag { + void *va; + enum i40iw_addressing_type addr_type; + i40iw_stag_index new_stag_idx; + i40iw_stag_index parent_stag_idx; + u32 access_rights; + u16 pd_id; + i40iw_stag_key new_stag_key; +}; + +struct i40iw_qp_init_info { + struct i40iw_qp_uk_init_info qp_uk_init_info; + struct i40iw_sc_pd *pd; + u64 *host_ctx; + u8 *q2; + u64 sq_pa; + u64 rq_pa; + u64 host_ctx_pa; + u64 q2_pa; + u64 shadow_area_pa; + u8 sq_tph_val; + u8 rq_tph_val; + u8 type; + bool 
sq_tph_en; + bool rq_tph_en; + bool rcv_tph_en; + bool xmit_tph_en; + bool virtual_map; +}; + +struct i40iw_cq_init_info { + struct i40iw_sc_dev *dev; + u64 cq_base_pa; + u64 shadow_area_pa; + u32 ceq_id; + u32 shadow_read_threshold; + bool virtual_map; + bool ceqe_mask; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; + bool ceq_id_valid; + bool tph_en; + u8 tph_val; + u8 type; + struct i40iw_cq_uk_init_info cq_uk_init_info; +}; + +struct i40iw_upload_context_info { + u64 buf_pa; + bool freeze_qp; + bool raw_format; + u32 qp_id; + u8 qp_type; +}; + +struct i40iw_add_arp_cache_entry_info { + u8 mac_addr[6]; + u32 reach_max; + u16 arp_index; + bool permanent; +}; + +struct i40iw_apbvt_info { + u16 port; + bool add; +}; + +enum i40iw_quad_entry_type { + I40IW_QHASH_TYPE_TCP_ESTABLISHED = 1, + I40IW_QHASH_TYPE_TCP_SYN, +}; + +enum i40iw_quad_hash_manage_type { + I40IW_QHASH_MANAGE_TYPE_DELETE = 0, + I40IW_QHASH_MANAGE_TYPE_ADD, + I40IW_QHASH_MANAGE_TYPE_MODIFY +}; + +struct i40iw_qhash_table_info { + enum i40iw_quad_hash_manage_type manage; + enum i40iw_quad_entry_type entry_type; + bool vlan_valid; + bool ipv4_valid; + u8 mac_addr[6]; + u16 vlan_id; + u16 qs_handle; + u32 qp_num; + u32 dest_ip[4]; + u32 src_ip[4]; + u32 dest_port; + u32 src_port; +}; + +struct i40iw_local_mac_ipaddr_entry_info { + u8 mac_addr[6]; + u8 entry_idx; +}; + +struct i40iw_cqp_manage_push_page_info { + u32 push_idx; + u16 qs_handle; + u8 free_page; +}; + +struct i40iw_qp_flush_info { + u16 sq_minor_code; + u16 sq_major_code; + u16 rq_minor_code; + u16 rq_major_code; + u16 ae_code; + u8 ae_source; + bool sq; + bool rq; + bool userflushcode; + bool generate_ae; +}; + +struct i40iw_cqp_commit_fpm_values { + u64 qp_base; + u64 cq_base; + u32 hte_base; + u32 arp_base; + u32 apbvt_inuse_base; + u32 mr_base; + u32 xf_base; + u32 xffl_base; + u32 q1_base; + u32 q1fl_base; + u32 fsimc_base; + u32 fsiav_base; + u32 pbl_base; + + u32 qp_cnt; + u32 cq_cnt; + u32 hte_cnt; + u32 arp_cnt; + u32 mr_cnt; + u32 xf_cnt; + u32 xffl_cnt; + u32 q1_cnt; + u32 q1fl_cnt; + u32 fsimc_cnt; + u32 fsiav_cnt; + u32 pbl_cnt; +}; + +struct i40iw_cqp_query_fpm_values { + u16 first_pe_sd_index; + u32 qp_objsize; + u32 cq_objsize; + u32 hte_objsize; + u32 arp_objsize; + u32 mr_objsize; + u32 xf_objsize; + u32 q1_objsize; + u32 fsimc_objsize; + u32 fsiav_objsize; + + u32 qp_max; + u32 cq_max; + u32 hte_max; + u32 arp_max; + u32 mr_max; + u32 xf_max; + u32 xffl_max; + u32 q1_max; + u32 q1fl_max; + u32 fsimc_max; + u32 fsiav_max; + u32 pbl_max; +}; + +struct i40iw_cqp_ops { + enum i40iw_status_code (*cqp_init)(struct i40iw_sc_cqp *, + struct i40iw_cqp_init_info *); + enum i40iw_status_code (*cqp_create)(struct i40iw_sc_cqp *, bool, u16 *, u16 *); + void (*cqp_post_sq)(struct i40iw_sc_cqp *); + u64 *(*cqp_get_next_send_wqe)(struct i40iw_sc_cqp *, u64 scratch); + enum i40iw_status_code (*cqp_destroy)(struct i40iw_sc_cqp *); + enum i40iw_status_code (*poll_for_cqp_op_done)(struct i40iw_sc_cqp *, u8, + struct i40iw_ccq_cqe_info *); +}; + +struct i40iw_ccq_ops { + enum i40iw_status_code (*ccq_init)(struct i40iw_sc_cq *, + struct i40iw_ccq_init_info *); + enum i40iw_status_code (*ccq_create)(struct i40iw_sc_cq *, u64, bool, bool); + enum i40iw_status_code (*ccq_destroy)(struct i40iw_sc_cq *, u64, bool); + enum i40iw_status_code (*ccq_create_done)(struct i40iw_sc_cq *); + enum i40iw_status_code (*ccq_get_cqe_info)(struct i40iw_sc_cq *, + struct i40iw_ccq_cqe_info *); + void (*ccq_arm)(struct i40iw_sc_cq *); +}; + +struct i40iw_ceq_ops { + enum i40iw_status_code 
(*ceq_init)(struct i40iw_sc_ceq *, + struct i40iw_ceq_init_info *); + enum i40iw_status_code (*ceq_create)(struct i40iw_sc_ceq *, u64, bool); + enum i40iw_status_code (*cceq_create_done)(struct i40iw_sc_ceq *); + enum i40iw_status_code (*cceq_destroy_done)(struct i40iw_sc_ceq *); + enum i40iw_status_code (*cceq_create)(struct i40iw_sc_ceq *, u64); + enum i40iw_status_code (*ceq_destroy)(struct i40iw_sc_ceq *, u64, bool); + void *(*process_ceq)(struct i40iw_sc_dev *, struct i40iw_sc_ceq *); +}; + +struct i40iw_aeq_ops { + enum i40iw_status_code (*aeq_init)(struct i40iw_sc_aeq *, + struct i40iw_aeq_init_info *); + enum i40iw_status_code (*aeq_create)(struct i40iw_sc_aeq *, u64, bool); + enum i40iw_status_code (*aeq_destroy)(struct i40iw_sc_aeq *, u64, bool); + enum i40iw_status_code (*get_next_aeqe)(struct i40iw_sc_aeq *, + struct i40iw_aeqe_info *); + enum i40iw_status_code (*repost_aeq_entries)(struct i40iw_sc_dev *, u32); + enum i40iw_status_code (*aeq_create_done)(struct i40iw_sc_aeq *); + enum i40iw_status_code (*aeq_destroy_done)(struct i40iw_sc_aeq *); +}; + +struct i40iw_pd_ops { + void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16); +}; + +struct i40iw_priv_qp_ops { + enum i40iw_status_code (*qp_init)(struct i40iw_sc_qp *, struct i40iw_qp_init_info *); + enum i40iw_status_code (*qp_create)(struct i40iw_sc_qp *, + struct i40iw_create_qp_info *, u64, bool); + enum i40iw_status_code (*qp_modify)(struct i40iw_sc_qp *, + struct i40iw_modify_qp_info *, u64, bool); + enum i40iw_status_code (*qp_destroy)(struct i40iw_sc_qp *, u64, bool, bool, bool); + enum i40iw_status_code (*qp_flush_wqes)(struct i40iw_sc_qp *, + struct i40iw_qp_flush_info *, u64, bool); + enum i40iw_status_code (*qp_upload_context)(struct i40iw_sc_dev *, + struct i40iw_upload_context_info *, + u64, bool); + enum i40iw_status_code (*qp_setctx)(struct i40iw_sc_qp *, u64 *, + struct i40iw_qp_host_ctx_info *); + + void (*qp_send_lsmm)(struct i40iw_sc_qp *, void *, u32, i40iw_stag); + void (*qp_send_lsmm_nostag)(struct i40iw_sc_qp *, void *, u32); + void (*qp_send_rtt)(struct i40iw_sc_qp *, bool); + enum i40iw_status_code (*qp_post_wqe0)(struct i40iw_sc_qp *, u8); +}; + +struct i40iw_priv_cq_ops { + enum i40iw_status_code (*cq_init)(struct i40iw_sc_cq *, struct i40iw_cq_init_info *); + enum i40iw_status_code (*cq_create)(struct i40iw_sc_cq *, u64, bool, bool); + enum i40iw_status_code (*cq_destroy)(struct i40iw_sc_cq *, u64, bool); + enum i40iw_status_code (*cq_modify)(struct i40iw_sc_cq *, + struct i40iw_modify_cq_info *, u64, bool); +}; + +struct i40iw_mr_ops { + enum i40iw_status_code (*alloc_stag)(struct i40iw_sc_dev *, + struct i40iw_allocate_stag_info *, u64, bool); + enum i40iw_status_code (*mr_reg_non_shared)(struct i40iw_sc_dev *, + struct i40iw_reg_ns_stag_info *, + u64, bool); + enum i40iw_status_code (*mr_reg_shared)(struct i40iw_sc_dev *, + struct i40iw_register_shared_stag *, + u64, bool); + enum i40iw_status_code (*dealloc_stag)(struct i40iw_sc_dev *, + struct i40iw_dealloc_stag_info *, + u64, bool); + enum i40iw_status_code (*query_stag)(struct i40iw_sc_dev *, u64, u32, bool); + enum i40iw_status_code (*mw_alloc)(struct i40iw_sc_dev *, u64, u32, u16, bool); +}; + +struct i40iw_cqp_misc_ops { + enum i40iw_status_code (*manage_push_page)(struct i40iw_sc_cqp *, + struct i40iw_cqp_manage_push_page_info *, + u64, bool); + enum i40iw_status_code (*manage_hmc_pm_func_table)(struct i40iw_sc_cqp *, + u64, u8, bool, bool); + enum i40iw_status_code (*set_hmc_resource_profile)(struct i40iw_sc_cqp *, + u64, 
u8, u8, bool, bool); + enum i40iw_status_code (*commit_fpm_values)(struct i40iw_sc_cqp *, u64, u8, + struct i40iw_dma_mem *, bool, u8); + enum i40iw_status_code (*query_fpm_values)(struct i40iw_sc_cqp *, u64, u8, + struct i40iw_dma_mem *, bool, u8); + enum i40iw_status_code (*static_hmc_pages_allocated)(struct i40iw_sc_cqp *, + u64, u8, bool, bool); + enum i40iw_status_code (*add_arp_cache_entry)(struct i40iw_sc_cqp *, + struct i40iw_add_arp_cache_entry_info *, + u64, bool); + enum i40iw_status_code (*del_arp_cache_entry)(struct i40iw_sc_cqp *, u64, u16, bool); + enum i40iw_status_code (*query_arp_cache_entry)(struct i40iw_sc_cqp *, u64, u16, bool); + enum i40iw_status_code (*manage_apbvt_entry)(struct i40iw_sc_cqp *, + struct i40iw_apbvt_info *, u64, bool); + enum i40iw_status_code (*manage_qhash_table_entry)(struct i40iw_sc_cqp *, + struct i40iw_qhash_table_info *, u64, bool); + enum i40iw_status_code (*alloc_local_mac_ipaddr_table_entry)(struct i40iw_sc_cqp *, u64, bool); + enum i40iw_status_code (*add_local_mac_ipaddr_entry)(struct i40iw_sc_cqp *, + struct i40iw_local_mac_ipaddr_entry_info *, + u64, bool); + enum i40iw_status_code (*del_local_mac_ipaddr_entry)(struct i40iw_sc_cqp *, u64, u8, u8, bool); + enum i40iw_status_code (*cqp_nop)(struct i40iw_sc_cqp *, u64, bool); + enum i40iw_status_code (*commit_fpm_values_done)(struct i40iw_sc_cqp + *); + enum i40iw_status_code (*query_fpm_values_done)(struct i40iw_sc_cqp *); + enum i40iw_status_code (*manage_hmc_pm_func_table_done)(struct i40iw_sc_cqp *); + enum i40iw_status_code (*update_suspend_qp)(struct i40iw_sc_cqp *, struct i40iw_sc_qp *, u64); + enum i40iw_status_code (*update_resume_qp)(struct i40iw_sc_cqp *, struct i40iw_sc_qp *, u64); +}; + +struct i40iw_hmc_ops { + enum i40iw_status_code (*init_iw_hmc)(struct i40iw_sc_dev *, u8); + enum i40iw_status_code (*parse_fpm_query_buf)(u64 *, struct i40iw_hmc_info *, + struct i40iw_hmc_fpm_misc *); + enum i40iw_status_code (*configure_iw_fpm)(struct i40iw_sc_dev *, u8); + enum i40iw_status_code (*parse_fpm_commit_buf)(u64 *, struct i40iw_hmc_obj_info *); + enum i40iw_status_code (*create_hmc_object)(struct i40iw_sc_dev *dev, + struct i40iw_hmc_create_obj_info *); + enum i40iw_status_code (*del_hmc_object)(struct i40iw_sc_dev *dev, + struct i40iw_hmc_del_obj_info *, + bool reset); + enum i40iw_status_code (*pf_init_vfhmc)(struct i40iw_sc_dev *, u8, u32 *); + enum i40iw_status_code (*vf_configure_vffpm)(struct i40iw_sc_dev *, u32 *); +}; + +struct cqp_info { + union { + struct { + struct i40iw_sc_qp *qp; + struct i40iw_create_qp_info info; + u64 scratch; + } qp_create; + + struct { + struct i40iw_sc_qp *qp; + struct i40iw_modify_qp_info info; + u64 scratch; + } qp_modify; + + struct { + struct i40iw_sc_qp *qp; + u64 scratch; + bool remove_hash_idx; + bool ignore_mw_bnd; + } qp_destroy; + + struct { + struct i40iw_sc_cq *cq; + u64 scratch; + bool check_overflow; + } cq_create; + + struct { + struct i40iw_sc_cq *cq; + u64 scratch; + } cq_destroy; + + struct { + struct i40iw_sc_dev *dev; + struct i40iw_allocate_stag_info info; + u64 scratch; + } alloc_stag; + + struct { + struct i40iw_sc_dev *dev; + u64 scratch; + u32 mw_stag_index; + u16 pd_id; + } mw_alloc; + + struct { + struct i40iw_sc_dev *dev; + struct i40iw_reg_ns_stag_info info; + u64 scratch; + } mr_reg_non_shared; + + struct { + struct i40iw_sc_dev *dev; + struct i40iw_dealloc_stag_info info; + u64 scratch; + } dealloc_stag; + + struct { + struct i40iw_sc_cqp *cqp; + struct i40iw_local_mac_ipaddr_entry_info info; + u64 scratch; + } 
add_local_mac_ipaddr_entry; + + struct { + struct i40iw_sc_cqp *cqp; + struct i40iw_add_arp_cache_entry_info info; + u64 scratch; + } add_arp_cache_entry; + + struct { + struct i40iw_sc_cqp *cqp; + u64 scratch; + u8 entry_idx; + u8 ignore_ref_count; + } del_local_mac_ipaddr_entry; + + struct { + struct i40iw_sc_cqp *cqp; + u64 scratch; + u16 arp_index; + } del_arp_cache_entry; + + struct { + struct i40iw_sc_cqp *cqp; + struct i40iw_manage_vf_pble_info info; + u64 scratch; + } manage_vf_pble_bp; + + struct { + struct i40iw_sc_cqp *cqp; + struct i40iw_cqp_manage_push_page_info info; + u64 scratch; + } manage_push_page; + + struct { + struct i40iw_sc_dev *dev; + struct i40iw_upload_context_info info; + u64 scratch; + } qp_upload_context; + + struct { + struct i40iw_sc_cqp *cqp; + u64 scratch; + } alloc_local_mac_ipaddr_entry; + + struct { + struct i40iw_sc_dev *dev; + struct i40iw_hmc_fcn_info info; + u64 scratch; + } manage_hmc_pm; + + struct { + struct i40iw_sc_ceq *ceq; + u64 scratch; + } ceq_create; + + struct { + struct i40iw_sc_ceq *ceq; + u64 scratch; + } ceq_destroy; + + struct { + struct i40iw_sc_aeq *aeq; + u64 scratch; + } aeq_create; + + struct { + struct i40iw_sc_aeq *aeq; + u64 scratch; + } aeq_destroy; + + struct { + struct i40iw_sc_qp *qp; + struct i40iw_qp_flush_info info; + u64 scratch; + } qp_flush_wqes; + + struct { + struct i40iw_sc_cqp *cqp; + void *fpm_values_va; + u64 fpm_values_pa; + u8 hmc_fn_id; + u64 scratch; + } query_fpm_values; + + struct { + struct i40iw_sc_cqp *cqp; + void *fpm_values_va; + u64 fpm_values_pa; + u8 hmc_fn_id; + u64 scratch; + } commit_fpm_values; + + struct { + struct i40iw_sc_cqp *cqp; + struct i40iw_apbvt_info info; + u64 scratch; + } manage_apbvt_entry; + + struct { + struct i40iw_sc_cqp *cqp; + struct i40iw_qhash_table_info info; + u64 scratch; + } manage_qhash_table_entry; + + struct { + struct i40iw_sc_dev *dev; + struct i40iw_update_sds_info info; + u64 scratch; + } update_pe_sds; + + struct { + struct i40iw_sc_cqp *cqp; + struct i40iw_sc_qp *qp; + u64 scratch; + } suspend_resume; + } u; +}; + +struct cqp_commands_info { + struct list_head cqp_cmd_entry; + u8 cqp_cmd; + u8 post_sq; + struct cqp_info in; +}; + +struct i40iw_virtchnl_work_info { + void (*callback_fcn)(void *vf_dev); + void *worker_vf_dev; +}; + +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_ucontext.h b/drivers/infiniband/hw/i40iw/i40iw_ucontext.h new file mode 100644 index 000000000..12acd688d --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_ucontext.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2006 - 2016 Intel Corporation. All rights reserved. + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef I40IW_USER_CONTEXT_H +#define I40IW_USER_CONTEXT_H + +#include <linux/types.h> + +#define I40IW_ABI_USERSPACE_VER 4 +#define I40IW_ABI_KERNEL_VER 4 +struct i40iw_alloc_ucontext_req { + __u32 reserved32; + __u8 userspace_ver; + __u8 reserved8[3]; +}; + +struct i40iw_alloc_ucontext_resp { + __u32 max_pds; /* maximum pds allowed for this user process */ + __u32 max_qps; /* maximum qps allowed for this user process */ + __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */ + __u8 kernel_ver; + __u8 reserved[3]; +}; + +struct i40iw_alloc_pd_resp { + __u32 pd_id; + __u8 reserved[4]; +}; + +struct i40iw_create_cq_req { + __u64 user_cq_buffer; + __u64 user_shadow_area; +}; + +struct i40iw_create_qp_req { + __u64 user_wqe_buffers; + __u64 user_compl_ctx; + + /* UDA QP PHB */ + __u64 user_sq_phb; /* place for VA of the sq phb buff */ + __u64 user_rq_phb; /* place for VA of the rq phb buff */ +}; + +enum i40iw_memreg_type { + IW_MEMREG_TYPE_MEM = 0x0000, + IW_MEMREG_TYPE_QP = 0x0001, + IW_MEMREG_TYPE_CQ = 0x0002, +}; + +struct i40iw_mem_reg_req { + __u16 reg_type; /* Memory, QP or CQ */ + __u16 cq_pages; + __u16 rq_pages; + __u16 sq_pages; +}; + +struct i40iw_create_cq_resp { + __u32 cq_id; + __u32 cq_size; + __u32 mmap_db_index; + __u32 reserved; +}; + +struct i40iw_create_qp_resp { + __u32 qp_id; + __u32 actual_sq_size; + __u32 actual_rq_size; + __u32 i40iw_drv_opt; + __u16 push_idx; + __u8 lsmm; + __u8 rsvd2; +}; + +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c new file mode 100644 index 000000000..f78c3dc8b --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c @@ -0,0 +1,1204 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include "i40iw_osdep.h" +#include "i40iw_status.h" +#include "i40iw_d.h" +#include "i40iw_user.h" +#include "i40iw_register.h" + +static u32 nop_signature = 0x55550000; + +/** + * i40iw_nop_1 - insert a nop wqe and move head. no post work + * @qp: hw qp ptr + */ +static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp) +{ + u64 header, *wqe; + u64 *wqe_0 = NULL; + u32 wqe_idx, peek_head; + bool signaled = false; + + if (!qp->sq_ring.head) + return I40IW_ERR_PARAM; + + wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); + wqe = qp->sq_base[wqe_idx].elem; + peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size; + wqe_0 = qp->sq_base[peek_head].elem; + if (peek_head) + wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID); + else + wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + set_64bit_val(wqe, 0, 0); + set_64bit_val(wqe, 8, 0); + set_64bit_val(wqe, 16, 0); + + header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) | + LS_64(signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++; + + wmb(); /* Memory barrier to ensure data is written before valid bit is set */ + + set_64bit_val(wqe, 24, header); + return 0; +} + +/** + * i40iw_qp_post_wr - post wr to hrdware + * @qp: hw qp ptr + */ +void i40iw_qp_post_wr(struct i40iw_qp_uk *qp) +{ + u64 temp; + u32 hw_sq_tail; + u32 sw_sq_head; + + mb(); /* valid bit is written and loads completed before reading shadow */ + + /* read the doorbell shadow area */ + get_64bit_val(qp->shadow_area, 0, &temp); + + hw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL); + sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); + if (sw_sq_head != hw_sq_tail) { + if (sw_sq_head > qp->initial_ring.head) { + if ((hw_sq_tail >= qp->initial_ring.head) && + (hw_sq_tail < sw_sq_head)) { + writel(qp->qp_id, qp->wqe_alloc_reg); + } + } else if (sw_sq_head != qp->initial_ring.head) { + if ((hw_sq_tail >= qp->initial_ring.head) || + (hw_sq_tail < sw_sq_head)) { + writel(qp->qp_id, qp->wqe_alloc_reg); + } + } + } + + qp->initial_ring.head = qp->sq_ring.head; +} + +/** + * i40iw_qp_ring_push_db - ring qp doorbell + * @qp: hw qp ptr + * @wqe_idx: wqe index + */ +static void i40iw_qp_ring_push_db(struct i40iw_qp_uk *qp, u32 wqe_idx) +{ + set_32bit_val(qp->push_db, 0, LS_32((wqe_idx >> 2), I40E_PFPE_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id); + qp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); +} + +/** + * i40iw_qp_get_next_send_wqe - return next wqe ptr + * @qp: hw qp ptr + * @wqe_idx: return wqe index + * @wqe_size: size of sq wqe + */ +u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp, + u32 *wqe_idx, + u8 wqe_size) +{ + u64 *wqe = NULL; + u64 wqe_ptr; + u32 peek_head = 0; + u16 offset; + enum i40iw_status_code ret_code = 0; + u8 nop_wqe_cnt = 0, i; + u64 *wqe_0 = NULL; + + *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); + + if (!*wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + wqe_ptr = (uintptr_t)qp->sq_base[*wqe_idx].elem; + offset = 
(u16)(wqe_ptr) & 0x7F; + if ((offset + wqe_size) > I40IW_QP_WQE_MAX_SIZE) { + nop_wqe_cnt = (u8)(I40IW_QP_WQE_MAX_SIZE - offset) / I40IW_QP_WQE_MIN_SIZE; + for (i = 0; i < nop_wqe_cnt; i++) { + i40iw_nop_1(qp); + I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code); + if (ret_code) + return NULL; + } + + *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); + if (!*wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + } + for (i = 0; i < wqe_size / I40IW_QP_WQE_MIN_SIZE; i++) { + I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code); + if (ret_code) + return NULL; + } + + wqe = qp->sq_base[*wqe_idx].elem; + + peek_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); + wqe_0 = qp->sq_base[peek_head].elem; + if (peek_head & 0x3) + wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID); + return wqe; +} + +/** + * i40iw_set_fragment - set fragment in wqe + * @wqe: wqe for setting fragment + * @offset: offset value + * @sge: sge length and stag + */ +static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge) +{ + if (sge) { + set_64bit_val(wqe, offset, LS_64(sge->tag_off, I40IWQPSQ_FRAG_TO)); + set_64bit_val(wqe, (offset + 8), + (LS_64(sge->len, I40IWQPSQ_FRAG_LEN) | + LS_64(sge->stag, I40IWQPSQ_FRAG_STAG))); + } +} + +/** + * i40iw_qp_get_next_recv_wqe - get next qp's rcv wqe + * @qp: hw qp ptr + * @wqe_idx: return wqe index + */ +u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx) +{ + u64 *wqe = NULL; + enum i40iw_status_code ret_code; + + if (I40IW_RING_FULL_ERR(qp->rq_ring)) + return NULL; + + I40IW_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code); + if (ret_code) + return NULL; + if (!*wqe_idx) + qp->rwqe_polarity = !qp->rwqe_polarity; + /* rq_wqe_size_multiplier is no of qwords in one rq wqe */ + wqe = qp->rq_base[*wqe_idx * (qp->rq_wqe_size_multiplier >> 2)].elem; + + return wqe; +} + +/** + * i40iw_rdma_write - rdma write operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp, + struct i40iw_post_sq_info *info, + bool post_sq) +{ + u64 header; + u64 *wqe; + struct i40iw_rdma_write *op_info; + u32 i, wqe_idx; + u32 total_size = 0, byte_off; + enum i40iw_status_code ret_code; + bool read_fence = false; + u8 wqe_size; + + op_info = &info->op.rdma_write; + if (op_info->num_lo_sges > qp->max_sq_frag_cnt) + return I40IW_ERR_INVALID_FRAG_COUNT; + + for (i = 0; i < op_info->num_lo_sges; i++) + total_size += op_info->lo_sg_list[i].len; + + if (total_size > I40IW_MAX_OUTBOUND_MESSAGE_SIZE) + return I40IW_ERR_QP_INVALID_MSG_SIZE; + + read_fence |= info->read_fence; + + ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_lo_sges, &wqe_size); + if (ret_code) + return ret_code; + + wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + + qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id; + qp->sq_wrtrk_array[wqe_idx].wr_len = total_size; + set_64bit_val(wqe, 16, + LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO)); + if (!op_info->rem_addr.stag) + return I40IW_ERR_BAD_STAG; + + header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) | + LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) | + LS_64((op_info->num_lo_sges > 1 ? 
(op_info->num_lo_sges - 1) : 0), I40IWQPSQ_ADDFRAGCNT) | + LS_64(read_fence, I40IWQPSQ_READFENCE) | + LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) | + LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + i40iw_set_fragment(wqe, 0, op_info->lo_sg_list); + + for (i = 1; i < op_info->num_lo_sges; i++) { + byte_off = 32 + (i - 1) * 16; + i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]); + } + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + + if (post_sq) + i40iw_qp_post_wr(qp); + + return 0; +} + +/** + * i40iw_rdma_read - rdma read command + * @qp: hw qp ptr + * @info: post sq information + * @inv_stag: flag for inv_stag + * @post_sq: flag to post sq + */ +static enum i40iw_status_code i40iw_rdma_read(struct i40iw_qp_uk *qp, + struct i40iw_post_sq_info *info, + bool inv_stag, + bool post_sq) +{ + u64 *wqe; + struct i40iw_rdma_read *op_info; + u64 header; + u32 wqe_idx; + enum i40iw_status_code ret_code; + u8 wqe_size; + bool local_fence = false; + + op_info = &info->op.rdma_read; + ret_code = i40iw_fragcnt_to_wqesize_sq(1, &wqe_size); + if (ret_code) + return ret_code; + wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + + qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id; + qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->lo_addr.len; + local_fence |= info->local_fence; + + set_64bit_val(wqe, 16, LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO)); + header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) | + LS_64((inv_stag ? I40IWQP_OP_RDMA_READ_LOC_INV : I40IWQP_OP_RDMA_READ), I40IWQPSQ_OPCODE) | + LS_64(info->read_fence, I40IWQPSQ_READFENCE) | + LS_64(local_fence, I40IWQPSQ_LOCALFENCE) | + LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + i40iw_set_fragment(wqe, 0, &op_info->lo_addr); + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + if (post_sq) + i40iw_qp_post_wr(qp); + + return 0; +} + +/** + * i40iw_send - rdma send command + * @qp: hw qp ptr + * @info: post sq information + * @stag_to_inv: stag_to_inv value + * @post_sq: flag to post sq + */ +static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp, + struct i40iw_post_sq_info *info, + u32 stag_to_inv, + bool post_sq) +{ + u64 *wqe; + struct i40iw_post_send *op_info; + u64 header; + u32 i, wqe_idx, total_size = 0, byte_off; + enum i40iw_status_code ret_code; + bool read_fence = false; + u8 wqe_size; + + op_info = &info->op.send; + if (qp->max_sq_frag_cnt < op_info->num_sges) + return I40IW_ERR_INVALID_FRAG_COUNT; + + for (i = 0; i < op_info->num_sges; i++) + total_size += op_info->sg_list[i].len; + ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_sges, &wqe_size); + if (ret_code) + return ret_code; + + wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + + read_fence |= info->read_fence; + qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id; + qp->sq_wrtrk_array[wqe_idx].wr_len = total_size; + set_64bit_val(wqe, 16, 0); + header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) | + LS_64(info->op_type, I40IWQPSQ_OPCODE) | + LS_64((op_info->num_sges > 1 ? 
(op_info->num_sges - 1) : 0), + I40IWQPSQ_ADDFRAGCNT) | + LS_64(read_fence, I40IWQPSQ_READFENCE) | + LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) | + LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + i40iw_set_fragment(wqe, 0, op_info->sg_list); + + for (i = 1; i < op_info->num_sges; i++) { + byte_off = 32 + (i - 1) * 16; + i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]); + } + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + if (post_sq) + i40iw_qp_post_wr(qp); + + return 0; +} + +/** + * i40iw_inline_rdma_write - inline rdma write operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp, + struct i40iw_post_sq_info *info, + bool post_sq) +{ + u64 *wqe; + u8 *dest, *src; + struct i40iw_inline_rdma_write *op_info; + u64 *push; + u64 header = 0; + u32 i, wqe_idx; + enum i40iw_status_code ret_code; + bool read_fence = false; + u8 wqe_size; + + op_info = &info->op.inline_rdma_write; + if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE) + return I40IW_ERR_INVALID_IMM_DATA_SIZE; + + ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size); + if (ret_code) + return ret_code; + + wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + + read_fence |= info->read_fence; + qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id; + qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->len; + set_64bit_val(wqe, 16, + LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO)); + + header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) | + LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) | + LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) | + LS_64(1, I40IWQPSQ_INLINEDATAFLAG) | + LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) | + LS_64(read_fence, I40IWQPSQ_READFENCE) | + LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) | + LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + dest = (u8 *)wqe; + src = (u8 *)(op_info->data); + + if (op_info->len <= 16) { + for (i = 0; i < op_info->len; i++, src++, dest++) + *dest = *src; + } else { + for (i = 0; i < 16; i++, src++, dest++) + *dest = *src; + dest = (u8 *)wqe + 32; + for (; i < op_info->len; i++, src++, dest++) + *dest = *src; + } + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + + if (qp->push_db) { + push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20); + memcpy(push, wqe, (op_info->len > 16) ? 
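/* copy the WQE into the push page: the first 32-byte quantum always, plus the inline data that spills past byte 32 when more than 16 bytes were inlined */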
op_info->len + 16 : 32); + i40iw_qp_ring_push_db(qp, wqe_idx); + } else { + if (post_sq) + i40iw_qp_post_wr(qp); + } + + return 0; +} + +/** + * i40iw_inline_send - inline send operation + * @qp: hw qp ptr + * @info: post sq information + * @stag_to_inv: remote stag + * @post_sq: flag to post sq + */ +static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp, + struct i40iw_post_sq_info *info, + u32 stag_to_inv, + bool post_sq) +{ + u64 *wqe; + u8 *dest, *src; + struct i40iw_post_inline_send *op_info; + u64 header; + u32 wqe_idx, i; + enum i40iw_status_code ret_code; + bool read_fence = false; + u8 wqe_size; + u64 *push; + + op_info = &info->op.inline_send; + if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE) + return I40IW_ERR_INVALID_IMM_DATA_SIZE; + + ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size); + if (ret_code) + return ret_code; + + wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + + read_fence |= info->read_fence; + + qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id; + qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->len; + header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) | + LS_64(info->op_type, I40IWQPSQ_OPCODE) | + LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) | + LS_64(1, I40IWQPSQ_INLINEDATAFLAG) | + LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) | + LS_64(read_fence, I40IWQPSQ_READFENCE) | + LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) | + LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + dest = (u8 *)wqe; + src = (u8 *)(op_info->data); + + if (op_info->len <= 16) { + for (i = 0; i < op_info->len; i++, src++, dest++) + *dest = *src; + } else { + for (i = 0; i < 16; i++, src++, dest++) + *dest = *src; + dest = (u8 *)wqe + 32; + for (; i < op_info->len; i++, src++, dest++) + *dest = *src; + } + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + + if (qp->push_db) { + push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20); + memcpy(push, wqe, (op_info->len > 16) ? 
op_info->len + 16 : 32); + i40iw_qp_ring_push_db(qp, wqe_idx); + } else { + if (post_sq) + i40iw_qp_post_wr(qp); + } + + return 0; +} + +/** + * i40iw_stag_local_invalidate - stag invalidate operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +static enum i40iw_status_code i40iw_stag_local_invalidate(struct i40iw_qp_uk *qp, + struct i40iw_post_sq_info *info, + bool post_sq) +{ + u64 *wqe; + struct i40iw_inv_local_stag *op_info; + u64 header; + u32 wqe_idx; + bool local_fence = false; + + op_info = &info->op.inv_local_stag; + local_fence = info->local_fence; + + wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + + qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id; + qp->sq_wrtrk_array[wqe_idx].wr_len = 0; + set_64bit_val(wqe, 0, 0); + set_64bit_val(wqe, 8, + LS_64(op_info->target_stag, I40IWQPSQ_LOCSTAG)); + set_64bit_val(wqe, 16, 0); + header = LS_64(I40IW_OP_TYPE_INV_STAG, I40IWQPSQ_OPCODE) | + LS_64(info->read_fence, I40IWQPSQ_READFENCE) | + LS_64(local_fence, I40IWQPSQ_LOCALFENCE) | + LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + + if (post_sq) + i40iw_qp_post_wr(qp); + + return 0; +} + +/** + * i40iw_mw_bind - Memory Window bind operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +static enum i40iw_status_code i40iw_mw_bind(struct i40iw_qp_uk *qp, + struct i40iw_post_sq_info *info, + bool post_sq) +{ + u64 *wqe; + struct i40iw_bind_window *op_info; + u64 header; + u32 wqe_idx; + bool local_fence = false; + + op_info = &info->op.bind_window; + + local_fence |= info->local_fence; + wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + + qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id; + qp->sq_wrtrk_array[wqe_idx].wr_len = 0; + set_64bit_val(wqe, 0, (uintptr_t)op_info->va); + set_64bit_val(wqe, 8, + LS_64(op_info->mr_stag, I40IWQPSQ_PARENTMRSTAG) | + LS_64(op_info->mw_stag, I40IWQPSQ_MWSTAG)); + set_64bit_val(wqe, 16, op_info->bind_length); + header = LS_64(I40IW_OP_TYPE_BIND_MW, I40IWQPSQ_OPCODE) | + LS_64(((op_info->enable_reads << 2) | + (op_info->enable_writes << 3)), + I40IWQPSQ_STAGRIGHTS) | + LS_64((op_info->addressing_type == I40IW_ADDR_TYPE_VA_BASED ? 1 : 0), + I40IWQPSQ_VABASEDTO) | + LS_64(info->read_fence, I40IWQPSQ_READFENCE) | + LS_64(local_fence, I40IWQPSQ_LOCALFENCE) | + LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + + if (post_sq) + i40iw_qp_post_wr(qp); + + return 0; +} + +/** + * i40iw_post_receive - post receive wqe + * @qp: hw qp ptr + * @info: post rq information + */ +static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp, + struct i40iw_post_rq_info *info) +{ + u64 *wqe; + u64 header; + u32 total_size = 0, wqe_idx, i, byte_off; + + if (qp->max_rq_frag_cnt < info->num_sges) + return I40IW_ERR_INVALID_FRAG_COUNT; + for (i = 0; i < info->num_sges; i++) + total_size += info->sg_list[i].len; + wqe = i40iw_qp_get_next_recv_wqe(qp, &wqe_idx); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + + qp->rq_wrid_array[wqe_idx] = info->wr_id; + set_64bit_val(wqe, 16, 0); + + header = LS_64((info->num_sges > 1 ? 
(info->num_sges - 1) : 0), + I40IWQPSQ_ADDFRAGCNT) | + LS_64(qp->rwqe_polarity, I40IWQPSQ_VALID); + + i40iw_set_fragment(wqe, 0, info->sg_list); + + for (i = 1; i < info->num_sges; i++) { + byte_off = 32 + (i - 1) * 16; + i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]); + } + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + + return 0; +} + +/** + * i40iw_cq_request_notification - cq notification request (door bell) + * @cq: hw cq + * @cq_notify: notification type + */ +static void i40iw_cq_request_notification(struct i40iw_cq_uk *cq, + enum i40iw_completion_notify cq_notify) +{ + u64 temp_val; + u16 sw_cq_sel; + u8 arm_next_se = 0; + u8 arm_next = 0; + u8 arm_seq_num; + + get_64bit_val(cq->shadow_area, 32, &temp_val); + arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM); + arm_seq_num++; + + sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT); + arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE); + arm_next_se |= 1; + if (cq_notify == IW_CQ_COMPL_EVENT) + arm_next = 1; + temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) | + LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) | + LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) | + LS_64(arm_next, I40IW_CQ_DBSA_ARM_NEXT); + + set_64bit_val(cq->shadow_area, 32, temp_val); + + wmb(); /* make sure WQE is populated before valid bit is set */ + + writel(cq->cq_id, cq->cqe_alloc_reg); +} + +/** + * i40iw_cq_post_entries - update tail in shadow memory + * @cq: hw cq + * @count: # of entries processed + */ +static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq, + u8 count) +{ + I40IW_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count); + set_64bit_val(cq->shadow_area, 0, + I40IW_RING_GETCURRENT_HEAD(cq->cq_ring)); + return 0; +} + +/** + * i40iw_cq_poll_completion - get cq completion info + * @cq: hw cq + * @info: cq poll information returned + * @post_cq: update cq tail + */ +static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq, + struct i40iw_cq_poll_info *info, + bool post_cq) +{ + u64 comp_ctx, qword0, qword2, qword3, wqe_qword; + u64 *cqe, *sw_wqe; + struct i40iw_qp_uk *qp; + struct i40iw_ring *pring = NULL; + u32 wqe_idx, q_type, array_idx = 0; + enum i40iw_status_code ret_code = 0; + enum i40iw_status_code ret_code2 = 0; + bool move_cq_head = true; + u8 polarity; + u8 addl_frag_cnt, addl_wqes = 0; + + if (cq->avoid_mem_cflct) + cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq); + else + cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq); + + get_64bit_val(cqe, 24, &qword3); + polarity = (u8)RS_64(qword3, I40IW_CQ_VALID); + + if (polarity != cq->polarity) + return I40IW_ERR_QUEUE_EMPTY; + + q_type = (u8)RS_64(qword3, I40IW_CQ_SQ); + info->error = (bool)RS_64(qword3, I40IW_CQ_ERROR); + info->push_dropped = (bool)RS_64(qword3, I40IWCQ_PSHDROP); + if (info->error) { + info->comp_status = I40IW_COMPL_STATUS_FLUSHED; + info->major_err = (bool)RS_64(qword3, I40IW_CQ_MAJERR); + info->minor_err = (bool)RS_64(qword3, I40IW_CQ_MINERR); + } else { + info->comp_status = I40IW_COMPL_STATUS_SUCCESS; + } + + get_64bit_val(cqe, 0, &qword0); + get_64bit_val(cqe, 16, &qword2); + + info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM); + + info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID); + + get_64bit_val(cqe, 8, &comp_ctx); + + info->solicited_event = (bool)RS_64(qword3, I40IWCQ_SOEVENT); + info->is_srq = (bool)RS_64(qword3, I40IWCQ_SRQ); + + qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx; + wqe_idx = (u32)RS_64(qword3, 
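/* the CQE records which SQ/RQ WQE produced this completion; the index extracted here is used below to recover the posted wr_id from sq_wrtrk_array or rq_wrid_array */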
I40IW_CQ_WQEIDX); + info->qp_handle = (i40iw_qp_handle)(unsigned long)qp; + + if (q_type == I40IW_CQE_QTYPE_RQ) { + array_idx = (wqe_idx * 4) / qp->rq_wqe_size_multiplier; + if (info->comp_status == I40IW_COMPL_STATUS_FLUSHED) { + info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail]; + array_idx = qp->rq_ring.tail; + } else { + info->wr_id = qp->rq_wrid_array[array_idx]; + } + + info->op_type = I40IW_OP_TYPE_REC; + if (qword3 & I40IWCQ_STAG_MASK) { + info->stag_invalid_set = true; + info->inv_stag = (u32)RS_64(qword2, I40IWCQ_INVSTAG); + } else { + info->stag_invalid_set = false; + } + info->bytes_xfered = (u32)RS_64(qword0, I40IWCQ_PAYLDLEN); + I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1); + pring = &qp->rq_ring; + } else { + if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) { + info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid; + info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len; + + info->op_type = (u8)RS_64(qword3, I40IWCQ_OP); + sw_wqe = qp->sq_base[wqe_idx].elem; + get_64bit_val(sw_wqe, 24, &wqe_qword); + addl_frag_cnt = + (u8)RS_64(wqe_qword, I40IWQPSQ_ADDFRAGCNT); + i40iw_fragcnt_to_wqesize_sq(addl_frag_cnt + 1, &addl_wqes); + + addl_wqes = (addl_wqes / I40IW_QP_WQE_MIN_SIZE); + I40IW_RING_SET_TAIL(qp->sq_ring, (wqe_idx + addl_wqes)); + } else { + do { + u8 op_type; + u32 tail; + + tail = qp->sq_ring.tail; + sw_wqe = qp->sq_base[tail].elem; + get_64bit_val(sw_wqe, 24, &wqe_qword); + op_type = (u8)RS_64(wqe_qword, I40IWQPSQ_OPCODE); + info->op_type = op_type; + addl_frag_cnt = (u8)RS_64(wqe_qword, I40IWQPSQ_ADDFRAGCNT); + i40iw_fragcnt_to_wqesize_sq(addl_frag_cnt + 1, &addl_wqes); + addl_wqes = (addl_wqes / I40IW_QP_WQE_MIN_SIZE); + I40IW_RING_SET_TAIL(qp->sq_ring, (tail + addl_wqes)); + if (op_type != I40IWQP_OP_NOP) { + info->wr_id = qp->sq_wrtrk_array[tail].wrid; + info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len; + break; + } + } while (1); + } + pring = &qp->sq_ring; + } + + ret_code = 0; + + if (!ret_code && + (info->comp_status == I40IW_COMPL_STATUS_FLUSHED)) + if (pring && (I40IW_RING_MORE_WORK(*pring))) + move_cq_head = false; + + if (move_cq_head) { + I40IW_RING_MOVE_HEAD(cq->cq_ring, ret_code2); + + if (ret_code2 && !ret_code) + ret_code = ret_code2; + + if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0) + cq->polarity ^= 1; + + if (post_cq) { + I40IW_RING_MOVE_TAIL(cq->cq_ring); + set_64bit_val(cq->shadow_area, 0, + I40IW_RING_GETCURRENT_HEAD(cq->cq_ring)); + } + } else { + if (info->is_srq) + return ret_code; + qword3 &= ~I40IW_CQ_WQEIDX_MASK; + qword3 |= LS_64(pring->tail, I40IW_CQ_WQEIDX); + set_64bit_val(cqe, 24, qword3); + } + + return ret_code; +} + +/** + * i40iw_get_wqe_shift - get shift count for maximum wqe size + * @wqdepth: depth of wq required. + * @sge: Maximum Scatter Gather Elements wqe + * @shift: Returns the shift needed based on sge + * + * Shift can be used to left shift the wqe size based on sge. + * If sge, == 1, shift =0 (wqe_size of 32 bytes), for sge=2 and 3, shift =1 + * (64 bytes wqes) and 2 otherwise (128 bytes wqe). + */ +enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u8 sge, u8 *shift) +{ + u32 size; + + *shift = 0; + if (sge > 1) + *shift = (sge < 4) ? 
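/* sge <= 1 leaves shift 0 (32-byte WQEs), sge 2-3 selects shift 1 (64-byte), and sge >= 4 selects shift 2 (128-byte), matching the mapping described in the function comment above */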
1 : 2; + + /* check if wqdepth is multiple of 2 or not */ + + if ((wqdepth < I40IWQP_SW_MIN_WQSIZE) || (wqdepth & (wqdepth - 1))) + return I40IW_ERR_INVALID_SIZE; + + size = wqdepth << *shift; /* multiple of 32 bytes count */ + if (size > I40IWQP_SW_MAX_WQSIZE) + return I40IW_ERR_INVALID_SIZE; + return 0; +} + +static struct i40iw_qp_uk_ops iw_qp_uk_ops = { + i40iw_qp_post_wr, + i40iw_qp_ring_push_db, + i40iw_rdma_write, + i40iw_rdma_read, + i40iw_send, + i40iw_inline_rdma_write, + i40iw_inline_send, + i40iw_stag_local_invalidate, + i40iw_mw_bind, + i40iw_post_receive, + i40iw_nop +}; + +static struct i40iw_cq_ops iw_cq_ops = { + i40iw_cq_request_notification, + i40iw_cq_poll_completion, + i40iw_cq_post_entries, + i40iw_clean_cq +}; + +static struct i40iw_device_uk_ops iw_device_uk_ops = { + i40iw_cq_uk_init, + i40iw_qp_uk_init, +}; + +/** + * i40iw_qp_uk_init - initialize shared qp + * @qp: hw qp (user and kernel) + * @info: qp initialization info + * + * initializes the vars used in both user and kernel mode. + * size of the wqe depends on numbers of max. fragements + * allowed. Then size of wqe * the number of wqes should be the + * amount of memory allocated for sq and rq. If srq is used, + * then rq_base will point to one rq wqe only (not the whole + * array of wqes) + */ +enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp, + struct i40iw_qp_uk_init_info *info) +{ + enum i40iw_status_code ret_code = 0; + u32 sq_ring_size; + u8 sqshift, rqshift; + + if (info->max_sq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT) + return I40IW_ERR_INVALID_FRAG_COUNT; + + if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT) + return I40IW_ERR_INVALID_FRAG_COUNT; + ret_code = i40iw_get_wqe_shift(info->sq_size, info->max_sq_frag_cnt, &sqshift); + if (ret_code) + return ret_code; + + ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, &rqshift); + if (ret_code) + return ret_code; + + qp->sq_base = info->sq; + qp->rq_base = info->rq; + qp->shadow_area = info->shadow_area; + qp->sq_wrtrk_array = info->sq_wrtrk_array; + qp->rq_wrid_array = info->rq_wrid_array; + + qp->wqe_alloc_reg = info->wqe_alloc_reg; + qp->qp_id = info->qp_id; + + qp->sq_size = info->sq_size; + qp->push_db = info->push_db; + qp->push_wqe = info->push_wqe; + + qp->max_sq_frag_cnt = info->max_sq_frag_cnt; + sq_ring_size = qp->sq_size << sqshift; + + I40IW_RING_INIT(qp->sq_ring, sq_ring_size); + I40IW_RING_INIT(qp->initial_ring, sq_ring_size); + I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code); + I40IW_RING_MOVE_TAIL(qp->sq_ring); + I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code); + qp->swqe_polarity = 1; + qp->swqe_polarity_deferred = 1; + qp->rwqe_polarity = 0; + + if (!qp->use_srq) { + qp->rq_size = info->rq_size; + qp->max_rq_frag_cnt = info->max_rq_frag_cnt; + qp->rq_wqe_size = rqshift; + I40IW_RING_INIT(qp->rq_ring, qp->rq_size); + qp->rq_wqe_size_multiplier = 4 << rqshift; + } + qp->ops = iw_qp_uk_ops; + + return ret_code; +} + +/** + * i40iw_cq_uk_init - initialize shared cq (user and kernel) + * @cq: hw cq + * @info: hw cq initialization info + */ +enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq, + struct i40iw_cq_uk_init_info *info) +{ + if ((info->cq_size < I40IW_MIN_CQ_SIZE) || + (info->cq_size > I40IW_MAX_CQ_SIZE)) + return I40IW_ERR_INVALID_SIZE; + cq->cq_base = (struct i40iw_cqe *)info->cq_base; + cq->cq_id = info->cq_id; + cq->cq_size = info->cq_size; + cq->cqe_alloc_reg = info->cqe_alloc_reg; + cq->shadow_area = info->shadow_area; + cq->avoid_mem_cflct = info->avoid_mem_cflct; + + 
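/* the CQ ring starts empty with polarity 1: a CQE is treated as valid when its valid bit matches cq->polarity, and the consumer flips the polarity each time the ring wraps (see i40iw_cq_poll_completion and i40iw_clean_cq) */ +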
I40IW_RING_INIT(cq->cq_ring, cq->cq_size); + cq->polarity = 1; + cq->ops = iw_cq_ops; + + return 0; +} + +/** + * i40iw_device_init_uk - setup routines for iwarp shared device + * @dev: iwarp shared (user and kernel) + */ +void i40iw_device_init_uk(struct i40iw_dev_uk *dev) +{ + dev->ops_uk = iw_device_uk_ops; +} + +/** + * i40iw_clean_cq - clean cq entries + * @ queue completion context + * @cq: cq to clean + */ +void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq) +{ + u64 *cqe; + u64 qword3, comp_ctx; + u32 cq_head; + u8 polarity, temp; + + cq_head = cq->cq_ring.head; + temp = cq->polarity; + do { + if (cq->avoid_mem_cflct) + cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]); + else + cqe = (u64 *)&cq->cq_base[cq_head]; + get_64bit_val(cqe, 24, &qword3); + polarity = (u8)RS_64(qword3, I40IW_CQ_VALID); + + if (polarity != temp) + break; + + get_64bit_val(cqe, 8, &comp_ctx); + if ((void *)(unsigned long)comp_ctx == queue) + set_64bit_val(cqe, 8, 0); + + cq_head = (cq_head + 1) % cq->cq_ring.size; + if (!cq_head) + temp ^= 1; + } while (true); +} + +/** + * i40iw_nop - send a nop + * @qp: hw qp ptr + * @wr_id: work request id + * @signaled: flag if signaled for completion + * @post_sq: flag to post sq + */ +enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp, + u64 wr_id, + bool signaled, + bool post_sq) +{ + u64 header, *wqe; + u32 wqe_idx; + + wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + + qp->sq_wrtrk_array[wqe_idx].wrid = wr_id; + qp->sq_wrtrk_array[wqe_idx].wr_len = 0; + set_64bit_val(wqe, 0, 0); + set_64bit_val(wqe, 8, 0); + set_64bit_val(wqe, 16, 0); + + header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) | + LS_64(signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + if (post_sq) + i40iw_qp_post_wr(qp); + + return 0; +} + +/** + * i40iw_fragcnt_to_wqesize_sq - calculate wqe size based on fragment count for SQ + * @frag_cnt: number of fragments + * @wqe_size: size of sq wqe returned + */ +enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u8 frag_cnt, u8 *wqe_size) +{ + switch (frag_cnt) { + case 0: + case 1: + *wqe_size = I40IW_QP_WQE_MIN_SIZE; + break; + case 2: + case 3: + *wqe_size = 64; + break; + case 4: + case 5: + *wqe_size = 96; + break; + case 6: + case 7: + *wqe_size = 128; + break; + default: + return I40IW_ERR_INVALID_FRAG_COUNT; + } + + return 0; +} + +/** + * i40iw_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ + * @frag_cnt: number of fragments + * @wqe_size: size of rq wqe returned + */ +enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u8 frag_cnt, u8 *wqe_size) +{ + switch (frag_cnt) { + case 0: + case 1: + *wqe_size = 32; + break; + case 2: + case 3: + *wqe_size = 64; + break; + case 4: + case 5: + case 6: + case 7: + *wqe_size = 128; + break; + default: + return I40IW_ERR_INVALID_FRAG_COUNT; + } + + return 0; +} + +/** + * i40iw_inline_data_size_to_wqesize - based on inline data, wqe size + * @data_size: data size for inline + * @wqe_size: size of sq wqe returned + */ +enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size, + u8 *wqe_size) +{ + if (data_size > I40IW_MAX_INLINE_DATA_SIZE) + return I40IW_ERR_INVALID_IMM_DATA_SIZE; + + if (data_size <= 16) + *wqe_size = I40IW_QP_WQE_MIN_SIZE; + else if (data_size <= 48) + *wqe_size = 64; + else if (data_size <= 80) + *wqe_size = 96; + else + 
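/* 16 bytes of inline data fit in the first 32-byte quantum and each further quantum adds 32 bytes, giving the 32/64/96/128 steps above and the 112-byte I40IW_MAX_INLINE_DATA_SIZE cap */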
*wqe_size = 128; + + return 0; +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h new file mode 100644 index 000000000..5cd971bb8 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_user.h @@ -0,0 +1,442 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#ifndef I40IW_USER_H +#define I40IW_USER_H + +enum i40iw_device_capabilities_const { + I40IW_WQE_SIZE = 4, + I40IW_CQP_WQE_SIZE = 8, + I40IW_CQE_SIZE = 4, + I40IW_EXTENDED_CQE_SIZE = 8, + I40IW_AEQE_SIZE = 2, + I40IW_CEQE_SIZE = 1, + I40IW_CQP_CTX_SIZE = 8, + I40IW_SHADOW_AREA_SIZE = 8, + I40IW_CEQ_MAX_COUNT = 256, + I40IW_QUERY_FPM_BUF_SIZE = 128, + I40IW_COMMIT_FPM_BUF_SIZE = 128, + I40IW_MIN_IW_QP_ID = 1, + I40IW_MAX_IW_QP_ID = 262143, + I40IW_MIN_CEQID = 0, + I40IW_MAX_CEQID = 256, + I40IW_MIN_CQID = 0, + I40IW_MAX_CQID = 131071, + I40IW_MIN_AEQ_ENTRIES = 1, + I40IW_MAX_AEQ_ENTRIES = 524287, + I40IW_MIN_CEQ_ENTRIES = 1, + I40IW_MAX_CEQ_ENTRIES = 131071, + I40IW_MIN_CQ_SIZE = 1, + I40IW_MAX_CQ_SIZE = 1048575, + I40IW_MAX_AEQ_ALLOCATE_COUNT = 255, + I40IW_DB_ID_ZERO = 0, + I40IW_MAX_WQ_FRAGMENT_COUNT = 6, + I40IW_MAX_SGE_RD = 1, + I40IW_MAX_OUTBOUND_MESSAGE_SIZE = 2147483647, + I40IW_MAX_INBOUND_MESSAGE_SIZE = 2147483647, + I40IW_MAX_PUSH_PAGE_COUNT = 4096, + I40IW_MAX_PE_ENABLED_VF_COUNT = 32, + I40IW_MAX_VF_FPM_ID = 47, + I40IW_MAX_VF_PER_PF = 127, + I40IW_MAX_SQ_PAYLOAD_SIZE = 2145386496, + I40IW_MAX_INLINE_DATA_SIZE = 112, + I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE = 112, + I40IW_MAX_IRD_SIZE = 32, + I40IW_QPCTX_ENCD_MAXIRD = 3, + I40IW_MAX_WQ_ENTRIES = 2048, + I40IW_MAX_ORD_SIZE = 32, + I40IW_Q2_BUFFER_SIZE = (248 + 100), + I40IW_QP_CTX_SIZE = 248 +}; + +#define i40iw_handle void * +#define i40iw_adapter_handle i40iw_handle +#define i40iw_qp_handle i40iw_handle +#define i40iw_cq_handle i40iw_handle +#define i40iw_srq_handle i40iw_handle +#define i40iw_pd_id i40iw_handle +#define i40iw_stag_handle i40iw_handle +#define i40iw_stag_index u32 +#define i40iw_stag u32 +#define i40iw_stag_key u8 + +#define 
i40iw_tagged_offset u64 +#define i40iw_access_privileges u32 +#define i40iw_physical_fragment u64 +#define i40iw_address_list u64 * + +#define I40IW_CREATE_STAG(index, key) (((index) << 8) + (key)) + +#define I40IW_STAG_KEY_FROM_STAG(stag) ((stag) && 0x000000FF) + +#define I40IW_STAG_INDEX_FROM_STAG(stag) (((stag) && 0xFFFFFF00) >> 8) + +struct i40iw_qp_uk; +struct i40iw_cq_uk; +struct i40iw_srq_uk; +struct i40iw_qp_uk_init_info; +struct i40iw_cq_uk_init_info; +struct i40iw_srq_uk_init_info; + +struct i40iw_sge { + i40iw_tagged_offset tag_off; + u32 len; + i40iw_stag stag; +}; + +#define i40iw_sgl struct i40iw_sge * + +struct i40iw_ring { + u32 head; + u32 tail; + u32 size; +}; + +struct i40iw_cqe { + u64 buf[I40IW_CQE_SIZE]; +}; + +struct i40iw_extended_cqe { + u64 buf[I40IW_EXTENDED_CQE_SIZE]; +}; + +struct i40iw_wqe { + u64 buf[I40IW_WQE_SIZE]; +}; + +struct i40iw_qp_uk_ops; + +enum i40iw_addressing_type { + I40IW_ADDR_TYPE_ZERO_BASED = 0, + I40IW_ADDR_TYPE_VA_BASED = 1, +}; + +#define I40IW_ACCESS_FLAGS_LOCALREAD 0x01 +#define I40IW_ACCESS_FLAGS_LOCALWRITE 0x02 +#define I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04 +#define I40IW_ACCESS_FLAGS_REMOTEREAD 0x05 +#define I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08 +#define I40IW_ACCESS_FLAGS_REMOTEWRITE 0x0a +#define I40IW_ACCESS_FLAGS_BIND_WINDOW 0x10 +#define I40IW_ACCESS_FLAGS_ALL 0x1F + +#define I40IW_OP_TYPE_RDMA_WRITE 0 +#define I40IW_OP_TYPE_RDMA_READ 1 +#define I40IW_OP_TYPE_SEND 3 +#define I40IW_OP_TYPE_SEND_INV 4 +#define I40IW_OP_TYPE_SEND_SOL 5 +#define I40IW_OP_TYPE_SEND_SOL_INV 6 +#define I40IW_OP_TYPE_REC 7 +#define I40IW_OP_TYPE_BIND_MW 8 +#define I40IW_OP_TYPE_FAST_REG_NSMR 9 +#define I40IW_OP_TYPE_INV_STAG 10 +#define I40IW_OP_TYPE_RDMA_READ_INV_STAG 11 +#define I40IW_OP_TYPE_NOP 12 + +enum i40iw_completion_status { + I40IW_COMPL_STATUS_SUCCESS = 0, + I40IW_COMPL_STATUS_FLUSHED, + I40IW_COMPL_STATUS_INVALID_WQE, + I40IW_COMPL_STATUS_QP_CATASTROPHIC, + I40IW_COMPL_STATUS_REMOTE_TERMINATION, + I40IW_COMPL_STATUS_INVALID_STAG, + I40IW_COMPL_STATUS_BASE_BOUND_VIOLATION, + I40IW_COMPL_STATUS_ACCESS_VIOLATION, + I40IW_COMPL_STATUS_INVALID_PD_ID, + I40IW_COMPL_STATUS_WRAP_ERROR, + I40IW_COMPL_STATUS_STAG_INVALID_PDID, + I40IW_COMPL_STATUS_RDMA_READ_ZERO_ORD, + I40IW_COMPL_STATUS_QP_NOT_PRIVLEDGED, + I40IW_COMPL_STATUS_STAG_NOT_INVALID, + I40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_SIZE, + I40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_ENTRY, + I40IW_COMPL_STATUS_INVALID_FBO, + I40IW_COMPL_STATUS_INVALID_LENGTH, + I40IW_COMPL_STATUS_INVALID_ACCESS, + I40IW_COMPL_STATUS_PHYS_BUFFER_LIST_TOO_LONG, + I40IW_COMPL_STATUS_INVALID_VIRT_ADDRESS, + I40IW_COMPL_STATUS_INVALID_REGION, + I40IW_COMPL_STATUS_INVALID_WINDOW, + I40IW_COMPL_STATUS_INVALID_TOTAL_LENGTH +}; + +enum i40iw_completion_notify { + IW_CQ_COMPL_EVENT = 0, + IW_CQ_COMPL_SOLICITED = 1 +}; + +struct i40iw_post_send { + i40iw_sgl sg_list; + u8 num_sges; +}; + +struct i40iw_post_inline_send { + void *data; + u32 len; +}; + +struct i40iw_post_send_w_inv { + i40iw_sgl sg_list; + u32 num_sges; + i40iw_stag remote_stag_to_inv; +}; + +struct i40iw_post_inline_send_w_inv { + void *data; + u32 len; + i40iw_stag remote_stag_to_inv; +}; + +struct i40iw_rdma_write { + i40iw_sgl lo_sg_list; + u8 num_lo_sges; + struct i40iw_sge rem_addr; +}; + +struct i40iw_inline_rdma_write { + void *data; + u32 len; + struct i40iw_sge rem_addr; +}; + +struct i40iw_rdma_read { + struct i40iw_sge lo_addr; + struct i40iw_sge rem_addr; +}; + +struct i40iw_bind_window { + i40iw_stag mr_stag; + u64 bind_length; + void *va; + enum 
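/* selects zero-based or VA-based window addressing; i40iw_mw_bind() maps this to the VABASEDTO bit of the WQE header */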
i40iw_addressing_type addressing_type; + bool enable_reads; + bool enable_writes; + i40iw_stag mw_stag; +}; + +struct i40iw_inv_local_stag { + i40iw_stag target_stag; +}; + +struct i40iw_post_sq_info { + u64 wr_id; + u8 op_type; + bool signaled; + bool read_fence; + bool local_fence; + bool inline_data; + bool defer_flag; + union { + struct i40iw_post_send send; + struct i40iw_post_send send_w_sol; + struct i40iw_post_send_w_inv send_w_inv; + struct i40iw_post_send_w_inv send_w_sol_inv; + struct i40iw_rdma_write rdma_write; + struct i40iw_rdma_read rdma_read; + struct i40iw_rdma_read rdma_read_inv; + struct i40iw_bind_window bind_window; + struct i40iw_inv_local_stag inv_local_stag; + struct i40iw_inline_rdma_write inline_rdma_write; + struct i40iw_post_inline_send inline_send; + struct i40iw_post_inline_send inline_send_w_sol; + struct i40iw_post_inline_send_w_inv inline_send_w_inv; + struct i40iw_post_inline_send_w_inv inline_send_w_sol_inv; + } op; +}; + +struct i40iw_post_rq_info { + u64 wr_id; + i40iw_sgl sg_list; + u32 num_sges; +}; + +struct i40iw_cq_poll_info { + u64 wr_id; + i40iw_qp_handle qp_handle; + u32 bytes_xfered; + u32 tcp_seq_num; + u32 qp_id; + i40iw_stag inv_stag; + enum i40iw_completion_status comp_status; + u16 major_err; + u16 minor_err; + u8 op_type; + bool stag_invalid_set; + bool push_dropped; + bool error; + bool is_srq; + bool solicited_event; +}; + +struct i40iw_qp_uk_ops { + void (*iw_qp_post_wr)(struct i40iw_qp_uk *); + void (*iw_qp_ring_push_db)(struct i40iw_qp_uk *, u32); + enum i40iw_status_code (*iw_rdma_write)(struct i40iw_qp_uk *, + struct i40iw_post_sq_info *, bool); + enum i40iw_status_code (*iw_rdma_read)(struct i40iw_qp_uk *, + struct i40iw_post_sq_info *, bool, bool); + enum i40iw_status_code (*iw_send)(struct i40iw_qp_uk *, + struct i40iw_post_sq_info *, u32, bool); + enum i40iw_status_code (*iw_inline_rdma_write)(struct i40iw_qp_uk *, + struct i40iw_post_sq_info *, bool); + enum i40iw_status_code (*iw_inline_send)(struct i40iw_qp_uk *, + struct i40iw_post_sq_info *, u32, bool); + enum i40iw_status_code (*iw_stag_local_invalidate)(struct i40iw_qp_uk *, + struct i40iw_post_sq_info *, bool); + enum i40iw_status_code (*iw_mw_bind)(struct i40iw_qp_uk *, + struct i40iw_post_sq_info *, bool); + enum i40iw_status_code (*iw_post_receive)(struct i40iw_qp_uk *, + struct i40iw_post_rq_info *); + enum i40iw_status_code (*iw_post_nop)(struct i40iw_qp_uk *, u64, bool, bool); +}; + +struct i40iw_cq_ops { + void (*iw_cq_request_notification)(struct i40iw_cq_uk *, + enum i40iw_completion_notify); + enum i40iw_status_code (*iw_cq_poll_completion)(struct i40iw_cq_uk *, + struct i40iw_cq_poll_info *, bool); + enum i40iw_status_code (*iw_cq_post_entries)(struct i40iw_cq_uk *, u8 count); + void (*iw_cq_clean)(void *, struct i40iw_cq_uk *); +}; + +struct i40iw_dev_uk; + +struct i40iw_device_uk_ops { + enum i40iw_status_code (*iwarp_cq_uk_init)(struct i40iw_cq_uk *, + struct i40iw_cq_uk_init_info *); + enum i40iw_status_code (*iwarp_qp_uk_init)(struct i40iw_qp_uk *, + struct i40iw_qp_uk_init_info *); +}; + +struct i40iw_dev_uk { + struct i40iw_device_uk_ops ops_uk; +}; + +struct i40iw_sq_uk_wr_trk_info { + u64 wrid; + u64 wr_len; +}; + +struct i40iw_qp_quanta { + u64 elem[I40IW_WQE_SIZE]; +}; + +struct i40iw_qp_uk { + struct i40iw_qp_quanta *sq_base; + struct i40iw_qp_quanta *rq_base; + u32 __iomem *wqe_alloc_reg; + struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array; + u64 *rq_wrid_array; + u64 *shadow_area; + u32 *push_db; + u64 *push_wqe; + struct i40iw_ring sq_ring; + 
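/* sq_ring tracks the software head/tail of the send queue, while initial_ring below remembers the head at the last doorbell so i40iw_qp_post_wr() can compare it against the hardware tail in the shadow area before ringing again */ +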
struct i40iw_ring rq_ring; + struct i40iw_ring initial_ring; + u32 qp_id; + u32 sq_size; + u32 rq_size; + struct i40iw_qp_uk_ops ops; + bool use_srq; + u8 swqe_polarity; + u8 swqe_polarity_deferred; + u8 rwqe_polarity; + u8 rq_wqe_size; + u8 rq_wqe_size_multiplier; + u8 max_sq_frag_cnt; + u8 max_rq_frag_cnt; + bool deferred_flag; +}; + +struct i40iw_cq_uk { + struct i40iw_cqe *cq_base; + u32 __iomem *cqe_alloc_reg; + u64 *shadow_area; + u32 cq_id; + u32 cq_size; + struct i40iw_ring cq_ring; + u8 polarity; + bool avoid_mem_cflct; + + struct i40iw_cq_ops ops; +}; + +struct i40iw_qp_uk_init_info { + struct i40iw_qp_quanta *sq; + struct i40iw_qp_quanta *rq; + u32 __iomem *wqe_alloc_reg; + u64 *shadow_area; + struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array; + u64 *rq_wrid_array; + u32 *push_db; + u64 *push_wqe; + u32 qp_id; + u32 sq_size; + u32 rq_size; + u8 max_sq_frag_cnt; + u8 max_rq_frag_cnt; + +}; + +struct i40iw_cq_uk_init_info { + u32 __iomem *cqe_alloc_reg; + struct i40iw_cqe *cq_base; + u64 *shadow_area; + u32 cq_size; + u32 cq_id; + bool avoid_mem_cflct; +}; + +void i40iw_device_init_uk(struct i40iw_dev_uk *dev); + +void i40iw_qp_post_wr(struct i40iw_qp_uk *qp); +u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx, + u8 wqe_size); +u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx); +u64 *i40iw_qp_get_next_srq_wqe(struct i40iw_srq_uk *srq, u32 *wqe_idx); + +enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq, + struct i40iw_cq_uk_init_info *info); +enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp, + struct i40iw_qp_uk_init_info *info); + +void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq); +enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp, u64 wr_id, + bool signaled, bool post_sq); +enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u8 frag_cnt, u8 *wqe_size); +enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u8 frag_cnt, u8 *wqe_size); +enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size, + u8 *wqe_size); +enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u8 sge, u8 *shift); +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c new file mode 100644 index 000000000..1ceec81bd --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -0,0 +1,1270 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/if_vlan.h> +#include <linux/crc32.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/init.h> +#include <linux/io.h> +#include <asm/irq.h> +#include <asm/byteorder.h> +#include <net/netevent.h> +#include <net/neighbour.h> +#include "i40iw.h" + +/** + * i40iw_arp_table - manage arp table + * @iwdev: iwarp device + * @ip_addr: ip address for device + * @mac_addr: mac address ptr + * @action: modify, delete or add + */ +int i40iw_arp_table(struct i40iw_device *iwdev, + __be32 *ip_addr, + bool ipv4, + u8 *mac_addr, + u32 action) +{ + int arp_index; + int err; + u32 ip[4]; + + if (ipv4) { + memset(ip, 0, sizeof(ip)); + ip[0] = *ip_addr; + } else { + memcpy(ip, ip_addr, sizeof(ip)); + } + + for (arp_index = 0; (u32)arp_index < iwdev->arp_table_size; arp_index++) + if (memcmp(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip)) == 0) + break; + switch (action) { + case I40IW_ARP_ADD: + if (arp_index != iwdev->arp_table_size) + return -1; + + arp_index = 0; + err = i40iw_alloc_resource(iwdev, iwdev->allocated_arps, + iwdev->arp_table_size, + (u32 *)&arp_index, + &iwdev->next_arp_index); + + if (err) + return err; + + memcpy(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip)); + ether_addr_copy(iwdev->arp_table[arp_index].mac_addr, mac_addr); + break; + case I40IW_ARP_RESOLVE: + if (arp_index == iwdev->arp_table_size) + return -1; + break; + case I40IW_ARP_DELETE: + if (arp_index == iwdev->arp_table_size) + return -1; + memset(iwdev->arp_table[arp_index].ip_addr, 0, + sizeof(iwdev->arp_table[arp_index].ip_addr)); + eth_zero_addr(iwdev->arp_table[arp_index].mac_addr); + i40iw_free_resource(iwdev, iwdev->allocated_arps, arp_index); + break; + default: + return -1; + } + return arp_index; +} + +/** + * i40iw_wr32 - write 32 bits to hw register + * @hw: hardware information including registers + * @reg: register offset + * @value: vvalue to write to register + */ +inline void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value) +{ + writel(value, hw->hw_addr + reg); +} + +/** + * i40iw_rd32 - read a 32 bit hw register + * @hw: hardware information including registers + * @reg: register offset + * + * Return value of register content + */ +inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg) +{ + return readl(hw->hw_addr + reg); +} + +/** + * i40iw_inetaddr_event - system notifier for netdev events + * @notfier: not used + * @event: event for notifier + * @ptr: if address + */ +int i40iw_inetaddr_event(struct notifier_block *notifier, + unsigned long event, + void *ptr) +{ + struct in_ifaddr *ifa = ptr; + struct net_device *event_netdev = ifa->ifa_dev->dev; + struct net_device *netdev; + struct net_device *upper_dev; + struct i40iw_device *iwdev; + struct i40iw_handler *hdl; + __be32 local_ipaddr; + + hdl = i40iw_find_netdev(event_netdev); + if (!hdl) + return NOTIFY_DONE; + + iwdev = &hdl->device; + netdev = iwdev->ldev->netdev; + upper_dev = netdev_master_upper_dev_get(netdev); + if (netdev != event_netdev) + return 
NOTIFY_DONE; + + switch (event) { + case NETDEV_DOWN: + if (upper_dev) + local_ipaddr = + ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address; + else + local_ipaddr = ifa->ifa_address; + local_ipaddr = ntohl(local_ipaddr); + i40iw_manage_arp_cache(iwdev, + netdev->dev_addr, + &local_ipaddr, + true, + I40IW_ARP_DELETE); + return NOTIFY_OK; + case NETDEV_UP: + if (upper_dev) + local_ipaddr = + ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address; + else + local_ipaddr = ifa->ifa_address; + local_ipaddr = ntohl(local_ipaddr); + i40iw_manage_arp_cache(iwdev, + netdev->dev_addr, + &local_ipaddr, + true, + I40IW_ARP_ADD); + break; + case NETDEV_CHANGEADDR: + /* Add the address to the IP table */ + if (upper_dev) + local_ipaddr = + ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address; + else + local_ipaddr = ifa->ifa_address; + + local_ipaddr = ntohl(local_ipaddr); + i40iw_manage_arp_cache(iwdev, + netdev->dev_addr, + &local_ipaddr, + true, + I40IW_ARP_ADD); + break; + default: + break; + } + return NOTIFY_DONE; +} + +/** + * i40iw_inet6addr_event - system notifier for ipv6 netdev events + * @notfier: not used + * @event: event for notifier + * @ptr: if address + */ +int i40iw_inet6addr_event(struct notifier_block *notifier, + unsigned long event, + void *ptr) +{ + struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; + struct net_device *event_netdev = ifa->idev->dev; + struct net_device *netdev; + struct i40iw_device *iwdev; + struct i40iw_handler *hdl; + __be32 local_ipaddr6[4]; + + hdl = i40iw_find_netdev(event_netdev); + if (!hdl) + return NOTIFY_DONE; + + iwdev = &hdl->device; + netdev = iwdev->ldev->netdev; + if (netdev != event_netdev) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_DOWN: + i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32); + i40iw_manage_arp_cache(iwdev, + netdev->dev_addr, + local_ipaddr6, + false, + I40IW_ARP_DELETE); + return NOTIFY_OK; + case NETDEV_UP: + /* Fall through */ + case NETDEV_CHANGEADDR: + i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32); + i40iw_manage_arp_cache(iwdev, + netdev->dev_addr, + local_ipaddr6, + false, + I40IW_ARP_ADD); + break; + default: + break; + } + return NOTIFY_DONE; +} + +/** + * i40iw_net_event - system notifier for net events + * @notfier: not used + * @event: event for notifier + * @ptr: neighbor + */ +int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *ptr) +{ + struct neighbour *neigh = ptr; + struct i40iw_device *iwdev; + struct i40iw_handler *iwhdl; + __be32 *p; + u32 local_ipaddr[4]; + + switch (event) { + case NETEVENT_NEIGH_UPDATE: + iwhdl = i40iw_find_netdev((struct net_device *)neigh->dev); + if (!iwhdl) + return NOTIFY_DONE; + iwdev = &iwhdl->device; + p = (__be32 *)neigh->primary_key; + i40iw_copy_ip_ntohl(local_ipaddr, p); + if (neigh->nud_state & NUD_VALID) { + i40iw_manage_arp_cache(iwdev, + neigh->ha, + local_ipaddr, + false, + I40IW_ARP_ADD); + + } else { + i40iw_manage_arp_cache(iwdev, + neigh->ha, + local_ipaddr, + false, + I40IW_ARP_DELETE); + } + break; + default: + break; + } + return NOTIFY_DONE; +} + +/** + * i40iw_get_cqp_request - get cqp struct + * @cqp: device cqp ptr + * @wait: cqp to be used in wait mode + */ +struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait) +{ + struct i40iw_cqp_request *cqp_request = NULL; + unsigned long flags; + + spin_lock_irqsave(&cqp->req_lock, flags); + if (!list_empty(&cqp->cqp_avail_reqs)) { + cqp_request = list_entry(cqp->cqp_avail_reqs.next, + struct 
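/* reuse a preallocated request from the free list when one is available; otherwise fall back to the GFP_ATOMIC allocation below, which is marked dynamic so it is freed rather than recycled */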
i40iw_cqp_request, list); + list_del_init(&cqp_request->list); + } + spin_unlock_irqrestore(&cqp->req_lock, flags); + if (!cqp_request) { + cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC); + if (cqp_request) { + cqp_request->dynamic = true; + INIT_LIST_HEAD(&cqp_request->list); + init_waitqueue_head(&cqp_request->waitq); + } + } + if (!cqp_request) { + i40iw_pr_err("CQP Request Fail: No Memory"); + return NULL; + } + + if (wait) { + atomic_set(&cqp_request->refcount, 2); + cqp_request->waiting = true; + } else { + atomic_set(&cqp_request->refcount, 1); + } + return cqp_request; +} + +/** + * i40iw_free_cqp_request - free cqp request + * @cqp: cqp ptr + * @cqp_request: to be put back in cqp list + */ +void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request) +{ + unsigned long flags; + + if (cqp_request->dynamic) { + kfree(cqp_request); + } else { + cqp_request->request_done = false; + cqp_request->callback_fcn = NULL; + cqp_request->waiting = false; + + spin_lock_irqsave(&cqp->req_lock, flags); + list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs); + spin_unlock_irqrestore(&cqp->req_lock, flags); + } +} + +/** + * i40iw_put_cqp_request - dec ref count and free if 0 + * @cqp: cqp ptr + * @cqp_request: to be put back in cqp list + */ +void i40iw_put_cqp_request(struct i40iw_cqp *cqp, + struct i40iw_cqp_request *cqp_request) +{ + if (atomic_dec_and_test(&cqp_request->refcount)) + i40iw_free_cqp_request(cqp, cqp_request); +} + +/** + * i40iw_free_qp - callback after destroy cqp completes + * @cqp_request: cqp request for destroy qp + * @num: not used + */ +static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num) +{ + struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param; + struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp; + struct i40iw_device *iwdev; + u32 qp_num = iwqp->ibqp.qp_num; + + iwdev = iwqp->iwdev; + + i40iw_rem_pdusecount(iwqp->iwpd, iwdev); + i40iw_free_qp_resources(iwdev, iwqp, qp_num); +} + +/** + * i40iw_wait_event - wait for completion + * @iwdev: iwarp device + * @cqp_request: cqp request to wait + */ +static int i40iw_wait_event(struct i40iw_device *iwdev, + struct i40iw_cqp_request *cqp_request) +{ + struct cqp_commands_info *info = &cqp_request->info; + struct i40iw_cqp *iwcqp = &iwdev->cqp; + bool cqp_error = false; + int err_code = 0; + int timeout_ret = 0; + + timeout_ret = wait_event_timeout(cqp_request->waitq, + cqp_request->request_done, + I40IW_EVENT_TIMEOUT); + if (!timeout_ret) { + i40iw_pr_err("error cqp command 0x%x timed out ret = %d\n", + info->cqp_cmd, timeout_ret); + err_code = -ETIME; + i40iw_request_reset(iwdev); + goto done; + } + cqp_error = cqp_request->compl_info.error; + if (cqp_error) { + i40iw_pr_err("error cqp command 0x%x completion maj = 0x%x min=0x%x\n", + info->cqp_cmd, cqp_request->compl_info.maj_err_code, + cqp_request->compl_info.min_err_code); + err_code = -EPROTO; + goto done; + } +done: + i40iw_put_cqp_request(iwcqp, cqp_request); + return err_code; +} + +/** + * i40iw_handle_cqp_op - process cqp command + * @iwdev: iwarp device + * @cqp_request: cqp request to process + */ +enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev, + struct i40iw_cqp_request + *cqp_request) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + enum i40iw_status_code status; + struct cqp_commands_info *info = &cqp_request->info; + int err_code = 0; + + status = i40iw_process_cqp_cmd(dev, info); + if (status) { + i40iw_pr_err("error cqp command 0x%x failed\n", 
info->cqp_cmd); + i40iw_free_cqp_request(&iwdev->cqp, cqp_request); + return status; + } + if (cqp_request->waiting) + err_code = i40iw_wait_event(iwdev, cqp_request); + if (err_code) + status = I40IW_ERR_CQP_COMPL_ERROR; + return status; +} + +/** + * i40iw_add_pdusecount - add pd refcount + * @iwpd: pd for refcount + */ +void i40iw_add_pdusecount(struct i40iw_pd *iwpd) +{ + atomic_inc(&iwpd->usecount); +} + +/** + * i40iw_rem_pdusecount - decrement refcount for pd and free if 0 + * @iwpd: pd for refcount + * @iwdev: iwarp device + */ +void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev) +{ + if (!atomic_dec_and_test(&iwpd->usecount)) + return; + i40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id); + kfree(iwpd); +} + +/** + * i40iw_add_ref - add refcount for qp + * @ibqp: iqarp qp + */ +void i40iw_add_ref(struct ib_qp *ibqp) +{ + struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp; + + atomic_inc(&iwqp->refcount); +} + +/** + * i40iw_rem_ref - rem refcount for qp and free if 0 + * @ibqp: iqarp qp + */ +void i40iw_rem_ref(struct ib_qp *ibqp) +{ + struct i40iw_qp *iwqp; + enum i40iw_status_code status; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + struct i40iw_device *iwdev; + u32 qp_num; + + iwqp = to_iwqp(ibqp); + if (!atomic_dec_and_test(&iwqp->refcount)) + return; + + iwdev = iwqp->iwdev; + qp_num = iwqp->ibqp.qp_num; + iwdev->qp_table[qp_num] = NULL; + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false); + if (!cqp_request) + return; + + cqp_request->callback_fcn = i40iw_free_qp; + cqp_request->param = (void *)&iwqp->sc_qp; + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = OP_QP_DESTROY; + cqp_info->post_sq = 1; + cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp; + cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request; + cqp_info->in.u.qp_destroy.remove_hash_idx = true; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Destroy QP fail"); +} + +/** + * i40iw_get_qp - get qp address + * @device: iwarp device + * @qpn: qp number + */ +struct ib_qp *i40iw_get_qp(struct ib_device *device, int qpn) +{ + struct i40iw_device *iwdev = to_iwdev(device); + + if ((qpn < IW_FIRST_QPN) || (qpn >= iwdev->max_qp)) + return NULL; + + return &iwdev->qp_table[qpn]->ibqp; +} + +/** + * i40iw_debug_buf - print debug msg and buffer is mask set + * @dev: hardware control device structure + * @mask: mask to compare if to print debug buffer + * @buf: points buffer addr + * @size: saize of buffer to print + */ +void i40iw_debug_buf(struct i40iw_sc_dev *dev, + enum i40iw_debug_flag mask, + char *desc, + u64 *buf, + u32 size) +{ + u32 i; + + if (!(dev->debug_mask & mask)) + return; + i40iw_debug(dev, mask, "%s\n", desc); + i40iw_debug(dev, mask, "starting address virt=%p phy=%llxh\n", buf, + (unsigned long long)virt_to_phys(buf)); + + for (i = 0; i < size; i += 8) + i40iw_debug(dev, mask, "index %03d val: %016llx\n", i, buf[i / 8]); +} + +/** + * i40iw_get_hw_addr - return hw addr + * @par: points to shared dev + */ +u8 __iomem *i40iw_get_hw_addr(void *par) +{ + struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)par; + + return dev->hw->hw_addr; +} + +/** + * i40iw_remove_head - return head entry and remove from list + * @list: list for entry + */ +void *i40iw_remove_head(struct list_head *list) +{ + struct list_head *entry; + + if (list_empty(list)) + return NULL; + + entry = (void *)list->next; + list_del(entry); + return (void *)entry; +} + +/** + * i40iw_allocate_dma_mem - Memory alloc helper 
fn + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + * @alignment: what to align the allocation to + */ +enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw, + struct i40iw_dma_mem *mem, + u64 size, + u32 alignment) +{ + struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context; + + if (!mem) + return I40IW_ERR_PARAM; + mem->size = ALIGN(size, alignment); + mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size, + (dma_addr_t *)&mem->pa, GFP_KERNEL); + if (!mem->va) + return I40IW_ERR_NO_MEMORY; + return 0; +} + +/** + * i40iw_free_dma_mem - Memory free helper fn + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + */ +void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem) +{ + struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context; + + if (!mem || !mem->va) + return; + + dma_free_coherent(&pcidev->dev, mem->size, + mem->va, (dma_addr_t)mem->pa); + mem->va = NULL; +} + +/** + * i40iw_allocate_virt_mem - virtual memory alloc helper fn + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + */ +enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw, + struct i40iw_virt_mem *mem, + u32 size) +{ + if (!mem) + return I40IW_ERR_PARAM; + + mem->size = size; + mem->va = kzalloc(size, GFP_KERNEL); + + if (mem->va) + return 0; + else + return I40IW_ERR_NO_MEMORY; +} + +/** + * i40iw_free_virt_mem - virtual memory free helper fn + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + */ +enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw, + struct i40iw_virt_mem *mem) +{ + if (!mem) + return I40IW_ERR_PARAM; + kfree(mem->va); + mem->va = NULL; + return 0; +} + +/** + * i40iw_cqp_sds_cmd - create cqp command for sd + * @dev: hardware control device structure + * @sd_info: information for sd cqp + * + */ +enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev, + struct i40iw_update_sds_info *sdinfo) +{ + enum i40iw_status_code status; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); + if (!cqp_request) + return I40IW_ERR_NO_MEMORY; + cqp_info = &cqp_request->info; + memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo, + sizeof(cqp_info->in.u.update_pe_sds.info)); + cqp_info->cqp_cmd = OP_UPDATE_PE_SDS; + cqp_info->post_sq = 1; + cqp_info->in.u.update_pe_sds.dev = dev; + cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Update SD's fail"); + return status; +} + +/** + * i40iw_term_modify_qp - modify qp for term message + * @qp: hardware control qp + * @next_state: qp's next state + * @term: terminate code + * @term_len: length + */ +void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len) +{ + struct i40iw_qp *iwqp; + + iwqp = (struct i40iw_qp *)qp->back_qp; + i40iw_next_iw_state(iwqp, next_state, 0, term, term_len); +}; + +/** + * i40iw_terminate_done - after terminate is completed + * @qp: hardware control qp + * @timeout_occurred: indicates if terminate timer expired + */ +void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred) +{ + struct i40iw_qp *iwqp; + u32 next_iwarp_state = I40IW_QP_STATE_ERROR; + u8 hte = 0; + bool first_time; + unsigned long flags; + + iwqp = 
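/* back_qp links the hardware-control QP to its OS-level QP; the I40IW_TERM_DONE flag handling below ensures the state transition and disconnect run only once */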
(struct i40iw_qp *)qp->back_qp; + spin_lock_irqsave(&iwqp->lock, flags); + if (iwqp->hte_added) { + iwqp->hte_added = 0; + hte = 1; + } + first_time = !(qp->term_flags & I40IW_TERM_DONE); + qp->term_flags |= I40IW_TERM_DONE; + spin_unlock_irqrestore(&iwqp->lock, flags); + if (first_time) { + if (!timeout_occurred) + i40iw_terminate_del_timer(qp); + else + next_iwarp_state = I40IW_QP_STATE_CLOSING; + + i40iw_next_iw_state(iwqp, next_iwarp_state, hte, 0, 0); + i40iw_cm_disconn(iwqp); + } +} + +/** + * i40iw_terminate_imeout - timeout happened + * @context: points to iwarp qp + */ +static void i40iw_terminate_timeout(unsigned long context) +{ + struct i40iw_qp *iwqp = (struct i40iw_qp *)context; + struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp; + + i40iw_terminate_done(qp, 1); +} + +/** + * i40iw_terminate_start_timer - start terminate timeout + * @qp: hardware control qp + */ +void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp) +{ + struct i40iw_qp *iwqp; + + iwqp = (struct i40iw_qp *)qp->back_qp; + init_timer(&iwqp->terminate_timer); + iwqp->terminate_timer.function = i40iw_terminate_timeout; + iwqp->terminate_timer.expires = jiffies + HZ; + iwqp->terminate_timer.data = (unsigned long)iwqp; + add_timer(&iwqp->terminate_timer); +} + +/** + * i40iw_terminate_del_timer - delete terminate timeout + * @qp: hardware control qp + */ +void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp) +{ + struct i40iw_qp *iwqp; + + iwqp = (struct i40iw_qp *)qp->back_qp; + del_timer(&iwqp->terminate_timer); +} + +/** + * i40iw_cqp_generic_worker - generic worker for cqp + * @work: work pointer + */ +static void i40iw_cqp_generic_worker(struct work_struct *work) +{ + struct i40iw_virtchnl_work_info *work_info = + &((struct virtchnl_work *)work)->work_info; + + if (work_info->worker_vf_dev) + work_info->callback_fcn(work_info->worker_vf_dev); +} + +/** + * i40iw_cqp_spawn_worker - spawn worket thread + * @iwdev: device struct pointer + * @work_info: work request info + * @iw_vf_idx: virtual function index + */ +void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev, + struct i40iw_virtchnl_work_info *work_info, + u32 iw_vf_idx) +{ + struct virtchnl_work *work; + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + + work = &iwdev->virtchnl_w[iw_vf_idx]; + memcpy(&work->work_info, work_info, sizeof(*work_info)); + INIT_WORK(&work->work, i40iw_cqp_generic_worker); + queue_work(iwdev->virtchnl_wq, &work->work); +} + +/** + * i40iw_cqp_manage_hmc_fcn_worker - + * @work: work pointer for hmc info + */ +static void i40iw_cqp_manage_hmc_fcn_worker(struct work_struct *work) +{ + struct i40iw_cqp_request *cqp_request = + ((struct virtchnl_work *)work)->cqp_request; + struct i40iw_ccq_cqe_info ccq_cqe_info; + struct i40iw_hmc_fcn_info *hmcfcninfo = + &cqp_request->info.in.u.manage_hmc_pm.info; + struct i40iw_device *iwdev = + (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->back_dev; + + ccq_cqe_info.cqp = NULL; + ccq_cqe_info.maj_err_code = cqp_request->compl_info.maj_err_code; + ccq_cqe_info.min_err_code = cqp_request->compl_info.min_err_code; + ccq_cqe_info.op_code = cqp_request->compl_info.op_code; + ccq_cqe_info.op_ret_val = cqp_request->compl_info.op_ret_val; + ccq_cqe_info.scratch = 0; + ccq_cqe_info.error = cqp_request->compl_info.error; + hmcfcninfo->callback_fcn(cqp_request->info.in.u.manage_hmc_pm.dev, + hmcfcninfo->cqp_callback_param, &ccq_cqe_info); + i40iw_put_cqp_request(&iwdev->cqp, cqp_request); +} + +/** + * i40iw_cqp_manage_hmc_fcn_callback - called 
function after cqp completion + * @cqp_request: cqp request info struct for hmc fun + * @unused: unused param of callback + */ +static void i40iw_cqp_manage_hmc_fcn_callback(struct i40iw_cqp_request *cqp_request, + u32 unused) +{ + struct virtchnl_work *work; + struct i40iw_hmc_fcn_info *hmcfcninfo = + &cqp_request->info.in.u.manage_hmc_pm.info; + struct i40iw_device *iwdev = + (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev-> + back_dev; + + if (hmcfcninfo && hmcfcninfo->callback_fcn) { + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s1\n", __func__); + atomic_inc(&cqp_request->refcount); + work = &iwdev->virtchnl_w[hmcfcninfo->iw_vf_idx]; + work->cqp_request = cqp_request; + INIT_WORK(&work->work, i40iw_cqp_manage_hmc_fcn_worker); + queue_work(iwdev->virtchnl_wq, &work->work); + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s2\n", __func__); + } else { + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s: Something wrong\n", __func__); + } +} + +/** + * i40iw_cqp_manage_hmc_fcn_cmd - issue cqp command to manage hmc + * @dev: hardware control device structure + * @hmcfcninfo: info for hmc + */ +enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev, + struct i40iw_hmc_fcn_info *hmcfcninfo) +{ + enum i40iw_status_code status; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s\n", __func__); + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false); + if (!cqp_request) + return I40IW_ERR_NO_MEMORY; + cqp_info = &cqp_request->info; + cqp_request->callback_fcn = i40iw_cqp_manage_hmc_fcn_callback; + cqp_request->param = hmcfcninfo; + memcpy(&cqp_info->in.u.manage_hmc_pm.info, hmcfcninfo, + sizeof(*hmcfcninfo)); + cqp_info->in.u.manage_hmc_pm.dev = dev; + cqp_info->cqp_cmd = OP_MANAGE_HMC_PM_FUNC_TABLE; + cqp_info->post_sq = 1; + cqp_info->in.u.manage_hmc_pm.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Manage HMC fail"); + return status; +} + +/** + * i40iw_cqp_query_fpm_values_cmd - send cqp command for fpm + * @iwdev: function device struct + * @values_mem: buffer for fpm + * @hmc_fn_id: function id for fpm + */ +enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev, + struct i40iw_dma_mem *values_mem, + u8 hmc_fn_id) +{ + enum i40iw_status_code status; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); + if (!cqp_request) + return I40IW_ERR_NO_MEMORY; + cqp_info = &cqp_request->info; + cqp_request->param = NULL; + cqp_info->in.u.query_fpm_values.cqp = dev->cqp; + cqp_info->in.u.query_fpm_values.fpm_values_pa = values_mem->pa; + cqp_info->in.u.query_fpm_values.fpm_values_va = values_mem->va; + cqp_info->in.u.query_fpm_values.hmc_fn_id = hmc_fn_id; + cqp_info->cqp_cmd = OP_QUERY_FPM_VALUES; + cqp_info->post_sq = 1; + cqp_info->in.u.query_fpm_values.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Query FPM fail"); + return status; +} + +/** + * i40iw_cqp_commit_fpm_values_cmd - commit fpm values in hw + * @dev: hardware control device structure + * @values_mem: buffer with fpm values + * @hmc_fn_id: function id for fpm + */ +enum i40iw_status_code 
i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev, + struct i40iw_dma_mem *values_mem, + u8 hmc_fn_id) +{ + enum i40iw_status_code status; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); + if (!cqp_request) + return I40IW_ERR_NO_MEMORY; + cqp_info = &cqp_request->info; + cqp_request->param = NULL; + cqp_info->in.u.commit_fpm_values.cqp = dev->cqp; + cqp_info->in.u.commit_fpm_values.fpm_values_pa = values_mem->pa; + cqp_info->in.u.commit_fpm_values.fpm_values_va = values_mem->va; + cqp_info->in.u.commit_fpm_values.hmc_fn_id = hmc_fn_id; + cqp_info->cqp_cmd = OP_COMMIT_FPM_VALUES; + cqp_info->post_sq = 1; + cqp_info->in.u.commit_fpm_values.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Commit FPM fail"); + return status; +} + +/** + * i40iw_vf_wait_vchnl_resp - wait for channel msg + * @iwdev: function's device struct + */ +enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev) +{ + struct i40iw_device *iwdev = dev->back_dev; + enum i40iw_status_code err_code = 0; + int timeout_ret; + + i40iw_debug(dev, I40IW_DEBUG_VIRT, "%s[%u] dev %p, iwdev %p\n", + __func__, __LINE__, dev, iwdev); + atomic_add(2, &iwdev->vchnl_msgs); + timeout_ret = wait_event_timeout(iwdev->vchnl_waitq, + (atomic_read(&iwdev->vchnl_msgs) == 1), + I40IW_VCHNL_EVENT_TIMEOUT); + atomic_dec(&iwdev->vchnl_msgs); + if (!timeout_ret) { + i40iw_pr_err("virt channel completion timeout = 0x%x\n", timeout_ret); + err_code = I40IW_ERR_TIMEOUT; + } + return err_code; +} + +/** + * i40iw_ieq_mpa_crc_ae - generate AE for crc error + * @dev: hardware control device structure + * @qp: hardware control qp + */ +void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp) +{ + struct i40iw_qp_flush_info info; + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + + i40iw_debug(dev, I40IW_DEBUG_AEQ, "%s entered\n", __func__); + memset(&info, 0, sizeof(info)); + info.ae_code = I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR; + info.generate_ae = true; + info.ae_source = 0x3; + (void)i40iw_hw_flush_wqes(iwdev, qp, &info, false); +} + +/** + * i40iw_init_hash_desc - initialize hash for crc calculation + * @desc: cryption type + */ +enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc) +{ + struct crypto_shash *tfm; + struct shash_desc *tdesc; + + tfm = crypto_alloc_shash("crc32c", 0, 0); + if (IS_ERR(tfm)) + return I40IW_ERR_MPA_CRC; + + tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm), + GFP_KERNEL); + if (!tdesc) { + crypto_free_shash(tfm); + return I40IW_ERR_MPA_CRC; + } + tdesc->tfm = tfm; + *desc = tdesc; + + return 0; +} + +/** + * i40iw_free_hash_desc - free hash desc + * @desc: to be freed + */ +void i40iw_free_hash_desc(struct shash_desc *desc) +{ + if (desc) { + crypto_free_shash(desc->tfm); + kfree(desc); + } +} + +/** + * i40iw_alloc_query_fpm_buf - allocate buffer for fpm + * @dev: hardware control device structure + * @mem: buffer ptr for fpm to be allocated + * @return: memory allocation status + */ +enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev, + struct i40iw_dma_mem *mem) +{ + enum i40iw_status_code status; + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + + status = i40iw_obj_aligned_mem(iwdev, mem, I40IW_QUERY_FPM_BUF_SIZE, + I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK); + return 
status; +} + +/** + * i40iw_ieq_check_mpacrc - check if mpa crc is OK + * @desc: desc for hash + * @addr: address of buffer for crc + * @length: length of buffer + * @value: value to be compared + */ +enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc, + void *addr, + u32 length, + u32 value) +{ + u32 crc = 0; + int ret; + enum i40iw_status_code ret_code = 0; + + crypto_shash_init(desc); + ret = crypto_shash_update(desc, addr, length); + if (!ret) + crypto_shash_final(desc, (u8 *)&crc); + if (crc != value) { + i40iw_pr_err("mpa crc check fail\n"); + ret_code = I40IW_ERR_MPA_CRC; + } + return ret_code; +} + +/** + * i40iw_ieq_get_qp - get qp based on quad in puda buffer + * @dev: hardware control device structure + * @buf: receive puda buffer on exception q + */ +struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev, + struct i40iw_puda_buf *buf) +{ + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + struct i40iw_qp *iwqp; + struct i40iw_cm_node *cm_node; + u32 loc_addr[4], rem_addr[4]; + u16 loc_port, rem_port; + struct ipv6hdr *ip6h; + struct iphdr *iph = (struct iphdr *)buf->iph; + struct tcphdr *tcph = (struct tcphdr *)buf->tcph; + + if (iph->version == 4) { + memset(loc_addr, 0, sizeof(loc_addr)); + loc_addr[0] = ntohl(iph->daddr); + memset(rem_addr, 0, sizeof(rem_addr)); + rem_addr[0] = ntohl(iph->saddr); + } else { + ip6h = (struct ipv6hdr *)buf->iph; + i40iw_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32); + i40iw_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32); + } + loc_port = ntohs(tcph->dest); + rem_port = ntohs(tcph->source); + + cm_node = i40iw_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port, + loc_addr, false); + if (!cm_node) + return NULL; + iwqp = cm_node->iwqp; + return &iwqp->sc_qp; +} + +/** + * i40iw_ieq_update_tcpip_info - update tcpip in the buffer + * @buf: puda to update + * @length: length of buffer + * @seqnum: seq number for tcp + */ +void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum) +{ + struct tcphdr *tcph; + struct iphdr *iph; + u16 iphlen; + u16 packetsize; + u8 *addr = (u8 *)buf->mem.va; + + iphlen = (buf->ipv4) ? 20 : 40; + iph = (struct iphdr *)(addr + buf->maclen); + tcph = (struct tcphdr *)(addr + buf->maclen + iphlen); + packetsize = length + buf->tcphlen + iphlen; + + iph->tot_len = htons(packetsize); + tcph->seq = htonl(seqnum); +} + +/** + * i40iw_puda_get_tcpip_info - get tcpip info from puda buffer + * @info: to get information + * @buf: puda buffer + */ +enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info, + struct i40iw_puda_buf *buf) +{ + struct iphdr *iph; + struct ipv6hdr *ip6h; + struct tcphdr *tcph; + u16 iphlen; + u16 pkt_len; + u8 *mem = (u8 *)buf->mem.va; + struct ethhdr *ethh = (struct ethhdr *)buf->mem.va; + + if (ethh->h_proto == htons(0x8100)) { + info->vlan_valid = true; + buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) & VLAN_VID_MASK; + } + buf->maclen = (info->vlan_valid) ? 18 : 14; + iphlen = (info->l3proto) ? 40 : 20; + buf->ipv4 = (info->l3proto) ? 
false : true; + buf->iph = mem + buf->maclen; + iph = (struct iphdr *)buf->iph; + + buf->tcph = buf->iph + iphlen; + tcph = (struct tcphdr *)buf->tcph; + + if (buf->ipv4) { + pkt_len = ntohs(iph->tot_len); + } else { + ip6h = (struct ipv6hdr *)buf->iph; + pkt_len = ntohs(ip6h->payload_len) + iphlen; + } + + buf->totallen = pkt_len + buf->maclen; + + if (info->payload_len < buf->totallen - 4) { + i40iw_pr_err("payload_len = 0x%x totallen expected0x%x\n", + info->payload_len, buf->totallen); + return I40IW_ERR_INVALID_SIZE; + } + + buf->tcphlen = (tcph->doff) << 2; + buf->datalen = pkt_len - iphlen - buf->tcphlen; + buf->data = (buf->datalen) ? buf->tcph + buf->tcphlen : NULL; + buf->hdrlen = buf->maclen + iphlen + buf->tcphlen; + buf->seqnum = ntohl(tcph->seq); + return 0; +} + +/** + * i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats + * @dev: hardware control device structure + */ +static void i40iw_hw_stats_timeout(unsigned long dev) +{ + struct i40iw_sc_dev *pf_dev = (struct i40iw_sc_dev *)dev; + struct i40iw_dev_pestat *pf_devstat = &pf_dev->dev_pestat; + struct i40iw_dev_pestat *vf_devstat = NULL; + u16 iw_vf_idx; + unsigned long flags; + + /*PF*/ + pf_devstat->ops.iw_hw_stat_read_all(pf_devstat, &pf_devstat->hw_stats); + for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) { + spin_lock_irqsave(&pf_devstat->stats_lock, flags); + if (pf_dev->vf_dev[iw_vf_idx]) { + if (pf_dev->vf_dev[iw_vf_idx]->stats_initialized) { + vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->dev_pestat; + vf_devstat->ops.iw_hw_stat_read_all(vf_devstat, &vf_devstat->hw_stats); + } + } + spin_unlock_irqrestore(&pf_devstat->stats_lock, flags); + } + + mod_timer(&pf_devstat->stats_timer, + jiffies + msecs_to_jiffies(STATS_TIMER_DELAY)); +} + +/** + * i40iw_hw_stats_start_timer - Start periodic stats timer + * @dev: hardware control device structure + */ +void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *dev) +{ + struct i40iw_dev_pestat *devstat = &dev->dev_pestat; + + init_timer(&devstat->stats_timer); + devstat->stats_timer.function = i40iw_hw_stats_timeout; + devstat->stats_timer.data = (unsigned long)dev; + mod_timer(&devstat->stats_timer, + jiffies + msecs_to_jiffies(STATS_TIMER_DELAY)); +} + +/** + * i40iw_hw_stats_del_timer - Delete periodic stats timer + * @dev: hardware control device structure + */ +void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *dev) +{ + struct i40iw_dev_pestat *devstat = &dev->dev_pestat; + + del_timer_sync(&devstat->stats_timer); +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c new file mode 100644 index 000000000..1fe3b84a0 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -0,0 +1,2437 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. 
+* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/random.h> +#include <linux/highmem.h> +#include <linux/time.h> +#include <asm/byteorder.h> +#include <net/ip.h> +#include <rdma/ib_verbs.h> +#include <rdma/iw_cm.h> +#include <rdma/ib_user_verbs.h> +#include <rdma/ib_umem.h> +#include "i40iw.h" + +/** + * i40iw_query_device - get device attributes + * @ibdev: device pointer from stack + * @props: returning device attributes + * @udata: user data + */ +static int i40iw_query_device(struct ib_device *ibdev, + struct ib_device_attr *props, + struct ib_udata *udata) +{ + struct i40iw_device *iwdev = to_iwdev(ibdev); + + if (udata->inlen || udata->outlen) + return -EINVAL; + memset(props, 0, sizeof(*props)); + ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr); + props->fw_ver = I40IW_FW_VERSION; + props->device_cap_flags = iwdev->device_cap_flags; + props->vendor_id = iwdev->vendor_id; + props->vendor_part_id = iwdev->vendor_part_id; + props->hw_ver = (u32)iwdev->sc_dev.hw_rev; + props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE; + props->max_qp = iwdev->max_qp; + props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1; + props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; + props->max_cq = iwdev->max_cq; + props->max_cqe = iwdev->max_cqe; + props->max_mr = iwdev->max_mr; + props->max_pd = iwdev->max_pd; + props->max_sge_rd = 1; + props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE; + props->max_qp_init_rd_atom = props->max_qp_rd_atom; + props->atomic_cap = IB_ATOMIC_NONE; + props->max_map_per_fmr = 1; + return 0; +} + +/** + * i40iw_query_port - get port attrubutes + * @ibdev: device pointer from stack + * @port: port number for query + * @props: returning device attributes + */ +static int i40iw_query_port(struct ib_device *ibdev, + u8 port, + struct ib_port_attr *props) +{ + struct i40iw_device *iwdev = to_iwdev(ibdev); + struct net_device *netdev = iwdev->netdev; + + memset(props, 0, sizeof(*props)); + + props->max_mtu = IB_MTU_4096; + if (netdev->mtu >= 4096) + props->active_mtu = IB_MTU_4096; + else if (netdev->mtu >= 2048) + props->active_mtu = IB_MTU_2048; + else if (netdev->mtu >= 1024) + props->active_mtu = IB_MTU_1024; + else if (netdev->mtu >= 512) + props->active_mtu = IB_MTU_512; + else + props->active_mtu = IB_MTU_256; + + props->lid = 1; + if (netif_carrier_ok(iwdev->netdev)) + props->state = IB_PORT_ACTIVE; + else + props->state = IB_PORT_DOWN; + props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | + IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; + props->gid_tbl_len = 1; + props->pkey_tbl_len = 1; + props->active_width = IB_WIDTH_4X; + props->active_speed = 1; + props->max_msg_sz = 0x80000000; + return 0; +} + +/** + * i40iw_alloc_ucontext - Allocate the 
user context data structure + * @ibdev: device pointer from stack + * @udata: user data + * + * This keeps track of all objects associated with a particular + * user-mode client. + */ +static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata) +{ + struct i40iw_device *iwdev = to_iwdev(ibdev); + struct i40iw_alloc_ucontext_req req; + struct i40iw_alloc_ucontext_resp uresp; + struct i40iw_ucontext *ucontext; + + if (ib_copy_from_udata(&req, udata, sizeof(req))) + return ERR_PTR(-EINVAL); + + if (req.userspace_ver != I40IW_ABI_USERSPACE_VER) { + i40iw_pr_err("Invalid userspace driver version detected. Detected version %d, should be %d\n", + req.userspace_ver, I40IW_ABI_USERSPACE_VER); + return ERR_PTR(-EINVAL); + } + + memset(&uresp, 0, sizeof(uresp)); + uresp.max_qps = iwdev->max_qp; + uresp.max_pds = iwdev->max_pd; + uresp.wq_size = iwdev->max_qp_wr * 2; + uresp.kernel_ver = I40IW_ABI_KERNEL_VER; + + ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL); + if (!ucontext) + return ERR_PTR(-ENOMEM); + + ucontext->iwdev = iwdev; + + if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { + kfree(ucontext); + return ERR_PTR(-EFAULT); + } + + INIT_LIST_HEAD(&ucontext->cq_reg_mem_list); + spin_lock_init(&ucontext->cq_reg_mem_list_lock); + INIT_LIST_HEAD(&ucontext->qp_reg_mem_list); + spin_lock_init(&ucontext->qp_reg_mem_list_lock); + + return &ucontext->ibucontext; +} + +/** + * i40iw_dealloc_ucontext - deallocate the user context data structure + * @context: user context created during alloc + */ +static int i40iw_dealloc_ucontext(struct ib_ucontext *context) +{ + struct i40iw_ucontext *ucontext = to_ucontext(context); + unsigned long flags; + + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); + if (!list_empty(&ucontext->cq_reg_mem_list)) { + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + return -EBUSY; + } + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); + if (!list_empty(&ucontext->qp_reg_mem_list)) { + spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); + return -EBUSY; + } + spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); + + kfree(ucontext); + return 0; +} + +/** + * i40iw_mmap - user memory map + * @context: context created during alloc + * @vma: kernel info for user memory map + */ +static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) +{ + struct i40iw_ucontext *ucontext; + u64 db_addr_offset; + u64 push_offset; + + ucontext = to_ucontext(context); + if (ucontext->iwdev->sc_dev.is_pf) { + db_addr_offset = I40IW_DB_ADDR_OFFSET; + push_offset = I40IW_PUSH_OFFSET; + if (vma->vm_pgoff) + vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1; + } else { + db_addr_offset = I40IW_VF_DB_ADDR_OFFSET; + push_offset = I40IW_VF_PUSH_OFFSET; + if (vma->vm_pgoff) + vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1; + } + + vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT; + + if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) { + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_private_data = ucontext; + } else { + if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2) + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + else + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + } + + if (io_remap_pfn_range(vma, vma->vm_start, + vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT), + PAGE_SIZE, vma->vm_page_prot)) + return -EAGAIN; + + return 
0; +} + +/** + * i40iw_alloc_push_page - allocate a push page for qp + * @iwdev: iwarp device + * @qp: hardware control qp + */ +static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp) +{ + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + enum i40iw_status_code status; + + if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX) + return; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); + if (!cqp_request) + return; + + atomic_inc(&cqp_request->refcount); + + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE; + cqp_info->post_sq = 1; + + cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle; + cqp_info->in.u.manage_push_page.info.free_page = 0; + cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp; + cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request; + + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (!status) + qp->push_idx = cqp_request->compl_info.op_ret_val; + else + i40iw_pr_err("CQP-OP Push page fail"); + i40iw_put_cqp_request(&iwdev->cqp, cqp_request); +} + +/** + * i40iw_dealloc_push_page - free a push page for qp + * @iwdev: iwarp device + * @qp: hardware control qp + */ +static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp) +{ + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + enum i40iw_status_code status; + + if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) + return; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false); + if (!cqp_request) + return; + + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE; + cqp_info->post_sq = 1; + + cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx; + cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle; + cqp_info->in.u.manage_push_page.info.free_page = 1; + cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp; + cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request; + + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (!status) + qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX; + else + i40iw_pr_err("CQP-OP Push page fail"); +} + +/** + * i40iw_alloc_pd - allocate protection domain + * @ibdev: device pointer from stack + * @context: user context created during alloc + * @udata: user data + */ +static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct i40iw_pd *iwpd; + struct i40iw_device *iwdev = to_iwdev(ibdev); + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_alloc_pd_resp uresp; + struct i40iw_sc_pd *sc_pd; + u32 pd_id = 0; + int err; + + err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds, + iwdev->max_pd, &pd_id, &iwdev->next_pd); + if (err) { + i40iw_pr_err("alloc resource failed\n"); + return ERR_PTR(err); + } + + iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL); + if (!iwpd) { + err = -ENOMEM; + goto free_res; + } + + sc_pd = &iwpd->sc_pd; + dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id); + + if (context) { + memset(&uresp, 0, sizeof(uresp)); + uresp.pd_id = pd_id; + if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { + err = -EFAULT; + goto error; + } + } + + i40iw_add_pdusecount(iwpd); + return &iwpd->ibpd; +error: + kfree(iwpd); +free_res: + i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id); + return ERR_PTR(err); +} + +/** + * i40iw_dealloc_pd - deallocate pd + * @ibpd: ptr of pd to be deallocated + */ 
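/*
 * Editorial note, not part of the original patch: protection domains are use-counted
 * rather than freed in place. i40iw_alloc_pd() above takes the initial count with
 * i40iw_add_pdusecount(), and each QP and MEM-type memory registration bound to the
 * PD takes another; i40iw_dealloc_pd() below therefore only drops a reference, and
 * the PD id and memory are released inside i40iw_rem_pdusecount() once the count
 * hits zero. A minimal sketch of the pairing, assuming the helpers defined earlier
 * in this patch:
 *
 *	i40iw_add_pdusecount(iwpd);		// alloc_pd, create_qp, reg_user_mr
 *	...
 *	i40iw_rem_pdusecount(iwpd, iwdev);	// dealloc_pd and destroy paths; frees at zero
 */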
+static int i40iw_dealloc_pd(struct ib_pd *ibpd) +{ + struct i40iw_pd *iwpd = to_iwpd(ibpd); + struct i40iw_device *iwdev = to_iwdev(ibpd->device); + + i40iw_rem_pdusecount(iwpd, iwdev); + return 0; +} + +/** + * i40iw_qp_roundup - return round up qp ring size + * @wr_ring_size: ring size to round up + */ +static int i40iw_qp_roundup(u32 wr_ring_size) +{ + int scount = 1; + + if (wr_ring_size < I40IWQP_SW_MIN_WQSIZE) + wr_ring_size = I40IWQP_SW_MIN_WQSIZE; + + for (wr_ring_size--; scount <= 16; scount *= 2) + wr_ring_size |= wr_ring_size >> scount; + return ++wr_ring_size; +} + +/** + * i40iw_get_pbl - Retrieve pbl from a list given a virtual + * address + * @va: user virtual address + * @pbl_list: pbl list to search in (QP's or CQ's) + */ +static struct i40iw_pbl *i40iw_get_pbl(unsigned long va, + struct list_head *pbl_list) +{ + struct i40iw_pbl *iwpbl; + + list_for_each_entry(iwpbl, pbl_list, list) { + if (iwpbl->user_base == va) { + list_del(&iwpbl->list); + return iwpbl; + } + } + return NULL; +} + +/** + * i40iw_free_qp_resources - free up memory resources for qp + * @iwdev: iwarp device + * @iwqp: qp ptr (user or kernel) + * @qp_num: qp number assigned + */ +void i40iw_free_qp_resources(struct i40iw_device *iwdev, + struct i40iw_qp *iwqp, + u32 qp_num) +{ + i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp); + if (qp_num) + i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num); + i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem); + i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem); + kfree(iwqp->kqp.wrid_mem); + iwqp->kqp.wrid_mem = NULL; + kfree(iwqp->allocated_buffer); + iwqp->allocated_buffer = NULL; +} + +/** + * i40iw_clean_cqes - clean cq entries for qp + * @iwqp: qp ptr (user or kernel) + * @iwcq: cq ptr + */ +static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq) +{ + struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk; + + ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq); +} + +/** + * i40iw_destroy_qp - destroy qp + * @ibqp: qp's ib pointer also to get to device's qp address + */ +static int i40iw_destroy_qp(struct ib_qp *ibqp) +{ + struct i40iw_qp *iwqp = to_iwqp(ibqp); + + iwqp->destroyed = 1; + + if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS) + i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0); + + if (!iwqp->user_mode) { + if (iwqp->iwscq) { + i40iw_clean_cqes(iwqp, iwqp->iwscq); + if (iwqp->iwrcq != iwqp->iwscq) + i40iw_clean_cqes(iwqp, iwqp->iwrcq); + } + } + + i40iw_rem_ref(&iwqp->ibqp); + return 0; +} + +/** + * i40iw_setup_virt_qp - setup for allocation of virtual qp + * @dev: iwarp device + * @qp: qp ptr + * @init_info: initialize info to return + */ +static int i40iw_setup_virt_qp(struct i40iw_device *iwdev, + struct i40iw_qp *iwqp, + struct i40iw_qp_init_info *init_info) +{ + struct i40iw_pbl *iwpbl = iwqp->iwpbl; + struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr; + + iwqp->page = qpmr->sq_page; + init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow); + if (iwpbl->pbl_allocated) { + init_info->virtual_map = true; + init_info->sq_pa = qpmr->sq_pbl.idx; + init_info->rq_pa = qpmr->rq_pbl.idx; + } else { + init_info->sq_pa = qpmr->sq_pbl.addr; + init_info->rq_pa = qpmr->rq_pbl.addr; + } + return 0; +} + +/** + * i40iw_setup_kmode_qp - setup initialization for kernel mode qp + * @iwdev: iwarp device + * @iwqp: qp ptr (user or kernel) + * @info: initialize info to return + */ +static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev, + struct i40iw_qp *iwqp, + struct i40iw_qp_init_info *info) +{ + struct i40iw_dma_mem 
*mem = &iwqp->kqp.dma_mem; + u32 sqdepth, rqdepth; + u32 sq_size, rq_size; + u8 sqshift, rqshift; + u32 size; + enum i40iw_status_code status; + struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info; + + ukinfo->max_sq_frag_cnt = I40IW_MAX_WQ_FRAGMENT_COUNT; + + sq_size = i40iw_qp_roundup(ukinfo->sq_size + 1); + rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1); + + status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, &sqshift); + if (!status) + status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, &rqshift); + + if (status) + return -ENOSYS; + + sqdepth = sq_size << sqshift; + rqdepth = rq_size << rqshift; + + size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3); + iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL); + + ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem; + if (!ukinfo->sq_wrtrk_array) + return -ENOMEM; + + ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth]; + + size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE; + size += (I40IW_SHADOW_AREA_SIZE << 3); + + status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256); + if (status) { + kfree(ukinfo->sq_wrtrk_array); + ukinfo->sq_wrtrk_array = NULL; + return -ENOMEM; + } + + ukinfo->sq = mem->va; + info->sq_pa = mem->pa; + + ukinfo->rq = &ukinfo->sq[sqdepth]; + info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE); + + ukinfo->shadow_area = ukinfo->rq[rqdepth].elem; + info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE); + + ukinfo->sq_size = sq_size; + ukinfo->rq_size = rq_size; + ukinfo->qp_id = iwqp->ibqp.qp_num; + return 0; +} + +/** + * i40iw_create_qp - create qp + * @ibpd: ptr of pd + * @init_attr: attributes for qp + * @udata: user data for create qp + */ +static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct i40iw_pd *iwpd = to_iwpd(ibpd); + struct i40iw_device *iwdev = to_iwdev(ibpd->device); + struct i40iw_cqp *iwcqp = &iwdev->cqp; + struct i40iw_qp *iwqp; + struct i40iw_ucontext *ucontext; + struct i40iw_create_qp_req req; + struct i40iw_create_qp_resp uresp; + u32 qp_num = 0; + void *mem; + enum i40iw_status_code ret; + int err_code; + int sq_size; + int rq_size; + struct i40iw_sc_qp *qp; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_qp_init_info init_info; + struct i40iw_create_qp_info *qp_info; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + + struct i40iw_qp_host_ctx_info *ctx_info; + struct i40iwarp_offload_info *iwarp_info; + unsigned long flags; + + if (init_attr->create_flags) + return ERR_PTR(-EINVAL); + if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE) + init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE; + + memset(&init_info, 0, sizeof(init_info)); + + sq_size = init_attr->cap.max_send_wr; + rq_size = init_attr->cap.max_recv_wr; + + init_info.qp_uk_init_info.sq_size = sq_size; + init_info.qp_uk_init_info.rq_size = rq_size; + init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge; + init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge; + + mem = kzalloc(sizeof(*iwqp), GFP_KERNEL); + if (!mem) + return ERR_PTR(-ENOMEM); + + iwqp = (struct i40iw_qp *)mem; + qp = &iwqp->sc_qp; + qp->back_qp = (void *)iwqp; + qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX; + + iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info; + + if (i40iw_allocate_dma_mem(dev->hw, + &iwqp->q2_ctx_mem, + I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE, + 
256)) { + i40iw_pr_err("dma_mem failed\n"); + err_code = -ENOMEM; + goto error; + } + + init_info.q2 = iwqp->q2_ctx_mem.va; + init_info.q2_pa = iwqp->q2_ctx_mem.pa; + + init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE; + init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE; + + err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp, + &qp_num, &iwdev->next_qp); + if (err_code) { + i40iw_pr_err("qp resource\n"); + goto error; + } + + iwqp->allocated_buffer = mem; + iwqp->iwdev = iwdev; + iwqp->iwpd = iwpd; + iwqp->ibqp.qp_num = qp_num; + qp = &iwqp->sc_qp; + iwqp->iwscq = to_iwcq(init_attr->send_cq); + iwqp->iwrcq = to_iwcq(init_attr->recv_cq); + + iwqp->host_ctx.va = init_info.host_ctx; + iwqp->host_ctx.pa = init_info.host_ctx_pa; + iwqp->host_ctx.size = I40IW_QP_CTX_SIZE; + + init_info.pd = &iwpd->sc_pd; + init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num; + iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp; + + if (init_attr->qp_type != IB_QPT_RC) { + err_code = -ENOSYS; + goto error; + } + if (iwdev->push_mode) + i40iw_alloc_push_page(iwdev, qp); + if (udata) { + err_code = ib_copy_from_udata(&req, udata, sizeof(req)); + if (err_code) { + i40iw_pr_err("ib_copy_from_data\n"); + goto error; + } + iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx; + if (ibpd->uobject && ibpd->uobject->context) { + iwqp->user_mode = 1; + ucontext = to_ucontext(ibpd->uobject->context); + + if (req.user_wqe_buffers) { + spin_lock_irqsave( + &ucontext->qp_reg_mem_list_lock, flags); + iwqp->iwpbl = i40iw_get_pbl( + (unsigned long)req.user_wqe_buffers, + &ucontext->qp_reg_mem_list); + spin_unlock_irqrestore( + &ucontext->qp_reg_mem_list_lock, flags); + + if (!iwqp->iwpbl) { + err_code = -ENODATA; + i40iw_pr_err("no pbl info\n"); + goto error; + } + } + } + err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info); + } else { + err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info); + } + + if (err_code) { + i40iw_pr_err("setup qp failed\n"); + goto error; + } + + init_info.type = I40IW_QP_TYPE_IWARP; + ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info); + if (ret) { + err_code = -EPROTO; + i40iw_pr_err("qp_init fail\n"); + goto error; + } + ctx_info = &iwqp->ctx_info; + iwarp_info = &iwqp->iwarp_info; + iwarp_info->rd_enable = true; + iwarp_info->wr_rdresp_en = true; + if (!iwqp->user_mode) + iwarp_info->priv_mode_en = true; + iwarp_info->ddp_ver = 1; + iwarp_info->rdmap_ver = 1; + + ctx_info->iwarp_info_valid = true; + ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; + ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; + if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) { + ctx_info->push_mode_en = false; + } else { + ctx_info->push_mode_en = true; + ctx_info->push_idx = qp->push_idx; + } + + ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, + (u64 *)iwqp->host_ctx.va, + ctx_info); + ctx_info->iwarp_info_valid = false; + cqp_request = i40iw_get_cqp_request(iwcqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + goto error; + } + cqp_info = &cqp_request->info; + qp_info = &cqp_request->info.in.u.qp_create.info; + + memset(qp_info, 0, sizeof(*qp_info)); + + qp_info->cq_num_valid = true; + qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE; + + cqp_info->cqp_cmd = OP_QP_CREATE; + cqp_info->post_sq = 1; + cqp_info->in.u.qp_create.qp = qp; + cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request; + ret = i40iw_handle_cqp_op(iwdev, cqp_request); + if (ret) { + i40iw_pr_err("CQP-OP QP create fail"); + err_code = -EACCES; + goto error; + } + + 
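/*
 * Editorial note, not part of the original patch: the i40iw_add_ref() call that
 * follows takes the QP's initial reference. i40iw_destroy_qp() drops it again via
 * i40iw_rem_ref(), and only when the count reaches zero does the driver clear
 * qp_table[qp_num] and post the OP_QP_DESTROY CQP command (see i40iw_rem_ref()
 * earlier in this patch). A minimal sketch of the pairing, assuming those helpers
 * behave as defined above:
 *
 *	i40iw_add_ref(&iwqp->ibqp);	// create path: refcount 0 -> 1
 *	...				// QP in use by CM and verbs
 *	i40iw_rem_ref(&iwqp->ibqp);	// destroy path: 1 -> 0, queues HW QP destroy
 */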
i40iw_add_ref(&iwqp->ibqp); + spin_lock_init(&iwqp->lock); + iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0; + iwdev->qp_table[qp_num] = iwqp; + i40iw_add_pdusecount(iwqp->iwpd); + if (ibpd->uobject && udata) { + memset(&uresp, 0, sizeof(uresp)); + uresp.actual_sq_size = sq_size; + uresp.actual_rq_size = rq_size; + uresp.qp_id = qp_num; + uresp.push_idx = qp->push_idx; + err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (err_code) { + i40iw_pr_err("copy_to_udata failed\n"); + i40iw_destroy_qp(&iwqp->ibqp); + /* let the completion of the qp destroy free the qp */ + return ERR_PTR(err_code); + } + } + + return &iwqp->ibqp; +error: + i40iw_free_qp_resources(iwdev, iwqp, qp_num); + kfree(mem); + return ERR_PTR(err_code); +} + +/** + * i40iw_query - query qp attributes + * @ibqp: qp pointer + * @attr: attributes pointer + * @attr_mask: Not used + * @init_attr: qp attributes to return + */ +static int i40iw_query_qp(struct ib_qp *ibqp, + struct ib_qp_attr *attr, + int attr_mask, + struct ib_qp_init_attr *init_attr) +{ + struct i40iw_qp *iwqp = to_iwqp(ibqp); + struct i40iw_sc_qp *qp = &iwqp->sc_qp; + + attr->qp_access_flags = 0; + attr->cap.max_send_wr = qp->qp_uk.sq_size; + attr->cap.max_recv_wr = qp->qp_uk.rq_size; + attr->cap.max_recv_sge = 1; + attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE; + init_attr->event_handler = iwqp->ibqp.event_handler; + init_attr->qp_context = iwqp->ibqp.qp_context; + init_attr->send_cq = iwqp->ibqp.send_cq; + init_attr->recv_cq = iwqp->ibqp.recv_cq; + init_attr->srq = iwqp->ibqp.srq; + init_attr->cap = attr->cap; + return 0; +} + +/** + * i40iw_hw_modify_qp - setup cqp for modify qp + * @iwdev: iwarp device + * @iwqp: qp ptr (user or kernel) + * @info: info for modify qp + * @wait: flag to wait or not for modify qp completion + */ +void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp, + struct i40iw_modify_qp_info *info, bool wait) +{ + enum i40iw_status_code status; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + struct i40iw_modify_qp_info *m_info; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait); + if (!cqp_request) + return; + + cqp_info = &cqp_request->info; + m_info = &cqp_info->in.u.qp_modify.info; + memcpy(m_info, info, sizeof(*m_info)); + cqp_info->cqp_cmd = OP_QP_MODIFY; + cqp_info->post_sq = 1; + cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp; + cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Modify QP fail"); +} + +/** + * i40iw_modify_qp - modify qp request + * @ibqp: qp's pointer for modify + * @attr: access attributes + * @attr_mask: state mask + * @udata: user data + */ +int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + struct i40iw_qp *iwqp = to_iwqp(ibqp); + struct i40iw_device *iwdev = iwqp->iwdev; + struct i40iw_qp_host_ctx_info *ctx_info; + struct i40iwarp_offload_info *iwarp_info; + struct i40iw_modify_qp_info info; + u8 issue_modify_qp = 0; + u8 dont_wait = 0; + u32 err; + unsigned long flags; + + memset(&info, 0, sizeof(info)); + ctx_info = &iwqp->ctx_info; + iwarp_info = &iwqp->iwarp_info; + + spin_lock_irqsave(&iwqp->lock, flags); + + if (attr_mask & IB_QP_STATE) { + switch (attr->qp_state) { + case IB_QPS_INIT: + case IB_QPS_RTR: + if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) { + err = -EINVAL; + goto exit; + } + if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) { + 
info.next_iwarp_state = I40IW_QP_STATE_IDLE; + issue_modify_qp = 1; + } + break; + case IB_QPS_RTS: + if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) || + (!iwqp->cm_id)) { + err = -EINVAL; + goto exit; + } + + issue_modify_qp = 1; + iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED; + iwqp->hte_added = 1; + info.next_iwarp_state = I40IW_QP_STATE_RTS; + info.tcp_ctx_valid = true; + info.ord_valid = true; + info.arp_cache_idx_valid = true; + info.cq_num_valid = true; + break; + case IB_QPS_SQD: + if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) { + err = 0; + goto exit; + } + if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) || + (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) { + err = 0; + goto exit; + } + if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) { + err = -EINVAL; + goto exit; + } + info.next_iwarp_state = I40IW_QP_STATE_CLOSING; + issue_modify_qp = 1; + break; + case IB_QPS_SQE: + if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) { + err = -EINVAL; + goto exit; + } + info.next_iwarp_state = I40IW_QP_STATE_TERMINATE; + issue_modify_qp = 1; + break; + case IB_QPS_ERR: + case IB_QPS_RESET: + if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) { + err = -EINVAL; + goto exit; + } + if (iwqp->sc_qp.term_flags) + del_timer(&iwqp->terminate_timer); + info.next_iwarp_state = I40IW_QP_STATE_ERROR; + if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) && + iwdev->iw_status && + (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT)) + info.reset_tcp_conn = true; + else + dont_wait = 1; + issue_modify_qp = 1; + info.next_iwarp_state = I40IW_QP_STATE_ERROR; + break; + default: + err = -EINVAL; + goto exit; + } + + iwqp->ibqp_state = attr->qp_state; + + if (issue_modify_qp) + iwqp->iwarp_state = info.next_iwarp_state; + else + info.next_iwarp_state = iwqp->iwarp_state; + } + if (attr_mask & IB_QP_ACCESS_FLAGS) { + ctx_info->iwarp_info_valid = true; + if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) + iwarp_info->wr_rdresp_en = true; + if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) + iwarp_info->wr_rdresp_en = true; + if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) + iwarp_info->rd_enable = true; + if (attr->qp_access_flags & IB_ACCESS_MW_BIND) + iwarp_info->bind_en = true; + + if (iwqp->user_mode) { + iwarp_info->rd_enable = true; + iwarp_info->wr_rdresp_en = true; + iwarp_info->priv_mode_en = false; + } + } + + if (ctx_info->iwarp_info_valid) { + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + int ret; + + ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; + ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; + ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, + (u64 *)iwqp->host_ctx.va, + ctx_info); + if (ret) { + i40iw_pr_err("setting QP context\n"); + err = -EINVAL; + goto exit; + } + } + + spin_unlock_irqrestore(&iwqp->lock, flags); + + if (issue_modify_qp) + i40iw_hw_modify_qp(iwdev, iwqp, &info, true); + + if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) { + if (dont_wait) { + if (iwqp->cm_id && iwqp->hw_tcp_state) { + spin_lock_irqsave(&iwqp->lock, flags); + iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED; + iwqp->last_aeq = I40IW_AE_RESET_SENT; + spin_unlock_irqrestore(&iwqp->lock, flags); + } + } + } + return 0; +exit: + spin_unlock_irqrestore(&iwqp->lock, flags); + return err; +} + +/** + * cq_free_resources - free up recources for cq + * @iwdev: iwarp device + * @iwcq: cq ptr + */ +static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq) +{ + struct i40iw_sc_cq *cq = &iwcq->sc_cq; + + if (!iwcq->user_mode) + 
i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem); + i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id); +} + +/** + * cq_wq_destroy - send cq destroy cqp + * @iwdev: iwarp device + * @cq: hardware control cq + */ +static void cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq) +{ + enum i40iw_status_code status; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); + if (!cqp_request) + return; + + cqp_info = &cqp_request->info; + + cqp_info->cqp_cmd = OP_CQ_DESTROY; + cqp_info->post_sq = 1; + cqp_info->in.u.cq_destroy.cq = cq; + cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Destroy QP fail"); +} + +/** + * i40iw_destroy_cq - destroy cq + * @ib_cq: cq pointer + */ +static int i40iw_destroy_cq(struct ib_cq *ib_cq) +{ + struct i40iw_cq *iwcq; + struct i40iw_device *iwdev; + struct i40iw_sc_cq *cq; + + if (!ib_cq) { + i40iw_pr_err("ib_cq == NULL\n"); + return 0; + } + + iwcq = to_iwcq(ib_cq); + iwdev = to_iwdev(ib_cq->device); + cq = &iwcq->sc_cq; + cq_wq_destroy(iwdev, cq); + cq_free_resources(iwdev, iwcq); + kfree(iwcq); + return 0; +} + +/** + * i40iw_create_cq - create cq + * @ibdev: device pointer from stack + * @attr: attributes for cq + * @context: user context created during alloc + * @udata: user data + */ +static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev, + const struct ib_cq_init_attr *attr, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct i40iw_device *iwdev = to_iwdev(ibdev); + struct i40iw_cq *iwcq; + struct i40iw_pbl *iwpbl; + u32 cq_num = 0; + struct i40iw_sc_cq *cq; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_cq_init_info info; + enum i40iw_status_code status; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info; + unsigned long flags; + int err_code; + int entries = attr->cqe; + + if (entries > iwdev->max_cqe) + return ERR_PTR(-EINVAL); + + iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL); + if (!iwcq) + return ERR_PTR(-ENOMEM); + + memset(&info, 0, sizeof(info)); + + err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs, + iwdev->max_cq, &cq_num, + &iwdev->next_cq); + if (err_code) + goto error; + + cq = &iwcq->sc_cq; + cq->back_cq = (void *)iwcq; + spin_lock_init(&iwcq->lock); + + info.dev = dev; + ukinfo->cq_size = max(entries, 4); + ukinfo->cq_id = cq_num; + iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size; + info.ceqe_mask = 0; + info.ceq_id = 0; + info.ceq_id_valid = true; + info.ceqe_mask = 1; + info.type = I40IW_CQ_TYPE_IWARP; + if (context) { + struct i40iw_ucontext *ucontext; + struct i40iw_create_cq_req req; + struct i40iw_cq_mr *cqmr; + + memset(&req, 0, sizeof(req)); + iwcq->user_mode = true; + ucontext = to_ucontext(context); + if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) + goto cq_free_resources; + + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); + iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer, + &ucontext->cq_reg_mem_list); + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + if (!iwpbl) { + err_code = -EPROTO; + goto cq_free_resources; + } + + iwcq->iwpbl = iwpbl; + iwcq->cq_mem_size = 0; + cqmr = &iwpbl->cq_mr; + info.shadow_area_pa = cpu_to_le64(cqmr->shadow); + if (iwpbl->pbl_allocated) { + info.virtual_map = true; + info.pbl_chunk_size = 1; + 
info.first_pm_pbl_idx = cqmr->cq_pbl.idx; + } else { + info.cq_base_pa = cqmr->cq_pbl.addr; + } + } else { + /* Kmode allocations */ + int rsize; + int shadow; + + rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe); + rsize = round_up(rsize, 256); + shadow = I40IW_SHADOW_AREA_SIZE << 3; + status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem, + rsize + shadow, 256); + if (status) { + err_code = -ENOMEM; + goto cq_free_resources; + } + ukinfo->cq_base = iwcq->kmem.va; + info.cq_base_pa = iwcq->kmem.pa; + info.shadow_area_pa = info.cq_base_pa + rsize; + ukinfo->shadow_area = iwcq->kmem.va + rsize; + } + + if (dev->iw_priv_cq_ops->cq_init(cq, &info)) { + i40iw_pr_err("init cq fail\n"); + err_code = -EPROTO; + goto cq_free_resources; + } + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + goto cq_free_resources; + } + + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = OP_CQ_CREATE; + cqp_info->post_sq = 1; + cqp_info->in.u.cq_create.cq = cq; + cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) { + i40iw_pr_err("CQP-OP Create QP fail"); + err_code = -EPROTO; + goto cq_free_resources; + } + + if (context) { + struct i40iw_create_cq_resp resp; + + memset(&resp, 0, sizeof(resp)); + resp.cq_id = info.cq_uk_init_info.cq_id; + resp.cq_size = info.cq_uk_init_info.cq_size; + if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { + i40iw_pr_err("copy to user data\n"); + err_code = -EPROTO; + goto cq_destroy; + } + } + + return (struct ib_cq *)iwcq; + +cq_destroy: + cq_wq_destroy(iwdev, cq); +cq_free_resources: + cq_free_resources(iwdev, iwcq); +error: + kfree(iwcq); + return ERR_PTR(err_code); +} + +/** + * i40iw_get_user_access - get hw access from IB access + * @acc: IB access to return hw access + */ +static inline u16 i40iw_get_user_access(int acc) +{ + u16 access = 0; + + access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0; + access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0; + access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0; + access |= (acc & IB_ACCESS_MW_BIND) ? 
I40IW_ACCESS_FLAGS_BIND_WINDOW : 0; + return access; +} + +/** + * i40iw_free_stag - free stag resource + * @iwdev: iwarp device + * @stag: stag to free + */ +static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag) +{ + u32 stag_idx; + + stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT; + i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx); +} + +/** + * i40iw_create_stag - create random stag + * @iwdev: iwarp device + */ +static u32 i40iw_create_stag(struct i40iw_device *iwdev) +{ + u32 stag = 0; + u32 stag_index = 0; + u32 next_stag_index; + u32 driver_key; + u32 random; + u8 consumer_key; + int ret; + + get_random_bytes(&random, sizeof(random)); + consumer_key = (u8)random; + + driver_key = random & ~iwdev->mr_stagmask; + next_stag_index = (random & iwdev->mr_stagmask) >> 8; + next_stag_index %= iwdev->max_mr; + + ret = i40iw_alloc_resource(iwdev, + iwdev->allocated_mrs, iwdev->max_mr, + &stag_index, &next_stag_index); + if (!ret) { + stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT; + stag |= driver_key; + stag += (u32)consumer_key; + } + return stag; +} + +/** + * i40iw_next_pbl_addr - Get next pbl address + * @palloc: Poiner to allocated pbles + * @pbl: pointer to a pble + * @pinfo: info pointer + * @idx: index + */ +static inline u64 *i40iw_next_pbl_addr(struct i40iw_pble_alloc *palloc, + u64 *pbl, + struct i40iw_pble_info **pinfo, + u32 *idx) +{ + *idx += 1; + if ((!(*pinfo)) || (*idx != (*pinfo)->cnt)) + return ++pbl; + *idx = 0; + (*pinfo)++; + return (u64 *)(*pinfo)->addr; +} + +/** + * i40iw_copy_user_pgaddrs - copy user page address to pble's os locally + * @iwmr: iwmr for IB's user page addresses + * @pbl: ple pointer to save 1 level or 0 level pble + * @level: indicated level 0, 1 or 2 + */ +static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr, + u64 *pbl, + enum i40iw_pble_level level) +{ + struct ib_umem *region = iwmr->region; + struct i40iw_pbl *iwpbl = &iwmr->iwpbl; + int chunk_pages, entry, pg_shift, i; + struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; + struct i40iw_pble_info *pinfo; + struct scatterlist *sg; + u32 idx = 0; + + pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf; + pg_shift = ffs(region->page_size) - 1; + for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) { + chunk_pages = sg_dma_len(sg) >> pg_shift; + if ((iwmr->type == IW_MEMREG_TYPE_QP) && + !iwpbl->qp_mr.sq_page) + iwpbl->qp_mr.sq_page = sg_page(sg); + for (i = 0; i < chunk_pages; i++) { + *pbl = cpu_to_le64(sg_dma_address(sg) + region->page_size * i); + pbl = i40iw_next_pbl_addr(palloc, pbl, &pinfo, &idx); + } + } +} + +/** + * i40iw_setup_pbles - copy user pg address to pble's + * @iwdev: iwarp device + * @iwmr: mr pointer for this memory registration + * @use_pbles: flag if to use pble's or memory (level 0) + */ +static int i40iw_setup_pbles(struct i40iw_device *iwdev, + struct i40iw_mr *iwmr, + bool use_pbles) +{ + struct i40iw_pbl *iwpbl = &iwmr->iwpbl; + struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; + struct i40iw_pble_info *pinfo; + u64 *pbl; + enum i40iw_status_code status; + enum i40iw_pble_level level = I40IW_LEVEL_1; + + if (!use_pbles && (iwmr->page_cnt > MAX_SAVE_PAGE_ADDRS)) + return -ENOMEM; + + if (use_pbles) { + mutex_lock(&iwdev->pbl_mutex); + status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt); + mutex_unlock(&iwdev->pbl_mutex); + if (status) + return -ENOMEM; + + iwpbl->pbl_allocated = true; + level = palloc->level; + pinfo = (level == I40IW_LEVEL_1) ? 
&palloc->level1 : palloc->level2.leaf; + pbl = (u64 *)pinfo->addr; + } else { + pbl = iwmr->pgaddrmem; + } + + i40iw_copy_user_pgaddrs(iwmr, pbl, level); + return 0; +} + +/** + * i40iw_handle_q_mem - handle memory for qp and cq + * @iwdev: iwarp device + * @req: information for q memory management + * @iwpbl: pble struct + * @use_pbles: flag to use pble + */ +static int i40iw_handle_q_mem(struct i40iw_device *iwdev, + struct i40iw_mem_reg_req *req, + struct i40iw_pbl *iwpbl, + bool use_pbles) +{ + struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; + struct i40iw_mr *iwmr = iwpbl->iwmr; + struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr; + struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr; + struct i40iw_hmc_pble *hmc_p; + u64 *arr = iwmr->pgaddrmem; + int err; + int total; + + total = req->sq_pages + req->rq_pages + req->cq_pages; + + err = i40iw_setup_pbles(iwdev, iwmr, use_pbles); + if (err) + return err; + if (use_pbles && (palloc->level != I40IW_LEVEL_1)) { + i40iw_free_pble(iwdev->pble_rsrc, palloc); + iwpbl->pbl_allocated = false; + return -ENOMEM; + } + + if (use_pbles) + arr = (u64 *)palloc->level1.addr; + if (req->reg_type == IW_MEMREG_TYPE_QP) { + hmc_p = &qpmr->sq_pbl; + qpmr->shadow = (dma_addr_t)arr[total]; + if (use_pbles) { + hmc_p->idx = palloc->level1.idx; + hmc_p = &qpmr->rq_pbl; + hmc_p->idx = palloc->level1.idx + req->sq_pages; + } else { + hmc_p->addr = arr[0]; + hmc_p = &qpmr->rq_pbl; + hmc_p->addr = arr[1]; + } + } else { /* CQ */ + hmc_p = &cqmr->cq_pbl; + cqmr->shadow = (dma_addr_t)arr[total]; + if (use_pbles) + hmc_p->idx = palloc->level1.idx; + else + hmc_p->addr = arr[0]; + } + return err; +} + +/** + * i40iw_hwreg_mr - send cqp command for memory registration + * @iwdev: iwarp device + * @iwmr: iwarp mr pointer + * @access: access for MR + */ +static int i40iw_hwreg_mr(struct i40iw_device *iwdev, + struct i40iw_mr *iwmr, + u16 access) +{ + struct i40iw_pbl *iwpbl = &iwmr->iwpbl; + struct i40iw_reg_ns_stag_info *stag_info; + struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd); + struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; + enum i40iw_status_code status; + int err = 0; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + stag_info = &cqp_info->in.u.mr_reg_non_shared.info; + memset(stag_info, 0, sizeof(*stag_info)); + stag_info->va = (void *)(unsigned long)iwpbl->user_base; + stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT; + stag_info->stag_key = (u8)iwmr->stag; + stag_info->total_len = iwmr->length; + stag_info->access_rights = access; + stag_info->pd_id = iwpd->sc_pd.pd_id; + stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED; + + if (iwmr->page_cnt > 1) { + if (palloc->level == I40IW_LEVEL_1) { + stag_info->first_pm_pbl_index = palloc->level1.idx; + stag_info->chunk_size = 1; + } else { + stag_info->first_pm_pbl_index = palloc->level2.root.idx; + stag_info->chunk_size = 3; + } + } else { + stag_info->reg_addr_pa = iwmr->pgaddrmem[0]; + } + + cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED; + cqp_info->post_sq = 1; + cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev; + cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request; + + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) { + err = -ENOMEM; + i40iw_pr_err("CQP-OP MR Reg fail"); + } + return err; +} + +/** + * i40iw_reg_user_mr - Register a user memory region + * @pd: ptr of pd + * @start: virtual start address + * 
@length: length of mr + * @virt: virtual address + * @acc: access of mr + * @udata: user data + */ +static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd, + u64 start, + u64 length, + u64 virt, + int acc, + struct ib_udata *udata) +{ + struct i40iw_pd *iwpd = to_iwpd(pd); + struct i40iw_device *iwdev = to_iwdev(pd->device); + struct i40iw_ucontext *ucontext; + struct i40iw_pble_alloc *palloc; + struct i40iw_pbl *iwpbl; + struct i40iw_mr *iwmr; + struct ib_umem *region; + struct i40iw_mem_reg_req req; + u32 pbl_depth = 0; + u32 stag = 0; + u16 access; + u32 region_length; + bool use_pbles = false; + unsigned long flags; + int err = -ENOSYS; + + region = ib_umem_get(pd->uobject->context, start, length, acc, 0); + if (IS_ERR(region)) + return (struct ib_mr *)region; + + if (ib_copy_from_udata(&req, udata, sizeof(req))) { + ib_umem_release(region); + return ERR_PTR(-EFAULT); + } + + iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); + if (!iwmr) { + ib_umem_release(region); + return ERR_PTR(-ENOMEM); + } + + iwpbl = &iwmr->iwpbl; + iwpbl->iwmr = iwmr; + iwmr->region = region; + iwmr->ibmr.pd = pd; + iwmr->ibmr.device = pd->device; + ucontext = to_ucontext(pd->uobject->context); + region_length = region->length + (start & 0xfff); + pbl_depth = region_length >> 12; + pbl_depth += (region_length & (4096 - 1)) ? 1 : 0; + iwmr->length = region->length; + + iwpbl->user_base = virt; + palloc = &iwpbl->pble_alloc; + + iwmr->type = req.reg_type; + iwmr->page_cnt = pbl_depth; + + switch (req.reg_type) { + case IW_MEMREG_TYPE_QP: + use_pbles = ((req.sq_pages + req.rq_pages) > 2); + err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles); + if (err) + goto error; + spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); + list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list); + spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); + break; + case IW_MEMREG_TYPE_CQ: + use_pbles = (req.cq_pages > 1); + err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles); + if (err) + goto error; + + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); + list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list); + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + break; + case IW_MEMREG_TYPE_MEM: + access = I40IW_ACCESS_FLAGS_LOCALREAD; + + use_pbles = (iwmr->page_cnt != 1); + err = i40iw_setup_pbles(iwdev, iwmr, use_pbles); + if (err) + goto error; + + access |= i40iw_get_user_access(acc); + stag = i40iw_create_stag(iwdev); + if (!stag) { + err = -ENOMEM; + goto error; + } + + iwmr->stag = stag; + iwmr->ibmr.rkey = stag; + iwmr->ibmr.lkey = stag; + + err = i40iw_hwreg_mr(iwdev, iwmr, access); + if (err) { + i40iw_free_stag(iwdev, stag); + goto error; + } + break; + default: + goto error; + } + + iwmr->type = req.reg_type; + if (req.reg_type == IW_MEMREG_TYPE_MEM) + i40iw_add_pdusecount(iwpd); + return &iwmr->ibmr; + +error: + if (palloc->level != I40IW_LEVEL_0) + i40iw_free_pble(iwdev->pble_rsrc, palloc); + ib_umem_release(region); + kfree(iwmr); + return ERR_PTR(err); +} + +/** + * i40iw_reg_phys_mr - register kernel physical memory + * @pd: ibpd pointer + * @addr: physical address of memory to register + * @size: size of memory to register + * @acc: Access rights + * @iova_start: start of virtual address for physical buffers + */ +struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd, + u64 addr, + u64 size, + int acc, + u64 *iova_start) +{ + struct i40iw_pd *iwpd = to_iwpd(pd); + struct i40iw_device *iwdev = to_iwdev(pd->device); + struct i40iw_pbl *iwpbl; + struct i40iw_mr *iwmr; + enum 
i40iw_status_code status; + u32 stag; + u16 access = I40IW_ACCESS_FLAGS_LOCALREAD; + int ret; + + iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); + if (!iwmr) + return ERR_PTR(-ENOMEM); + iwmr->ibmr.pd = pd; + iwmr->ibmr.device = pd->device; + iwpbl = &iwmr->iwpbl; + iwpbl->iwmr = iwmr; + iwmr->type = IW_MEMREG_TYPE_MEM; + iwpbl->user_base = *iova_start; + stag = i40iw_create_stag(iwdev); + if (!stag) { + ret = -EOVERFLOW; + goto err; + } + access |= i40iw_get_user_access(acc); + iwmr->stag = stag; + iwmr->ibmr.rkey = stag; + iwmr->ibmr.lkey = stag; + iwmr->page_cnt = 1; + iwmr->pgaddrmem[0] = addr; + status = i40iw_hwreg_mr(iwdev, iwmr, access); + if (status) { + i40iw_free_stag(iwdev, stag); + ret = -ENOMEM; + goto err; + } + + i40iw_add_pdusecount(iwpd); + return &iwmr->ibmr; + err: + kfree(iwmr); + return ERR_PTR(ret); +} + +/** + * i40iw_get_dma_mr - register physical mem + * @pd: ptr of pd + * @acc: access for memory + */ +static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc) +{ + u64 kva = 0; + + return i40iw_reg_phys_mr(pd, 0, 0xffffffffffULL, acc, &kva); +} + +/** + * i40iw_del_mem_list - Deleting pbl list entries for CQ/QP + * @iwmr: iwmr for IB's user page addresses + * @ucontext: ptr to user context + */ +static void i40iw_del_memlist(struct i40iw_mr *iwmr, + struct i40iw_ucontext *ucontext) +{ + struct i40iw_pbl *iwpbl = &iwmr->iwpbl; + unsigned long flags; + + switch (iwmr->type) { + case IW_MEMREG_TYPE_CQ: + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); + if (!list_empty(&ucontext->cq_reg_mem_list)) + list_del(&iwpbl->list); + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + break; + case IW_MEMREG_TYPE_QP: + spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); + if (!list_empty(&ucontext->qp_reg_mem_list)) + list_del(&iwpbl->list); + spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); + break; + default: + break; + } +} + +/** + * i40iw_dereg_mr - deregister mr + * @ib_mr: mr ptr for dereg + */ +static int i40iw_dereg_mr(struct ib_mr *ib_mr) +{ + struct ib_pd *ibpd = ib_mr->pd; + struct i40iw_pd *iwpd = to_iwpd(ibpd); + struct i40iw_mr *iwmr = to_iwmr(ib_mr); + struct i40iw_device *iwdev = to_iwdev(ib_mr->device); + enum i40iw_status_code status; + struct i40iw_dealloc_stag_info *info; + struct i40iw_pbl *iwpbl = &iwmr->iwpbl; + struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + u32 stag_idx; + + if (iwmr->region) + ib_umem_release(iwmr->region); + + if (iwmr->type != IW_MEMREG_TYPE_MEM) { + if (ibpd->uobject) { + struct i40iw_ucontext *ucontext; + + ucontext = to_ucontext(ibpd->uobject->context); + i40iw_del_memlist(iwmr, ucontext); + } + if (iwpbl->pbl_allocated) + i40iw_free_pble(iwdev->pble_rsrc, palloc); + kfree(iwpbl->iwmr); + iwpbl->iwmr = NULL; + return 0; + } + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + info = &cqp_info->in.u.dealloc_stag.info; + memset(info, 0, sizeof(*info)); + + info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff); + info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT); + stag_idx = info->stag_idx; + info->mr = true; + if (iwpbl->pbl_allocated) + info->dealloc_pbl = true; + + cqp_info->cqp_cmd = OP_DEALLOC_STAG; + cqp_info->post_sq = 1; + cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev; + cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + 
if (status) + i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx); + i40iw_rem_pdusecount(iwpd, iwdev); + i40iw_free_stag(iwdev, iwmr->stag); + if (iwpbl->pbl_allocated) + i40iw_free_pble(iwdev->pble_rsrc, palloc); + kfree(iwmr); + return 0; +} + +/** + * i40iw_show_rev + */ +static ssize_t i40iw_show_rev(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i40iw_ib_device *iwibdev = container_of(dev, + struct i40iw_ib_device, + ibdev.dev); + u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev; + + return sprintf(buf, "%x\n", hw_rev); +} + +/** + * i40iw_show_fw_ver + */ +static ssize_t i40iw_show_fw_ver(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u32 firmware_version = I40IW_FW_VERSION; + + return sprintf(buf, "%u.%u\n", firmware_version, + (firmware_version & 0x000000ff)); +} + +/** + * i40iw_show_hca + */ +static ssize_t i40iw_show_hca(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "I40IW\n"); +} + +/** + * i40iw_show_board + */ +static ssize_t i40iw_show_board(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%.*s\n", 32, "I40IW Board ID"); +} + +static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL); +static DEVICE_ATTR(fw_ver, S_IRUGO, i40iw_show_fw_ver, NULL); +static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL); +static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL); + +static struct device_attribute *i40iw_dev_attributes[] = { + &dev_attr_hw_rev, + &dev_attr_fw_ver, + &dev_attr_hca_type, + &dev_attr_board_id +}; + +/** + * i40iw_copy_sg_list - copy sg list for qp + * @sg_list: copied into sg_list + * @sgl: copy from sgl + * @num_sges: count of sg entries + */ +static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges) +{ + unsigned int i; + + for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) { + sg_list[i].tag_off = sgl[i].addr; + sg_list[i].len = sgl[i].length; + sg_list[i].stag = sgl[i].lkey; + } +} + +/** + * i40iw_post_send - kernel application wr + * @ibqp: qp ptr for wr + * @ib_wr: work request ptr + * @bad_wr: return of bad wr if err + */ +static int i40iw_post_send(struct ib_qp *ibqp, + struct ib_send_wr *ib_wr, + struct ib_send_wr **bad_wr) +{ + struct i40iw_qp *iwqp; + struct i40iw_qp_uk *ukqp; + struct i40iw_post_sq_info info; + enum i40iw_status_code ret; + int err = 0; + unsigned long flags; + + iwqp = (struct i40iw_qp *)ibqp; + ukqp = &iwqp->sc_qp.qp_uk; + + spin_lock_irqsave(&iwqp->lock, flags); + while (ib_wr) { + memset(&info, 0, sizeof(info)); + info.wr_id = (u64)(ib_wr->wr_id); + if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all) + info.signaled = true; + if (ib_wr->send_flags & IB_SEND_FENCE) + info.read_fence = true; + + switch (ib_wr->opcode) { + case IB_WR_SEND: + if (ib_wr->send_flags & IB_SEND_SOLICITED) + info.op_type = I40IW_OP_TYPE_SEND_SOL; + else + info.op_type = I40IW_OP_TYPE_SEND; + + if (ib_wr->send_flags & IB_SEND_INLINE) { + info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr; + info.op.inline_send.len = ib_wr->sg_list[0].length; + ret = ukqp->ops.iw_inline_send(ukqp, &info, rdma_wr(ib_wr)->rkey, false); + } else { + info.op.send.num_sges = ib_wr->num_sge; + info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list; + ret = ukqp->ops.iw_send(ukqp, &info, rdma_wr(ib_wr)->rkey, false); + } + + if (ret) + err = -EIO; + break; + case IB_WR_RDMA_WRITE: + info.op_type = I40IW_OP_TYPE_RDMA_WRITE; + + if 
(ib_wr->send_flags & IB_SEND_INLINE) { + info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr; + info.op.inline_rdma_write.len = ib_wr->sg_list[0].length; + info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr; + info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey; + info.op.inline_rdma_write.rem_addr.len = ib_wr->sg_list->length; + ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false); + } else { + info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list; + info.op.rdma_write.num_lo_sges = ib_wr->num_sge; + info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr; + info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey; + info.op.rdma_write.rem_addr.len = ib_wr->sg_list->length; + ret = ukqp->ops.iw_rdma_write(ukqp, &info, false); + } + + if (ret) + err = -EIO; + break; + case IB_WR_RDMA_READ: + info.op_type = I40IW_OP_TYPE_RDMA_READ; + info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr; + info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey; + info.op.rdma_read.rem_addr.len = ib_wr->sg_list->length; + info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr; + info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey; + info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length; + ret = ukqp->ops.iw_rdma_read(ukqp, &info, false, false); + if (ret) + err = -EIO; + break; + default: + err = -EINVAL; + i40iw_pr_err(" upost_send bad opcode = 0x%x\n", + ib_wr->opcode); + break; + } + + if (err) + break; + ib_wr = ib_wr->next; + } + + if (err) + *bad_wr = ib_wr; + else + ukqp->ops.iw_qp_post_wr(ukqp); + spin_unlock_irqrestore(&iwqp->lock, flags); + + return err; +} + +/** + * i40iw_post_recv - post receive wr for kernel application + * @ibqp: ib qp pointer + * @ib_wr: work request for receive + * @bad_wr: bad wr caused an error + */ +static int i40iw_post_recv(struct ib_qp *ibqp, + struct ib_recv_wr *ib_wr, + struct ib_recv_wr **bad_wr) +{ + struct i40iw_qp *iwqp; + struct i40iw_qp_uk *ukqp; + struct i40iw_post_rq_info post_recv; + struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT]; + enum i40iw_status_code ret = 0; + unsigned long flags; + + iwqp = (struct i40iw_qp *)ibqp; + ukqp = &iwqp->sc_qp.qp_uk; + + memset(&post_recv, 0, sizeof(post_recv)); + spin_lock_irqsave(&iwqp->lock, flags); + while (ib_wr) { + post_recv.num_sges = ib_wr->num_sge; + post_recv.wr_id = ib_wr->wr_id; + i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge); + post_recv.sg_list = sg_list; + ret = ukqp->ops.iw_post_receive(ukqp, &post_recv); + if (ret) { + i40iw_pr_err(" post_recv err %d\n", ret); + *bad_wr = ib_wr; + goto out; + } + ib_wr = ib_wr->next; + } + out: + spin_unlock_irqrestore(&iwqp->lock, flags); + if (ret) + return -ENOSYS; + return 0; +} + +/** + * i40iw_poll_cq - poll cq for completion (kernel apps) + * @ibcq: cq to poll + * @num_entries: number of entries to poll + * @entry: wr of entry completed + */ +static int i40iw_poll_cq(struct ib_cq *ibcq, + int num_entries, + struct ib_wc *entry) +{ + struct i40iw_cq *iwcq; + int cqe_count = 0; + struct i40iw_cq_poll_info cq_poll_info; + enum i40iw_status_code ret; + struct i40iw_cq_uk *ukcq; + struct i40iw_sc_qp *qp; + unsigned long flags; + + iwcq = (struct i40iw_cq *)ibcq; + ukcq = &iwcq->sc_cq.cq_uk; + + spin_lock_irqsave(&iwcq->lock, flags); + while (cqe_count < num_entries) { + ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info, true); + if (ret == I40IW_ERR_QUEUE_EMPTY) { + break; + } else if (ret) { + if (!cqe_count) + cqe_count = -1; + break; + } + 
entry->wc_flags = 0; + entry->wr_id = cq_poll_info.wr_id; + if (!cq_poll_info.error) + entry->status = IB_WC_SUCCESS; + else + entry->status = IB_WC_WR_FLUSH_ERR; + + switch (cq_poll_info.op_type) { + case I40IW_OP_TYPE_RDMA_WRITE: + entry->opcode = IB_WC_RDMA_WRITE; + break; + case I40IW_OP_TYPE_RDMA_READ_INV_STAG: + case I40IW_OP_TYPE_RDMA_READ: + entry->opcode = IB_WC_RDMA_READ; + break; + case I40IW_OP_TYPE_SEND_SOL: + case I40IW_OP_TYPE_SEND_SOL_INV: + case I40IW_OP_TYPE_SEND_INV: + case I40IW_OP_TYPE_SEND: + entry->opcode = IB_WC_SEND; + break; + case I40IW_OP_TYPE_REC: + entry->opcode = IB_WC_RECV; + break; + default: + entry->opcode = IB_WC_RECV; + break; + } + + entry->vendor_err = + cq_poll_info.major_err << 16 | cq_poll_info.minor_err; + entry->ex.imm_data = 0; + qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle; + entry->qp = (struct ib_qp *)qp->back_qp; + entry->src_qp = cq_poll_info.qp_id; + entry->byte_len = cq_poll_info.bytes_xfered; + entry++; + cqe_count++; + } + spin_unlock_irqrestore(&iwcq->lock, flags); + return cqe_count; +} + +/** + * i40iw_req_notify_cq - arm cq kernel application + * @ibcq: cq to arm + * @notify_flags: notofication flags + */ +static int i40iw_req_notify_cq(struct ib_cq *ibcq, + enum ib_cq_notify_flags notify_flags) +{ + struct i40iw_cq *iwcq; + struct i40iw_cq_uk *ukcq; + enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED; + + iwcq = (struct i40iw_cq *)ibcq; + ukcq = &iwcq->sc_cq.cq_uk; + if (notify_flags == IB_CQ_NEXT_COMP) + cq_notify = IW_CQ_COMPL_EVENT; + ukcq->ops.iw_cq_request_notification(ukcq, cq_notify); + return 0; +} + +/** + * i40iw_port_immutable - return port's immutable data + * @ibdev: ib dev struct + * @port_num: port number + * @immutable: immutable data for the port return + */ +static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = i40iw_query_port(ibdev, port_num, &attr); + + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; + + return 0; +} + +/** + * i40iw_get_protocol_stats - Populates the rdma_stats structure + * @ibdev: ib dev struct + * @stats: iw protocol stats struct + */ +static int i40iw_get_protocol_stats(struct ib_device *ibdev, + union rdma_protocol_stats *stats) +{ + struct i40iw_device *iwdev = to_iwdev(ibdev); + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_dev_pestat *devstat = &dev->dev_pestat; + struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats; + struct timespec curr_time; + static struct timespec last_rd_time = {0, 0}; + enum i40iw_status_code status = 0; + unsigned long flags; + + curr_time = current_kernel_time(); + memset(stats, 0, sizeof(*stats)); + + if (dev->is_pf) { + spin_lock_irqsave(&devstat->stats_lock, flags); + devstat->ops.iw_hw_stat_read_all(devstat, + &devstat->hw_stats); + spin_unlock_irqrestore(&devstat->stats_lock, flags); + } else { + if (((u64)curr_time.tv_sec - (u64)last_rd_time.tv_sec) > 1) + status = i40iw_vchnl_vf_get_pe_stats(dev, + &devstat->hw_stats); + + if (status) + return -ENOSYS; + } + + stats->iw.ipInReceives = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] + + hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXPKTS]; + stats->iw.ipInTruncatedPkts = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] + + hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC]; + stats->iw.ipInDiscards = 
hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] + + hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD]; + stats->iw.ipOutNoRoutes = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] + + hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE]; + stats->iw.ipReasmReqds = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] + + hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS]; + stats->iw.ipFragCreates = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] + + hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS]; + stats->iw.ipInMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] + + hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS]; + stats->iw.ipOutMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] + + hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS]; + stats->iw.tcpOutSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPTXSEG]; + stats->iw.tcpInSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPRXSEGS]; + stats->iw.tcpRetransSegs = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_TCPRTXSEG]; + + last_rd_time = curr_time; + return 0; +} + +/** + * i40iw_query_gid - Query port GID + * @ibdev: device pointer from stack + * @port: port number + * @index: Entry index + * @gid: Global ID + */ +static int i40iw_query_gid(struct ib_device *ibdev, + u8 port, + int index, + union ib_gid *gid) +{ + struct i40iw_device *iwdev = to_iwdev(ibdev); + + memset(gid->raw, 0, sizeof(gid->raw)); + ether_addr_copy(gid->raw, iwdev->netdev->dev_addr); + return 0; +} + +/** + * i40iw_modify_port Modify port properties + * @ibdev: device pointer from stack + * @port: port number + * @port_modify_mask: mask for port modifications + * @props: port properties + */ +static int i40iw_modify_port(struct ib_device *ibdev, + u8 port, + int port_modify_mask, + struct ib_port_modify *props) +{ + return 0; +} + +/** + * i40iw_query_pkey - Query partition key + * @ibdev: device pointer from stack + * @port: port number + * @index: index of pkey + * @pkey: pointer to store the pkey + */ +static int i40iw_query_pkey(struct ib_device *ibdev, + u8 port, + u16 index, + u16 *pkey) +{ + *pkey = 0; + return 0; +} + +/** + * i40iw_create_ah - create address handle + * @ibpd: ptr of pd + * @ah_attr: address handle attributes + */ +static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd, + struct ib_ah_attr *attr) +{ + return ERR_PTR(-ENOSYS); +} + +/** + * i40iw_destroy_ah - Destroy address handle + * @ah: pointer to address handle + */ +static int i40iw_destroy_ah(struct ib_ah *ah) +{ + return -ENOSYS; +} + +/** + * i40iw_init_rdma_device - initialization of iwarp device + * @iwdev: iwarp device + */ +static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev) +{ + struct i40iw_ib_device *iwibdev; + struct net_device *netdev = iwdev->netdev; + struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context; + + iwibdev = (struct i40iw_ib_device *)ib_alloc_device(sizeof(*iwibdev)); + if (!iwibdev) { + i40iw_pr_err("iwdev == NULL\n"); + return NULL; + } + strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX); + iwibdev->ibdev.owner = THIS_MODULE; + iwdev->iwibdev = iwibdev; + iwibdev->iwdev = iwdev; + + iwibdev->ibdev.node_type = RDMA_NODE_RNIC; + ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr); + + iwibdev->ibdev.uverbs_cmd_mask = + (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | + (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | + (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | + (1ull << IB_USER_VERBS_CMD_ALLOC_PD) 
| + (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_REG_MR) | + (1ull << IB_USER_VERBS_CMD_DEREG_MR) | + (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | + (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | + (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_QP) | + (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | + (1ull << IB_USER_VERBS_CMD_QUERY_QP) | + (1ull << IB_USER_VERBS_CMD_POLL_CQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_AH) | + (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | + (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | + (1ull << IB_USER_VERBS_CMD_POST_RECV) | + (1ull << IB_USER_VERBS_CMD_POST_SEND); + iwibdev->ibdev.phys_port_cnt = 1; + iwibdev->ibdev.num_comp_vectors = 1; + iwibdev->ibdev.dma_device = &pcidev->dev; + iwibdev->ibdev.dev.parent = &pcidev->dev; + iwibdev->ibdev.query_port = i40iw_query_port; + iwibdev->ibdev.modify_port = i40iw_modify_port; + iwibdev->ibdev.query_pkey = i40iw_query_pkey; + iwibdev->ibdev.query_gid = i40iw_query_gid; + iwibdev->ibdev.alloc_ucontext = i40iw_alloc_ucontext; + iwibdev->ibdev.dealloc_ucontext = i40iw_dealloc_ucontext; + iwibdev->ibdev.mmap = i40iw_mmap; + iwibdev->ibdev.alloc_pd = i40iw_alloc_pd; + iwibdev->ibdev.dealloc_pd = i40iw_dealloc_pd; + iwibdev->ibdev.create_qp = i40iw_create_qp; + iwibdev->ibdev.modify_qp = i40iw_modify_qp; + iwibdev->ibdev.query_qp = i40iw_query_qp; + iwibdev->ibdev.destroy_qp = i40iw_destroy_qp; + iwibdev->ibdev.create_cq = i40iw_create_cq; + iwibdev->ibdev.destroy_cq = i40iw_destroy_cq; + iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr; + iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr; + iwibdev->ibdev.dereg_mr = i40iw_dereg_mr; + iwibdev->ibdev.get_protocol_stats = i40iw_get_protocol_stats; + iwibdev->ibdev.query_device = i40iw_query_device; + iwibdev->ibdev.create_ah = i40iw_create_ah; + iwibdev->ibdev.destroy_ah = i40iw_destroy_ah; + iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL); + if (!iwibdev->ibdev.iwcm) { + ib_dealloc_device(&iwibdev->ibdev); + i40iw_pr_err("iwcm == NULL\n"); + return NULL; + } + + iwibdev->ibdev.iwcm->add_ref = i40iw_add_ref; + iwibdev->ibdev.iwcm->rem_ref = i40iw_rem_ref; + iwibdev->ibdev.iwcm->get_qp = i40iw_get_qp; + iwibdev->ibdev.iwcm->connect = i40iw_connect; + iwibdev->ibdev.iwcm->accept = i40iw_accept; + iwibdev->ibdev.iwcm->reject = i40iw_reject; + iwibdev->ibdev.iwcm->create_listen = i40iw_create_listen; + iwibdev->ibdev.iwcm->destroy_listen = i40iw_destroy_listen; + memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name, + sizeof(iwibdev->ibdev.iwcm->ifname)); + iwibdev->ibdev.get_port_immutable = i40iw_port_immutable; + iwibdev->ibdev.poll_cq = i40iw_poll_cq; + iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq; + iwibdev->ibdev.post_send = i40iw_post_send; + iwibdev->ibdev.post_recv = i40iw_post_recv; + + return iwibdev; +} + +/** + * i40iw_port_ibevent - indicate port event + * @iwdev: iwarp device + */ +void i40iw_port_ibevent(struct i40iw_device *iwdev) +{ + struct i40iw_ib_device *iwibdev = iwdev->iwibdev; + struct ib_event event; + + event.device = &iwibdev->ibdev; + event.element.port_num = 1; + event.event = iwdev->iw_status ? 
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; + ib_dispatch_event(&event); +} + +/** + * i40iw_unregister_rdma_device - unregister of iwarp from IB + * @iwibdev: rdma device ptr + */ +static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) + device_remove_file(&iwibdev->ibdev.dev, + i40iw_dev_attributes[i]); + ib_unregister_device(&iwibdev->ibdev); +} + +/** + * i40iw_destroy_rdma_device - destroy rdma device and free resources + * @iwibdev: IB device ptr + */ +void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev) +{ + if (!iwibdev) + return; + + i40iw_unregister_rdma_device(iwibdev); + kfree(iwibdev->ibdev.iwcm); + iwibdev->ibdev.iwcm = NULL; + ib_dealloc_device(&iwibdev->ibdev); +} + +/** + * i40iw_register_rdma_device - register iwarp device to IB + * @iwdev: iwarp device + */ +int i40iw_register_rdma_device(struct i40iw_device *iwdev) +{ + int i, ret; + struct i40iw_ib_device *iwibdev; + + iwdev->iwibdev = i40iw_init_rdma_device(iwdev); + if (!iwdev->iwibdev) + return -ENOSYS; + iwibdev = iwdev->iwibdev; + + ret = ib_register_device(&iwibdev->ibdev, NULL); + if (ret) + goto error; + + for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) { + ret = + device_create_file(&iwibdev->ibdev.dev, + i40iw_dev_attributes[i]); + if (ret) { + while (i > 0) { + i--; + device_remove_file(&iwibdev->ibdev.dev, i40iw_dev_attributes[i]); + } + ib_unregister_device(&iwibdev->ibdev); + goto error; + } + } + return 0; +error: + kfree(iwdev->iwibdev->ibdev.iwcm); + iwdev->iwibdev->ibdev.iwcm = NULL; + ib_dealloc_device(&iwdev->iwibdev->ibdev); + return -ENOSYS; +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h new file mode 100644 index 000000000..1101f7708 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h @@ -0,0 +1,173 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#ifndef I40IW_VERBS_H +#define I40IW_VERBS_H + +struct i40iw_ucontext { + struct ib_ucontext ibucontext; + struct i40iw_device *iwdev; + struct list_head cq_reg_mem_list; + spinlock_t cq_reg_mem_list_lock; /* memory list for cq's */ + struct list_head qp_reg_mem_list; + spinlock_t qp_reg_mem_list_lock; /* memory list for qp's */ +}; + +struct i40iw_pd { + struct ib_pd ibpd; + struct i40iw_sc_pd sc_pd; + atomic_t usecount; +}; + +struct i40iw_hmc_pble { + union { + u32 idx; + dma_addr_t addr; + }; +}; + +struct i40iw_cq_mr { + struct i40iw_hmc_pble cq_pbl; + dma_addr_t shadow; +}; + +struct i40iw_qp_mr { + struct i40iw_hmc_pble sq_pbl; + struct i40iw_hmc_pble rq_pbl; + dma_addr_t shadow; + struct page *sq_page; +}; + +struct i40iw_pbl { + struct list_head list; + union { + struct i40iw_qp_mr qp_mr; + struct i40iw_cq_mr cq_mr; + }; + + bool pbl_allocated; + u64 user_base; + struct i40iw_pble_alloc pble_alloc; + struct i40iw_mr *iwmr; +}; + +#define MAX_SAVE_PAGE_ADDRS 4 +struct i40iw_mr { + union { + struct ib_mr ibmr; + struct ib_mw ibmw; + struct ib_fmr ibfmr; + }; + struct ib_umem *region; + u16 type; + u32 page_cnt; + u32 stag; + u64 length; + u64 pgaddrmem[MAX_SAVE_PAGE_ADDRS]; + struct i40iw_pbl iwpbl; +}; + +struct i40iw_cq { + struct ib_cq ibcq; + struct i40iw_sc_cq sc_cq; + u16 cq_head; + u16 cq_size; + u16 cq_number; + bool user_mode; + u32 polled_completions; + u32 cq_mem_size; + struct i40iw_dma_mem kmem; + spinlock_t lock; /* for poll cq */ + struct i40iw_pbl *iwpbl; +}; + +struct disconn_work { + struct work_struct work; + struct i40iw_qp *iwqp; +}; + +struct iw_cm_id; +struct ietf_mpa_frame; +struct i40iw_ud_file; + +struct i40iw_qp_kmode { + struct i40iw_dma_mem dma_mem; + u64 *wrid_mem; +}; + +struct i40iw_qp { + struct ib_qp ibqp; + struct i40iw_sc_qp sc_qp; + struct i40iw_device *iwdev; + struct i40iw_cq *iwscq; + struct i40iw_cq *iwrcq; + struct i40iw_pd *iwpd; + struct i40iw_qp_host_ctx_info ctx_info; + struct i40iwarp_offload_info iwarp_info; + void *allocated_buffer; + atomic_t refcount; + struct iw_cm_id *cm_id; + void *cm_node; + struct ib_mr *lsmm_mr; + struct work_struct work; + enum ib_qp_state ibqp_state; + u32 iwarp_state; + u32 qp_mem_size; + u32 last_aeq; + atomic_t close_timer_started; + spinlock_t lock; /* for post work requests */ + struct i40iw_qp_context *iwqp_context; + void *pbl_vbase; + dma_addr_t pbl_pbase; + struct page *page; + u8 active_conn:1; + u8 user_mode:1; + u8 hte_added:1; + u8 flush_issued:1; + u8 destroyed:1; + u8 sig_all:1; + u8 pau_mode:1; + u8 rsvd:1; + u16 term_sq_flush_code; + u16 term_rq_flush_code; + u8 hw_iwarp_state; + u8 hw_tcp_state; + struct i40iw_qp_kmode kqp; + struct i40iw_dma_mem host_ctx; + struct timer_list terminate_timer; + struct i40iw_pbl *iwpbl; + struct i40iw_dma_mem q2_ctx_mem; + struct i40iw_dma_mem ietf_mem; +}; +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_vf.c b/drivers/infiniband/hw/i40iw/i40iw_vf.c new file mode 100644 index 000000000..cb0f18340 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_vf.c @@ -0,0 +1,85 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. 
You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include "i40iw_osdep.h" +#include "i40iw_register.h" +#include "i40iw_status.h" +#include "i40iw_hmc.h" +#include "i40iw_d.h" +#include "i40iw_type.h" +#include "i40iw_p.h" +#include "i40iw_vf.h" + +/** + * i40iw_manage_vf_pble_bp - manage vf pble + * @cqp: cqp for cqp' sq wqe + * @info: pble info + * @scratch: pointer for completion + * @post_sq: to post and ring + */ +enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp, + struct i40iw_manage_vf_pble_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + u64 temp, header, pd_pl_pba = 0; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + temp = LS_64(info->pd_entry_cnt, I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT) | + LS_64(info->first_pd_index, I40IW_CQPSQ_MVPBP_FIRST_PD_INX) | + LS_64(info->sd_index, I40IW_CQPSQ_MVPBP_SD_INX); + set_64bit_val(wqe, 16, temp); + + header = LS_64((info->inv_pd_ent ? 1 : 0), I40IW_CQPSQ_MVPBP_INV_PD_ENT) | + LS_64(I40IW_CQP_OP_MANAGE_VF_PBLE_BP, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + set_64bit_val(wqe, 24, header); + + pd_pl_pba = LS_64(info->pd_pl_pba >> 3, I40IW_CQPSQ_MVPBP_PD_PLPBA); + set_64bit_val(wqe, 32, pd_pl_pba); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE VF_PBLE_BP WQE", wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +struct i40iw_vf_cqp_ops iw_vf_cqp_ops = { + i40iw_manage_vf_pble_bp +}; diff --git a/drivers/infiniband/hw/i40iw/i40iw_vf.h b/drivers/infiniband/hw/i40iw/i40iw_vf.h new file mode 100644 index 000000000..f649f3a62 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_vf.h @@ -0,0 +1,62 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. 
You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#ifndef I40IW_VF_H +#define I40IW_VF_H + +struct i40iw_sc_cqp; + +struct i40iw_manage_vf_pble_info { + u32 sd_index; + u16 first_pd_index; + u16 pd_entry_cnt; + u8 inv_pd_ent; + u64 pd_pl_pba; +}; + +struct i40iw_vf_cqp_ops { + enum i40iw_status_code (*manage_vf_pble_bp)(struct i40iw_sc_cqp *, + struct i40iw_manage_vf_pble_info *, + u64, + bool); +}; + +enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp, + struct i40iw_manage_vf_pble_info *info, + u64 scratch, + bool post_sq); + +extern struct i40iw_vf_cqp_ops iw_vf_cqp_ops; + +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c new file mode 100644 index 000000000..6b68f7890 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c @@ -0,0 +1,748 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include "i40iw_osdep.h" +#include "i40iw_register.h" +#include "i40iw_status.h" +#include "i40iw_hmc.h" +#include "i40iw_d.h" +#include "i40iw_type.h" +#include "i40iw_p.h" +#include "i40iw_virtchnl.h" + +/** + * vchnl_vf_send_get_ver_req - Request Channel version + * @dev: IWARP device pointer + * @vchnl_req: Virtual channel message request pointer + */ +static enum i40iw_status_code vchnl_vf_send_get_ver_req(struct i40iw_sc_dev *dev, + struct i40iw_virtchnl_req *vchnl_req) +{ + enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY; + struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg; + + if (!dev->vchnl_up) + return ret_code; + + memset(vchnl_msg, 0, sizeof(*vchnl_msg)); + vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req; + vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg); + vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_VER; + vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_VER_V0; + ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); + return ret_code; +} + +/** + * vchnl_vf_send_get_hmc_fcn_req - Request HMC Function from VF + * @dev: IWARP device pointer + * @vchnl_req: Virtual channel message request pointer + */ +static enum i40iw_status_code vchnl_vf_send_get_hmc_fcn_req(struct i40iw_sc_dev *dev, + struct i40iw_virtchnl_req *vchnl_req) +{ + enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY; + struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg; + + if (!dev->vchnl_up) + return ret_code; + + memset(vchnl_msg, 0, sizeof(*vchnl_msg)); + vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req; + vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg); + vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_HMC_FCN; + vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_HMC_FCN_V0; + ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); + return ret_code; +} + +/** + * vchnl_vf_send_get_pe_stats_req - Request PE stats from VF + * @dev: IWARP device pointer + * @vchnl_req: Virtual channel message request pointer + */ +static enum i40iw_status_code vchnl_vf_send_get_pe_stats_req(struct i40iw_sc_dev *dev, + struct i40iw_virtchnl_req *vchnl_req) +{ + enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY; + struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg; + + if (!dev->vchnl_up) + return ret_code; + + memset(vchnl_msg, 0, sizeof(*vchnl_msg)); + vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req; + vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_dev_hw_stats) - 1; + vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_STATS; + vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_STATS_V0; + ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); + return ret_code; +} + +/** + * vchnl_vf_send_add_hmc_objs_req - Add HMC objects + * @dev: IWARP device pointer + * @vchnl_req: Virtual channel message 
request pointer + */ +static enum i40iw_status_code vchnl_vf_send_add_hmc_objs_req(struct i40iw_sc_dev *dev, + struct i40iw_virtchnl_req *vchnl_req, + enum i40iw_hmc_rsrc_type rsrc_type, + u32 start_index, + u32 rsrc_count) +{ + enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY; + struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg; + struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj; + + if (!dev->vchnl_up) + return ret_code; + + add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf; + memset(vchnl_msg, 0, sizeof(*vchnl_msg)); + memset(add_hmc_obj, 0, sizeof(*add_hmc_obj)); + vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req; + vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1; + vchnl_msg->iw_op_code = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE; + vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0; + add_hmc_obj->obj_type = (u16)rsrc_type; + add_hmc_obj->start_index = start_index; + add_hmc_obj->obj_count = rsrc_count; + ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); + return ret_code; +} + +/** + * vchnl_vf_send_del_hmc_objs_req - del HMC objects + * @dev: IWARP device pointer + * @vchnl_req: Virtual channel message request pointer + * @ rsrc_type - resource type to delete + * @ start_index - starting index for resource + * @ rsrc_count - number of resource type to delete + */ +static enum i40iw_status_code vchnl_vf_send_del_hmc_objs_req(struct i40iw_sc_dev *dev, + struct i40iw_virtchnl_req *vchnl_req, + enum i40iw_hmc_rsrc_type rsrc_type, + u32 start_index, + u32 rsrc_count) +{ + enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY; + struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg; + struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj; + + if (!dev->vchnl_up) + return ret_code; + + add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf; + memset(vchnl_msg, 0, sizeof(*vchnl_msg)); + memset(add_hmc_obj, 0, sizeof(*add_hmc_obj)); + vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req; + vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1; + vchnl_msg->iw_op_code = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE; + vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0; + add_hmc_obj->obj_type = (u16)rsrc_type; + add_hmc_obj->start_index = start_index; + add_hmc_obj->obj_count = rsrc_count; + ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); + return ret_code; +} + +/** + * vchnl_pf_send_get_ver_resp - Send channel version to VF + * @dev: IWARP device pointer + * @vf_id: Virtual function ID associated with the message + * @vchnl_msg: Virtual channel message buffer pointer + */ +static void vchnl_pf_send_get_ver_resp(struct i40iw_sc_dev *dev, + u32 vf_id, + struct i40iw_virtchnl_op_buf *vchnl_msg) +{ + enum i40iw_status_code ret_code; + u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u32) - 1]; + struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer; + + memset(resp_buffer, 0, sizeof(*resp_buffer)); + vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx; + vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer); + vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS; + *((u32 
*)vchnl_msg_resp->iw_chnl_buf) = I40IW_VCHNL_CHNL_VER_V0; + ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer)); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); +} + +/** + * vchnl_pf_send_get_hmc_fcn_resp - Send HMC Function to VF + * @dev: IWARP device pointer + * @vf_id: Virtual function ID associated with the message + * @vchnl_msg: Virtual channel message buffer pointer + */ +static void vchnl_pf_send_get_hmc_fcn_resp(struct i40iw_sc_dev *dev, + u32 vf_id, + struct i40iw_virtchnl_op_buf *vchnl_msg, + u16 hmc_fcn) +{ + enum i40iw_status_code ret_code; + u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u16) - 1]; + struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer; + + memset(resp_buffer, 0, sizeof(*resp_buffer)); + vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx; + vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer); + vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS; + *((u16 *)vchnl_msg_resp->iw_chnl_buf) = hmc_fcn; + ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer)); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); +} + +/** + * vchnl_pf_send_get_pe_stats_resp - Send PE Stats to VF + * @dev: IWARP device pointer + * @vf_id: Virtual function ID associated with the message + * @vchnl_msg: Virtual channel message buffer pointer + * @hw_stats: HW Stats struct + */ + +static void vchnl_pf_send_get_pe_stats_resp(struct i40iw_sc_dev *dev, + u32 vf_id, + struct i40iw_virtchnl_op_buf *vchnl_msg, + struct i40iw_dev_hw_stats hw_stats) +{ + enum i40iw_status_code ret_code; + u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(struct i40iw_dev_hw_stats) - 1]; + struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer; + + memset(resp_buffer, 0, sizeof(*resp_buffer)); + vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx; + vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer); + vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS; + *((struct i40iw_dev_hw_stats *)vchnl_msg_resp->iw_chnl_buf) = hw_stats; + ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer)); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); +} + +/** + * vchnl_pf_send_error_resp - Send an error response to VF + * @dev: IWARP device pointer + * @vf_id: Virtual function ID associated with the message + * @vchnl_msg: Virtual channel message buffer pointer + */ +static void vchnl_pf_send_error_resp(struct i40iw_sc_dev *dev, u32 vf_id, + struct i40iw_virtchnl_op_buf *vchnl_msg, + u16 op_ret_code) +{ + enum i40iw_status_code ret_code; + u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf)]; + struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer; + + memset(resp_buffer, 0, sizeof(resp_buffer)); + vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx; + vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer); + vchnl_msg_resp->iw_op_ret_code = (u16)op_ret_code; + ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer)); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); +} + +/** + * pf_cqp_get_hmc_fcn_callback - Callback for Get HMC Fcn + * @cqp_req_param: CQP Request param value + * @not_used: unused 
CQP callback parameter + */ +static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback_param, + struct i40iw_ccq_cqe_info *cqe_info) +{ + struct i40iw_vfdev *vf_dev = callback_param; + struct i40iw_virt_mem vf_dev_mem; + + if (cqe_info->error) { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "CQP Completion Error on Get HMC Function. Maj = 0x%04x, Minor = 0x%04x\n", + cqe_info->maj_err_code, cqe_info->min_err_code); + dev->vf_dev[vf_dev->iw_vf_idx] = NULL; + vchnl_pf_send_error_resp(dev, vf_dev->vf_id, &vf_dev->vf_msg_buffer.vchnl_msg, + (u16)I40IW_ERR_CQP_COMPL_ERROR); + vf_dev_mem.va = vf_dev; + vf_dev_mem.size = sizeof(*vf_dev); + i40iw_free_virt_mem(dev->hw, &vf_dev_mem); + } else { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "CQP Completion Operation Return information = 0x%08x\n", + cqe_info->op_ret_val); + vf_dev->pmf_index = (u16)cqe_info->op_ret_val; + vf_dev->msg_count--; + vchnl_pf_send_get_hmc_fcn_resp(dev, + vf_dev->vf_id, + &vf_dev->vf_msg_buffer.vchnl_msg, + vf_dev->pmf_index); + } +} + +/** + * pf_add_hmc_obj - Callback for Add HMC Object + * @vf_dev: pointer to the VF Device + */ +static void pf_add_hmc_obj_callback(void *work_vf_dev) +{ + struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev; + struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info; + struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg; + struct i40iw_hmc_create_obj_info info; + struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj; + enum i40iw_status_code ret_code; + + if (!vf_dev->pf_hmc_initialized) { + ret_code = i40iw_pf_init_vfhmc(vf_dev->pf_dev, (u8)vf_dev->pmf_index, NULL); + if (ret_code) + goto add_out; + vf_dev->pf_hmc_initialized = true; + } + + add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf; + + memset(&info, 0, sizeof(info)); + info.hmc_info = hmc_info; + info.is_pf = false; + info.rsrc_type = (u32)add_hmc_obj->obj_type; + info.entry_type = (info.rsrc_type == I40IW_HMC_IW_PBLE) ? I40IW_SD_TYPE_PAGED : I40IW_SD_TYPE_DIRECT; + info.start_idx = add_hmc_obj->start_index; + info.count = add_hmc_obj->obj_count; + i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT, + "I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE. Add %u type %u objects\n", + info.count, info.rsrc_type); + ret_code = i40iw_sc_create_hmc_obj(vf_dev->pf_dev, &info); + if (!ret_code) + vf_dev->hmc_info.hmc_obj[add_hmc_obj->obj_type].cnt = add_hmc_obj->obj_count; +add_out: + vf_dev->msg_count--; + vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code); +} + +/** + * pf_del_hmc_obj_callback - Callback for delete HMC Object + * @work_vf_dev: pointer to the VF Device + */ +static void pf_del_hmc_obj_callback(void *work_vf_dev) +{ + struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev; + struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info; + struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg; + struct i40iw_hmc_del_obj_info info; + struct i40iw_virtchnl_hmc_obj_range *del_hmc_obj; + enum i40iw_status_code ret_code = I40IW_SUCCESS; + + if (!vf_dev->pf_hmc_initialized) + goto del_out; + + del_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf; + + memset(&info, 0, sizeof(info)); + info.hmc_info = hmc_info; + info.is_pf = false; + info.rsrc_type = (u32)del_hmc_obj->obj_type; + info.start_idx = del_hmc_obj->start_index; + info.count = del_hmc_obj->obj_count; + i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT, + "I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE. 
Delete %u type %u objects\n", + info.count, info.rsrc_type); + ret_code = i40iw_sc_del_hmc_obj(vf_dev->pf_dev, &info, false); +del_out: + vf_dev->msg_count--; + vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code); +} + +/** + * i40iw_vchnl_recv_pf - Receive PF virtual channel messages + * @dev: IWARP device pointer + * @vf_id: Virtual function ID associated with the message + * @msg: Virtual channel message buffer pointer + * @len: Length of the virtual channels message + */ +enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev, + u32 vf_id, + u8 *msg, + u16 len) +{ + struct i40iw_virtchnl_op_buf *vchnl_msg = (struct i40iw_virtchnl_op_buf *)msg; + struct i40iw_vfdev *vf_dev = NULL; + struct i40iw_hmc_fcn_info hmc_fcn_info; + u16 iw_vf_idx; + u16 first_avail_iw_vf = I40IW_MAX_PE_ENABLED_VF_COUNT; + struct i40iw_virt_mem vf_dev_mem; + struct i40iw_virtchnl_work_info work_info; + struct i40iw_dev_pestat *devstat; + enum i40iw_status_code ret_code; + unsigned long flags; + + if (!dev || !msg || !len) + return I40IW_ERR_PARAM; + + if (!dev->vchnl_up) + return I40IW_ERR_NOT_READY; + if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) { + if (vchnl_msg->iw_op_ver != I40IW_VCHNL_OP_GET_VER_V0) + vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); + else + vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); + return I40IW_SUCCESS; + } + for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; + iw_vf_idx++) { + if (!dev->vf_dev[iw_vf_idx]) { + if (first_avail_iw_vf == + I40IW_MAX_PE_ENABLED_VF_COUNT) + first_avail_iw_vf = iw_vf_idx; + continue; + } + if (dev->vf_dev[iw_vf_idx]->vf_id == vf_id) { + vf_dev = dev->vf_dev[iw_vf_idx]; + break; + } + } + if (vf_dev) { + if (!vf_dev->msg_count) { + vf_dev->msg_count++; + } else { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "VF%u already has a channel message in progress.\n", + vf_id); + return I40IW_SUCCESS; + } + } + switch (vchnl_msg->iw_op_code) { + case I40IW_VCHNL_OP_GET_HMC_FCN: + if (!vf_dev && + (first_avail_iw_vf != I40IW_MAX_PE_ENABLED_VF_COUNT)) { + ret_code = i40iw_allocate_virt_mem(dev->hw, &vf_dev_mem, sizeof(struct i40iw_vfdev) + + (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX)); + if (!ret_code) { + vf_dev = vf_dev_mem.va; + vf_dev->stats_initialized = false; + vf_dev->pf_dev = dev; + vf_dev->msg_count = 1; + vf_dev->vf_id = vf_id; + vf_dev->iw_vf_idx = first_avail_iw_vf; + vf_dev->pf_hmc_initialized = false; + vf_dev->hmc_info.hmc_obj = (struct i40iw_hmc_obj_info *)(&vf_dev[1]); + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "vf_dev %p, hmc_info %p, hmc_obj %p\n", + vf_dev, &vf_dev->hmc_info, vf_dev->hmc_info.hmc_obj); + dev->vf_dev[first_avail_iw_vf] = vf_dev; + iw_vf_idx = first_avail_iw_vf; + } else { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "VF%u Unable to allocate a VF device structure.\n", + vf_id); + vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg, (u16)I40IW_ERR_NO_MEMORY); + return I40IW_SUCCESS; + } + memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len); + hmc_fcn_info.callback_fcn = pf_cqp_get_hmc_fcn_callback; + hmc_fcn_info.vf_id = vf_id; + hmc_fcn_info.iw_vf_idx = vf_dev->iw_vf_idx; + hmc_fcn_info.cqp_callback_param = vf_dev; + hmc_fcn_info.free_fcn = false; + ret_code = i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "VF%u error CQP HMC Function operation.\n", + vf_id); + ret_code = i40iw_device_init_pestat(&vf_dev->dev_pestat); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "VF%u - i40iw_device_init_pestat 
failed\n", + vf_id); + vf_dev->dev_pestat.ops.iw_hw_stat_init(&vf_dev->dev_pestat, + (u8)vf_dev->pmf_index, + dev->hw, false); + vf_dev->stats_initialized = true; + } else { + if (vf_dev) { + vf_dev->msg_count--; + vchnl_pf_send_get_hmc_fcn_resp(dev, vf_id, vchnl_msg, vf_dev->pmf_index); + } else { + vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg, + (u16)I40IW_ERR_NO_MEMORY); + } + } + break; + case I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE: + if (!vf_dev) + return I40IW_ERR_BAD_PTR; + work_info.worker_vf_dev = vf_dev; + work_info.callback_fcn = pf_add_hmc_obj_callback; + memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len); + i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx); + break; + case I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE: + if (!vf_dev) + return I40IW_ERR_BAD_PTR; + work_info.worker_vf_dev = vf_dev; + work_info.callback_fcn = pf_del_hmc_obj_callback; + memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len); + i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx); + break; + case I40IW_VCHNL_OP_GET_STATS: + if (!vf_dev) + return I40IW_ERR_BAD_PTR; + devstat = &vf_dev->dev_pestat; + spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags); + devstat->ops.iw_hw_stat_read_all(devstat, &devstat->hw_stats); + spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags); + vf_dev->msg_count--; + vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, devstat->hw_stats); + break; + default: + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "40iw_vchnl_recv_pf: Invalid OpCode 0x%x\n", + vchnl_msg->iw_op_code); + vchnl_pf_send_error_resp(dev, vf_id, + vchnl_msg, (u16)I40IW_ERR_NOT_IMPLEMENTED); + } + return I40IW_SUCCESS; +} + +/** + * i40iw_vchnl_recv_vf - Receive VF virtual channel messages + * @dev: IWARP device pointer + * @vf_id: Virtual function ID associated with the message + * @msg: Virtual channel message buffer pointer + * @len: Length of the virtual channels message + */ +enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev, + u32 vf_id, + u8 *msg, + u16 len) +{ + struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)msg; + struct i40iw_virtchnl_req *vchnl_req; + + vchnl_req = (struct i40iw_virtchnl_req *)(uintptr_t)vchnl_msg_resp->iw_chnl_op_ctx; + vchnl_req->ret_code = (enum i40iw_status_code)vchnl_msg_resp->iw_op_ret_code; + if (len == (sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1)) { + if (vchnl_req->parm_len && vchnl_req->parm) + memcpy(vchnl_req->parm, vchnl_msg_resp->iw_chnl_buf, vchnl_req->parm_len); + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: Got response, data size %u\n", __func__, + vchnl_req->parm_len); + } else { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: error length on response, Got %u, expected %u\n", __func__, + len, (u32)(sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1)); + } + + return I40IW_SUCCESS; +} + +/** + * i40iw_vchnl_vf_get_ver - Request Channel version + * @dev: IWARP device pointer + * @vchnl_ver: Virtual channel message version pointer + */ +enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev, + u32 *vchnl_ver) +{ + struct i40iw_virtchnl_req vchnl_req; + enum i40iw_status_code ret_code; + + memset(&vchnl_req, 0, sizeof(vchnl_req)); + vchnl_req.dev = dev; + vchnl_req.parm = vchnl_ver; + vchnl_req.parm_len = sizeof(*vchnl_ver); + vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg; + ret_code = vchnl_vf_send_get_ver_req(dev, &vchnl_req); + if (!ret_code) { + ret_code = i40iw_vf_wait_vchnl_resp(dev); + if (!ret_code) + ret_code = vchnl_req.ret_code; + else + dev->vchnl_up = false; + } else 
{ + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s Send message failed 0x%0x\n", __func__, ret_code); + } + return ret_code; +} + +/** + * i40iw_vchnl_vf_get_hmc_fcn - Request HMC Function + * @dev: IWARP device pointer + * @hmc_fcn: HMC function index pointer + */ +enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev, + u16 *hmc_fcn) +{ + struct i40iw_virtchnl_req vchnl_req; + enum i40iw_status_code ret_code; + + memset(&vchnl_req, 0, sizeof(vchnl_req)); + vchnl_req.dev = dev; + vchnl_req.parm = hmc_fcn; + vchnl_req.parm_len = sizeof(*hmc_fcn); + vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg; + ret_code = vchnl_vf_send_get_hmc_fcn_req(dev, &vchnl_req); + if (!ret_code) { + ret_code = i40iw_vf_wait_vchnl_resp(dev); + if (!ret_code) + ret_code = vchnl_req.ret_code; + else + dev->vchnl_up = false; + } else { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s Send message failed 0x%0x\n", __func__, ret_code); + } + return ret_code; +} + +/** + * i40iw_vchnl_vf_add_hmc_objs - Add HMC Object + * @dev: IWARP device pointer + * @rsrc_type: HMC Resource type + * @start_index: Starting index of the objects to be added + * @rsrc_count: Number of resources to be added + */ +enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev, + enum i40iw_hmc_rsrc_type rsrc_type, + u32 start_index, + u32 rsrc_count) +{ + struct i40iw_virtchnl_req vchnl_req; + enum i40iw_status_code ret_code; + + memset(&vchnl_req, 0, sizeof(vchnl_req)); + vchnl_req.dev = dev; + vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg; + ret_code = vchnl_vf_send_add_hmc_objs_req(dev, + &vchnl_req, + rsrc_type, + start_index, + rsrc_count); + if (!ret_code) { + ret_code = i40iw_vf_wait_vchnl_resp(dev); + if (!ret_code) + ret_code = vchnl_req.ret_code; + else + dev->vchnl_up = false; + } else { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s Send message failed 0x%0x\n", __func__, ret_code); + } + return ret_code; +} + +/** + * i40iw_vchnl_vf_del_hmc_obj - del HMC obj + * @dev: IWARP device pointer + * @rsrc_type: HMC Resource type + * @start_index: Starting index of the object to delete + * @rsrc_count: Number of resources to be delete + */ +enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev, + enum i40iw_hmc_rsrc_type rsrc_type, + u32 start_index, + u32 rsrc_count) +{ + struct i40iw_virtchnl_req vchnl_req; + enum i40iw_status_code ret_code; + + memset(&vchnl_req, 0, sizeof(vchnl_req)); + vchnl_req.dev = dev; + vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg; + ret_code = vchnl_vf_send_del_hmc_objs_req(dev, + &vchnl_req, + rsrc_type, + start_index, + rsrc_count); + if (!ret_code) { + ret_code = i40iw_vf_wait_vchnl_resp(dev); + if (!ret_code) + ret_code = vchnl_req.ret_code; + else + dev->vchnl_up = false; + } else { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s Send message failed 0x%0x\n", __func__, ret_code); + } + return ret_code; +} + +/** + * i40iw_vchnl_vf_get_pe_stats - Get PE stats + * @dev: IWARP device pointer + * @hw_stats: HW stats struct + */ +enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev, + struct i40iw_dev_hw_stats *hw_stats) +{ + struct i40iw_virtchnl_req vchnl_req; + enum i40iw_status_code ret_code; + + memset(&vchnl_req, 0, sizeof(vchnl_req)); + vchnl_req.dev = dev; + vchnl_req.parm = hw_stats; + vchnl_req.parm_len = sizeof(*hw_stats); + vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg; + ret_code = vchnl_vf_send_get_pe_stats_req(dev, &vchnl_req); + if (!ret_code) { + ret_code = i40iw_vf_wait_vchnl_resp(dev); + if 
(!ret_code) + ret_code = vchnl_req.ret_code; + else + dev->vchnl_up = false; + } else { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s Send message failed 0x%0x\n", __func__, ret_code); + } + return ret_code; +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h new file mode 100644 index 000000000..24886ef08 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h @@ -0,0 +1,124 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#ifndef I40IW_VIRTCHNL_H +#define I40IW_VIRTCHNL_H + +#include "i40iw_hmc.h" + +#pragma pack(push, 1) + +struct i40iw_virtchnl_op_buf { + u16 iw_op_code; + u16 iw_op_ver; + u16 iw_chnl_buf_len; + u16 rsvd; + u64 iw_chnl_op_ctx; + /* Member alignment MUST be maintained above this location */ + u8 iw_chnl_buf[1]; +}; + +struct i40iw_virtchnl_resp_buf { + u64 iw_chnl_op_ctx; + u16 iw_chnl_buf_len; + s16 iw_op_ret_code; + /* Member alignment MUST be maintained above this location */ + u16 rsvd[2]; + u8 iw_chnl_buf[1]; +}; + +enum i40iw_virtchnl_ops { + I40IW_VCHNL_OP_GET_VER = 0, + I40IW_VCHNL_OP_GET_HMC_FCN, + I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE, + I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE, + I40IW_VCHNL_OP_GET_STATS +}; + +#define I40IW_VCHNL_OP_GET_VER_V0 0 +#define I40IW_VCHNL_OP_GET_HMC_FCN_V0 0 +#define I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0 0 +#define I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0 0 +#define I40IW_VCHNL_OP_GET_STATS_V0 0 +#define I40IW_VCHNL_CHNL_VER_V0 0 + +struct i40iw_dev_hw_stats; + +struct i40iw_virtchnl_hmc_obj_range { + u16 obj_type; + u16 rsvd; + u32 start_index; + u32 obj_count; +}; + +enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev, + u32 vf_id, + u8 *msg, + u16 len); + +enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev, + u32 vf_id, + u8 *msg, + u16 len); + +struct i40iw_virtchnl_req { + struct i40iw_sc_dev *dev; + struct i40iw_virtchnl_op_buf *vchnl_msg; + void *parm; + u32 vf_id; + u16 parm_len; + s16 ret_code; +}; + +#pragma pack(pop) + +enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev, + u32 *vchnl_ver); + +enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev, + u16 *hmc_fcn); + +enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev, + enum i40iw_hmc_rsrc_type rsrc_type, + u32 start_index, + u32 rsrc_count); + +enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev, + enum i40iw_hmc_rsrc_type rsrc_type, + u32 start_index, + u32 rsrc_count); + +enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev, + struct i40iw_dev_hw_stats *hw_stats); +#endif diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig index fc01deac1..db4aa13eb 100644 --- a/drivers/infiniband/hw/mlx4/Kconfig +++ b/drivers/infiniband/hw/mlx4/Kconfig @@ -1,6 +1,7 @@ config MLX4_INFINIBAND tristate "Mellanox ConnectX HCA support" depends on NETDEVICES && ETHERNET && PCI && INET + depends on MAY_USE_DEVLINK select NET_VENDOR_MELLANOX select MLX4_CORE ---help--- diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c index 21cb41a60..c74ef2620 100644 --- a/drivers/infiniband/hw/mlx4/alias_GUID.c +++ b/drivers/infiniband/hw/mlx4/alias_GUID.c @@ -310,7 +310,7 @@ static void aliasguid_query_handler(int status, if (status) { pr_debug("(port: %d) failed: status = %d\n", cb_ctx->port, status); - rec->time_to_run = ktime_get_real_ns() + 1 * NSEC_PER_SEC; + rec->time_to_run = ktime_get_boot_ns() + 1 * NSEC_PER_SEC; goto out; } @@ -416,7 +416,7 @@ next_entry: be64_to_cpu((__force __be64)rec->guid_indexes), be64_to_cpu((__force __be64)applied_guid_indexes), be64_to_cpu((__force __be64)declined_guid_indexes)); - rec->time_to_run = ktime_get_real_ns() + + rec->time_to_run = ktime_get_boot_ns() + resched_delay_sec * NSEC_PER_SEC; } else { rec->status = MLX4_GUID_INFO_STATUS_SET; @@ -708,7 +708,7 @@ static int 
get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port, } } if (resched_delay_sec) { - u64 curr_time = ktime_get_real_ns(); + u64 curr_time = ktime_get_boot_ns(); *resched_delay_sec = (low_record_time < curr_time) ? 0 : div_u64((low_record_time - curr_time), NSEC_PER_SEC); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 1c7ab6cab..f014eaf59 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -41,6 +41,7 @@ #include <linux/if_vlan.h> #include <net/ipv6.h> #include <net/addrconf.h> +#include <net/devlink.h> #include <rdma/ib_smi.h> #include <rdma/ib_user_verbs.h> @@ -1643,6 +1644,56 @@ static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_ return err; } +static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev, + struct ib_flow_attr *flow_attr, + enum mlx4_net_trans_promisc_mode *type) +{ + int err = 0; + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) || + (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) || + (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) { + return -EOPNOTSUPP; + } + + if (flow_attr->num_of_specs == 0) { + type[0] = MLX4_FS_MC_SNIFFER; + type[1] = MLX4_FS_UC_SNIFFER; + } else { + union ib_flow_spec *ib_spec; + + ib_spec = (union ib_flow_spec *)(flow_attr + 1); + if (ib_spec->type != IB_FLOW_SPEC_ETH) + return -EINVAL; + + /* if all is zero than MC and UC */ + if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) { + type[0] = MLX4_FS_MC_SNIFFER; + type[1] = MLX4_FS_UC_SNIFFER; + } else { + u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01, + ib_spec->eth.mask.dst_mac[1], + ib_spec->eth.mask.dst_mac[2], + ib_spec->eth.mask.dst_mac[3], + ib_spec->eth.mask.dst_mac[4], + ib_spec->eth.mask.dst_mac[5]}; + + /* Above xor was only on MC bit, non empty mask is valid + * only if this bit is set and rest are zero. 
+ */ + if (!is_zero_ether_addr(&mac[0])) + return -EINVAL; + + if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac)) + type[0] = MLX4_FS_MC_SNIFFER; + else + type[0] = MLX4_FS_UC_SNIFFER; + } + } + + return err; +} + static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr, int domain) @@ -1653,6 +1704,10 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, struct mlx4_dev *dev = (to_mdev(qp->device))->dev; int is_bonded = mlx4_is_bonded(dev); + if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) && + (flow_attr->type != IB_FLOW_ATTR_NORMAL)) + return ERR_PTR(-EOPNOTSUPP); + memset(type, 0, sizeof(type)); mflow = kzalloc(sizeof(*mflow), GFP_KERNEL); @@ -1663,7 +1718,19 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, switch (flow_attr->type) { case IB_FLOW_ATTR_NORMAL: - type[0] = MLX4_FS_REGULAR; + /* If dont trap flag (continue match) is set, under specific + * condition traffic be replicated to given qp, + * without stealing it + */ + if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) { + err = mlx4_ib_add_dont_trap_rule(dev, + flow_attr, + type); + if (err) + goto err_free; + } else { + type[0] = MLX4_FS_REGULAR; + } break; case IB_FLOW_ATTR_ALL_DEFAULT: @@ -1675,8 +1742,8 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, break; case IB_FLOW_ATTR_SNIFFER: - type[0] = MLX4_FS_UC_SNIFFER; - type[1] = MLX4_FS_MC_SNIFFER; + type[0] = MLX4_FS_MIRROR_RX_PORT; + type[1] = MLX4_FS_MIRROR_SX_PORT; break; default: @@ -2519,6 +2586,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) } ibdev->ib_active = true; + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) + devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i), + &ibdev->ib_dev); if (mlx4_is_mfunc(ibdev->dev)) init_pkeys(ibdev); @@ -2643,7 +2713,10 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) { struct mlx4_ib_dev *ibdev = ibdev_ptr; int p; + int i; + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) + devlink_port_type_clear(mlx4_get_devlink_port(dev, i)); ibdev->ib_active = false; flush_workqueue(wq); diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 52ce7b000..1eca01ceb 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -711,7 +711,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int access_flags, struct ib_udata *udata); int mlx4_ib_dereg_mr(struct ib_mr *mr); -struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type); +struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, + struct ib_udata *udata); int mlx4_ib_dealloc_mw(struct ib_mw *mw); struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index 242b94ec1..ce0b5aa8e 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -32,6 +32,7 @@ */ #include <linux/slab.h> +#include <rdma/ib_user_verbs.h> #include "mlx4_ib.h" @@ -334,7 +335,8 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr) return 0; } -struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type) +struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, + struct ib_udata *udata) { struct mlx4_ib_dev *dev = to_mdev(pd->device); struct mlx4_ib_mw *mw; diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile index 27a70159e..7493a83ac 100644 --- a/drivers/infiniband/hw/mlx5/Makefile +++ 
b/drivers/infiniband/hw/mlx5/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o -mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o +mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index fd1de31e0..a00ba4418 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -207,7 +207,10 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, break; case MLX5_CQE_RESP_SEND: wc->opcode = IB_WC_RECV; - wc->wc_flags = 0; + wc->wc_flags = IB_WC_IP_CSUM_OK; + if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) && + (cqe->hds_ip_ext & CQE_L4_OK)))) + wc->wc_flags = 0; break; case MLX5_CQE_RESP_SEND_IMM: wc->opcode = IB_WC_RECV; @@ -431,7 +434,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq, struct mlx5_core_qp *mqp; struct mlx5_ib_wq *wq; struct mlx5_sig_err_cqe *sig_err_cqe; - struct mlx5_core_mr *mmr; + struct mlx5_core_mkey *mmkey; struct mlx5_ib_mr *mr; uint8_t opcode; uint32_t qpn; @@ -536,17 +539,17 @@ repoll: case MLX5_CQE_SIG_ERR: sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64; - read_lock(&dev->mdev->priv.mr_table.lock); - mmr = __mlx5_mr_lookup(dev->mdev, - mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey))); - if (unlikely(!mmr)) { - read_unlock(&dev->mdev->priv.mr_table.lock); + read_lock(&dev->mdev->priv.mkey_table.lock); + mmkey = __mlx5_mr_lookup(dev->mdev, + mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey))); + if (unlikely(!mmkey)) { + read_unlock(&dev->mdev->priv.mkey_table.lock); mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n", cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey)); return -EINVAL; } - mr = to_mibmr(mmr); + mr = to_mibmr(mmkey); get_sig_err_item(sig_err_cqe, &mr->sig->err_item); mr->sig->sig_err_exists = true; mr->sig->sigerr_count++; @@ -558,25 +561,51 @@ repoll: mr->sig->err_item.expected, mr->sig->err_item.actual); - read_unlock(&dev->mdev->priv.mr_table.lock); + read_unlock(&dev->mdev->priv.mkey_table.lock); goto repoll; } return 0; } +static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries, + struct ib_wc *wc) +{ + struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); + struct mlx5_ib_wc *soft_wc, *next; + int npolled = 0; + + list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) { + if (npolled >= num_entries) + break; + + mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n", + cq->mcq.cqn); + + wc[npolled++] = soft_wc->wc; + list_del(&soft_wc->list); + kfree(soft_wc); + } + + return npolled; +} + int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) { struct mlx5_ib_cq *cq = to_mcq(ibcq); struct mlx5_ib_qp *cur_qp = NULL; unsigned long flags; + int soft_polled = 0; int npolled; int err = 0; spin_lock_irqsave(&cq->lock, flags); - for (npolled = 0; npolled < num_entries; npolled++) { - err = mlx5_poll_one(cq, &cur_qp, wc + npolled); + if (unlikely(!list_empty(&cq->wc_list))) + soft_polled = poll_soft_wc(cq, num_entries, wc); + + for (npolled = 0; npolled < num_entries - soft_polled; npolled++) { + err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled); if (err) break; } @@ -587,7 +616,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) spin_unlock_irqrestore(&cq->lock, flags); if (err == 0 || err == -EAGAIN) - return npolled; + return soft_polled + npolled; else return err; } @@ -595,16 +624,27 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int 
num_entries, struct ib_wc *wc) int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev; + struct mlx5_ib_cq *cq = to_mcq(ibcq); void __iomem *uar_page = mdev->priv.uuari.uars[0].map; + unsigned long irq_flags; + int ret = 0; + + spin_lock_irqsave(&cq->lock, irq_flags); + if (cq->notify_flags != IB_CQ_NEXT_COMP) + cq->notify_flags = flags & IB_CQ_SOLICITED_MASK; - mlx5_cq_arm(&to_mcq(ibcq)->mcq, + if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list)) + ret = 1; + spin_unlock_irqrestore(&cq->lock, irq_flags); + + mlx5_cq_arm(&cq->mcq, (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ? MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT, uar_page, MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock), to_mcq(ibcq)->mcq.cons_index); - return 0; + return ret; } static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf, @@ -757,6 +797,14 @@ static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) mlx5_db_free(dev->mdev, &cq->db); } +static void notify_soft_wc_handler(struct work_struct *work) +{ + struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq, + notify_work); + + cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); +} + struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, struct ib_ucontext *context, @@ -807,6 +855,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, &index, &inlen); if (err) goto err_create; + + INIT_WORK(&cq->notify_work, notify_soft_wc_handler); } cq->cqe_size = cqe_size; @@ -832,6 +882,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, cq->mcq.comp = mlx5_ib_cq_comp; cq->mcq.event = mlx5_ib_cq_event; + INIT_LIST_HEAD(&cq->wc_list); + if (context) if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) { err = -EFAULT; @@ -1219,3 +1271,27 @@ int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq) cq = to_mcq(ibcq); return cq->cqe_size; } + +/* Called from atomic context */ +int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc) +{ + struct mlx5_ib_wc *soft_wc; + struct mlx5_ib_cq *cq = to_mcq(ibcq); + unsigned long flags; + + soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC); + if (!soft_wc) + return -ENOMEM; + + soft_wc->wc = *wc; + spin_lock_irqsave(&cq->lock, flags); + list_add_tail(&soft_wc->list, &cq->wc_list); + if (cq->notify_flags == IB_CQ_NEXT_COMP || + wc->status != IB_WC_SUCCESS) { + cq->notify_flags = 0; + schedule_work(&cq->notify_work); + } + spin_unlock_irqrestore(&cq->lock, flags); + + return 0; +} diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c new file mode 100644 index 000000000..53e03c8ed --- /dev/null +++ b/drivers/infiniband/hw/mlx5/gsi.c @@ -0,0 +1,548 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "mlx5_ib.h" + +struct mlx5_ib_gsi_wr { + struct ib_cqe cqe; + struct ib_wc wc; + int send_flags; + bool completed:1; +}; + +struct mlx5_ib_gsi_qp { + struct ib_qp ibqp; + struct ib_qp *rx_qp; + u8 port_num; + struct ib_qp_cap cap; + enum ib_sig_type sq_sig_type; + /* Serialize qp state modifications */ + struct mutex mutex; + struct ib_cq *cq; + struct mlx5_ib_gsi_wr *outstanding_wrs; + u32 outstanding_pi, outstanding_ci; + int num_qps; + /* Protects access to the tx_qps. Post send operations synchronize + * with tx_qp creation in setup_qp(). Also protects the + * outstanding_wrs array and indices. + */ + spinlock_t lock; + struct ib_qp **tx_qps; +}; + +static struct mlx5_ib_gsi_qp *gsi_qp(struct ib_qp *qp) +{ + return container_of(qp, struct mlx5_ib_gsi_qp, ibqp); +} + +static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev) +{ + return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn); +} + +static u32 next_outstanding(struct mlx5_ib_gsi_qp *gsi, u32 index) +{ + return ++index % gsi->cap.max_send_wr; +} + +#define for_each_outstanding_wr(gsi, index) \ + for (index = gsi->outstanding_ci; index != gsi->outstanding_pi; \ + index = next_outstanding(gsi, index)) + +/* Call with gsi->lock locked */ +static void generate_completions(struct mlx5_ib_gsi_qp *gsi) +{ + struct ib_cq *gsi_cq = gsi->ibqp.send_cq; + struct mlx5_ib_gsi_wr *wr; + u32 index; + + for_each_outstanding_wr(gsi, index) { + wr = &gsi->outstanding_wrs[index]; + + if (!wr->completed) + break; + + if (gsi->sq_sig_type == IB_SIGNAL_ALL_WR || + wr->send_flags & IB_SEND_SIGNALED) + WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc)); + + wr->completed = false; + } + + gsi->outstanding_ci = index; +} + +static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc) +{ + struct mlx5_ib_gsi_qp *gsi = cq->cq_context; + struct mlx5_ib_gsi_wr *wr = + container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe); + u64 wr_id; + unsigned long flags; + + spin_lock_irqsave(&gsi->lock, flags); + wr->completed = true; + wr_id = wr->wc.wr_id; + wr->wc = *wc; + wr->wc.wr_id = wr_id; + wr->wc.qp = &gsi->ibqp; + + generate_completions(gsi); + spin_unlock_irqrestore(&gsi->lock, flags); +} + +struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_ib_gsi_qp *gsi; + struct ib_qp_init_attr hw_init_attr = *init_attr; + const u8 port_num = init_attr->port_num; + const int num_pkeys = pd->device->attrs.max_pkeys; + const int num_qps = mlx5_ib_deth_sqpn_cap(dev) ? 
num_pkeys : 0; + int ret; + + mlx5_ib_dbg(dev, "creating GSI QP\n"); + + if (port_num > ARRAY_SIZE(dev->devr.ports) || port_num < 1) { + mlx5_ib_warn(dev, + "invalid port number %d during GSI QP creation\n", + port_num); + return ERR_PTR(-EINVAL); + } + + gsi = kzalloc(sizeof(*gsi), GFP_KERNEL); + if (!gsi) + return ERR_PTR(-ENOMEM); + + gsi->tx_qps = kcalloc(num_qps, sizeof(*gsi->tx_qps), GFP_KERNEL); + if (!gsi->tx_qps) { + ret = -ENOMEM; + goto err_free; + } + + gsi->outstanding_wrs = kcalloc(init_attr->cap.max_send_wr, + sizeof(*gsi->outstanding_wrs), + GFP_KERNEL); + if (!gsi->outstanding_wrs) { + ret = -ENOMEM; + goto err_free_tx; + } + + mutex_init(&gsi->mutex); + + mutex_lock(&dev->devr.mutex); + + if (dev->devr.ports[port_num - 1].gsi) { + mlx5_ib_warn(dev, "GSI QP already exists on port %d\n", + port_num); + ret = -EBUSY; + goto err_free_wrs; + } + gsi->num_qps = num_qps; + spin_lock_init(&gsi->lock); + + gsi->cap = init_attr->cap; + gsi->sq_sig_type = init_attr->sq_sig_type; + gsi->ibqp.qp_num = 1; + gsi->port_num = port_num; + + gsi->cq = ib_alloc_cq(pd->device, gsi, init_attr->cap.max_send_wr, 0, + IB_POLL_SOFTIRQ); + if (IS_ERR(gsi->cq)) { + mlx5_ib_warn(dev, "unable to create send CQ for GSI QP. error %ld\n", + PTR_ERR(gsi->cq)); + ret = PTR_ERR(gsi->cq); + goto err_free_wrs; + } + + hw_init_attr.qp_type = MLX5_IB_QPT_HW_GSI; + hw_init_attr.send_cq = gsi->cq; + if (num_qps) { + hw_init_attr.cap.max_send_wr = 0; + hw_init_attr.cap.max_send_sge = 0; + hw_init_attr.cap.max_inline_data = 0; + } + gsi->rx_qp = ib_create_qp(pd, &hw_init_attr); + if (IS_ERR(gsi->rx_qp)) { + mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n", + PTR_ERR(gsi->rx_qp)); + ret = PTR_ERR(gsi->rx_qp); + goto err_destroy_cq; + } + + dev->devr.ports[init_attr->port_num - 1].gsi = gsi; + + mutex_unlock(&dev->devr.mutex); + + return &gsi->ibqp; + +err_destroy_cq: + ib_free_cq(gsi->cq); +err_free_wrs: + mutex_unlock(&dev->devr.mutex); + kfree(gsi->outstanding_wrs); +err_free_tx: + kfree(gsi->tx_qps); +err_free: + kfree(gsi); + return ERR_PTR(ret); +} + +int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp) +{ + struct mlx5_ib_dev *dev = to_mdev(qp->device); + struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp); + const int port_num = gsi->port_num; + int qp_index; + int ret; + + mlx5_ib_dbg(dev, "destroying GSI QP\n"); + + mutex_lock(&dev->devr.mutex); + ret = ib_destroy_qp(gsi->rx_qp); + if (ret) { + mlx5_ib_warn(dev, "unable to destroy hardware GSI QP. 
error %d\n", + ret); + mutex_unlock(&dev->devr.mutex); + return ret; + } + dev->devr.ports[port_num - 1].gsi = NULL; + mutex_unlock(&dev->devr.mutex); + gsi->rx_qp = NULL; + + for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) { + if (!gsi->tx_qps[qp_index]) + continue; + WARN_ON_ONCE(ib_destroy_qp(gsi->tx_qps[qp_index])); + gsi->tx_qps[qp_index] = NULL; + } + + ib_free_cq(gsi->cq); + + kfree(gsi->outstanding_wrs); + kfree(gsi->tx_qps); + kfree(gsi); + + return 0; +} + +static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi) +{ + struct ib_pd *pd = gsi->rx_qp->pd; + struct ib_qp_init_attr init_attr = { + .event_handler = gsi->rx_qp->event_handler, + .qp_context = gsi->rx_qp->qp_context, + .send_cq = gsi->cq, + .recv_cq = gsi->rx_qp->recv_cq, + .cap = { + .max_send_wr = gsi->cap.max_send_wr, + .max_send_sge = gsi->cap.max_send_sge, + .max_inline_data = gsi->cap.max_inline_data, + }, + .sq_sig_type = gsi->sq_sig_type, + .qp_type = IB_QPT_UD, + .create_flags = mlx5_ib_create_qp_sqpn_qp1(), + }; + + return ib_create_qp(pd, &init_attr); +} + +static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp, + u16 qp_index) +{ + struct mlx5_ib_dev *dev = to_mdev(qp->device); + struct ib_qp_attr attr; + int mask; + int ret; + + mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY | IB_QP_PORT; + attr.qp_state = IB_QPS_INIT; + attr.pkey_index = qp_index; + attr.qkey = IB_QP1_QKEY; + attr.port_num = gsi->port_num; + ret = ib_modify_qp(qp, &attr, mask); + if (ret) { + mlx5_ib_err(dev, "could not change QP%d state to INIT: %d\n", + qp->qp_num, ret); + return ret; + } + + attr.qp_state = IB_QPS_RTR; + ret = ib_modify_qp(qp, &attr, IB_QP_STATE); + if (ret) { + mlx5_ib_err(dev, "could not change QP%d state to RTR: %d\n", + qp->qp_num, ret); + return ret; + } + + attr.qp_state = IB_QPS_RTS; + attr.sq_psn = 0; + ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN); + if (ret) { + mlx5_ib_err(dev, "could not change QP%d state to RTS: %d\n", + qp->qp_num, ret); + return ret; + } + + return 0; +} + +static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index) +{ + struct ib_device *device = gsi->rx_qp->device; + struct mlx5_ib_dev *dev = to_mdev(device); + struct ib_qp *qp; + unsigned long flags; + u16 pkey; + int ret; + + ret = ib_query_pkey(device, gsi->port_num, qp_index, &pkey); + if (ret) { + mlx5_ib_warn(dev, "unable to read P_Key at port %d, index %d\n", + gsi->port_num, qp_index); + return; + } + + if (!pkey) { + mlx5_ib_dbg(dev, "invalid P_Key at port %d, index %d. Skipping.\n", + gsi->port_num, qp_index); + return; + } + + spin_lock_irqsave(&gsi->lock, flags); + qp = gsi->tx_qps[qp_index]; + spin_unlock_irqrestore(&gsi->lock, flags); + if (qp) { + mlx5_ib_dbg(dev, "already existing GSI TX QP at port %d, index %d. 
Skipping\n", + gsi->port_num, qp_index); + return; + } + + qp = create_gsi_ud_qp(gsi); + if (IS_ERR(qp)) { + mlx5_ib_warn(dev, "unable to create hardware UD QP for GSI: %ld\n", + PTR_ERR(qp)); + return; + } + + ret = modify_to_rts(gsi, qp, qp_index); + if (ret) + goto err_destroy_qp; + + spin_lock_irqsave(&gsi->lock, flags); + WARN_ON_ONCE(gsi->tx_qps[qp_index]); + gsi->tx_qps[qp_index] = qp; + spin_unlock_irqrestore(&gsi->lock, flags); + + return; + +err_destroy_qp: + WARN_ON_ONCE(qp); +} + +static void setup_qps(struct mlx5_ib_gsi_qp *gsi) +{ + u16 qp_index; + + for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) + setup_qp(gsi, qp_index); +} + +int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr, + int attr_mask) +{ + struct mlx5_ib_dev *dev = to_mdev(qp->device); + struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp); + int ret; + + mlx5_ib_dbg(dev, "modifying GSI QP to state %d\n", attr->qp_state); + + mutex_lock(&gsi->mutex); + ret = ib_modify_qp(gsi->rx_qp, attr, attr_mask); + if (ret) { + mlx5_ib_warn(dev, "unable to modify GSI rx QP: %d\n", ret); + goto unlock; + } + + if (to_mqp(gsi->rx_qp)->state == IB_QPS_RTS) + setup_qps(gsi); + +unlock: + mutex_unlock(&gsi->mutex); + + return ret; +} + +int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr) +{ + struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp); + int ret; + + mutex_lock(&gsi->mutex); + ret = ib_query_qp(gsi->rx_qp, qp_attr, qp_attr_mask, qp_init_attr); + qp_init_attr->cap = gsi->cap; + mutex_unlock(&gsi->mutex); + + return ret; +} + +/* Call with gsi->lock locked */ +static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_gsi_qp *gsi, + struct ib_ud_wr *wr, struct ib_wc *wc) +{ + struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device); + struct mlx5_ib_gsi_wr *gsi_wr; + + if (gsi->outstanding_pi == gsi->outstanding_ci + gsi->cap.max_send_wr) { + mlx5_ib_warn(dev, "no available GSI work request.\n"); + return -ENOMEM; + } + + gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi]; + gsi->outstanding_pi = next_outstanding(gsi, gsi->outstanding_pi); + + if (!wc) { + memset(&gsi_wr->wc, 0, sizeof(gsi_wr->wc)); + gsi_wr->wc.pkey_index = wr->pkey_index; + gsi_wr->wc.wr_id = wr->wr.wr_id; + } else { + gsi_wr->wc = *wc; + gsi_wr->completed = true; + } + + gsi_wr->cqe.done = &handle_single_completion; + wr->wr.wr_cqe = &gsi_wr->cqe; + + return 0; +} + +/* Call with gsi->lock locked */ +static int mlx5_ib_gsi_silent_drop(struct mlx5_ib_gsi_qp *gsi, + struct ib_ud_wr *wr) +{ + struct ib_wc wc = { + { .wr_id = wr->wr.wr_id }, + .status = IB_WC_SUCCESS, + .opcode = IB_WC_SEND, + .qp = &gsi->ibqp, + }; + int ret; + + ret = mlx5_ib_add_outstanding_wr(gsi, wr, &wc); + if (ret) + return ret; + + generate_completions(gsi); + + return 0; +} + +/* Call with gsi->lock locked */ +static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr) +{ + struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device); + int qp_index = wr->pkey_index; + + if (!mlx5_ib_deth_sqpn_cap(dev)) + return gsi->rx_qp; + + if (qp_index >= gsi->num_qps) + return NULL; + + return gsi->tx_qps[qp_index]; +} + +int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr) +{ + struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp); + struct ib_qp *tx_qp; + unsigned long flags; + int ret; + + for (; wr; wr = wr->next) { + struct ib_ud_wr cur_wr = *ud_wr(wr); + + cur_wr.wr.next = NULL; + + spin_lock_irqsave(&gsi->lock, flags); + tx_qp = get_tx_qp(gsi, &cur_wr); + if (!tx_qp) { + ret 
= mlx5_ib_gsi_silent_drop(gsi, &cur_wr); + if (ret) + goto err; + spin_unlock_irqrestore(&gsi->lock, flags); + continue; + } + + ret = mlx5_ib_add_outstanding_wr(gsi, &cur_wr, NULL); + if (ret) + goto err; + + ret = ib_post_send(tx_qp, &cur_wr.wr, bad_wr); + if (ret) { + /* Undo the effect of adding the outstanding wr */ + gsi->outstanding_pi = (gsi->outstanding_pi - 1) % + gsi->cap.max_send_wr; + goto err; + } + spin_unlock_irqrestore(&gsi->lock, flags); + } + + return 0; + +err: + spin_unlock_irqrestore(&gsi->lock, flags); + *bad_wr = wr; + return ret; +} + +int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr) +{ + struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp); + + return ib_post_recv(gsi->rx_qp, wr, bad_wr); +} + +void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi) +{ + if (!gsi) + return; + + mutex_lock(&gsi->mutex); + setup_qps(gsi); + mutex_unlock(&gsi->mutex); +} diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c new file mode 100644 index 000000000..c1b9de800 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/ib_virt.c @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include <linux/module.h> +#include <linux/mlx5/vport.h> +#include "mlx5_ib.h" + +static inline u32 mlx_to_net_policy(enum port_state_policy mlx_policy) +{ + switch (mlx_policy) { + case MLX5_POLICY_DOWN: + return IFLA_VF_LINK_STATE_DISABLE; + case MLX5_POLICY_UP: + return IFLA_VF_LINK_STATE_ENABLE; + case MLX5_POLICY_FOLLOW: + return IFLA_VF_LINK_STATE_AUTO; + default: + return __IFLA_VF_LINK_STATE_MAX; + } +} + +int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u8 port, + struct ifla_vf_info *info) +{ + struct mlx5_ib_dev *dev = to_mdev(device); + struct mlx5_core_dev *mdev = dev->mdev; + struct mlx5_hca_vport_context *rep; + int err; + + rep = kzalloc(sizeof(*rep), GFP_KERNEL); + if (!rep) + return -ENOMEM; + + err = mlx5_query_hca_vport_context(mdev, 1, 1, vf + 1, rep); + if (err) { + mlx5_ib_warn(dev, "failed to query port policy for vf %d (%d)\n", + vf, err); + goto free; + } + memset(info, 0, sizeof(*info)); + info->linkstate = mlx_to_net_policy(rep->policy); + if (info->linkstate == __IFLA_VF_LINK_STATE_MAX) + err = -EINVAL; + +free: + kfree(rep); + return err; +} + +static inline enum port_state_policy net_to_mlx_policy(int policy) +{ + switch (policy) { + case IFLA_VF_LINK_STATE_DISABLE: + return MLX5_POLICY_DOWN; + case IFLA_VF_LINK_STATE_ENABLE: + return MLX5_POLICY_UP; + case IFLA_VF_LINK_STATE_AUTO: + return MLX5_POLICY_FOLLOW; + default: + return MLX5_POLICY_INVALID; + } +} + +int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf, + u8 port, int state) +{ + struct mlx5_ib_dev *dev = to_mdev(device); + struct mlx5_core_dev *mdev = dev->mdev; + struct mlx5_hca_vport_context *in; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->policy = net_to_mlx_policy(state); + if (in->policy == MLX5_POLICY_INVALID) { + err = -EINVAL; + goto out; + } + in->field_select = MLX5_HCA_VPORT_SEL_STATE_POLICY; + err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in); + +out: + kfree(in); + return err; +} + +int mlx5_ib_get_vf_stats(struct ib_device *device, int vf, + u8 port, struct ifla_vf_stats *stats) +{ + int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out); + struct mlx5_core_dev *mdev; + struct mlx5_ib_dev *dev; + void *out; + int err; + + dev = to_mdev(device); + mdev = dev->mdev; + + out = kzalloc(out_sz, GFP_KERNEL); + if (!out) + return -ENOMEM; + + err = mlx5_core_query_vport_counter(mdev, true, vf, port, out, out_sz); + if (err) + goto ex; + + stats->rx_packets = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.packets); + stats->tx_packets = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.packets); + stats->rx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.octets); + stats->tx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.octets); + stats->multicast = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_multicast.packets); + +ex: + kfree(out); + return err; +} + +static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid) +{ + struct mlx5_ib_dev *dev = to_mdev(device); + struct mlx5_core_dev *mdev = dev->mdev; + struct mlx5_hca_vport_context *in; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->field_select = MLX5_HCA_VPORT_SEL_NODE_GUID; + in->node_guid = guid; + err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in); + kfree(in); + return err; +} + +static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid) +{ + struct 
mlx5_ib_dev *dev = to_mdev(device); + struct mlx5_core_dev *mdev = dev->mdev; + struct mlx5_hca_vport_context *in; + int err; + + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + in->field_select = MLX5_HCA_VPORT_SEL_PORT_GUID; + in->port_guid = guid; + err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in); + kfree(in); + return err; +} + +int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port, + u64 guid, int type) +{ + if (type == IFLA_VF_IB_NODE_GUID) + return set_vf_node_guid(device, vf, port, guid); + else if (type == IFLA_VF_IB_PORT_GUID) + return set_vf_port_guid(device, vf, port, guid); + + return -EINVAL; +} diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index b84d13a48..1534af113 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -31,8 +31,10 @@ */ #include <linux/mlx5/cmd.h> +#include <linux/mlx5/vport.h> #include <rdma/ib_mad.h> #include <rdma/ib_smi.h> +#include <rdma/ib_pma.h> #include "mlx5_ib.h" enum { @@ -57,20 +59,12 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey, return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port); } -int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, - const struct ib_wc *in_wc, const struct ib_grh *in_grh, - const struct ib_mad_hdr *in, size_t in_mad_size, - struct ib_mad_hdr *out, size_t *out_mad_size, - u16 *out_mad_pkey_index) +static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad *in_mad, struct ib_mad *out_mad) { u16 slid; int err; - const struct ib_mad *in_mad = (const struct ib_mad *)in; - struct ib_mad *out_mad = (struct ib_mad *)out; - - if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || - *out_mad_size != sizeof(*out_mad))) - return IB_MAD_RESULT_FAILURE; slid = in_wc ? 
in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); @@ -117,6 +111,156 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; } +static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext, + void *out) +{ +#define MLX5_SUM_CNT(p, cntr1, cntr2) \ + (MLX5_GET64(query_vport_counter_out, p, cntr1) + \ + MLX5_GET64(query_vport_counter_out, p, cntr2)) + + pma_cnt_ext->port_xmit_data = + cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets, + transmitted_ib_multicast.octets) >> 2); + pma_cnt_ext->port_xmit_data = + cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets, + received_ib_multicast.octets) >> 2); + pma_cnt_ext->port_xmit_packets = + cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.packets, + transmitted_ib_multicast.packets)); + pma_cnt_ext->port_rcv_packets = + cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.packets, + received_ib_multicast.packets)); + pma_cnt_ext->port_unicast_xmit_packets = + MLX5_GET64_BE(query_vport_counter_out, + out, transmitted_ib_unicast.packets); + pma_cnt_ext->port_unicast_rcv_packets = + MLX5_GET64_BE(query_vport_counter_out, + out, received_ib_unicast.packets); + pma_cnt_ext->port_multicast_xmit_packets = + MLX5_GET64_BE(query_vport_counter_out, + out, transmitted_ib_multicast.packets); + pma_cnt_ext->port_multicast_rcv_packets = + MLX5_GET64_BE(query_vport_counter_out, + out, received_ib_multicast.packets); +} + +static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt, + void *out) +{ + /* Traffic counters will be reported in + * their 64bit form via ib_pma_portcounters_ext by default. + */ + void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out, + counter_set); + +#define MLX5_ASSIGN_PMA_CNTR(counter_var, counter_name) { \ + counter_var = MLX5_GET_BE(typeof(counter_var), \ + ib_port_cntrs_grp_data_layout, \ + out_pma, counter_name); \ + } + + MLX5_ASSIGN_PMA_CNTR(pma_cnt->symbol_error_counter, + symbol_error_counter); + MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_error_recovery_counter, + link_error_recovery_counter); + MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_downed_counter, + link_downed_counter); + MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_errors, + port_rcv_errors); + MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_remphys_errors, + port_rcv_remote_physical_errors); + MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_switch_relay_errors, + port_rcv_switch_relay_errors); + MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_discards, + port_xmit_discards); + MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_constraint_errors, + port_xmit_constraint_errors); + MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_constraint_errors, + port_rcv_constraint_errors); + MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_overrun_errors, + link_overrun_errors); + MLX5_ASSIGN_PMA_CNTR(pma_cnt->vl15_dropped, + vl_15_dropped); +} + +static int process_pma_cmd(struct ib_device *ibdev, u8 port_num, + const struct ib_mad *in_mad, struct ib_mad *out_mad) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + int err; + void *out_cnt; + + /* Decalring support of extended counters */ + if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) { + struct ib_class_port_info cpi = {}; + + cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH; + memcpy((out_mad->data + 40), &cpi, sizeof(cpi)); + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; + } + + if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) { + struct ib_pma_portcounters_ext *pma_cnt_ext = + (struct ib_pma_portcounters_ext *)(out_mad->data + 40); + int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out); + + 
out_cnt = mlx5_vzalloc(sz); + if (!out_cnt) + return IB_MAD_RESULT_FAILURE; + + err = mlx5_core_query_vport_counter(dev->mdev, 0, 0, + port_num, out_cnt, sz); + if (!err) + pma_cnt_ext_assign(pma_cnt_ext, out_cnt); + } else { + struct ib_pma_portcounters *pma_cnt = + (struct ib_pma_portcounters *)(out_mad->data + 40); + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + + out_cnt = mlx5_vzalloc(sz); + if (!out_cnt) + return IB_MAD_RESULT_FAILURE; + + err = mlx5_core_query_ib_ppcnt(dev->mdev, port_num, + out_cnt, sz); + if (!err) + pma_cnt_assign(pma_cnt, out_cnt); + } + + kvfree(out_cnt); + if (err) + return IB_MAD_RESULT_FAILURE; + + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; +} + +int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, + const struct ib_wc *in_wc, const struct ib_grh *in_grh, + const struct ib_mad_hdr *in, size_t in_mad_size, + struct ib_mad_hdr *out, size_t *out_mad_size, + u16 *out_mad_pkey_index) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + struct mlx5_core_dev *mdev = dev->mdev; + const struct ib_mad *in_mad = (const struct ib_mad *)in; + struct ib_mad *out_mad = (struct ib_mad *)out; + + if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || + *out_mad_size != sizeof(*out_mad))) + return IB_MAD_RESULT_FAILURE; + + memset(out_mad->data, 0, sizeof(out_mad->data)); + + if (MLX5_CAP_GEN(mdev, vport_counters) && + in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT && + in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) { + return process_pma_cmd(ibdev, port_num, in_mad, out_mad); + } else { + return process_mad(ibdev, mad_flags, port_num, in_wc, in_grh, + in_mad, out_mad); + } +} + int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port) { struct ib_smp *in_mad = NULL; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index ed9cefa1f..6ad0489cb 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -42,6 +42,7 @@ #include <rdma/ib_user_verbs.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> +#include <linux/mlx5/port.h> #include <linux/mlx5/vport.h> #include <rdma/ib_smi.h> #include <rdma/ib_umem.h> @@ -283,7 +284,7 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num, static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev) { - return !dev->mdev->issi; + return !MLX5_CAP_GEN(dev->mdev, ib_virt); } enum { @@ -487,6 +488,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; if (MLX5_CAP_GEN(mdev, xrc)) props->device_cap_flags |= IB_DEVICE_XRC; + if (MLX5_CAP_GEN(mdev, imaicl)) { + props->device_cap_flags |= IB_DEVICE_MEM_WINDOW | + IB_DEVICE_MEM_WINDOW_TYPE_2B; + props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); + /* We support 'Gappy' memory registration too */ + props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG; + } props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; if (MLX5_CAP_GEN(mdev, sho)) { props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER; @@ -504,6 +512,11 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, (MLX5_CAP_ETH(dev->mdev, csum_cap))) props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; + if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) { + props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; + props->device_cap_flags |= IB_DEVICE_UD_TSO; + } + props->vendor_part_id = mdev->pdev->device; props->hw_ver = mdev->pdev->revision; @@ -529,7 +542,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay); 
props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; props->max_srq_sge = max_rq_sg - 1; - props->max_fast_reg_page_list_len = (unsigned int)-1; + props->max_fast_reg_page_list_len = + 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size); get_atomic_caps(dev, props); props->masked_atomic_cap = IB_ATOMIC_NONE; props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg); @@ -549,6 +563,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, if (MLX5_CAP_GEN(mdev, cd)) props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL; + if (!mlx5_core_is_pf(mdev)) + props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION; + return 0; } @@ -654,8 +671,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, struct mlx5_ib_dev *dev = to_mdev(ibdev); struct mlx5_core_dev *mdev = dev->mdev; struct mlx5_hca_vport_context *rep; - int max_mtu; - int oper_mtu; + u16 max_mtu; + u16 oper_mtu; int err; u8 ib_link_width_oper; u8 vl_hw_cap; @@ -686,6 +703,7 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, props->qkey_viol_cntr = rep->qkey_violation_counter; props->subnet_timeout = rep->subnet_timeout; props->init_type_reply = rep->init_type_reply; + props->grh_required = rep->grh_required; err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port); if (err) @@ -1369,11 +1387,20 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id) return 0; } +static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap) +{ + priority *= 2; + if (!dont_trap) + priority++; + return priority; +} + #define MLX5_FS_MAX_TYPES 10 #define MLX5_FS_MAX_ENTRIES 32000UL static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev, struct ib_flow_attr *flow_attr) { + bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP; struct mlx5_flow_namespace *ns = NULL; struct mlx5_ib_flow_prio *prio; struct mlx5_flow_table *ft; @@ -1383,10 +1410,12 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev, int err = 0; if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { - if (flow_is_multicast_only(flow_attr)) + if (flow_is_multicast_only(flow_attr) && + !dont_trap) priority = MLX5_IB_FLOW_MCAST_PRIO; else - priority = flow_attr->priority; + priority = ib_prio_to_core_prio(flow_attr->priority, + dont_trap); ns = mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS); num_entries = MLX5_FS_MAX_ENTRIES; @@ -1434,6 +1463,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, unsigned int spec_index; u32 *match_c; u32 *match_v; + u32 action; int err = 0; if (!is_valid_attr(flow_attr)) @@ -1459,9 +1489,11 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, /* Outer header support only */ match_criteria_enable = (!outer_header_zero(match_c)) << 0; + action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST : + MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; handler->rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_c, match_v, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + action, MLX5_FS_DEFAULT_FLOW_TAG, dst); @@ -1481,6 +1513,29 @@ free: return err ? 
ERR_PTR(err) : handler; } +static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev, + struct mlx5_ib_flow_prio *ft_prio, + struct ib_flow_attr *flow_attr, + struct mlx5_flow_destination *dst) +{ + struct mlx5_ib_flow_handler *handler_dst = NULL; + struct mlx5_ib_flow_handler *handler = NULL; + + handler = create_flow_rule(dev, ft_prio, flow_attr, NULL); + if (!IS_ERR(handler)) { + handler_dst = create_flow_rule(dev, ft_prio, + flow_attr, dst); + if (IS_ERR(handler_dst)) { + mlx5_del_flow_rule(handler->rule); + kfree(handler); + handler = handler_dst; + } else { + list_add(&handler_dst->list, &handler->list); + } + } + + return handler; +} enum { LEFTOVERS_MC, LEFTOVERS_UC, @@ -1558,7 +1613,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, if (domain != IB_FLOW_DOMAIN_USER || flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) || - flow_attr->flags) + (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)) return ERR_PTR(-EINVAL); dst = kzalloc(sizeof(*dst), GFP_KERNEL); @@ -1577,8 +1632,13 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn; if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { - handler = create_flow_rule(dev, ft_prio, flow_attr, - dst); + if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) { + handler = create_dont_trap_rule(dev, ft_prio, + flow_attr, dst); + } else { + handler = create_flow_rule(dev, ft_prio, flow_attr, + dst); + } } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { handler = create_leftovers_rule(dev, ft_prio, flow_attr, @@ -1716,6 +1776,17 @@ static struct device_attribute *mlx5_class_attributes[] = { &dev_attr_reg_pages, }; +static void pkey_change_handler(struct work_struct *work) +{ + struct mlx5_ib_port_resources *ports = + container_of(work, struct mlx5_ib_port_resources, + pkey_change_work); + + mutex_lock(&ports->devr->mutex); + mlx5_ib_gsi_pkey_change(ports->gsi); + mutex_unlock(&ports->devr->mutex); +} + static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, enum mlx5_dev_event event, unsigned long param) { @@ -1752,6 +1823,8 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, case MLX5_DEV_EVENT_PKEY_CHANGE: ibev.event = IB_EVENT_PKEY_CHANGE; port = (u8)param; + + schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); break; case MLX5_DEV_EVENT_GUID_CHANGE: @@ -1838,7 +1911,7 @@ static void destroy_umrc_res(struct mlx5_ib_dev *dev) mlx5_ib_warn(dev, "mr cache cleanup failed\n"); mlx5_ib_destroy_qp(dev->umrc.qp); - ib_destroy_cq(dev->umrc.cq); + ib_free_cq(dev->umrc.cq); ib_dealloc_pd(dev->umrc.pd); } @@ -1853,7 +1926,6 @@ static int create_umr_res(struct mlx5_ib_dev *dev) struct ib_pd *pd; struct ib_cq *cq; struct ib_qp *qp; - struct ib_cq_init_attr cq_attr = {}; int ret; attr = kzalloc(sizeof(*attr), GFP_KERNEL); @@ -1870,15 +1942,12 @@ static int create_umr_res(struct mlx5_ib_dev *dev) goto error_0; } - cq_attr.cqe = 128; - cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL, - &cq_attr); + cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ); if (IS_ERR(cq)) { mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n"); ret = PTR_ERR(cq); goto error_2; } - ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); init_attr->send_cq = cq; init_attr->recv_cq = cq; @@ -1945,7 +2014,7 @@ error_4: mlx5_ib_destroy_qp(qp); error_3: - ib_destroy_cq(cq); + ib_free_cq(cq); error_2: ib_dealloc_pd(pd); @@ -1961,10 +2030,13 @@ static int create_dev_resources(struct 
mlx5_ib_resources *devr) struct ib_srq_init_attr attr; struct mlx5_ib_dev *dev; struct ib_cq_init_attr cq_attr = {.cqe = 1}; + int port; int ret = 0; dev = container_of(devr, struct mlx5_ib_dev, devr); + mutex_init(&devr->mutex); + devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); if (IS_ERR(devr->p0)) { ret = PTR_ERR(devr->p0); @@ -2052,6 +2124,12 @@ static int create_dev_resources(struct mlx5_ib_resources *devr) atomic_inc(&devr->p0->usecnt); atomic_set(&devr->s0->usecnt, 0); + for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) { + INIT_WORK(&devr->ports[port].pkey_change_work, + pkey_change_handler); + devr->ports[port].devr = devr; + } + return 0; error5: @@ -2070,12 +2148,20 @@ error0: static void destroy_dev_resources(struct mlx5_ib_resources *devr) { + struct mlx5_ib_dev *dev = + container_of(devr, struct mlx5_ib_dev, devr); + int port; + mlx5_ib_destroy_srq(devr->s1); mlx5_ib_destroy_srq(devr->s0); mlx5_ib_dealloc_xrcd(devr->x0); mlx5_ib_dealloc_xrcd(devr->x1); mlx5_ib_destroy_cq(devr->c0); mlx5_ib_dealloc_pd(devr->p0); + + /* Make sure no change P_Key work items are still executing */ + for (port = 0; port < dev->num_ports; ++port) + cancel_work_sync(&devr->ports[port].pkey_change_work); } static u32 get_core_cap_flags(struct ib_device *ibdev) @@ -2198,6 +2284,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | (1ull << IB_USER_VERBS_CMD_REG_MR) | + (1ull << IB_USER_VERBS_CMD_REREG_MR) | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | @@ -2258,6 +2345,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq; dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr; dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr; + dev->ib_dev.rereg_user_mr = mlx5_ib_rereg_user_mr; dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr; dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach; dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach; @@ -2266,9 +2354,23 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg; dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; dev->ib_dev.get_port_immutable = mlx5_port_immutable; + if (mlx5_core_is_pf(mdev)) { + dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; + dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; + dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats; + dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid; + } mlx5_ib_internal_fill_odp_caps(dev); + if (MLX5_CAP_GEN(mdev, imaicl)) { + dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw; + dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw; + dev->ib_dev.uverbs_cmd_mask |= + (1ull << IB_USER_VERBS_CMD_ALLOC_MW) | + (1ull << IB_USER_VERBS_CMD_DEALLOC_MW); + } + if (MLX5_CAP_GEN(mdev, xrc)) { dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd; dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd; diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index d2b9737ba..b46c25542 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -43,6 +43,7 @@ #include <linux/mlx5/srq.h> #include <linux/types.h> #include <linux/mlx5/transobj.h> +#include <rdma/ib_user_verbs.h> #define mlx5_ib_dbg(dev, format, arg...) 
\ pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \ @@ -126,7 +127,7 @@ struct mlx5_ib_pd { }; #define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1) -#define MLX5_IB_FLOW_LAST_PRIO (MLX5_IB_FLOW_MCAST_PRIO - 1) +#define MLX5_IB_FLOW_LAST_PRIO (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1) #if (MLX5_IB_FLOW_LAST_PRIO <= 0) #error "Invalid number of bypass priorities" #endif @@ -162,9 +163,31 @@ struct mlx5_ib_flow_db { #define MLX5_IB_SEND_UMR_UNREG IB_SEND_RESERVED_START #define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1) #define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2) + +#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (IB_SEND_RESERVED_START << 3) +#define MLX5_IB_SEND_UMR_UPDATE_PD (IB_SEND_RESERVED_START << 4) +#define MLX5_IB_SEND_UMR_UPDATE_ACCESS IB_SEND_RESERVED_END + #define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1 +/* + * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI + * creates the actual hardware QP. + */ +#define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2 #define MLX5_IB_WR_UMR IB_WR_RESERVED1 +/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags. + * + * These flags are intended for internal use by the mlx5_ib driver, and they + * rely on the range reserved for that use in the ib_qp_create_flags enum. + */ + +/* Create a UD QP whose source QP number is 1 */ +static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void) +{ + return IB_QP_CREATE_RESERVED_START; +} + struct wr_list { u16 opcode; u16 next; @@ -325,11 +348,14 @@ struct mlx5_ib_cq_buf { }; enum mlx5_ib_qp_flags { - MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = 1 << 0, - MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 1, - MLX5_IB_QP_CROSS_CHANNEL = 1 << 2, - MLX5_IB_QP_MANAGED_SEND = 1 << 3, - MLX5_IB_QP_MANAGED_RECV = 1 << 4, + MLX5_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO, + MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK, + MLX5_IB_QP_CROSS_CHANNEL = IB_QP_CREATE_CROSS_CHANNEL, + MLX5_IB_QP_MANAGED_SEND = IB_QP_CREATE_MANAGED_SEND, + MLX5_IB_QP_MANAGED_RECV = IB_QP_CREATE_MANAGED_RECV, + MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 5, + /* QP uses 1 as its source QP number */ + MLX5_IB_QP_SQPN_QP1 = 1 << 6, }; struct mlx5_umr_wr { @@ -373,6 +399,14 @@ struct mlx5_ib_cq { struct ib_umem *resize_umem; int cqe_size; u32 create_flags; + struct list_head wc_list; + enum ib_cq_notify_flags notify_flags; + struct work_struct notify_work; +}; + +struct mlx5_ib_wc { + struct ib_wc wc; + struct list_head list; }; struct mlx5_ib_srq { @@ -413,7 +447,8 @@ struct mlx5_ib_mr { int ndescs; int max_descs; int desc_size; - struct mlx5_core_mr mmr; + int access_mode; + struct mlx5_core_mkey mmkey; struct ib_umem *umem; struct mlx5_shared_mr_info *smr_info; struct list_head list; @@ -425,19 +460,20 @@ struct mlx5_ib_mr { struct mlx5_core_sig_ctx *sig; int live; void *descs_alloc; + int access_flags; /* Needed for rereg MR */ +}; + +struct mlx5_ib_mw { + struct ib_mw ibmw; + struct mlx5_core_mkey mmkey; }; struct mlx5_ib_umr_context { + struct ib_cqe cqe; enum ib_wc_status status; struct completion done; }; -static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context) -{ - context->status = -1; - init_completion(&context->done); -} - struct umr_common { struct ib_pd *pd; struct ib_cq *cq; @@ -487,6 +523,14 @@ struct mlx5_mr_cache { unsigned long last_add; }; +struct mlx5_ib_gsi_qp; + +struct mlx5_ib_port_resources { + struct mlx5_ib_resources *devr; + struct mlx5_ib_gsi_qp *gsi; + struct work_struct pkey_change_work; +}; 
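For context on the struct ib_cqe member that mlx5_ib_umr_context gains above: the UMR path is being moved to the kernel's newer CQ API, so instead of stashing a pointer in wr_id and polling the CQ by hand, the caller embeds an ib_cqe in its own context, points wr->wr_cqe at it, and the CQ allocated with ib_alloc_cq(..., IB_POLL_SOFTIRQ) invokes the ->done callback when the work request completes. A minimal sketch of that pattern, using made-up names (my_umr_wait, my_post_and_wait) rather than the driver's:

	/* Sketch only -- not the driver's code.  Illustrates the ib_cqe
	 * completion pattern the UMR rework in this series relies on.
	 */
	#include <linux/completion.h>
	#include <rdma/ib_verbs.h>

	struct my_umr_wait {
		struct ib_cqe		cqe;	/* ->done is invoked by the CQ poller */
		struct completion	done;
		enum ib_wc_status	status;
	};

	static void my_umr_done(struct ib_cq *cq, struct ib_wc *wc)
	{
		/* Recover our context from the completed WR's cqe pointer. */
		struct my_umr_wait *wait =
			container_of(wc->wr_cqe, struct my_umr_wait, cqe);

		wait->status = wc->status;
		complete(&wait->done);
	}

	/* Post one WR on a QP whose CQ came from ib_alloc_cq(..., IB_POLL_SOFTIRQ). */
	static int my_post_and_wait(struct ib_qp *qp, struct ib_send_wr *wr)
	{
		struct my_umr_wait wait = { .cqe = { .done = my_umr_done } };
		struct ib_send_wr *bad;
		int err;

		init_completion(&wait.done);
		wr->wr_cqe = &wait.cqe;

		err = ib_post_send(qp, wr, &bad);
		if (err)
			return err;

		wait_for_completion(&wait.done);
		return wait.status == IB_WC_SUCCESS ? 0 : -EIO;
	}

This is the same shape as mlx5_ib_umr_done()/mlx5_ib_init_umr_context() in the mr.c hunks further down; the gain is that the driver no longer needs its own CQ handler that casts wr_id back to a pointer.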
+ struct mlx5_ib_resources { struct ib_cq *c0; struct ib_xrcd *x0; @@ -494,6 +538,9 @@ struct mlx5_ib_resources { struct ib_pd *p0; struct ib_srq *s0; struct ib_srq *s1; + struct mlx5_ib_port_resources ports[2]; + /* Protects changes to the port resources */ + struct mutex mutex; }; struct mlx5_roce { @@ -558,9 +605,9 @@ static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp) return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp; } -static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr) +static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey) { - return container_of(mmr, struct mlx5_ib_mr, mmr); + return container_of(mmkey, struct mlx5_ib_mr, mmkey); } static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd) @@ -588,6 +635,11 @@ static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr) return container_of(ibmr, struct mlx5_ib_mr, ibmr); } +static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw) +{ + return container_of(ibmw, struct mlx5_ib_mw, ibmw); +} + struct mlx5_ib_ah { struct ib_ah ibah; struct mlx5_av av; @@ -648,8 +700,14 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc); struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int access_flags, struct ib_udata *udata); +struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, + struct ib_udata *udata); +int mlx5_ib_dealloc_mw(struct ib_mw *mw); int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages, int zap); +int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, + u64 length, u64 virt_addr, int access_flags, + struct ib_pd *pd, struct ib_udata *udata); int mlx5_ib_dereg_mr(struct ib_mr *ibmr); struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, @@ -700,7 +758,6 @@ int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq); int mlx5_mr_cache_init(struct mlx5_ib_dev *dev); int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev); int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift); -void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context); int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, struct ib_mr_status *mr_status); @@ -719,7 +776,6 @@ void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp); void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp); void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, unsigned long end); - #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) { @@ -736,9 +792,35 @@ static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {} #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ +int mlx5_ib_get_vf_config(struct ib_device *device, int vf, + u8 port, struct ifla_vf_info *info); +int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf, + u8 port, int state); +int mlx5_ib_get_vf_stats(struct ib_device *device, int vf, + u8 port, struct ifla_vf_stats *stats); +int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port, + u64 guid, int type); + __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num, int index); +/* GSI QP helper functions */ +struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr); +int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp); +int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr, + int attr_mask); +int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, + int 
qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr); +int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr, + struct ib_send_wr **bad_wr); +int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr, + struct ib_recv_wr **bad_wr); +void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi); + +int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc); + static inline void init_query_mad(struct ib_smp *mad) { mad->base_version = 1; @@ -758,7 +840,7 @@ static inline u8 convert_access(int acc) static inline int is_qp1(enum ib_qp_type qp_type) { - return qp_type == IB_QPT_GSI; + return qp_type == MLX5_IB_QPT_HW_GSI; } #define MLX5_MAX_UMR_SHIFT 16 diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 6000f7aee..4d5bff151 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -40,6 +40,7 @@ #include <rdma/ib_umem_odp.h> #include <rdma/ib_verbs.h> #include "mlx5_ib.h" +#include "user.h" enum { MAX_PENDING_REG_MR = 8, @@ -57,7 +58,7 @@ static int clean_mr(struct mlx5_ib_mr *mr); static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { - int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr); + int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING /* Wait until all page fault handlers using the mr complete. */ @@ -77,6 +78,40 @@ static int order2idx(struct mlx5_ib_dev *dev, int order) return order - cache->ent[0].order; } +static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length) +{ + return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >= + length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1)); +} + +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING +static void update_odp_mr(struct mlx5_ib_mr *mr) +{ + if (mr->umem->odp_data) { + /* + * This barrier prevents the compiler from moving the + * setting of umem->odp_data->private to point to our + * MR, before reg_umr finished, to ensure that the MR + * initialization have finished before starting to + * handle invalidations. + */ + smp_wmb(); + mr->umem->odp_data->private = mr; + /* + * Make sure we will see the new + * umem->odp_data->private value in the invalidation + * routines, before we can get page faults on the + * MR. Page faults can happen once we put the MR in + * the tree, below this line. Without the barrier, + * there can be a fault handling and an invalidation + * before umem->odp_data->private == mr is visible to + * the invalidation handler. 
+ */ + smp_wmb(); + } +} +#endif + static void reg_mr_callback(int status, void *context) { struct mlx5_ib_mr *mr = context; @@ -86,7 +121,7 @@ static void reg_mr_callback(int status, void *context) struct mlx5_cache_ent *ent = &cache->ent[c]; u8 key; unsigned long flags; - struct mlx5_mr_table *table = &dev->mdev->priv.mr_table; + struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table; int err; spin_lock_irqsave(&ent->lock, flags); @@ -113,7 +148,7 @@ static void reg_mr_callback(int status, void *context) spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags); key = dev->mdev->priv.mkey_key++; spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags); - mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key; + mr->mmkey.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key; cache->last_add = jiffies; @@ -124,10 +159,10 @@ static void reg_mr_callback(int status, void *context) spin_unlock_irqrestore(&ent->lock, flags); write_lock_irqsave(&table->lock, flags); - err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key), - &mr->mmr); + err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key), + &mr->mmkey); if (err) - pr_err("Error inserting to mr tree. 0x%x\n", -err); + pr_err("Error inserting to mkey tree. 0x%x\n", -err); write_unlock_irqrestore(&table->lock, flags); } @@ -168,7 +203,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num) spin_lock_irq(&ent->lock); ent->pending++; spin_unlock_irq(&ent->lock); - err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, + err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, sizeof(*in), reg_mr_callback, mr, &mr->out); if (err) { @@ -657,14 +692,14 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc) seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); seg->start_addr = 0; - err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL, + err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, sizeof(*in), NULL, NULL, NULL); if (err) goto err_in; kfree(in); - mr->ibmr.lkey = mr->mmr.key; - mr->ibmr.rkey = mr->mmr.key; + mr->ibmr.lkey = mr->mmkey.key; + mr->ibmr.rkey = mr->mmkey.key; mr->umem = NULL; return &mr->ibmr; @@ -693,10 +728,40 @@ static int use_umr(int order) return order <= MLX5_MAX_UMR_SHIFT; } -static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr, - struct ib_sge *sg, u64 dma, int n, u32 key, - int page_shift, u64 virt_addr, u64 len, - int access_flags) +static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, + int npages, int page_shift, int *size, + __be64 **mr_pas, dma_addr_t *dma) +{ + __be64 *pas; + struct device *ddev = dev->ib_dev.dma_device; + + /* + * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes. + * To avoid copying garbage after the pas array, we allocate + * a little more. + */ + *size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT); + *mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL); + if (!(*mr_pas)) + return -ENOMEM; + + pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN); + mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT); + /* Clear padding after the actual pages. 
*/ + memset(pas + npages, 0, *size - npages * sizeof(u64)); + + *dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE); + if (dma_mapping_error(ddev, *dma)) { + kfree(*mr_pas); + return -ENOMEM; + } + + return 0; +} + +static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr, + struct ib_sge *sg, u64 dma, int n, u32 key, + int page_shift) { struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_umr_wr *umrwr = umr_wr(wr); @@ -706,7 +771,6 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr, sg->lkey = dev->umrc.pd->local_dma_lkey; wr->next = NULL; - wr->send_flags = 0; wr->sg_list = sg; if (n) wr->num_sge = 1; @@ -718,6 +782,19 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr, umrwr->npages = n; umrwr->page_shift = page_shift; umrwr->mkey = key; +} + +static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr, + struct ib_sge *sg, u64 dma, int n, u32 key, + int page_shift, u64 virt_addr, u64 len, + int access_flags) +{ + struct mlx5_umr_wr *umrwr = umr_wr(wr); + + prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift); + + wr->send_flags = 0; + umrwr->target.virt_addr = virt_addr; umrwr->length = len; umrwr->access_flags = access_flags; @@ -734,26 +811,45 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev, umrwr->mkey = key; } -void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context) +static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length, + int access_flags, int *npages, + int *page_shift, int *ncont, int *order) { - struct mlx5_ib_umr_context *context; - struct ib_wc wc; - int err; - - while (1) { - err = ib_poll_cq(cq, 1, &wc); - if (err < 0) { - pr_warn("poll cq error %d\n", err); - return; - } - if (err == 0) - break; + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length, + access_flags, 0); + if (IS_ERR(umem)) { + mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem)); + return (void *)umem; + } - context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id; - context->status = wc.status; - complete(&context->done); + mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order); + if (!*npages) { + mlx5_ib_warn(dev, "avoid zero region\n"); + ib_umem_release(umem); + return ERR_PTR(-EINVAL); } - ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); + + mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n", + *npages, *ncont, *order, *page_shift); + + return umem; +} + +static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc) +{ + struct mlx5_ib_umr_context *context = + container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe); + + context->status = wc->status; + complete(&context->done); +} + +static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context) +{ + context->cqe.done = mlx5_ib_umr_done; + context->status = -1; + init_completion(&context->done); } static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, @@ -764,13 +860,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, struct device *ddev = dev->ib_dev.dma_device; struct umr_common *umrc = &dev->umrc; struct mlx5_ib_umr_context umr_context; - struct mlx5_umr_wr umrwr; + struct mlx5_umr_wr umrwr = {}; struct ib_send_wr *bad; struct mlx5_ib_mr *mr; struct ib_sge sg; int size; __be64 *mr_pas; - __be64 *pas; dma_addr_t dma; int err = 0; int i; @@ -790,33 +885,17 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, if (!mr) return ERR_PTR(-EAGAIN); - /* 
UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes. - * To avoid copying garbage after the pas array, we allocate - * a little more. */ - size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT); - mr_pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL); - if (!mr_pas) { - err = -ENOMEM; + err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas, + &dma); + if (err) goto free_mr; - } - pas = PTR_ALIGN(mr_pas, MLX5_UMR_ALIGN); - mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT); - /* Clear padding after the actual pages. */ - memset(pas + npages, 0, size - npages * sizeof(u64)); - - dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE); - if (dma_mapping_error(ddev, dma)) { - err = -ENOMEM; - goto free_pas; - } + mlx5_ib_init_umr_context(&umr_context); - memset(&umrwr, 0, sizeof(umrwr)); - umrwr.wr.wr_id = (u64)(unsigned long)&umr_context; - prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmr.key, + umrwr.wr.wr_cqe = &umr_context.cqe; + prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key, page_shift, virt_addr, len, access_flags); - mlx5_ib_init_umr_context(&umr_context); down(&umrc->sem); err = ib_post_send(umrc->qp, &umrwr.wr, &bad); if (err) { @@ -830,9 +909,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, } } - mr->mmr.iova = virt_addr; - mr->mmr.size = len; - mr->mmr.pd = to_mpd(pd)->pdn; + mr->mmkey.iova = virt_addr; + mr->mmkey.size = len; + mr->mmkey.pd = to_mpd(pd)->pdn; mr->live = 1; @@ -840,7 +919,6 @@ unmap_dma: up(&umrc->sem); dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE); -free_pas: kfree(mr_pas); free_mr: @@ -929,8 +1007,10 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages, dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE); + mlx5_ib_init_umr_context(&umr_context); + memset(&wr, 0, sizeof(wr)); - wr.wr.wr_id = (u64)(unsigned long)&umr_context; + wr.wr.wr_cqe = &umr_context.cqe; sg.addr = dma; sg.length = ALIGN(npages * sizeof(u64), @@ -944,10 +1024,9 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages, wr.wr.opcode = MLX5_IB_WR_UMR; wr.npages = sg.length / sizeof(u64); wr.page_shift = PAGE_SHIFT; - wr.mkey = mr->mmr.key; + wr.mkey = mr->mmkey.key; wr.target.offset = start_page_index; - mlx5_ib_init_umr_context(&umr_context); down(&umrc->sem); err = ib_post_send(umrc->qp, &wr.wr, &bad); if (err) { @@ -974,10 +1053,14 @@ free_pas: } #endif -static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, - u64 length, struct ib_umem *umem, - int npages, int page_shift, - int access_flags) +/* + * If ibmr is NULL it will be allocated by reg_create. + * Else, the given ibmr will be used. + */ +static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, + u64 virt_addr, u64 length, + struct ib_umem *umem, int npages, + int page_shift, int access_flags) { struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_create_mkey_mbox_in *in; @@ -986,7 +1069,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, int err; bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg)); - mr = kzalloc(sizeof(*mr), GFP_KERNEL); + mr = ibmr ? 
to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); @@ -1013,7 +1096,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift)); - err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL, + err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen, NULL, NULL, NULL); if (err) { mlx5_ib_warn(dev, "create mkey failed\n"); @@ -1024,7 +1107,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, mr->live = 1; kvfree(in); - mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key); + mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); return mr; @@ -1032,11 +1115,23 @@ err_2: kvfree(in); err_1: - kfree(mr); + if (!ibmr) + kfree(mr); return ERR_PTR(err); } +static void set_mr_fileds(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, + int npages, u64 length, int access_flags) +{ + mr->npages = npages; + atomic_add(npages, &dev->mdev->priv.reg_pages); + mr->ibmr.lkey = mr->mmkey.key; + mr->ibmr.rkey = mr->mmkey.key; + mr->ibmr.length = length; + mr->access_flags = access_flags; +} + struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int access_flags, struct ib_udata *udata) @@ -1052,22 +1147,11 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", start, virt_addr, length, access_flags); - umem = ib_umem_get(pd->uobject->context, start, length, access_flags, - 0); - if (IS_ERR(umem)) { - mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem)); - return (void *)umem; - } + umem = mr_umem_get(pd, start, length, access_flags, &npages, + &page_shift, &ncont, &order); - mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order); - if (!npages) { - mlx5_ib_warn(dev, "avoid zero region\n"); - err = -EINVAL; - goto error; - } - - mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n", - npages, ncont, order, page_shift); + if (IS_ERR(umem)) + return (void *)umem; if (use_umr(order)) { mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift, @@ -1083,45 +1167,21 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, } if (!mr) - mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift, - access_flags); + mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, + page_shift, access_flags); if (IS_ERR(mr)) { err = PTR_ERR(mr); goto error; } - mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key); + mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key); mr->umem = umem; - mr->npages = npages; - atomic_add(npages, &dev->mdev->priv.reg_pages); - mr->ibmr.lkey = mr->mmr.key; - mr->ibmr.rkey = mr->mmr.key; + set_mr_fileds(dev, mr, npages, length, access_flags); #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - if (umem->odp_data) { - /* - * This barrier prevents the compiler from moving the - * setting of umem->odp_data->private to point to our - * MR, before reg_umr finished, to ensure that the MR - * initialization have finished before starting to - * handle invalidations. - */ - smp_wmb(); - mr->umem->odp_data->private = mr; - /* - * Make sure we will see the new - * umem->odp_data->private value in the invalidation - * routines, before we can get page faults on the - * MR. Page faults can happen once we put the MR in - * the tree, below this line. 
Without the barrier, - * there can be a fault handling and an invalidation - * before umem->odp_data->private == mr is visible to - * the invalidation handler. - */ - smp_wmb(); - } + update_odp_mr(mr); #endif return &mr->ibmr; @@ -1135,15 +1195,15 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { struct umr_common *umrc = &dev->umrc; struct mlx5_ib_umr_context umr_context; - struct mlx5_umr_wr umrwr; + struct mlx5_umr_wr umrwr = {}; struct ib_send_wr *bad; int err; - memset(&umrwr.wr, 0, sizeof(umrwr)); - umrwr.wr.wr_id = (u64)(unsigned long)&umr_context; - prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmr.key); - mlx5_ib_init_umr_context(&umr_context); + + umrwr.wr.wr_cqe = &umr_context.cqe; + prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key); + down(&umrc->sem); err = ib_post_send(umrc->qp, &umrwr.wr, &bad); if (err) { @@ -1165,6 +1225,167 @@ error: return err; } +static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr, + u64 length, int npages, int page_shift, int order, + int access_flags, int flags) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct device *ddev = dev->ib_dev.dma_device; + struct mlx5_ib_umr_context umr_context; + struct ib_send_wr *bad; + struct mlx5_umr_wr umrwr = {}; + struct ib_sge sg; + struct umr_common *umrc = &dev->umrc; + dma_addr_t dma = 0; + __be64 *mr_pas = NULL; + int size; + int err; + + mlx5_ib_init_umr_context(&umr_context); + + umrwr.wr.wr_cqe = &umr_context.cqe; + umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE; + + if (flags & IB_MR_REREG_TRANS) { + err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size, + &mr_pas, &dma); + if (err) + return err; + + umrwr.target.virt_addr = virt_addr; + umrwr.length = length; + umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION; + } + + prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key, + page_shift); + + if (flags & IB_MR_REREG_PD) { + umrwr.pd = pd; + umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD; + } + + if (flags & IB_MR_REREG_ACCESS) { + umrwr.access_flags = access_flags; + umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS; + } + + /* post send request to UMR QP */ + down(&umrc->sem); + err = ib_post_send(umrc->qp, &umrwr.wr, &bad); + + if (err) { + mlx5_ib_warn(dev, "post send failed, err %d\n", err); + } else { + wait_for_completion(&umr_context.done); + if (umr_context.status != IB_WC_SUCCESS) { + mlx5_ib_warn(dev, "reg umr failed (%u)\n", + umr_context.status); + err = -EFAULT; + } + } + + up(&umrc->sem); + if (flags & IB_MR_REREG_TRANS) { + dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE); + kfree(mr_pas); + } + return err; +} + +int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, + u64 length, u64 virt_addr, int new_access_flags, + struct ib_pd *new_pd, struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev = to_mdev(ib_mr->device); + struct mlx5_ib_mr *mr = to_mmr(ib_mr); + struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd; + int access_flags = flags & IB_MR_REREG_ACCESS ? + new_access_flags : + mr->access_flags; + u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address; + u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length; + int page_shift = 0; + int npages = 0; + int ncont = 0; + int order = 0; + int err; + + mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", + start, virt_addr, length, access_flags); + + if (flags != IB_MR_REREG_PD) { + /* + * Replace umem. This needs to be done whether or not UMR is + * used. 
+ */ + flags |= IB_MR_REREG_TRANS; + ib_umem_release(mr->umem); + mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages, + &page_shift, &ncont, &order); + if (IS_ERR(mr->umem)) { + err = PTR_ERR(mr->umem); + mr->umem = NULL; + return err; + } + } + + if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) { + /* + * UMR can't be used - MKey needs to be replaced. + */ + if (mr->umred) { + err = unreg_umr(dev, mr); + if (err) + mlx5_ib_warn(dev, "Failed to unregister MR\n"); + } else { + err = destroy_mkey(dev, mr); + if (err) + mlx5_ib_warn(dev, "Failed to destroy MKey\n"); + } + if (err) + return err; + + mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont, + page_shift, access_flags); + + if (IS_ERR(mr)) + return PTR_ERR(mr); + + mr->umred = 0; + } else { + /* + * Send a UMR WQE + */ + err = rereg_umr(pd, mr, addr, len, npages, page_shift, + order, access_flags, flags); + if (err) { + mlx5_ib_warn(dev, "Failed to rereg UMR\n"); + return err; + } + } + + if (flags & IB_MR_REREG_PD) { + ib_mr->pd = pd; + mr->mmkey.pd = to_mpd(pd)->pdn; + } + + if (flags & IB_MR_REREG_ACCESS) + mr->access_flags = access_flags; + + if (flags & IB_MR_REREG_TRANS) { + atomic_sub(mr->npages, &dev->mdev->priv.reg_pages); + set_mr_fileds(dev, mr, npages, len, access_flags); + mr->mmkey.iova = addr; + mr->mmkey.size = len; + } +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + update_odp_mr(mr); +#endif + + return 0; +} + static int mlx5_alloc_priv_descs(struct ib_device *device, struct mlx5_ib_mr *mr, @@ -1236,7 +1457,7 @@ static int clean_mr(struct mlx5_ib_mr *mr) err = destroy_mkey(dev, mr); if (err) { mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", - mr->mmr.key, err); + mr->mmkey.key, err); return err; } } else { @@ -1300,8 +1521,8 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_create_mkey_mbox_in *in; struct mlx5_ib_mr *mr; - int access_mode, err; - int ndescs = roundup(max_num_sg, 4); + int ndescs = ALIGN(max_num_sg, 4); + int err; mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) @@ -1319,7 +1540,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn); if (mr_type == IB_MR_TYPE_MEM_REG) { - access_mode = MLX5_ACCESS_MODE_MTT; + mr->access_mode = MLX5_ACCESS_MODE_MTT; in->seg.log2_page_size = PAGE_SHIFT; err = mlx5_alloc_priv_descs(pd->device, mr, @@ -1329,6 +1550,15 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, mr->desc_size = sizeof(u64); mr->max_descs = ndescs; + } else if (mr_type == IB_MR_TYPE_SG_GAPS) { + mr->access_mode = MLX5_ACCESS_MODE_KLM; + + err = mlx5_alloc_priv_descs(pd->device, mr, + ndescs, sizeof(struct mlx5_klm)); + if (err) + goto err_free_in; + mr->desc_size = sizeof(struct mlx5_klm); + mr->max_descs = ndescs; } else if (mr_type == IB_MR_TYPE_SIGNATURE) { u32 psv_index[2]; @@ -1347,7 +1577,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, if (err) goto err_free_sig; - access_mode = MLX5_ACCESS_MODE_KLM; + mr->access_mode = MLX5_ACCESS_MODE_KLM; mr->sig->psv_memory.psv_idx = psv_index[0]; mr->sig->psv_wire.psv_idx = psv_index[1]; @@ -1361,14 +1591,14 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, goto err_free_in; } - in->seg.flags = MLX5_PERM_UMR_EN | access_mode; - err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in), + in->seg.flags = MLX5_PERM_UMR_EN | mr->access_mode; + err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, sizeof(*in), NULL, NULL, NULL); if (err) goto err_destroy_psv; - mr->ibmr.lkey = mr->mmr.key; - 
mr->ibmr.rkey = mr->mmr.key; + mr->ibmr.lkey = mr->mmkey.key; + mr->ibmr.rkey = mr->mmkey.key; mr->umem = NULL; kfree(in); @@ -1395,6 +1625,88 @@ err_free: return ERR_PTR(err); } +struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type, + struct ib_udata *udata) +{ + struct mlx5_ib_dev *dev = to_mdev(pd->device); + struct mlx5_create_mkey_mbox_in *in = NULL; + struct mlx5_ib_mw *mw = NULL; + int ndescs; + int err; + struct mlx5_ib_alloc_mw req = {}; + struct { + __u32 comp_mask; + __u32 response_length; + } resp = {}; + + err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req))); + if (err) + return ERR_PTR(err); + + if (req.comp_mask || req.reserved1 || req.reserved2) + return ERR_PTR(-EOPNOTSUPP); + + if (udata->inlen > sizeof(req) && + !ib_is_udata_cleared(udata, sizeof(req), + udata->inlen - sizeof(req))) + return ERR_PTR(-EOPNOTSUPP); + + ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4); + + mw = kzalloc(sizeof(*mw), GFP_KERNEL); + in = kzalloc(sizeof(*in), GFP_KERNEL); + if (!mw || !in) { + err = -ENOMEM; + goto free; + } + + in->seg.status = MLX5_MKEY_STATUS_FREE; + in->seg.xlt_oct_size = cpu_to_be32(ndescs); + in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn); + in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_KLM | + MLX5_PERM_LOCAL_READ; + if (type == IB_MW_TYPE_2) + in->seg.flags_pd |= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); + in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + + err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, sizeof(*in), + NULL, NULL, NULL); + if (err) + goto free; + + mw->ibmw.rkey = mw->mmkey.key; + + resp.response_length = min(offsetof(typeof(resp), response_length) + + sizeof(resp.response_length), udata->outlen); + if (resp.response_length) { + err = ib_copy_to_udata(udata, &resp, resp.response_length); + if (err) { + mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey); + goto free; + } + } + + kfree(in); + return &mw->ibmw; + +free: + kfree(mw); + kfree(in); + return ERR_PTR(err); +} + +int mlx5_ib_dealloc_mw(struct ib_mw *mw) +{ + struct mlx5_ib_mw *mmw = to_mmw(mw); + int err; + + err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev, + &mmw->mmkey); + if (!err) + kfree(mmw); + return err; +} + int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, struct ib_mr_status *mr_status) { @@ -1436,6 +1748,32 @@ done: return ret; } +static int +mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, + struct scatterlist *sgl, + unsigned short sg_nents) +{ + struct scatterlist *sg = sgl; + struct mlx5_klm *klms = mr->descs; + u32 lkey = mr->ibmr.pd->local_dma_lkey; + int i; + + mr->ibmr.iova = sg_dma_address(sg); + mr->ibmr.length = 0; + mr->ndescs = sg_nents; + + for_each_sg(sgl, sg, sg_nents, i) { + if (unlikely(i > mr->max_descs)) + break; + klms[i].va = cpu_to_be64(sg_dma_address(sg)); + klms[i].bcount = cpu_to_be32(sg_dma_len(sg)); + klms[i].key = cpu_to_be32(lkey); + mr->ibmr.length += sg_dma_len(sg); + } + + return i; +} + static int mlx5_set_page(struct ib_mr *ibmr, u64 addr) { struct mlx5_ib_mr *mr = to_mmr(ibmr); @@ -1463,7 +1801,10 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, mr->desc_size * mr->max_descs, DMA_TO_DEVICE); - n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page); + if (mr->access_mode == MLX5_ACCESS_MODE_KLM) + n = mlx5_ib_sg_to_klms(mr, sg, sg_nents); + else + n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page); ib_dma_sync_single_for_device(ibmr->device, mr->desc_map, mr->desc_size * mr->max_descs, diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 
b8d76361a..34e79e709 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -142,13 +142,13 @@ static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev, u32 key) { u32 base_key = mlx5_base_mkey(key); - struct mlx5_core_mr *mmr = __mlx5_mr_lookup(dev->mdev, base_key); - struct mlx5_ib_mr *mr = container_of(mmr, struct mlx5_ib_mr, mmr); + struct mlx5_core_mkey *mmkey = __mlx5_mr_lookup(dev->mdev, base_key); + struct mlx5_ib_mr *mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); - if (!mmr || mmr->key != key || !mr->live) + if (!mmkey || mmkey->key != key || !mr->live) return NULL; - return container_of(mmr, struct mlx5_ib_mr, mmr); + return container_of(mmkey, struct mlx5_ib_mr, mmkey); } static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp, @@ -232,7 +232,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp, io_virt += pfault->mpfault.bytes_committed; bcnt -= pfault->mpfault.bytes_committed; - start_idx = (io_virt - (mr->mmr.iova & PAGE_MASK)) >> PAGE_SHIFT; + start_idx = (io_virt - (mr->mmkey.iova & PAGE_MASK)) >> PAGE_SHIFT; if (mr->umem->writable) access_mask |= ODP_WRITE_ALLOWED_BIT; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 34cb8e87c..8dee8bc1e 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -58,6 +58,7 @@ enum { static const u32 mlx5_ib_opcode[] = { [IB_WR_SEND] = MLX5_OPCODE_SEND, + [IB_WR_LSO] = MLX5_OPCODE_LSO, [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM, [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE, [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM, @@ -72,6 +73,9 @@ static const u32 mlx5_ib_opcode[] = { [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR, }; +struct mlx5_wqe_eth_pad { + u8 rsvd0[16]; +}; static int is_qp0(enum ib_qp_type qp_type) { @@ -260,11 +264,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, return 0; } -static int sq_overhead(enum ib_qp_type qp_type) +static int sq_overhead(struct ib_qp_init_attr *attr) { int size = 0; - switch (qp_type) { + switch (attr->qp_type) { case IB_QPT_XRC_INI: size += sizeof(struct mlx5_wqe_xrc_seg); /* fall through */ @@ -287,8 +291,12 @@ static int sq_overhead(enum ib_qp_type qp_type) break; case IB_QPT_UD: + if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) + size += sizeof(struct mlx5_wqe_eth_pad) + + sizeof(struct mlx5_wqe_eth_seg); + /* fall through */ case IB_QPT_SMI: - case IB_QPT_GSI: + case MLX5_IB_QPT_HW_GSI: size += sizeof(struct mlx5_wqe_ctrl_seg) + sizeof(struct mlx5_wqe_datagram_seg); break; @@ -311,7 +319,7 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr) int inl_size = 0; int size; - size = sq_overhead(attr->qp_type); + size = sq_overhead(attr); if (size < 0) return size; @@ -348,8 +356,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, return -EINVAL; } - qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) - - sizeof(struct mlx5_wqe_inline_seg); + qp->max_inline_data = wqe_size - sq_overhead(attr) - + sizeof(struct mlx5_wqe_inline_seg); attr->cap.max_inline_data = qp->max_inline_data; if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN) @@ -590,7 +598,7 @@ static int to_mlx5_st(enum ib_qp_type type) case IB_QPT_XRC_INI: case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC; case IB_QPT_SMI: return MLX5_QP_ST_QP0; - case IB_QPT_GSI: return MLX5_QP_ST_QP1; + case MLX5_IB_QPT_HW_GSI: return MLX5_QP_ST_QP1; case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6; case IB_QPT_RAW_PACKET: case IB_QPT_RAW_ETHERTYPE: 
return MLX5_QP_ST_RAW_ETHERTYPE; @@ -783,7 +791,10 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, int err; uuari = &dev->mdev->priv.uuari; - if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)) + if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | + IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK | + IB_QP_CREATE_IPOIB_UD_LSO | + mlx5_ib_create_qp_sqpn_qp1())) return -EINVAL; if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) @@ -828,6 +839,11 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, (*in)->ctx.params1 |= cpu_to_be32(1 << 11); (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4); + if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) { + (*in)->ctx.deth_sqpn = cpu_to_be32(1); + qp->flags |= MLX5_IB_QP_SQPN_QP1; + } + mlx5_fill_page_array(&qp->buf, (*in)->pas); err = mlx5_db_alloc(dev->mdev, &qp->db); @@ -1228,6 +1244,14 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV) qp->flags |= MLX5_IB_QP_MANAGED_RECV; } + + if (init_attr->qp_type == IB_QPT_UD && + (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) + if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) { + mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n"); + return -EOPNOTSUPP; + } + if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE; @@ -1271,6 +1295,11 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, ucmd.sq_wqe_count, max_wqes); return -EINVAL; } + if (init_attr->create_flags & + mlx5_ib_create_qp_sqpn_qp1()) { + mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n"); + return -EINVAL; + } err = create_user_qp(dev, pd, qp, udata, init_attr, &in, &resp, &inlen, base); if (err) @@ -1385,6 +1414,13 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, /* 0xffffff means we ask to work with cqe version 0 */ MLX5_SET(qpc, qpc, user_index, uidx); } + /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicates ipoib qp */ + if (init_attr->qp_type == IB_QPT_UD && + (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) { + qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); + MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1); + qp->flags |= MLX5_IB_QP_LSO; + } if (init_attr->qp_type == IB_QPT_RAW_PACKET) { qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr; @@ -1494,7 +1530,7 @@ static void get_cqs(struct mlx5_ib_qp *qp, break; case IB_QPT_SMI: - case IB_QPT_GSI: + case MLX5_IB_QPT_HW_GSI: case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_UD: @@ -1657,7 +1693,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, case IB_QPT_UC: case IB_QPT_UD: case IB_QPT_SMI: - case IB_QPT_GSI: + case MLX5_IB_QPT_HW_GSI: case MLX5_IB_QPT_REG_UMR: qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) @@ -1686,6 +1722,9 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, break; + case IB_QPT_GSI: + return mlx5_ib_gsi_create_qp(pd, init_attr); + case IB_QPT_RAW_IPV6: case IB_QPT_RAW_ETHERTYPE: case IB_QPT_MAX: @@ -1704,6 +1743,9 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp) struct mlx5_ib_dev *dev = to_mdev(qp->device); struct mlx5_ib_qp *mqp = to_mqp(qp); + if (unlikely(qp->qp_type == IB_QPT_GSI)) + return mlx5_ib_gsi_destroy_qp(qp); + destroy_qp_common(dev, mqp); kfree(mqp); @@ -2161,8 +2203,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, context = &in->ctx; err = to_mlx5_st(ibqp->qp_type); - if (err < 0) + if (err < 0) { + mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type); goto out; + } context->flags = 
cpu_to_be32(err << 16); @@ -2182,7 +2226,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, } } - if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) { + if (is_sqp(ibqp->qp_type)) { context->mtu_msgmax = (IB_MTU_256 << 5) | 8; } else if (ibqp->qp_type == IB_QPT_UD || ibqp->qp_type == MLX5_IB_QPT_REG_UMR) { @@ -2284,6 +2328,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) context->sq_crq_size |= cpu_to_be16(1 << 4); + if (qp->flags & MLX5_IB_QP_SQPN_QP1) + context->deth_sqpn = cpu_to_be32(1); mlx5_cur = to_mlx5_state(cur_state); mlx5_new = to_mlx5_state(new_state); @@ -2363,11 +2409,18 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, { struct mlx5_ib_dev *dev = to_mdev(ibqp->device); struct mlx5_ib_qp *qp = to_mqp(ibqp); + enum ib_qp_type qp_type; enum ib_qp_state cur_state, new_state; int err = -EINVAL; int port; enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED; + if (unlikely(ibqp->qp_type == IB_QPT_GSI)) + return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask); + + qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ? + IB_QPT_GSI : ibqp->qp_type; + mutex_lock(&qp->mutex); cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; @@ -2378,32 +2431,46 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port); } - if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR && - !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask, - ll)) + if (qp_type != MLX5_IB_QPT_REG_UMR && + !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) { + mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n", + cur_state, new_state, ibqp->qp_type, attr_mask); goto out; + } if ((attr_mask & IB_QP_PORT) && (attr->port_num == 0 || - attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports))) + attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports))) { + mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n", + attr->port_num, dev->num_ports); goto out; + } if (attr_mask & IB_QP_PKEY_INDEX) { port = attr_mask & IB_QP_PORT ? 
attr->port_num : qp->port; if (attr->pkey_index >= - dev->mdev->port_caps[port - 1].pkey_table_len) + dev->mdev->port_caps[port - 1].pkey_table_len) { + mlx5_ib_dbg(dev, "invalid pkey index %d\n", + attr->pkey_index); goto out; + } } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic > - (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) + (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) { + mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n", + attr->max_rd_atomic); goto out; + } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic > - (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) + (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) { + mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n", + attr->max_dest_rd_atomic); goto out; + } if (cur_state == new_state && cur_state == IB_QPS_RESET) { err = 0; @@ -2442,6 +2509,59 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg, rseg->reserved = 0; } +static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg, + struct ib_send_wr *wr, void *qend, + struct mlx5_ib_qp *qp, int *size) +{ + void *seg = eseg; + + memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg)); + + if (wr->send_flags & IB_SEND_IP_CSUM) + eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | + MLX5_ETH_WQE_L4_CSUM; + + seg += sizeof(struct mlx5_wqe_eth_seg); + *size += sizeof(struct mlx5_wqe_eth_seg) / 16; + + if (wr->opcode == IB_WR_LSO) { + struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr); + int size_of_inl_hdr_start = sizeof(eseg->inline_hdr_start); + u64 left, leftlen, copysz; + void *pdata = ud_wr->header; + + left = ud_wr->hlen; + eseg->mss = cpu_to_be16(ud_wr->mss); + eseg->inline_hdr_sz = cpu_to_be16(left); + + /* + * check if there is space till the end of queue, if yes, + * copy all in one shot, otherwise copy till the end of queue, + * rollback and than the copy the left + */ + leftlen = qend - (void *)eseg->inline_hdr_start; + copysz = min_t(u64, leftlen, left); + + memcpy(seg - size_of_inl_hdr_start, pdata, copysz); + + if (likely(copysz > size_of_inl_hdr_start)) { + seg += ALIGN(copysz - size_of_inl_hdr_start, 16); + *size += ALIGN(copysz - size_of_inl_hdr_start, 16) / 16; + } + + if (unlikely(copysz < left)) { /* the last wqe in the queue */ + seg = mlx5_get_send_wqe(qp, 0); + left -= copysz; + pdata += copysz; + memcpy(seg, pdata, left); + seg += ALIGN(left, 16); + *size += ALIGN(left, 16) / 16; + } + } + + return seg; +} + static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, struct ib_send_wr *wr) { @@ -2509,6 +2629,11 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr, int ndescs = mr->ndescs; memset(umr, 0, sizeof(*umr)); + + if (mr->access_mode == MLX5_ACCESS_MODE_KLM) + /* KLMs take twice the size of MTTs */ + ndescs *= 2; + umr->flags = MLX5_UMR_CHECK_NOT_FREE; umr->klm_octowords = get_klm_octo(ndescs); umr->mkey_mask = frwr_mkey_mask(); @@ -2558,6 +2683,44 @@ static __be64 get_umr_update_mtt_mask(void) return cpu_to_be64(result); } +static __be64 get_umr_update_translation_mask(void) +{ + u64 result; + + result = MLX5_MKEY_MASK_LEN | + MLX5_MKEY_MASK_PAGE_SIZE | + MLX5_MKEY_MASK_START_ADDR | + MLX5_MKEY_MASK_KEY | + MLX5_MKEY_MASK_FREE; + + return cpu_to_be64(result); +} + +static __be64 get_umr_update_access_mask(void) +{ + u64 result; + + result = MLX5_MKEY_MASK_LW | + MLX5_MKEY_MASK_RR | + MLX5_MKEY_MASK_RW | + MLX5_MKEY_MASK_A | + MLX5_MKEY_MASK_KEY | + MLX5_MKEY_MASK_FREE; + + return cpu_to_be64(result); +} + +static __be64 get_umr_update_pd_mask(void) +{ + u64 result; + + 
result = MLX5_MKEY_MASK_PD | + MLX5_MKEY_MASK_KEY | + MLX5_MKEY_MASK_FREE; + + return cpu_to_be64(result); +} + static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, struct ib_send_wr *wr) { @@ -2576,9 +2739,15 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr, umr->mkey_mask = get_umr_update_mtt_mask(); umr->bsf_octowords = get_klm_octo(umrwr->target.offset); umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN; - } else { - umr->mkey_mask = get_umr_reg_mr_mask(); } + if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION) + umr->mkey_mask |= get_umr_update_translation_mask(); + if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_ACCESS) + umr->mkey_mask |= get_umr_update_access_mask(); + if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD) + umr->mkey_mask |= get_umr_update_pd_mask(); + if (!umr->mkey_mask) + umr->mkey_mask = get_umr_reg_mr_mask(); } else { umr->mkey_mask = get_umr_unreg_mr_mask(); } @@ -2603,13 +2772,19 @@ static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg, int ndescs = ALIGN(mr->ndescs, 8) >> 1; memset(seg, 0, sizeof(*seg)); - seg->flags = get_umr_flags(access) | MLX5_ACCESS_MODE_MTT; + + if (mr->access_mode == MLX5_ACCESS_MODE_MTT) + seg->log2_page_size = ilog2(mr->ibmr.page_size); + else if (mr->access_mode == MLX5_ACCESS_MODE_KLM) + /* KLMs take twice the size of MTTs */ + ndescs *= 2; + + seg->flags = get_umr_flags(access) | mr->access_mode; seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00); seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL); seg->start_addr = cpu_to_be64(mr->ibmr.iova); seg->len = cpu_to_be64(mr->ibmr.length); seg->xlt_oct_size = cpu_to_be32(ndescs); - seg->log2_page_size = ilog2(mr->ibmr.page_size); } static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg) @@ -2630,7 +2805,8 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *w seg->flags = convert_access(umrwr->access_flags); if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) { - seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn); + if (umrwr->pd) + seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn); seg->start_addr = cpu_to_be64(umrwr->target.virt_addr); } seg->len = cpu_to_be64(umrwr->length); @@ -3196,13 +3372,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, { struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */ struct mlx5_ib_dev *dev = to_mdev(ibqp->device); - struct mlx5_ib_qp *qp = to_mqp(ibqp); + struct mlx5_ib_qp *qp; struct mlx5_ib_mr *mr; struct mlx5_wqe_data_seg *dpseg; struct mlx5_wqe_xrc_seg *xrc; - struct mlx5_bf *bf = qp->bf; + struct mlx5_bf *bf; int uninitialized_var(size); - void *qend = qp->sq.qend; + void *qend; unsigned long flags; unsigned idx; int err = 0; @@ -3214,6 +3390,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, u8 next_fence = 0; u8 fence; + if (unlikely(ibqp->qp_type == IB_QPT_GSI)) + return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr); + + qp = to_mqp(ibqp); + bf = qp->bf; + qend = qp->sq.qend; + spin_lock_irqsave(&qp->sq.lock, flags); for (nreq = 0; wr; nreq++, wr = wr->next) { @@ -3373,16 +3556,37 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, } break; - case IB_QPT_UD: case IB_QPT_SMI: - case IB_QPT_GSI: + case MLX5_IB_QPT_HW_GSI: set_datagram_seg(seg, wr); seg += sizeof(struct mlx5_wqe_datagram_seg); size += sizeof(struct mlx5_wqe_datagram_seg) / 16; if (unlikely((seg == qend))) seg = mlx5_get_send_wqe(qp, 0); break; + case IB_QPT_UD: + set_datagram_seg(seg, wr); + seg += sizeof(struct mlx5_wqe_datagram_seg); + size += 
sizeof(struct mlx5_wqe_datagram_seg) / 16; + + if (unlikely((seg == qend))) + seg = mlx5_get_send_wqe(qp, 0); + + /* handle qp that supports ud offload */ + if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) { + struct mlx5_wqe_eth_pad *pad; + + pad = seg; + memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad)); + seg += sizeof(struct mlx5_wqe_eth_pad); + size += sizeof(struct mlx5_wqe_eth_pad) / 16; + seg = set_eth_seg(seg, wr, qend, qp, &size); + + if (unlikely((seg == qend))) + seg = mlx5_get_send_wqe(qp, 0); + } + break; case MLX5_IB_QPT_REG_UMR: if (wr->opcode != MLX5_IB_WR_UMR) { err = -EINVAL; @@ -3502,6 +3706,9 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, int ind; int i; + if (unlikely(ibqp->qp_type == IB_QPT_GSI)) + return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr); + spin_lock_irqsave(&qp->rq.lock, flags); ind = qp->rq.head & (qp->rq.wqe_cnt - 1); @@ -3822,6 +4029,10 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int err = 0; u8 raw_packet_qp_state; + if (unlikely(ibqp->qp_type == IB_QPT_GSI)) + return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask, + qp_init_attr); + #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING /* * Wait for any outstanding page faults, in case the user frees memory @@ -3874,6 +4085,8 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND; if (qp->flags & MLX5_IB_QP_MANAGED_RECV) qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV; + if (qp->flags & MLX5_IB_QP_SQPN_QP1) + qp_init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1(); qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h index b94a55404..61bc308bb 100644 --- a/drivers/infiniband/hw/mlx5/user.h +++ b/drivers/infiniband/hw/mlx5/user.h @@ -152,6 +152,13 @@ struct mlx5_ib_create_qp_resp { __u32 uuar_index; }; +struct mlx5_ib_alloc_mw { + __u32 comp_mask; + __u8 num_klms; + __u8 reserved1; + __u16 reserved2; +}; + static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext, struct mlx5_ib_create_qp *ucmd, int inlen, diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c index 7d2e42dd6..6c00d04b8 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.c +++ b/drivers/infiniband/hw/mthca/mthca_memfree.c @@ -472,8 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, goto out; } - ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0, - pages, NULL); + ret = get_user_pages(uaddr & PAGE_MASK, 1, 1, 0, pages, NULL); if (ret < 0) goto out; diff --git a/drivers/infiniband/hw/nes/Kconfig b/drivers/infiniband/hw/nes/Kconfig index 846dc97cf..7964eba8e 100644 --- a/drivers/infiniband/hw/nes/Kconfig +++ b/drivers/infiniband/hw/nes/Kconfig @@ -2,7 +2,6 @@ config INFINIBAND_NES tristate "NetEffect RNIC Driver" depends on PCI && INET && INFINIBAND select LIBCRC32C - select INET_LRO ---help--- This is the RDMA Network Interface Card (RNIC) driver for NetEffect Ethernet Cluster Server Adapters. 
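The mlx5_ib_alloc_mw ABI struct added to user.h above is parsed by mlx5_ib_alloc_mw() in the mr.c hunk earlier in this diff using the usual udata compatibility checks: copy at most sizeof(req) bytes so an older userspace still works, reject any set comp_mask/reserved bits, and only accept a larger request from a newer userspace if the trailing bytes are zero. A hedged sketch of that validation, factored into a stand-alone helper with a made-up name (parse_alloc_mw_req) and assuming the request layout shown above:

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <rdma/ib_verbs.h>

	/* Sketch only: the udata handshake for a request such as
	 * struct mlx5_ib_alloc_mw.  Not a function in this patch.
	 */
	static int parse_alloc_mw_req(struct ib_udata *udata,
				      struct mlx5_ib_alloc_mw *req)
	{
		int err;

		/* Copy no more than we understand; short copies are fine. */
		err = ib_copy_from_udata(req, udata,
					 min(udata->inlen, sizeof(*req)));
		if (err)
			return err;

		/* Unknown feature bits or non-zero reserved fields: refuse. */
		if (req->comp_mask || req->reserved1 || req->reserved2)
			return -EOPNOTSUPP;

		/* A newer userspace may pass a bigger struct, but only if the
		 * part we do not know about is all zeroes.
		 */
		if (udata->inlen > sizeof(*req) &&
		    !ib_is_udata_cleared(udata, sizeof(*req),
					 udata->inlen - sizeof(*req)))
			return -EOPNOTSUPP;

		return 0;
	}

The comp_mask and reserved fields exist precisely so the structure can grow later without breaking this ABI contract.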
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 9f9d5c563..35cbb17be 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -111,17 +111,6 @@ static struct pci_device_id nes_pci_table[] = { MODULE_DEVICE_TABLE(pci, nes_pci_table); -/* registered nes netlink callbacks */ -static struct ibnl_client_cbs nes_nl_cb_table[] = { - [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, - [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, - [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, - [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb}, - [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb}, - [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb}, - [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb} -}; - static int nes_inetaddr_event(struct notifier_block *, unsigned long, void *); static int nes_net_event(struct notifier_block *, unsigned long, void *); static int nes_notifiers_registered; @@ -682,17 +671,6 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) } nes_notifiers_registered++; - if (ibnl_add_client(RDMA_NL_NES, RDMA_NL_IWPM_NUM_OPS, nes_nl_cb_table)) - printk(KERN_ERR PFX "%s[%u]: Failed to add netlink callback\n", - __func__, __LINE__); - - ret = iwpm_init(RDMA_NL_NES); - if (ret) { - printk(KERN_ERR PFX "%s: port mapper initialization failed\n", - pci_name(pcidev)); - goto bail7; - } - INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status); /* Initialize network devices */ @@ -731,7 +709,6 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) nes_debug(NES_DBG_INIT, "netdev_count=%d, nesadapter->netdev_count=%d\n", nesdev->netdev_count, nesdev->nesadapter->netdev_count); - ibnl_remove_client(RDMA_NL_NES); nes_notifiers_registered--; if (nes_notifiers_registered == 0) { @@ -795,8 +772,6 @@ static void nes_remove(struct pci_dev *pcidev) nesdev->nesadapter->netdev_count--; } } - ibnl_remove_client(RDMA_NL_NES); - iwpm_exit(RDMA_NL_NES); nes_notifiers_registered--; if (nes_notifiers_registered == 0) { diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index cb9f0f273..7f0aa23ae 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -482,11 +482,11 @@ static void form_cm_frame(struct sk_buff *skb, iph->ttl = 0x40; iph->protocol = 0x06; /* IPPROTO_TCP */ - iph->saddr = htonl(cm_node->mapped_loc_addr); - iph->daddr = htonl(cm_node->mapped_rem_addr); + iph->saddr = htonl(cm_node->loc_addr); + iph->daddr = htonl(cm_node->rem_addr); - tcph->source = htons(cm_node->mapped_loc_port); - tcph->dest = htons(cm_node->mapped_rem_port); + tcph->source = htons(cm_node->loc_port); + tcph->dest = htons(cm_node->rem_port); tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num); if (flags & SET_ACK) { @@ -525,125 +525,6 @@ static void form_cm_frame(struct sk_buff *skb, cm_packets_created++; } -/* - * nes_create_sockaddr - Record ip addr and tcp port in a sockaddr struct - */ -static void nes_create_sockaddr(__be32 ip_addr, __be16 port, - struct sockaddr_storage *addr) -{ - struct sockaddr_in *nes_sockaddr = (struct sockaddr_in *)addr; - nes_sockaddr->sin_family = AF_INET; - memcpy(&nes_sockaddr->sin_addr.s_addr, &ip_addr, sizeof(__be32)); - nes_sockaddr->sin_port = port; -} - -/* - * nes_create_mapinfo - Create a mapinfo object in the port mapper data base - */ -static int nes_create_mapinfo(struct nes_cm_info *cm_info) -{ - struct sockaddr_storage 
local_sockaddr; - struct sockaddr_storage mapped_sockaddr; - - nes_create_sockaddr(htonl(cm_info->loc_addr), htons(cm_info->loc_port), - &local_sockaddr); - nes_create_sockaddr(htonl(cm_info->mapped_loc_addr), - htons(cm_info->mapped_loc_port), &mapped_sockaddr); - - return iwpm_create_mapinfo(&local_sockaddr, - &mapped_sockaddr, RDMA_NL_NES); -} - -/* - * nes_remove_mapinfo - Remove a mapinfo object from the port mapper data base - * and send a remove mapping op message to - * the userspace port mapper - */ -static int nes_remove_mapinfo(u32 loc_addr, u16 loc_port, - u32 mapped_loc_addr, u16 mapped_loc_port) -{ - struct sockaddr_storage local_sockaddr; - struct sockaddr_storage mapped_sockaddr; - - nes_create_sockaddr(htonl(loc_addr), htons(loc_port), &local_sockaddr); - nes_create_sockaddr(htonl(mapped_loc_addr), htons(mapped_loc_port), - &mapped_sockaddr); - - iwpm_remove_mapinfo(&local_sockaddr, &mapped_sockaddr); - return iwpm_remove_mapping(&local_sockaddr, RDMA_NL_NES); -} - -/* - * nes_form_pm_msg - Form a port mapper message with mapping info - */ -static void nes_form_pm_msg(struct nes_cm_info *cm_info, - struct iwpm_sa_data *pm_msg) -{ - nes_create_sockaddr(htonl(cm_info->loc_addr), htons(cm_info->loc_port), - &pm_msg->loc_addr); - nes_create_sockaddr(htonl(cm_info->rem_addr), htons(cm_info->rem_port), - &pm_msg->rem_addr); -} - -/* - * nes_form_reg_msg - Form a port mapper message with dev info - */ -static void nes_form_reg_msg(struct nes_vnic *nesvnic, - struct iwpm_dev_data *pm_msg) -{ - memcpy(pm_msg->dev_name, nesvnic->nesibdev->ibdev.name, - IWPM_DEVNAME_SIZE); - memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE); -} - -static void record_sockaddr_info(struct sockaddr_storage *addr_info, - nes_addr_t *ip_addr, u16 *port_num) -{ - struct sockaddr_in *in_addr = (struct sockaddr_in *)addr_info; - - if (in_addr->sin_family == AF_INET) { - *ip_addr = ntohl(in_addr->sin_addr.s_addr); - *port_num = ntohs(in_addr->sin_port); - } -} - -/* - * nes_record_pm_msg - Save the received mapping info - */ -static void nes_record_pm_msg(struct nes_cm_info *cm_info, - struct iwpm_sa_data *pm_msg) -{ - record_sockaddr_info(&pm_msg->mapped_loc_addr, - &cm_info->mapped_loc_addr, &cm_info->mapped_loc_port); - - record_sockaddr_info(&pm_msg->mapped_rem_addr, - &cm_info->mapped_rem_addr, &cm_info->mapped_rem_port); -} - -/* - * nes_get_reminfo - Get the address info of the remote connecting peer - */ -static int nes_get_remote_addr(struct nes_cm_node *cm_node) -{ - struct sockaddr_storage mapped_loc_addr, mapped_rem_addr; - struct sockaddr_storage remote_addr; - int ret; - - nes_create_sockaddr(htonl(cm_node->mapped_loc_addr), - htons(cm_node->mapped_loc_port), &mapped_loc_addr); - nes_create_sockaddr(htonl(cm_node->mapped_rem_addr), - htons(cm_node->mapped_rem_port), &mapped_rem_addr); - - ret = iwpm_get_remote_info(&mapped_loc_addr, &mapped_rem_addr, - &remote_addr, RDMA_NL_NES); - if (ret) - nes_debug(NES_DBG_CM, "Unable to find remote peer address info\n"); - else - record_sockaddr_info(&remote_addr, &cm_node->rem_addr, - &cm_node->rem_port); - return ret; -} - /** * print_core - dump a cm core */ @@ -1266,11 +1147,10 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, loc_addr, loc_port, cm_node->rem_addr, cm_node->rem_port, rem_addr, rem_port); - if ((cm_node->mapped_loc_addr == loc_addr) && - (cm_node->mapped_loc_port == loc_port) && - (cm_node->mapped_rem_addr == rem_addr) && - (cm_node->mapped_rem_port == rem_port)) { - + if ((cm_node->loc_addr == loc_addr) 
&& + (cm_node->loc_port == loc_port) && + (cm_node->rem_addr == rem_addr) && + (cm_node->rem_port == rem_port)) { add_ref_cm_node(cm_node); spin_unlock_irqrestore(&cm_core->ht_lock, flags); return cm_node; @@ -1287,8 +1167,8 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core, * find_listener - find a cm node listening on this addr-port pair */ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core, - nes_addr_t dst_addr, u16 dst_port, - enum nes_cm_listener_state listener_state, int local) + nes_addr_t dst_addr, u16 dst_port, + enum nes_cm_listener_state listener_state) { unsigned long flags; struct nes_cm_listener *listen_node; @@ -1298,13 +1178,9 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core, /* walk list and find cm_node associated with this session ID */ spin_lock_irqsave(&cm_core->listen_list_lock, flags); list_for_each_entry(listen_node, &cm_core->listen_list.list, list) { - if (local) { - listen_addr = listen_node->loc_addr; - listen_port = listen_node->loc_port; - } else { - listen_addr = listen_node->mapped_loc_addr; - listen_port = listen_node->mapped_loc_port; - } + listen_addr = listen_node->loc_addr; + listen_port = listen_node->loc_port; + /* compare node pair, return node handle if a match */ if (((listen_addr == dst_addr) || listen_addr == 0x00000000) && @@ -1443,17 +1319,13 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, if (listener->nesvnic) { nes_manage_apbvt(listener->nesvnic, - listener->mapped_loc_port, + listener->loc_port, PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL); - nes_remove_mapinfo(listener->loc_addr, - listener->loc_port, - listener->mapped_loc_addr, - listener->mapped_loc_port); nes_debug(NES_DBG_NLMSG, - "Delete APBVT mapped_loc_port = %04X\n", - listener->mapped_loc_port); + "Delete APBVT loc_port = %04X\n", + listener->loc_port); } nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener); @@ -1602,11 +1474,6 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, cm_node->rem_addr = cm_info->rem_addr; cm_node->rem_port = cm_info->rem_port; - cm_node->mapped_loc_addr = cm_info->mapped_loc_addr; - cm_node->mapped_rem_addr = cm_info->mapped_rem_addr; - cm_node->mapped_loc_port = cm_info->mapped_loc_port; - cm_node->mapped_rem_port = cm_info->mapped_rem_port; - cm_node->mpa_frame_rev = mpa_version; cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; cm_node->mpav2_ird_ord = 0; @@ -1655,10 +1522,10 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core, cm_node->loopbackpartner = NULL; /* get the mac addr for the remote node */ - oldarpindex = nes_arp_table(nesdev, cm_node->mapped_rem_addr, - NULL, NES_ARP_RESOLVE); - arpindex = nes_addr_resolve_neigh(nesvnic, - cm_node->mapped_rem_addr, oldarpindex); + oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr, + NULL, NES_ARP_RESOLVE); + arpindex = nes_addr_resolve_neigh(nesvnic, cm_node->rem_addr, + oldarpindex); if (arpindex < 0) { kfree(cm_node); return NULL; @@ -1720,14 +1587,12 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core, mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0); } else { if (cm_node->apbvt_set && cm_node->nesvnic) { - nes_manage_apbvt(cm_node->nesvnic, cm_node->mapped_loc_port, + nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port, PCI_FUNC(cm_node->nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL); } - nes_debug(NES_DBG_NLMSG, "Delete APBVT mapped_loc_port = %04X\n", - cm_node->mapped_loc_port); - 
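With the mapped_* fields gone, find_listener() collapses to a single match on the remaining local address/port pair, with 0.0.0.0 acting as a wildcard, and its extra "local" parameter disappears. A generic, self-contained sketch of that simplified lookup, using pared-down stand-ins for the nes structures (names here are illustrative):

```c
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct ex_listener {
	struct list_head list;
	u32 loc_addr;	/* host-order IPv4, as nes stores it */
	u16 loc_port;
};

struct ex_listen_head {
	struct list_head head;
	spinlock_t lock;
};

/* Match on the one remaining local address/port pair; 0.0.0.0 is a
 * wildcard, exactly as in the simplified find_listener(). The listener
 * state check present in the real code is omitted for brevity. */
static struct ex_listener *ex_find_listener(struct ex_listen_head *lh,
					    u32 dst_addr, u16 dst_port)
{
	struct ex_listener *node;
	unsigned long flags;

	spin_lock_irqsave(&lh->lock, flags);
	list_for_each_entry(node, &lh->head, list) {
		if ((node->loc_addr == dst_addr || node->loc_addr == 0) &&
		    node->loc_port == dst_port) {
			spin_unlock_irqrestore(&lh->lock, flags);
			return node;
		}
	}
	spin_unlock_irqrestore(&lh->lock, flags);
	return NULL;
}
```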
nes_remove_mapinfo(cm_node->loc_addr, cm_node->loc_port, - cm_node->mapped_loc_addr, cm_node->mapped_loc_port); + nes_debug(NES_DBG_NLMSG, "Delete APBVT loc_port = %04X\n", + cm_node->loc_port); } atomic_dec(&cm_core->node_cnt); @@ -2184,7 +2049,6 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb, cm_node->state = NES_CM_STATE_ESTABLISHED; if (datasize) { cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; - nes_get_remote_addr(cm_node); handle_rcv_mpa(cm_node, skb); } else { /* rcvd ACK only */ dev_kfree_skb_any(skb); @@ -2399,17 +2263,14 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core, struct nes_vnic *nesvnic, struct nes_cm_info *cm_info) { struct nes_cm_listener *listener; - struct iwpm_dev_data pm_reg_msg; - struct iwpm_sa_data pm_msg; unsigned long flags; - int iwpm_err = 0; nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n", cm_info->loc_addr, cm_info->loc_port); /* cannot have multiple matching listeners */ listener = find_listener(cm_core, cm_info->loc_addr, cm_info->loc_port, - NES_CM_LISTENER_EITHER_STATE, 1); + NES_CM_LISTENER_EITHER_STATE); if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) { /* find automatically incs ref count ??? */ @@ -2419,22 +2280,6 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core, } if (!listener) { - nes_form_reg_msg(nesvnic, &pm_reg_msg); - iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_NES); - if (iwpm_err) { - nes_debug(NES_DBG_NLMSG, - "Port Mapper reg pid fail (err = %d).\n", iwpm_err); - } - if (iwpm_valid_pid() && !iwpm_err) { - nes_form_pm_msg(cm_info, &pm_msg); - iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_NES); - if (iwpm_err) - nes_debug(NES_DBG_NLMSG, - "Port Mapper query fail (err = %d).\n", iwpm_err); - else - nes_record_pm_msg(cm_info, &pm_msg); - } - /* create a CM listen node (1/2 node to compare incoming traffic to) */ listener = kzalloc(sizeof(*listener), GFP_ATOMIC); if (!listener) { @@ -2444,8 +2289,6 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core, listener->loc_addr = cm_info->loc_addr; listener->loc_port = cm_info->loc_port; - listener->mapped_loc_addr = cm_info->mapped_loc_addr; - listener->mapped_loc_port = cm_info->mapped_loc_port; listener->reused_node = 0; atomic_set(&listener->ref_count, 1); @@ -2507,18 +2350,18 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, if (cm_info->loc_addr == cm_info->rem_addr) { loopbackremotelistener = find_listener(cm_core, - cm_node->mapped_loc_addr, cm_node->mapped_rem_port, - NES_CM_LISTENER_ACTIVE_STATE, 0); + cm_node->loc_addr, cm_node->rem_port, + NES_CM_LISTENER_ACTIVE_STATE); if (loopbackremotelistener == NULL) { create_event(cm_node, NES_CM_EVENT_ABORTED); } else { loopback_cm_info = *cm_info; loopback_cm_info.loc_port = cm_info->rem_port; loopback_cm_info.rem_port = cm_info->loc_port; - loopback_cm_info.mapped_loc_port = - cm_info->mapped_rem_port; - loopback_cm_info.mapped_rem_port = - cm_info->mapped_loc_port; + loopback_cm_info.loc_port = + cm_info->rem_port; + loopback_cm_info.rem_port = + cm_info->loc_port; loopback_cm_info.cm_id = loopbackremotelistener->cm_id; loopbackremotenode = make_cm_node(cm_core, nesvnic, &loopback_cm_info, loopbackremotelistener); @@ -2747,12 +2590,6 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, nfo.rem_addr = ntohl(iph->saddr); nfo.rem_port = ntohs(tcph->source); - /* If port mapper is available these should be mapped address info */ - nfo.mapped_loc_addr = 
ntohl(iph->daddr); - nfo.mapped_loc_port = ntohs(tcph->dest); - nfo.mapped_rem_addr = ntohl(iph->saddr); - nfo.mapped_rem_port = ntohs(tcph->source); - tmp_daddr = cpu_to_be32(iph->daddr); tmp_saddr = cpu_to_be32(iph->saddr); @@ -2761,8 +2598,8 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, do { cm_node = find_node(cm_core, - nfo.mapped_rem_port, nfo.mapped_rem_addr, - nfo.mapped_loc_port, nfo.mapped_loc_addr); + nfo.rem_port, nfo.rem_addr, + nfo.loc_port, nfo.loc_addr); if (!cm_node) { /* Only type of packet accepted are for */ @@ -2771,9 +2608,9 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, skb_handled = 0; break; } - listener = find_listener(cm_core, nfo.mapped_loc_addr, - nfo.mapped_loc_port, - NES_CM_LISTENER_ACTIVE_STATE, 0); + listener = find_listener(cm_core, nfo.loc_addr, + nfo.loc_port, + NES_CM_LISTENER_ACTIVE_STATE); if (!listener) { nfo.cm_id = NULL; nfo.conn_type = 0; @@ -2856,12 +2693,22 @@ static struct nes_cm_core *nes_cm_alloc_core(void) nes_debug(NES_DBG_CM, "Enable QUEUE EVENTS\n"); cm_core->event_wq = create_singlethread_workqueue("nesewq"); + if (!cm_core->event_wq) + goto out_free_cmcore; cm_core->post_event = nes_cm_post_event; nes_debug(NES_DBG_CM, "Enable QUEUE DISCONNECTS\n"); cm_core->disconn_wq = create_singlethread_workqueue("nesdwq"); + if (!cm_core->disconn_wq) + goto out_free_wq; print_core(cm_core); return cm_core; + +out_free_wq: + destroy_workqueue(cm_core->event_wq); +out_free_cmcore: + kfree(cm_core); + return NULL; } @@ -3121,8 +2968,8 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) atomic_inc(&cm_disconnects); cm_event.event = IW_CM_EVENT_DISCONNECT; cm_event.status = disconn_status; - cm_event.local_addr = cm_id->local_addr; - cm_event.remote_addr = cm_id->remote_addr; + cm_event.local_addr = cm_id->m_local_addr; + cm_event.remote_addr = cm_id->m_remote_addr; cm_event.private_data = NULL; cm_event.private_data_len = 0; @@ -3148,8 +2995,8 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) cm_event.event = IW_CM_EVENT_CLOSE; cm_event.status = 0; cm_event.provider_data = cm_id->provider_data; - cm_event.local_addr = cm_id->local_addr; - cm_event.remote_addr = cm_id->remote_addr; + cm_event.local_addr = cm_id->m_local_addr; + cm_event.remote_addr = cm_id->m_remote_addr; cm_event.private_data = NULL; cm_event.private_data_len = 0; @@ -3240,8 +3087,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) u8 *start_ptr = &start_addr; u8 **start_buff = &start_ptr; u16 buff_len = 0; - struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr; - struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr; + struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr; + struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; ibqp = nes_get_qp(cm_id->device, conn_param->qpn); if (!ibqp) @@ -3378,11 +3225,11 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) nes_cm_init_tsa_conn(nesqp, cm_node); nesqp->nesqp_context->tcpPorts[0] = - cpu_to_le16(cm_node->mapped_loc_port); + cpu_to_le16(cm_node->loc_port); nesqp->nesqp_context->tcpPorts[1] = - cpu_to_le16(cm_node->mapped_rem_port); + cpu_to_le16(cm_node->rem_port); - nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->mapped_rem_addr); + nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->rem_addr); nesqp->nesqp_context->misc2 |= cpu_to_le32( (u32)PCI_FUNC(nesdev->pcidev->devfn) << @@ -3406,9 +3253,9 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 
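Aside from the port-mapper removal, the nes_cm_alloc_core() hunk above adds error handling that was previously missing: create_singlethread_workqueue() can fail, and the function now unwinds whatever has already been allocated instead of returning a half-initialized core. A minimal, standalone sketch of that unwind pattern, with hypothetical names standing in for the driver's:

```c
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_core {
	struct workqueue_struct *event_wq;
	struct workqueue_struct *disconn_wq;
};

/* Allocate the core and its two ordered workqueues; on failure, tear
 * down only what already succeeded, in reverse order. */
static struct example_core *example_alloc_core(void)
{
	struct example_core *core = kzalloc(sizeof(*core), GFP_KERNEL);

	if (!core)
		return NULL;

	core->event_wq = create_singlethread_workqueue("exewq");
	if (!core->event_wq)
		goto out_free_core;

	core->disconn_wq = create_singlethread_workqueue("exdwq");
	if (!core->disconn_wq)
		goto out_free_wq;

	return core;

out_free_wq:
	destroy_workqueue(core->event_wq);
out_free_core:
	kfree(core);
	return NULL;
}
```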
memset(&nes_quad, 0, sizeof(nes_quad)); nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24); - nes_quad.SrcIpadr = htonl(cm_node->mapped_rem_addr); - nes_quad.TcpPorts[0] = htons(cm_node->mapped_rem_port); - nes_quad.TcpPorts[1] = htons(cm_node->mapped_loc_port); + nes_quad.SrcIpadr = htonl(cm_node->rem_addr); + nes_quad.TcpPorts[0] = htons(cm_node->rem_port); + nes_quad.TcpPorts[1] = htons(cm_node->loc_port); /* Produce hash key */ crc_value = get_crc_value(&nes_quad); @@ -3437,8 +3284,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) cm_event.event = IW_CM_EVENT_ESTABLISHED; cm_event.status = 0; cm_event.provider_data = (void *)nesqp; - cm_event.local_addr = cm_id->local_addr; - cm_event.remote_addr = cm_id->remote_addr; + cm_event.local_addr = cm_id->m_local_addr; + cm_event.remote_addr = cm_id->m_remote_addr; cm_event.private_data = NULL; cm_event.private_data_len = 0; cm_event.ird = cm_node->ird_size; @@ -3508,11 +3355,8 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) struct nes_cm_node *cm_node; struct nes_cm_info cm_info; int apbvt_set = 0; - struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr; - struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr; - struct iwpm_dev_data pm_reg_msg; - struct iwpm_sa_data pm_msg; - int iwpm_err = 0; + struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr; + struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; if (cm_id->remote_addr.ss_family != AF_INET) return -ENOSYS; @@ -3558,37 +3402,13 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) cm_info.cm_id = cm_id; cm_info.conn_type = NES_CM_IWARP_CONN_TYPE; - /* No port mapper available, go with the specified peer information */ - cm_info.mapped_loc_addr = cm_info.loc_addr; - cm_info.mapped_loc_port = cm_info.loc_port; - cm_info.mapped_rem_addr = cm_info.rem_addr; - cm_info.mapped_rem_port = cm_info.rem_port; - - nes_form_reg_msg(nesvnic, &pm_reg_msg); - iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_NES); - if (iwpm_err) { - nes_debug(NES_DBG_NLMSG, - "Port Mapper reg pid fail (err = %d).\n", iwpm_err); - } - if (iwpm_valid_pid() && !iwpm_err) { - nes_form_pm_msg(&cm_info, &pm_msg); - iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_NES); - if (iwpm_err) - nes_debug(NES_DBG_NLMSG, - "Port Mapper query fail (err = %d).\n", iwpm_err); - else - nes_record_pm_msg(&cm_info, &pm_msg); - } - if (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr) { - nes_manage_apbvt(nesvnic, cm_info.mapped_loc_port, - PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD); + nes_manage_apbvt(nesvnic, cm_info.loc_port, + PCI_FUNC(nesdev->pcidev->devfn), + NES_MANAGE_APBVT_ADD); apbvt_set = 1; } - if (nes_create_mapinfo(&cm_info)) - return -ENOMEM; - cm_id->add_ref(cm_id); /* create a connect CM node connection */ @@ -3597,14 +3417,12 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) &cm_info); if (!cm_node) { if (apbvt_set) - nes_manage_apbvt(nesvnic, cm_info.mapped_loc_port, + nes_manage_apbvt(nesvnic, cm_info.loc_port, PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_DEL); - nes_debug(NES_DBG_NLMSG, "Delete mapped_loc_port = %04X\n", - cm_info.mapped_loc_port); - nes_remove_mapinfo(cm_info.loc_addr, cm_info.loc_port, - cm_info.mapped_loc_addr, cm_info.mapped_loc_port); + nes_debug(NES_DBG_NLMSG, "Delete loc_port = %04X\n", + cm_info.loc_port); cm_id->rem_ref(cm_id); return -ENOMEM; } @@ -3633,12 
+3451,12 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) struct nes_cm_listener *cm_node; struct nes_cm_info cm_info; int err; - struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr; + struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr; nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n", cm_id, ntohs(laddr->sin_port)); - if (cm_id->local_addr.ss_family != AF_INET) + if (cm_id->m_local_addr.ss_family != AF_INET) return -ENOSYS; nesvnic = to_nesvnic(cm_id->device); if (!nesvnic) @@ -3658,10 +3476,6 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) cm_info.conn_type = NES_CM_IWARP_CONN_TYPE; - /* No port mapper available, go with the specified info */ - cm_info.mapped_loc_addr = cm_info.loc_addr; - cm_info.mapped_loc_port = cm_info.loc_port; - cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info); if (!cm_node) { printk(KERN_ERR "%s[%u] Error returned from listen API call\n", @@ -3673,10 +3487,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) cm_node->tos = cm_id->tos; if (!cm_node->reused_node) { - if (nes_create_mapinfo(&cm_info)) - return -ENOMEM; - - err = nes_manage_apbvt(nesvnic, cm_node->mapped_loc_port, + err = nes_manage_apbvt(nesvnic, cm_node->loc_port, PCI_FUNC(nesvnic->nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD); if (err) { @@ -3786,8 +3597,8 @@ static void cm_event_connected(struct nes_cm_event *event) nesvnic = to_nesvnic(nesqp->ibqp.device); nesdev = nesvnic->nesdev; nesadapter = nesdev->nesadapter; - laddr = (struct sockaddr_in *)&cm_id->local_addr; - raddr = (struct sockaddr_in *)&cm_id->remote_addr; + laddr = (struct sockaddr_in *)&cm_id->m_local_addr; + raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; cm_event_laddr = (struct sockaddr_in *)&cm_event.local_addr; if (nesqp->destroyed) @@ -3802,10 +3613,10 @@ static void cm_event_connected(struct nes_cm_event *event) /* set the QP tsa context */ nesqp->nesqp_context->tcpPorts[0] = - cpu_to_le16(cm_node->mapped_loc_port); + cpu_to_le16(cm_node->loc_port); nesqp->nesqp_context->tcpPorts[1] = - cpu_to_le16(cm_node->mapped_rem_port); - nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->mapped_rem_addr); + cpu_to_le16(cm_node->rem_port); + nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->rem_addr); nesqp->nesqp_context->misc2 |= cpu_to_le32( (u32)PCI_FUNC(nesdev->pcidev->devfn) << @@ -3835,9 +3646,9 @@ static void cm_event_connected(struct nes_cm_event *event) nes_quad.DstIpAdrIndex = cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24); - nes_quad.SrcIpadr = htonl(cm_node->mapped_rem_addr); - nes_quad.TcpPorts[0] = htons(cm_node->mapped_rem_port); - nes_quad.TcpPorts[1] = htons(cm_node->mapped_loc_port); + nes_quad.SrcIpadr = htonl(cm_node->rem_addr); + nes_quad.TcpPorts[0] = htons(cm_node->rem_port); + nes_quad.TcpPorts[1] = htons(cm_node->loc_port); /* Produce hash key */ crc_value = get_crc_value(&nes_quad); @@ -3858,14 +3669,14 @@ static void cm_event_connected(struct nes_cm_event *event) cm_event.provider_data = cm_id->provider_data; cm_event_laddr->sin_family = AF_INET; cm_event_laddr->sin_port = laddr->sin_port; - cm_event.remote_addr = cm_id->remote_addr; + cm_event.remote_addr = cm_id->m_remote_addr; cm_event.private_data = (void *)event->cm_node->mpa_frame_buf; cm_event.private_data_len = (u8)event->cm_node->mpa_frame_size; cm_event.ird = cm_node->ird_size; cm_event.ord = cm_node->ord_size; - cm_event_laddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr); + cm_event_laddr->sin_addr.s_addr = 
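The repeated cm_id->local_addr to cm_id->m_local_addr (and remote_addr to m_remote_addr) substitutions throughout these hunks reflect the core change driving this series: struct iw_cm_id now carries the port-mapper-resolved addresses in m_local_addr/m_remote_addr, maintained by the iWARP CM core, and drivers program their hardware from those fields. A hedged sketch of how a driver pulls the IPv4 four-tuple out of the mapped fields (only the field names visible in the diff are assumed; the helper name is hypothetical):

```c
#include <linux/errno.h>
#include <linux/in.h>
#include <rdma/iw_cm.h>

/* Extract the mapped IPv4 tuple a driver would program into its
 * connection context, mirroring the casts nes_connect()/nes_accept()
 * perform on cm_id->m_local_addr and cm_id->m_remote_addr. */
static int example_get_mapped_v4_tuple(struct iw_cm_id *cm_id,
					__be32 *laddr, __be16 *lport,
					__be32 *raddr, __be16 *rport)
{
	struct sockaddr_in *l = (struct sockaddr_in *)&cm_id->m_local_addr;
	struct sockaddr_in *r = (struct sockaddr_in *)&cm_id->m_remote_addr;

	if (cm_id->m_remote_addr.ss_family != AF_INET)
		return -ENOSYS;

	*laddr = l->sin_addr.s_addr;
	*lport = l->sin_port;
	*raddr = r->sin_addr.s_addr;
	*rport = r->sin_port;
	return 0;
}
```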
htonl(event->cm_info.loc_addr); ret = cm_id->event_handler(cm_id, &cm_event); nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret); @@ -3913,8 +3724,8 @@ static void cm_event_connect_error(struct nes_cm_event *event) cm_event.event = IW_CM_EVENT_CONNECT_REPLY; cm_event.status = -ECONNRESET; cm_event.provider_data = cm_id->provider_data; - cm_event.local_addr = cm_id->local_addr; - cm_event.remote_addr = cm_id->remote_addr; + cm_event.local_addr = cm_id->m_local_addr; + cm_event.remote_addr = cm_id->m_remote_addr; cm_event.private_data = NULL; cm_event.private_data_len = 0; @@ -3970,8 +3781,8 @@ static void cm_event_reset(struct nes_cm_event *event) cm_event.event = IW_CM_EVENT_DISCONNECT; cm_event.status = -ECONNRESET; cm_event.provider_data = cm_id->provider_data; - cm_event.local_addr = cm_id->local_addr; - cm_event.remote_addr = cm_id->remote_addr; + cm_event.local_addr = cm_id->m_local_addr; + cm_event.remote_addr = cm_id->m_remote_addr; cm_event.private_data = NULL; cm_event.private_data_len = 0; @@ -3981,8 +3792,8 @@ static void cm_event_reset(struct nes_cm_event *event) cm_event.event = IW_CM_EVENT_CLOSE; cm_event.status = 0; cm_event.provider_data = cm_id->provider_data; - cm_event.local_addr = cm_id->local_addr; - cm_event.remote_addr = cm_id->remote_addr; + cm_event.local_addr = cm_id->m_local_addr; + cm_event.remote_addr = cm_id->m_remote_addr; cm_event.private_data = NULL; cm_event.private_data_len = 0; nes_debug(NES_DBG_CM, "NODE %p Generating CLOSE\n", event->cm_node); diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h index 147c2c884..d827d03e3 100644 --- a/drivers/infiniband/hw/nes/nes_cm.h +++ b/drivers/infiniband/hw/nes/nes_cm.h @@ -293,8 +293,8 @@ struct nes_cm_listener { struct list_head list; struct nes_cm_core *cm_core; u8 loc_mac[ETH_ALEN]; - nes_addr_t loc_addr, mapped_loc_addr; - u16 loc_port, mapped_loc_port; + nes_addr_t loc_addr; + u16 loc_port; struct iw_cm_id *cm_id; enum nes_cm_conn_type conn_type; atomic_t ref_count; @@ -309,9 +309,7 @@ struct nes_cm_listener { /* per connection node and node state information */ struct nes_cm_node { nes_addr_t loc_addr, rem_addr; - nes_addr_t mapped_loc_addr, mapped_rem_addr; u16 loc_port, rem_port; - u16 mapped_loc_port, mapped_rem_port; u8 loc_mac[ETH_ALEN]; u8 rem_mac[ETH_ALEN]; @@ -368,11 +366,6 @@ struct nes_cm_info { u16 rem_port; nes_addr_t loc_addr; nes_addr_t rem_addr; - u16 mapped_loc_port; - u16 mapped_rem_port; - nes_addr_t mapped_loc_addr; - nes_addr_t mapped_rem_addr; - enum nes_cm_conn_type conn_type; int backlog; }; diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 4713dd7ed..a1c6481d8 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -35,18 +35,11 @@ #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> -#include <linux/ip.h> -#include <linux/tcp.h> #include <linux/if_vlan.h> -#include <linux/inet_lro.h> #include <linux/slab.h> #include "nes.h" -static unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR; -module_param(nes_lro_max_aggr, uint, 0444); -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation"); - static int wide_ppm_offset; module_param(wide_ppm_offset, int, 0644); MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm"); @@ -1642,25 +1635,6 @@ static void nes_rq_wqes_timeout(unsigned long parm) } -static int nes_lro_get_skb_hdr(struct sk_buff *skb, void **iphdr, - void 
**tcph, u64 *hdr_flags, void *priv) -{ - unsigned int ip_len; - struct iphdr *iph; - skb_reset_network_header(skb); - iph = ip_hdr(skb); - if (iph->protocol != IPPROTO_TCP) - return -1; - ip_len = ip_hdrlen(skb); - skb_set_transport_header(skb, ip_len); - *tcph = tcp_hdr(skb); - - *hdr_flags = LRO_IPV4 | LRO_TCP; - *iphdr = iph; - return 0; -} - - /** * nes_init_nic_qp */ @@ -1895,14 +1869,6 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev) return -ENOMEM; } - nesvnic->lro_mgr.max_aggr = nes_lro_max_aggr; - nesvnic->lro_mgr.max_desc = NES_MAX_LRO_DESCRIPTORS; - nesvnic->lro_mgr.lro_arr = nesvnic->lro_desc; - nesvnic->lro_mgr.get_skb_header = nes_lro_get_skb_hdr; - nesvnic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; - nesvnic->lro_mgr.dev = netdev; - nesvnic->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; - nesvnic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; return 0; } @@ -2809,13 +2775,10 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) u16 pkt_type; u16 rqes_processed = 0; u8 sq_cqes = 0; - u8 nes_use_lro = 0; head = cq->cq_head; cq_size = cq->cq_size; cq->cqes_pending = 1; - if (nesvnic->netdev->features & NETIF_F_LRO) - nes_use_lro = 1; do { if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) & NES_NIC_CQE_VALID) { @@ -2950,10 +2913,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) __vlan_hwaccel_put_tag(rx_skb, htons(ETH_P_8021Q), vlan_tag); } - if (nes_use_lro) - lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL); - else - netif_receive_skb(rx_skb); + napi_gro_receive(&nesvnic->napi, rx_skb); skip_rx_indicate0: ; @@ -2984,8 +2944,6 @@ skip_rx_indicate0: } while (1); - if (nes_use_lro) - lro_flush_all(&nesvnic->lro_mgr); if (sq_cqes) { barrier(); /* restart the queue if it had been stopped */ diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index c9080208a..1b66ef1e9 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h @@ -33,8 +33,6 @@ #ifndef __NES_HW_H #define __NES_HW_H -#include <linux/inet_lro.h> - #define NES_PHY_TYPE_CX4 1 #define NES_PHY_TYPE_1G 2 #define NES_PHY_TYPE_ARGUS 4 @@ -1049,8 +1047,6 @@ struct nes_hw_tune_timer { #define NES_TIMER_ENABLE_LIMIT 4 #define NES_MAX_LINK_INTERRUPTS 128 #define NES_MAX_LINK_CHECK 200 -#define NES_MAX_LRO_DESCRIPTORS 32 -#define NES_LRO_MAX_AGGR 64 struct nes_adapter { u64 fw_ver; @@ -1263,9 +1259,6 @@ struct nes_vnic { u8 next_qp_nic_index; u8 of_device_registered; u8 rdma_enabled; - u32 lro_max_aggr; - struct net_lro_mgr lro_mgr; - struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS]; struct timer_list event_timer; enum ib_event_type delayed_event; enum ib_event_type last_dispatched_event; diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 6a0bdfa0c..92914539e 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -500,9 +500,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev) * skb_shinfo(skb)->nr_frags, skb_is_gso(skb)); */ - if (!netif_carrier_ok(netdev)) - return NETDEV_TX_OK; - if (netif_queue_stopped(netdev)) return NETDEV_TX_BUSY; @@ -1085,9 +1082,6 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = { "Free 4Kpbls", "Free 256pbls", "Timer Inits", - "LRO aggregated", - "LRO flushed", - "LRO no_desc", "PAU CreateQPs", "PAU DestroyQPs", }; @@ -1302,9 +1296,6 @@ static void nes_netdev_get_ethtool_stats(struct net_device 
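The nes_hw.c and nes_hw.h hunks above drop the old inet_lro machinery entirely: the per-vnic net_lro_mgr and descriptor array, the nes_lro_get_skb_hdr() callback, the lro_receive_skb()/lro_flush_all() calls, and (below, in nes_nic.c) the NETIF_F_LRO feature bit and the LRO ethtool counters. Received skbs are handed to the stack's GRO engine instead, which needs no per-driver descriptor management. A minimal sketch of the new hand-off, assuming a NAPI context that was registered with netif_napi_add() during device init, as nes already does:

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Feed a completed receive skb to GRO from NAPI poll context; GRO either
 * aggregates it with an in-flight flow or passes it up the stack. */
static void example_rx_indicate(struct napi_struct *napi, struct sk_buff *skb)
{
	/* nes marks the checksum as already validated when the hardware
	 * verified it; shown here only for completeness. */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	napi_gro_receive(napi, skb);
}
```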
*netdev, target_stat_values[++index] = nesadapter->free_4kpbl; target_stat_values[++index] = nesadapter->free_256pbl; target_stat_values[++index] = int_mod_timer_init; - target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated; - target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed; - target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc; target_stat_values[++index] = atomic_read(&pau_qps_created); target_stat_values[++index] = atomic_read(&pau_qps_destroyed); } @@ -1709,7 +1700,6 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev, netdev->hw_features |= NETIF_F_TSO; netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX; - netdev->hw_features |= NETIF_F_LRO; nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d," " nic_index = %d, logical_port = %d, mac_index = %d.\n", diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 8c4daf7f2..fba69a39a 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -56,7 +56,8 @@ static int nes_dereg_mr(struct ib_mr *ib_mr); /** * nes_alloc_mw */ -static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd, enum ib_mw_type type) +static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd, enum ib_mw_type type, + struct ib_udata *udata) { struct nes_pd *nespd = to_nespd(ibpd); struct nes_vnic *nesvnic = to_nesvnic(ibpd->device); @@ -3768,6 +3769,8 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev) nesibdev->ibdev.iwcm->create_listen = nes_create_listen; nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen; nesibdev->ibdev.get_port_immutable = nes_port_immutable; + memcpy(nesibdev->ibdev.iwcm->ifname, netdev->name, + sizeof(nesibdev->ibdev.iwcm->ifname)); return nesibdev; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index 12503f15f..45bdfa0e3 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -114,6 +114,7 @@ struct ocrdma_dev_attr { u8 local_ca_ack_delay; u8 ird; u8 num_ird_pages; + u8 udp_encap; }; struct ocrdma_dma_mem { @@ -356,6 +357,7 @@ struct ocrdma_ah { struct ocrdma_av *av; u16 sgid_index; u32 id; + u8 hdr_type; }; struct ocrdma_qp_hwq_info { @@ -598,4 +600,10 @@ static inline u8 ocrdma_get_ae_link_state(u32 ae_state) return ((ae_state & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT); } +static inline bool ocrdma_is_udp_encap_supported(struct ocrdma_dev *dev) +{ + return (dev->attr.udp_encap & OCRDMA_L3_TYPE_IPV4) || + (dev->attr.udp_encap & OCRDMA_L3_TYPE_IPV6); +} + #endif diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index 3790771f2..797362a29 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c @@ -55,18 +55,46 @@ #define OCRDMA_VID_PCP_SHIFT 0xD +static u16 ocrdma_hdr_type_to_proto_num(int devid, u8 hdr_type) +{ + switch (hdr_type) { + case OCRDMA_L3_TYPE_IB_GRH: + return (u16)0x8915; + case OCRDMA_L3_TYPE_IPV4: + return (u16)0x0800; + case OCRDMA_L3_TYPE_IPV6: + return (u16)0x86dd; + default: + pr_err("ocrdma%d: Invalid network header\n", devid); + return 0; + } +} + static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, struct ib_ah_attr *attr, union ib_gid *sgid, int pdid, bool *isvlan, u16 vlan_tag) { - int status = 0; + int status; struct ocrdma_eth_vlan eth; struct ocrdma_grh grh; int eth_sz; + u16 proto_num = 0; + u8 nxthdr = 0x11; + struct 
iphdr ipv4; + union { + struct sockaddr _sockaddr; + struct sockaddr_in _sockaddr_in; + struct sockaddr_in6 _sockaddr_in6; + } sgid_addr, dgid_addr; memset(ð, 0, sizeof(eth)); memset(&grh, 0, sizeof(grh)); + /* Protocol Number */ + proto_num = ocrdma_hdr_type_to_proto_num(dev->id, ah->hdr_type); + if (!proto_num) + return -EINVAL; + nxthdr = (proto_num == 0x8915) ? 0x1b : 0x11; /* VLAN */ if (!vlan_tag || (vlan_tag > 0xFFF)) vlan_tag = dev->pvid; @@ -78,13 +106,13 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, dev->id); } eth.eth_type = cpu_to_be16(0x8100); - eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); + eth.roce_eth_type = cpu_to_be16(proto_num); vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT; eth.vlan_tag = cpu_to_be16(vlan_tag); eth_sz = sizeof(struct ocrdma_eth_vlan); *isvlan = true; } else { - eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); + eth.eth_type = cpu_to_be16(proto_num); eth_sz = sizeof(struct ocrdma_eth_basic); } /* MAC */ @@ -93,18 +121,33 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, if (status) return status; ah->sgid_index = attr->grh.sgid_index; - memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid)); - memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw)); - - grh.tclass_flow = cpu_to_be32((6 << 28) | - (attr->grh.traffic_class << 24) | - attr->grh.flow_label); - /* 0x1b is next header value in GRH */ - grh.pdid_hoplimit = cpu_to_be32((pdid << 16) | - (0x1b << 8) | attr->grh.hop_limit); /* Eth HDR */ memcpy(&ah->av->eth_hdr, ð, eth_sz); - memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh)); + if (ah->hdr_type == RDMA_NETWORK_IPV4) { + *((__be16 *)&ipv4) = htons((4 << 12) | (5 << 8) | + attr->grh.traffic_class); + ipv4.id = cpu_to_be16(pdid); + ipv4.frag_off = htons(IP_DF); + ipv4.tot_len = htons(0); + ipv4.ttl = attr->grh.hop_limit; + ipv4.protocol = nxthdr; + rdma_gid2ip(&sgid_addr._sockaddr, sgid); + ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr; + rdma_gid2ip(&dgid_addr._sockaddr, &attr->grh.dgid); + ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr; + memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr)); + } else { + memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid)); + grh.tclass_flow = cpu_to_be32((6 << 28) | + (attr->grh.traffic_class << 24) | + attr->grh.flow_label); + memcpy(&grh.dgid[0], attr->grh.dgid.raw, + sizeof(attr->grh.dgid.raw)); + grh.pdid_hoplimit = cpu_to_be32((pdid << 16) | + (nxthdr << 8) | + attr->grh.hop_limit); + memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh)); + } if (*isvlan) ah->av->valid |= OCRDMA_AV_VLAN_VALID; ah->av->valid = cpu_to_le32(ah->av->valid); @@ -128,6 +171,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) if (atomic_cmpxchg(&dev->update_sl, 1, 0)) ocrdma_init_service_level(dev); + ah = kzalloc(sizeof(*ah), GFP_ATOMIC); if (!ah) return ERR_PTR(-ENOMEM); @@ -148,6 +192,8 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev); dev_put(sgid_attr.ndev); } + /* Get network header type for this GID */ + ah->hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid); if ((pd->uctx) && (!rdma_is_multicast_addr((struct in6_addr *)attr->grh.dgid.raw)) && @@ -172,6 +218,11 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) ahid_addr = pd->uctx->ah_tbl.va + attr->dlid; *ahid_addr = 0; *ahid_addr |= ah->id & OCRDMA_AH_ID_MASK; + if (ocrdma_is_udp_encap_supported(dev)) { + 
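The set_av_attr() changes above are the heart of the ocrdma RoCE v2 support: when the GID's network type is RDMA_NETWORK_IPV4, the address vector carries an IPv4 header (built from the source and destination GIDs via rdma_gid2ip()) after the Ethernet header, with the matching ethertype from ocrdma_hdr_type_to_proto_num(), instead of the GRH used for plain RoCE. A hedged sketch of the GID-to-IPv4 conversion in isolation (the helper name is hypothetical; the union layout mirrors the diff):

```c
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>

/* Recover the IPv4 source/destination addresses encoded in a pair of
 * RoCEv2 GIDs, the same conversion set_av_attr() performs before filling
 * the struct iphdr it places in the AV. */
static void example_gids_to_ipv4(union ib_gid *sgid, union ib_gid *dgid,
				 __be32 *saddr, __be32 *daddr)
{
	union {
		struct sockaddr _sockaddr;
		struct sockaddr_in _sockaddr_in;
		struct sockaddr_in6 _sockaddr_in6;
	} sgid_addr, dgid_addr;

	rdma_gid2ip(&sgid_addr._sockaddr, sgid);
	rdma_gid2ip(&dgid_addr._sockaddr, dgid);

	*saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
	*daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
}
```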
*ahid_addr |= ((u32)ah->hdr_type & + OCRDMA_AH_L3_TYPE_MASK) << + OCRDMA_AH_L3_TYPE_SHIFT; + } if (isvlan) *ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK << OCRDMA_AH_VLAN_VALID_SHIFT); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h index 04a30ae67..3856dd4c7 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h @@ -46,9 +46,10 @@ enum { OCRDMA_AH_ID_MASK = 0x3FF, OCRDMA_AH_VLAN_VALID_MASK = 0x01, - OCRDMA_AH_VLAN_VALID_SHIFT = 0x1F + OCRDMA_AH_VLAN_VALID_SHIFT = 0x1F, + OCRDMA_AH_L3_TYPE_MASK = 0x03, + OCRDMA_AH_L3_TYPE_SHIFT = 0x1D /* 29 bits */ }; - struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *); int ocrdma_destroy_ah(struct ib_ah *); int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 283ca842f..16740dcb8 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -1113,7 +1113,7 @@ mbx_err: static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe, void *payload_va) { - int status = 0; + int status; struct ocrdma_mbx_rsp *rsp = payload_va; if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >> @@ -1144,6 +1144,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev, attr->max_pd = (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >> OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT; + attr->udp_encap = (rsp->max_pd_ca_ack_delay & + OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK) >> + OCRDMA_MBX_QUERY_CFG_L3_TYPE_SHIFT; attr->max_dpp_pds = (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >> OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET; @@ -2138,7 +2141,6 @@ int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state, enum ib_qp_state *old_ib_state) { unsigned long flags; - int status = 0; enum ocrdma_qp_state new_state; new_state = get_ocrdma_qp_state(new_ib_state); @@ -2163,7 +2165,7 @@ int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state, qp->state = new_state; spin_unlock_irqrestore(&qp->q_lock, flags); - return status; + return 0; } static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp) @@ -2501,7 +2503,12 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, union ib_gid sgid, zgid; struct ib_gid_attr sgid_attr; u32 vlan_id = 0xFFFF; - u8 mac_addr[6]; + u8 mac_addr[6], hdr_type; + union { + struct sockaddr _sockaddr; + struct sockaddr_in _sockaddr_in; + struct sockaddr_in6 _sockaddr_in6; + } sgid_addr, dgid_addr; struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); if ((ah_attr->ah_flags & IB_AH_GRH) == 0) @@ -2516,6 +2523,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, cmd->params.hop_lmt_rq_psn |= (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT); cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID; + + /* GIDs */ memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0], sizeof(cmd->params.dgid)); @@ -2538,6 +2547,16 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, return status; cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) | (mac_addr[2] << 16) | (mac_addr[3] << 24); + + hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid); + if (hdr_type == RDMA_NETWORK_IPV4) { + rdma_gid2ip(&sgid_addr._sockaddr, &sgid); + rdma_gid2ip(&dgid_addr._sockaddr, &ah_attr->grh.dgid); + memcpy(&cmd->params.dgid[0], + &dgid_addr._sockaddr_in.sin_addr.s_addr, 4); + memcpy(&cmd->params.sgid[0], + 
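When UDP encapsulation is supported, ocrdma_create_ah() now packs the AH's L3 header type into the per-user AH table entry alongside the AH id and the VLAN-valid bit, using the new OCRDMA_AH_L3_TYPE_MASK/SHIFT (two bits at bit position 29) defined in ocrdma_ah.h just below. A hedged sketch of that packing and the matching unpack a consumer of the table entry would use (helper and macro names are hypothetical; the values come from the diff):

```c
#include <linux/types.h>

#define EX_AH_ID_MASK		0x3FF
#define EX_AH_L3_TYPE_MASK	0x03
#define EX_AH_L3_TYPE_SHIFT	0x1D	/* bit 29 */

/* Pack the AH id and L3 header type into one table-entry word. */
static inline u32 ex_ah_pack(u32 ahid, u8 hdr_type)
{
	return (ahid & EX_AH_ID_MASK) |
	       (((u32)hdr_type & EX_AH_L3_TYPE_MASK) << EX_AH_L3_TYPE_SHIFT);
}

/* Recover the L3 header type from a packed table entry. */
static inline u8 ex_ah_unpack_l3_type(u32 entry)
{
	return (entry >> EX_AH_L3_TYPE_SHIFT) & EX_AH_L3_TYPE_MASK;
}
```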
&sgid_addr._sockaddr_in.sin_addr.s_addr, 4); + } /* convert them to LE format. */ ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid)); ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid)); @@ -2558,7 +2577,9 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, cmd->params.rnt_rc_sl_fl |= (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT; } - + cmd->params.max_sge_recv_flags |= ((hdr_type << + OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_SHIFT) & + OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_MASK); return 0; } @@ -2871,7 +2892,7 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq) static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype, struct ocrdma_dcbx_cfg *dcbxcfg) { - int status = 0; + int status; dma_addr_t pa; struct ocrdma_mqe cmd; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index f38743018..3d75f65ce 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -89,8 +89,10 @@ static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num, struct ib_port_immutable *immutable) { struct ib_port_attr attr; + struct ocrdma_dev *dev; int err; + dev = get_ocrdma_dev(ibdev); err = ocrdma_query_port(ibdev, port_num, &attr); if (err) return err; @@ -98,6 +100,8 @@ static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; + if (ocrdma_is_udp_encap_supported(dev)) + immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP; immutable->max_mad_size = IB_MGMT_MAD_SIZE; return 0; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index 99dd6fdf0..0efc9662c 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h @@ -140,7 +140,11 @@ enum { OCRDMA_DB_RQ_SHIFT = 24 }; -#define OCRDMA_ROUDP_FLAGS_SHIFT 0x03 +enum { + OCRDMA_L3_TYPE_IB_GRH = 0x00, + OCRDMA_L3_TYPE_IPV4 = 0x01, + OCRDMA_L3_TYPE_IPV6 = 0x02 +}; #define OCRDMA_DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ #define OCRDMA_DB_CQ_RING_ID_EXT_MASK 0x0C00 /* bits 10-11 of qid at 12-11 */ @@ -546,7 +550,8 @@ enum { OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT = 8, OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK = 0xFF << OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT, - + OCRDMA_MBX_QUERY_CFG_L3_TYPE_SHIFT = 3, + OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK = 0x18, OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0, OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF, OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16, @@ -1107,6 +1112,8 @@ enum { OCRDMA_QP_PARAMS_STATE_MASK = BIT(5) | BIT(6) | BIT(7), OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC = BIT(8), OCRDMA_QP_PARAMS_FLAGS_INB_ATEN = BIT(9), + OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_SHIFT = 11, + OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_MASK = BIT(11) | BIT(12) | BIT(13), OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT = 16, OCRDMA_QP_PARAMS_MAX_SGE_RECV_MASK = 0xFFFF << OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT, @@ -1735,8 +1742,11 @@ enum { /* w1 */ OCRDMA_CQE_UD_XFER_LEN_SHIFT = 16, + OCRDMA_CQE_UD_XFER_LEN_MASK = 0x1FFF, OCRDMA_CQE_PKEY_SHIFT = 0, OCRDMA_CQE_PKEY_MASK = 0xFFFF, + OCRDMA_CQE_UD_L3TYPE_SHIFT = 29, + OCRDMA_CQE_UD_L3TYPE_MASK = 0x07, /* w2 */ OCRDMA_CQE_QPN_SHIFT = 0, @@ -1861,7 +1871,7 @@ struct ocrdma_ewqe_ud_hdr { u32 rsvd_dest_qpn; u32 qkey; u32 rsvd_ahid; - u32 rsvd; + u32 hdr_type; }; /* extended wqe followed by hdr_wqe for Fast Memory register */ diff --git 
a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c index 255f77408..8bef09a8c 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c @@ -610,7 +610,7 @@ static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev) static void ocrdma_update_stats(struct ocrdma_dev *dev) { ulong now = jiffies, secs; - int status = 0; + int status; struct ocrdma_rdma_stats_resp *rdma_stats = (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats; @@ -641,7 +641,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp, { char tmp_str[32]; long reset; - int status = 0; + int status; struct ocrdma_stats *pstats = filp->private_data; struct ocrdma_dev *dev = pstats->dev; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 12420e4ec..a8496a18e 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -419,7 +419,7 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ib_udata *udata) { struct ocrdma_pd *pd = NULL; - int status = 0; + int status; pd = kzalloc(sizeof(*pd), GFP_KERNEL); if (!pd) @@ -468,7 +468,7 @@ static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx, static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd) { - int status = 0; + int status; if (dev->pd_mgr->pd_prealloc_valid) status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled); @@ -596,7 +596,7 @@ map_err: int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx) { - int status = 0; + int status; struct ocrdma_mm *mm, *tmp; struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx); struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device); @@ -623,7 +623,7 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT; u64 unmapped_db = (u64) dev->nic_info.unmapped_db; unsigned long len = (vma->vm_end - vma->vm_start); - int status = 0; + int status; bool found; if (vma->vm_start & (PAGE_SIZE - 1)) @@ -1285,7 +1285,7 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, struct ib_udata *udata, int dpp_offset, int dpp_credit_lmt, int srq) { - int status = 0; + int status; u64 usr_db; struct ocrdma_create_qp_uresp uresp; struct ocrdma_pd *pd = qp->pd; @@ -1494,9 +1494,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, */ if (status < 0) return status; - status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask); - - return status; + return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask); } int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, @@ -1949,7 +1947,7 @@ int ocrdma_modify_srq(struct ib_srq *ibsrq, enum ib_srq_attr_mask srq_attr_mask, struct ib_udata *udata) { - int status = 0; + int status; struct ocrdma_srq *srq; srq = get_ocrdma_srq(ibsrq); @@ -2005,6 +2003,7 @@ static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp, else ud_hdr->qkey = ud_wr(wr)->remote_qkey; ud_hdr->rsvd_ahid = ah->id; + ud_hdr->hdr_type = ah->hdr_type; if (ah->av->valid & OCRDMA_AV_VLAN_VALID) hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT); } @@ -2717,9 +2716,11 @@ static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, return expand; } -static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe) +static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc, + struct ocrdma_cqe *cqe) { int status; + 
u16 hdr_type = 0; status = (le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT; @@ -2728,7 +2729,17 @@ static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe) ibwc->pkey_index = 0; ibwc->wc_flags = IB_WC_GRH; ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >> - OCRDMA_CQE_UD_XFER_LEN_SHIFT); + OCRDMA_CQE_UD_XFER_LEN_SHIFT) & + OCRDMA_CQE_UD_XFER_LEN_MASK; + + if (ocrdma_is_udp_encap_supported(dev)) { + hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >> + OCRDMA_CQE_UD_L3TYPE_SHIFT) & + OCRDMA_CQE_UD_L3TYPE_MASK; + ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE; + ibwc->network_hdr_type = hdr_type; + } + return status; } @@ -2791,12 +2802,15 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, struct ib_wc *ibwc) { + struct ocrdma_dev *dev; + + dev = get_ocrdma_dev(qp->ibqp.device); ibwc->opcode = IB_WC_RECV; ibwc->qp = &qp->ibqp; ibwc->status = IB_WC_SUCCESS; if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) - ocrdma_update_ud_rcqe(ibwc, cqe); + ocrdma_update_ud_rcqe(dev, ibwc, cqe); else ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen); diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig index 495be0978..e0fdb9201 100644 --- a/drivers/infiniband/hw/qib/Kconfig +++ b/drivers/infiniband/hw/qib/Kconfig @@ -1,6 +1,6 @@ config INFINIBAND_QIB tristate "Intel PCIe HCA support" - depends on 64BIT + depends on 64BIT && INFINIBAND_RDMAVT ---help--- This is a low-level driver for Intel PCIe QLE InfiniBand host channel adapters. This driver does not support the Intel diff --git a/drivers/infiniband/hw/qib/Makefile b/drivers/infiniband/hw/qib/Makefile index 57f8103e5..79ebd79e8 100644 --- a/drivers/infiniband/hw/qib/Makefile +++ b/drivers/infiniband/hw/qib/Makefile @@ -1,11 +1,11 @@ obj-$(CONFIG_INFINIBAND_QIB) += ib_qib.o -ib_qib-y := qib_cq.o qib_diag.o qib_dma.o qib_driver.o qib_eeprom.o \ - qib_file_ops.o qib_fs.o qib_init.o qib_intr.o qib_keys.o \ - qib_mad.o qib_mmap.o qib_mr.o qib_pcie.o qib_pio_copy.o \ - qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o qib_srq.o \ +ib_qib-y := qib_diag.o qib_driver.o qib_eeprom.o \ + qib_file_ops.o qib_fs.o qib_init.o qib_intr.o \ + qib_mad.o qib_pcie.o qib_pio_copy.o \ + qib_qp.o qib_qsfp.o qib_rc.o qib_ruc.o qib_sdma.o \ qib_sysfs.o qib_twsi.o qib_tx.o qib_uc.o qib_ud.o \ - qib_user_pages.o qib_user_sdma.o qib_verbs_mcast.o qib_iba7220.o \ + qib_user_pages.o qib_user_sdma.o qib_iba7220.o \ qib_sd7220.o qib_iba7322.o qib_verbs.o # 6120 has no fallback if no MSI interrupts, others can do INTx diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index 7df16f74b..bbf0a163a 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -52,6 +52,7 @@ #include <linux/kref.h> #include <linux/sched.h> #include <linux/kthread.h> +#include <rdma/rdma_vt.h> #include "qib_common.h" #include "qib_verbs.h" @@ -229,9 +230,6 @@ struct qib_ctxtdata { u8 redirect_seq_cnt; /* ctxt rcvhdrq head offset */ u32 head; - /* lookaside fields */ - struct qib_qp *lookaside_qp; - u32 lookaside_qpn; /* QPs waiting for context processing */ struct list_head qp_wait_list; #ifdef CONFIG_DEBUG_FS @@ -240,7 +238,7 @@ struct qib_ctxtdata { #endif }; -struct qib_sge_state; +struct rvt_sge_state; struct qib_sdma_txreq { int flags; @@ -258,14 +256,14 @@ struct qib_sdma_desc { struct qib_verbs_txreq { struct qib_sdma_txreq txreq; - struct 
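On the receive side, the ocrdma_update_ud_rcqe() change above now masks the UD transfer length properly and, on devices that support UDP encapsulation, reports the L3 header type from the CQE in the work completion via IB_WC_WITH_NETWORK_HDR_TYPE and ibwc->network_hdr_type. A hedged sketch of that decode in isolation, using the field layout the new ocrdma_sli.h constants describe (names below are illustrative):

```c
#include <linux/types.h>

/* From ocrdma_sli.h in this diff: the UD receive CQE word holds the
 * transfer length at bits 16..28 and, for RoCEv2-capable devices, the L3
 * header type at bits 29..31. */
#define EX_CQE_UD_XFER_LEN_SHIFT	16
#define EX_CQE_UD_XFER_LEN_MASK		0x1FFF
#define EX_CQE_UD_L3TYPE_SHIFT		29
#define EX_CQE_UD_L3TYPE_MASK		0x07

/* Decode a CPU-order rxlen_pkey CQE word into the fields the work
 * completion reports; the L3 type is only meaningful when the device
 * advertises UDP encapsulation. */
static void ex_decode_ud_cqe(u32 rxlen_pkey, bool udp_encap,
			     u32 *byte_len, u8 *network_hdr_type)
{
	*byte_len = (rxlen_pkey >> EX_CQE_UD_XFER_LEN_SHIFT) &
		    EX_CQE_UD_XFER_LEN_MASK;
	if (udp_encap)
		*network_hdr_type = (rxlen_pkey >> EX_CQE_UD_L3TYPE_SHIFT) &
				    EX_CQE_UD_L3TYPE_MASK;
}
```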
qib_qp *qp; - struct qib_swqe *wqe; + struct rvt_qp *qp; + struct rvt_swqe *wqe; u32 dwords; u16 hdr_dwords; u16 hdr_inx; struct qib_pio_header *align_buf; - struct qib_mregion *mr; - struct qib_sge_state *ss; + struct rvt_mregion *mr; + struct rvt_sge_state *ss; }; #define QIB_SDMA_TXREQ_F_USELARGEBUF 0x1 @@ -1096,8 +1094,6 @@ struct qib_devdata { u16 psxmitwait_check_rate; /* high volume overflow errors defered to tasklet */ struct tasklet_struct error_tasklet; - /* per device cq worker */ - struct kthread_worker *worker; int assigned_node_id; /* NUMA node closest to HCA */ }; @@ -1135,8 +1131,9 @@ extern spinlock_t qib_devs_lock; extern struct qib_devdata *qib_lookup(int unit); extern u32 qib_cpulist_count; extern unsigned long *qib_cpulist; - +extern u16 qpt_mask; extern unsigned qib_cc_table_size; + int qib_init(struct qib_devdata *, int); int init_chip_wc_pat(struct qib_devdata *dd, u32); int qib_enable_wc(struct qib_devdata *dd); @@ -1323,7 +1320,7 @@ void __qib_sdma_intr(struct qib_pportdata *); void qib_sdma_intr(struct qib_pportdata *); void qib_user_sdma_send_desc(struct qib_pportdata *dd, struct list_head *pktlist); -int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *, +int qib_sdma_verbs_send(struct qib_pportdata *, struct rvt_sge_state *, u32, struct qib_verbs_txreq *); /* ppd->sdma_lock should be locked before calling this. */ int qib_sdma_make_progress(struct qib_pportdata *dd); @@ -1454,6 +1451,8 @@ u64 qib_sps_ints(void); dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long, size_t, int); const char *qib_get_unit_name(int unit); +const char *qib_get_card_name(struct rvt_dev_info *rdi); +struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi); /* * Flush write combining store buffers (if present) and perform a write @@ -1540,4 +1539,14 @@ struct qib_hwerror_msgs { void qib_format_hwerrors(u64 hwerrs, const struct qib_hwerror_msgs *hwerrmsgs, size_t nhwerrmsgs, char *msg, size_t lmsg); + +void qib_stop_send_queue(struct rvt_qp *qp); +void qib_quiesce_qp(struct rvt_qp *qp); +void qib_flush_qp_waiters(struct rvt_qp *qp); +int qib_mtu_to_path_mtu(u32 mtu); +u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu); +void qib_notify_error_qp(struct rvt_qp *qp); +int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp, + struct ib_qp_attr *attr); + #endif /* _QIB_KERNEL_H */ diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h index 4fb78abd8..1d6e63eb1 100644 --- a/drivers/infiniband/hw/qib/qib_common.h +++ b/drivers/infiniband/hw/qib/qib_common.h @@ -742,14 +742,11 @@ struct qib_tid_session_member { #define SIZE_OF_CRC 1 #define QIB_DEFAULT_P_KEY 0xFFFF -#define QIB_PERMISSIVE_LID 0xFFFF #define QIB_AETH_CREDIT_SHIFT 24 #define QIB_AETH_CREDIT_MASK 0x1F #define QIB_AETH_CREDIT_INVAL 0x1F #define QIB_PSN_MASK 0xFFFFFF #define QIB_MSN_MASK 0xFFFFFF -#define QIB_QPN_MASK 0xFFFFFF -#define QIB_MULTICAST_LID_BASE 0xC000 #define QIB_EAGER_TID_ID QLOGIC_IB_I_TID_MASK #define QIB_MULTICAST_QPN 0xFFFFFF diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c deleted file mode 100644 index 2b45d0b02..000000000 --- a/drivers/infiniband/hw/qib/qib_cq.c +++ /dev/null @@ -1,545 +0,0 @@ -/* - * Copyright (c) 2013 Intel Corporation. All rights reserved. - * Copyright (c) 2006, 2007, 2008, 2010 QLogic Corporation. All rights reserved. - * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 
- * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include <linux/err.h> -#include <linux/slab.h> -#include <linux/vmalloc.h> -#include <linux/kthread.h> - -#include "qib_verbs.h" -#include "qib.h" - -/** - * qib_cq_enter - add a new entry to the completion queue - * @cq: completion queue - * @entry: work completion entry to add - * @sig: true if @entry is a solicitated entry - * - * This may be called with qp->s_lock held. - */ -void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int solicited) -{ - struct qib_cq_wc *wc; - unsigned long flags; - u32 head; - u32 next; - - spin_lock_irqsave(&cq->lock, flags); - - /* - * Note that the head pointer might be writable by user processes. - * Take care to verify it is a sane value. - */ - wc = cq->queue; - head = wc->head; - if (head >= (unsigned) cq->ibcq.cqe) { - head = cq->ibcq.cqe; - next = 0; - } else - next = head + 1; - if (unlikely(next == wc->tail)) { - spin_unlock_irqrestore(&cq->lock, flags); - if (cq->ibcq.event_handler) { - struct ib_event ev; - - ev.device = cq->ibcq.device; - ev.element.cq = &cq->ibcq; - ev.event = IB_EVENT_CQ_ERR; - cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); - } - return; - } - if (cq->ip) { - wc->uqueue[head].wr_id = entry->wr_id; - wc->uqueue[head].status = entry->status; - wc->uqueue[head].opcode = entry->opcode; - wc->uqueue[head].vendor_err = entry->vendor_err; - wc->uqueue[head].byte_len = entry->byte_len; - wc->uqueue[head].ex.imm_data = - (__u32 __force)entry->ex.imm_data; - wc->uqueue[head].qp_num = entry->qp->qp_num; - wc->uqueue[head].src_qp = entry->src_qp; - wc->uqueue[head].wc_flags = entry->wc_flags; - wc->uqueue[head].pkey_index = entry->pkey_index; - wc->uqueue[head].slid = entry->slid; - wc->uqueue[head].sl = entry->sl; - wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits; - wc->uqueue[head].port_num = entry->port_num; - /* Make sure entry is written before the head index. */ - smp_wmb(); - } else - wc->kqueue[head] = *entry; - wc->head = next; - - if (cq->notify == IB_CQ_NEXT_COMP || - (cq->notify == IB_CQ_SOLICITED && - (solicited || entry->status != IB_WC_SUCCESS))) { - struct kthread_worker *worker; - /* - * This will cause send_complete() to be called in - * another thread. 
- */ - smp_rmb(); - worker = cq->dd->worker; - if (likely(worker)) { - cq->notify = IB_CQ_NONE; - cq->triggered++; - queue_kthread_work(worker, &cq->comptask); - } - } - - spin_unlock_irqrestore(&cq->lock, flags); -} - -/** - * qib_poll_cq - poll for work completion entries - * @ibcq: the completion queue to poll - * @num_entries: the maximum number of entries to return - * @entry: pointer to array where work completions are placed - * - * Returns the number of completion entries polled. - * - * This may be called from interrupt context. Also called by ib_poll_cq() - * in the generic verbs code. - */ -int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry) -{ - struct qib_cq *cq = to_icq(ibcq); - struct qib_cq_wc *wc; - unsigned long flags; - int npolled; - u32 tail; - - /* The kernel can only poll a kernel completion queue */ - if (cq->ip) { - npolled = -EINVAL; - goto bail; - } - - spin_lock_irqsave(&cq->lock, flags); - - wc = cq->queue; - tail = wc->tail; - if (tail > (u32) cq->ibcq.cqe) - tail = (u32) cq->ibcq.cqe; - for (npolled = 0; npolled < num_entries; ++npolled, ++entry) { - if (tail == wc->head) - break; - /* The kernel doesn't need a RMB since it has the lock. */ - *entry = wc->kqueue[tail]; - if (tail >= cq->ibcq.cqe) - tail = 0; - else - tail++; - } - wc->tail = tail; - - spin_unlock_irqrestore(&cq->lock, flags); - -bail: - return npolled; -} - -static void send_complete(struct kthread_work *work) -{ - struct qib_cq *cq = container_of(work, struct qib_cq, comptask); - - /* - * The completion handler will most likely rearm the notification - * and poll for all pending entries. If a new completion entry - * is added while we are in this routine, queue_work() - * won't call us again until we return so we check triggered to - * see if we need to call the handler again. - */ - for (;;) { - u8 triggered = cq->triggered; - - /* - * IPoIB connected mode assumes the callback is from a - * soft IRQ. We simulate this by blocking "bottom halves". - * See the implementation for ipoib_cm_handle_tx_wc(), - * netif_tx_lock_bh() and netif_tx_lock(). - */ - local_bh_disable(); - cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); - local_bh_enable(); - - if (cq->triggered == triggered) - return; - } -} - -/** - * qib_create_cq - create a completion queue - * @ibdev: the device this completion queue is attached to - * @attr: creation attributes - * @context: unused by the QLogic_IB driver - * @udata: user data for libibverbs.so - * - * Returns a pointer to the completion queue or negative errno values - * for failure. - * - * Called by ib_create_cq() in the generic verbs code. - */ -struct ib_cq *qib_create_cq(struct ib_device *ibdev, - const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, - struct ib_udata *udata) -{ - int entries = attr->cqe; - struct qib_ibdev *dev = to_idev(ibdev); - struct qib_cq *cq; - struct qib_cq_wc *wc; - struct ib_cq *ret; - u32 sz; - - if (attr->flags) - return ERR_PTR(-EINVAL); - - if (entries < 1 || entries > ib_qib_max_cqes) { - ret = ERR_PTR(-EINVAL); - goto done; - } - - /* Allocate the completion queue structure. */ - cq = kmalloc(sizeof(*cq), GFP_KERNEL); - if (!cq) { - ret = ERR_PTR(-ENOMEM); - goto done; - } - - /* - * Allocate the completion queue entries and head/tail pointers. - * This is allocated separately so that it can be resized and - * also mapped into user space. - * We need to use vmalloc() in order to support mmap and large - * numbers of entries. 
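The whole of qib_cq.c is deleted because the soft completion-queue implementation (the mmap-able head/tail queue and the per-device kthread_worker that runs completion callbacks) now lives in the shared rdmavt layer, which the Kconfig hunk above makes a hard dependency. For reference, a condensed sketch of the dispatch mechanism the removed file used: completions queue a kthread_work item, and the worker invokes the CQ's handler with bottom halves disabled so IPoIB-CM's locking assumptions hold. Structure layout and names here are illustrative only:

```c
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>

struct ex_cq {
	struct kthread_worker *worker;	/* shared per-device worker */
	struct kthread_work comptask;	/* set up with init_kthread_work() */
	void (*comp_handler)(struct ex_cq *cq);
};

/* Worker-side callback: run the completion handler in BH-disabled
 * context, as the removed send_complete() did. */
static void ex_send_complete(struct kthread_work *work)
{
	struct ex_cq *cq = container_of(work, struct ex_cq, comptask);

	local_bh_disable();
	cq->comp_handler(cq);
	local_bh_enable();
}

/* Producer side: hand the CQ to the worker thread for callback delivery. */
static void ex_cq_trigger(struct ex_cq *cq)
{
	queue_kthread_work(cq->worker, &cq->comptask);
}
```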
- */ - sz = sizeof(*wc); - if (udata && udata->outlen >= sizeof(__u64)) - sz += sizeof(struct ib_uverbs_wc) * (entries + 1); - else - sz += sizeof(struct ib_wc) * (entries + 1); - wc = vmalloc_user(sz); - if (!wc) { - ret = ERR_PTR(-ENOMEM); - goto bail_cq; - } - - /* - * Return the address of the WC as the offset to mmap. - * See qib_mmap() for details. - */ - if (udata && udata->outlen >= sizeof(__u64)) { - int err; - - cq->ip = qib_create_mmap_info(dev, sz, context, wc); - if (!cq->ip) { - ret = ERR_PTR(-ENOMEM); - goto bail_wc; - } - - err = ib_copy_to_udata(udata, &cq->ip->offset, - sizeof(cq->ip->offset)); - if (err) { - ret = ERR_PTR(err); - goto bail_ip; - } - } else - cq->ip = NULL; - - spin_lock(&dev->n_cqs_lock); - if (dev->n_cqs_allocated == ib_qib_max_cqs) { - spin_unlock(&dev->n_cqs_lock); - ret = ERR_PTR(-ENOMEM); - goto bail_ip; - } - - dev->n_cqs_allocated++; - spin_unlock(&dev->n_cqs_lock); - - if (cq->ip) { - spin_lock_irq(&dev->pending_lock); - list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps); - spin_unlock_irq(&dev->pending_lock); - } - - /* - * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe. - * The number of entries should be >= the number requested or return - * an error. - */ - cq->dd = dd_from_dev(dev); - cq->ibcq.cqe = entries; - cq->notify = IB_CQ_NONE; - cq->triggered = 0; - spin_lock_init(&cq->lock); - init_kthread_work(&cq->comptask, send_complete); - wc->head = 0; - wc->tail = 0; - cq->queue = wc; - - ret = &cq->ibcq; - - goto done; - -bail_ip: - kfree(cq->ip); -bail_wc: - vfree(wc); -bail_cq: - kfree(cq); -done: - return ret; -} - -/** - * qib_destroy_cq - destroy a completion queue - * @ibcq: the completion queue to destroy. - * - * Returns 0 for success. - * - * Called by ib_destroy_cq() in the generic verbs code. - */ -int qib_destroy_cq(struct ib_cq *ibcq) -{ - struct qib_ibdev *dev = to_idev(ibcq->device); - struct qib_cq *cq = to_icq(ibcq); - - flush_kthread_work(&cq->comptask); - spin_lock(&dev->n_cqs_lock); - dev->n_cqs_allocated--; - spin_unlock(&dev->n_cqs_lock); - if (cq->ip) - kref_put(&cq->ip->ref, qib_release_mmap_info); - else - vfree(cq->queue); - kfree(cq); - - return 0; -} - -/** - * qib_req_notify_cq - change the notification type for a completion queue - * @ibcq: the completion queue - * @notify_flags: the type of notification to request - * - * Returns 0 for success. - * - * This may be called from interrupt context. Also called by - * ib_req_notify_cq() in the generic verbs code. - */ -int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags) -{ - struct qib_cq *cq = to_icq(ibcq); - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&cq->lock, flags); - /* - * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow - * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2). - */ - if (cq->notify != IB_CQ_NEXT_COMP) - cq->notify = notify_flags & IB_CQ_SOLICITED_MASK; - - if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && - cq->queue->head != cq->queue->tail) - ret = 1; - - spin_unlock_irqrestore(&cq->lock, flags); - - return ret; -} - -/** - * qib_resize_cq - change the size of the CQ - * @ibcq: the completion queue - * - * Returns 0 for success. 
- */ -int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) -{ - struct qib_cq *cq = to_icq(ibcq); - struct qib_cq_wc *old_wc; - struct qib_cq_wc *wc; - u32 head, tail, n; - int ret; - u32 sz; - - if (cqe < 1 || cqe > ib_qib_max_cqes) { - ret = -EINVAL; - goto bail; - } - - /* - * Need to use vmalloc() if we want to support large #s of entries. - */ - sz = sizeof(*wc); - if (udata && udata->outlen >= sizeof(__u64)) - sz += sizeof(struct ib_uverbs_wc) * (cqe + 1); - else - sz += sizeof(struct ib_wc) * (cqe + 1); - wc = vmalloc_user(sz); - if (!wc) { - ret = -ENOMEM; - goto bail; - } - - /* Check that we can write the offset to mmap. */ - if (udata && udata->outlen >= sizeof(__u64)) { - __u64 offset = 0; - - ret = ib_copy_to_udata(udata, &offset, sizeof(offset)); - if (ret) - goto bail_free; - } - - spin_lock_irq(&cq->lock); - /* - * Make sure head and tail are sane since they - * might be user writable. - */ - old_wc = cq->queue; - head = old_wc->head; - if (head > (u32) cq->ibcq.cqe) - head = (u32) cq->ibcq.cqe; - tail = old_wc->tail; - if (tail > (u32) cq->ibcq.cqe) - tail = (u32) cq->ibcq.cqe; - if (head < tail) - n = cq->ibcq.cqe + 1 + head - tail; - else - n = head - tail; - if (unlikely((u32)cqe < n)) { - ret = -EINVAL; - goto bail_unlock; - } - for (n = 0; tail != head; n++) { - if (cq->ip) - wc->uqueue[n] = old_wc->uqueue[tail]; - else - wc->kqueue[n] = old_wc->kqueue[tail]; - if (tail == (u32) cq->ibcq.cqe) - tail = 0; - else - tail++; - } - cq->ibcq.cqe = cqe; - wc->head = n; - wc->tail = 0; - cq->queue = wc; - spin_unlock_irq(&cq->lock); - - vfree(old_wc); - - if (cq->ip) { - struct qib_ibdev *dev = to_idev(ibcq->device); - struct qib_mmap_info *ip = cq->ip; - - qib_update_mmap_info(dev, ip, sz, wc); - - /* - * Return the offset to mmap. - * See qib_mmap() for details. 
- */ - if (udata && udata->outlen >= sizeof(__u64)) { - ret = ib_copy_to_udata(udata, &ip->offset, - sizeof(ip->offset)); - if (ret) - goto bail; - } - - spin_lock_irq(&dev->pending_lock); - if (list_empty(&ip->pending_mmaps)) - list_add(&ip->pending_mmaps, &dev->pending_mmaps); - spin_unlock_irq(&dev->pending_lock); - } - - ret = 0; - goto bail; - -bail_unlock: - spin_unlock_irq(&cq->lock); -bail_free: - vfree(wc); -bail: - return ret; -} - -int qib_cq_init(struct qib_devdata *dd) -{ - int ret = 0; - int cpu; - struct task_struct *task; - - if (dd->worker) - return 0; - dd->worker = kzalloc(sizeof(*dd->worker), GFP_KERNEL); - if (!dd->worker) - return -ENOMEM; - init_kthread_worker(dd->worker); - task = kthread_create_on_node( - kthread_worker_fn, - dd->worker, - dd->assigned_node_id, - "qib_cq%d", dd->unit); - if (IS_ERR(task)) - goto task_fail; - cpu = cpumask_first(cpumask_of_node(dd->assigned_node_id)); - kthread_bind(task, cpu); - wake_up_process(task); -out: - return ret; -task_fail: - ret = PTR_ERR(task); - kfree(dd->worker); - dd->worker = NULL; - goto out; -} - -void qib_cq_exit(struct qib_devdata *dd) -{ - struct kthread_worker *worker; - - worker = dd->worker; - if (!worker) - return; - /* blocks future queuing from send_complete() */ - dd->worker = NULL; - smp_wmb(); - flush_kthread_worker(worker); - kthread_stop(worker->task); - kfree(worker); -} diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c index f58fdc3d2..67ee6438c 100644 --- a/drivers/infiniband/hw/qib/qib_driver.c +++ b/drivers/infiniband/hw/qib/qib_driver.c @@ -90,6 +90,22 @@ const char *qib_get_unit_name(int unit) return iname; } +const char *qib_get_card_name(struct rvt_dev_info *rdi) +{ + struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi); + struct qib_devdata *dd = container_of(ibdev, + struct qib_devdata, verbs_dev); + return qib_get_unit_name(dd->unit); +} + +struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi) +{ + struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi); + struct qib_devdata *dd = container_of(ibdev, + struct qib_devdata, verbs_dev); + return dd->pcidev; +} + /* * Return count of units with at least one port ACTIVE. */ @@ -306,7 +322,9 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr; struct qib_other_headers *ohdr = NULL; struct qib_ibport *ibp = &ppd->ibport_data; - struct qib_qp *qp = NULL; + struct qib_devdata *dd = ppd->dd; + struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; + struct rvt_qp *qp = NULL; u32 tlen = qib_hdrget_length_in_bytes(rhf_addr); u16 lid = be16_to_cpu(hdr->lrh[1]); int lnh = be16_to_cpu(hdr->lrh[0]) & 3; @@ -319,7 +337,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, if (tlen < 24) goto drop; - if (lid < QIB_MULTICAST_LID_BASE) { + if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) { lid &= ~((1 << ppd->lmc) - 1); if (unlikely(lid != ppd->lid)) goto drop; @@ -346,13 +364,16 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, psn = be32_to_cpu(ohdr->bth[2]); /* Get the destination QP number. 
*/ - qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK; + qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; if (qp_num != QIB_MULTICAST_QPN) { int ruc_res; - qp = qib_lookup_qpn(ibp, qp_num); - if (!qp) + rcu_read_lock(); + qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); + if (!qp) { + rcu_read_unlock(); goto drop; + } /* * Handle only RC QPs - for other QP types drop error @@ -361,9 +382,9 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, spin_lock(&qp->r_lock); /* Check for valid receive state. */ - if (!(ib_qib_state_ops[qp->state] & - QIB_PROCESS_RECV_OK)) { - ibp->n_pkt_drops++; + if (!(ib_rvt_state_ops[qp->state] & + RVT_PROCESS_RECV_OK)) { + ibp->rvp.n_pkt_drops++; goto unlock; } @@ -383,7 +404,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) { diff = qib_cmp24(psn, qp->r_psn); if (!qp->r_nak_state && diff >= 0) { - ibp->n_rc_seqnak++; + ibp->rvp.n_rc_seqnak++; qp->r_nak_state = IB_NAK_PSN_ERROR; /* Use the expected PSN. */ @@ -398,7 +419,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, */ if (list_empty(&qp->rspwait)) { qp->r_flags |= - QIB_R_RSP_NAK; + RVT_R_RSP_NAK; atomic_inc( &qp->refcount); list_add_tail( @@ -419,12 +440,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, unlock: spin_unlock(&qp->r_lock); - /* - * Notify qib_destroy_qp() if it is waiting - * for us to finish. - */ - if (atomic_dec_and_test(&qp->refcount)) - wake_up(&qp->wait); + rcu_read_unlock(); } /* Unicast QP */ } /* Valid packet with TIDErr */ @@ -456,7 +472,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts) u32 eflags, etype, tlen, i = 0, updegr = 0, crcs = 0; int last; u64 lval; - struct qib_qp *qp, *nqp; + struct rvt_qp *qp, *nqp; l = rcd->head; rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset; @@ -549,15 +565,6 @@ move_along: updegr = 0; } } - /* - * Notify qib_destroy_qp() if it is waiting - * for lookaside_qp to finish. 
- */ - if (rcd->lookaside_qp) { - if (atomic_dec_and_test(&rcd->lookaside_qp->refcount)) - wake_up(&rcd->lookaside_qp->wait); - rcd->lookaside_qp = NULL; - } rcd->head = l; @@ -567,17 +574,17 @@ move_along: */ list_for_each_entry_safe(qp, nqp, &rcd->qp_wait_list, rspwait) { list_del_init(&qp->rspwait); - if (qp->r_flags & QIB_R_RSP_NAK) { - qp->r_flags &= ~QIB_R_RSP_NAK; + if (qp->r_flags & RVT_R_RSP_NAK) { + qp->r_flags &= ~RVT_R_RSP_NAK; qib_send_rc_ack(qp); } - if (qp->r_flags & QIB_R_RSP_SEND) { + if (qp->r_flags & RVT_R_RSP_SEND) { unsigned long flags; - qp->r_flags &= ~QIB_R_RSP_SEND; + qp->r_flags &= ~RVT_R_RSP_SEND; spin_lock_irqsave(&qp->s_lock, flags); - if (ib_qib_state_ops[qp->state] & - QIB_PROCESS_OR_FLUSH_SEND) + if (ib_rvt_state_ops[qp->state] & + RVT_PROCESS_OR_FLUSH_SEND) qib_schedule_send(qp); spin_unlock_irqrestore(&qp->s_lock, flags); } diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index 4b927809d..a3733f252 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -2956,13 +2956,13 @@ static void pma_6120_timer(unsigned long data) struct qib_ibport *ibp = &ppd->ibport_data; unsigned long flags; - spin_lock_irqsave(&ibp->lock, flags); + spin_lock_irqsave(&ibp->rvp.lock, flags); if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED) { cs->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING; qib_snapshot_counters(ppd, &cs->sword, &cs->rword, &cs->spkts, &cs->rpkts, &cs->xmit_wait); mod_timer(&cs->pma_timer, - jiffies + usecs_to_jiffies(ibp->pma_sample_interval)); + jiffies + usecs_to_jiffies(ibp->rvp.pma_sample_interval)); } else if (cs->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) { u64 ta, tb, tc, td, te; @@ -2975,11 +2975,11 @@ static void pma_6120_timer(unsigned long data) cs->rpkts = td - cs->rpkts; cs->xmit_wait = te - cs->xmit_wait; } - spin_unlock_irqrestore(&ibp->lock, flags); + spin_unlock_irqrestore(&ibp->rvp.lock, flags); } /* - * Note that the caller has the ibp->lock held. + * Note that the caller has the ibp->rvp.lock held. 
*/ static void qib_set_cntr_6120_sample(struct qib_pportdata *ppd, u32 intv, u32 start) diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 6c8ff1010..82d7c4bf5 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -2910,8 +2910,6 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd) spin_unlock_irqrestore(&dd->cspec->gpio_lock, flags); qib_qsfp_deinit(&dd->pport[i].cpspec->qsfp_data); } - if (dd->pport[i].ibport_data.smi_ah) - ib_destroy_ah(&dd->pport[i].ibport_data.smi_ah->ibah); } } @@ -5497,7 +5495,7 @@ static void try_7322_ipg(struct qib_pportdata *ppd) unsigned delay; int ret; - agent = ibp->send_agent; + agent = ibp->rvp.send_agent; if (!agent) goto retry; @@ -5515,7 +5513,7 @@ static void try_7322_ipg(struct qib_pportdata *ppd) ret = PTR_ERR(ah); else { send_buf->ah = ah; - ibp->smi_ah = to_iah(ah); + ibp->smi_ah = ibah_to_rvtah(ah); ret = 0; } } else { diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index 4ff340fe9..3f062f0dd 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -42,6 +42,7 @@ #ifdef CONFIG_INFINIBAND_QIB_DCA #include <linux/dca.h> #endif +#include <rdma/rdma_vt.h> #include "qib.h" #include "qib_common.h" @@ -244,6 +245,13 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, alloc_percpu(struct qib_pma_counters); if (!ppd->ibport_data.pmastats) return -ENOMEM; + ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64); + ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64); + ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64); + if (!(ppd->ibport_data.rvp.rc_acks) || + !(ppd->ibport_data.rvp.rc_qacks) || + !(ppd->ibport_data.rvp.rc_delayed_comp)) + return -ENOMEM; if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) goto bail; @@ -449,8 +457,6 @@ static int loadtime_init(struct qib_devdata *dd) init_timer(&dd->intrchk_timer); dd->intrchk_timer.function = verify_interrupt; dd->intrchk_timer.data = (unsigned long) dd; - - ret = qib_cq_init(dd); done: return ret; } @@ -631,6 +637,9 @@ wq_error: static void qib_free_pportdata(struct qib_pportdata *ppd) { free_percpu(ppd->ibport_data.pmastats); + free_percpu(ppd->ibport_data.rvp.rc_acks); + free_percpu(ppd->ibport_data.rvp.rc_qacks); + free_percpu(ppd->ibport_data.rvp.rc_delayed_comp); ppd->ibport_data.pmastats = NULL; } @@ -1081,7 +1090,7 @@ void qib_free_devdata(struct qib_devdata *dd) qib_dbg_ibdev_exit(&dd->verbs_dev); #endif free_percpu(dd->int_counter); - ib_dealloc_device(&dd->verbs_dev.ibdev); + ib_dealloc_device(&dd->verbs_dev.rdi.ibdev); } u64 qib_int_counter(struct qib_devdata *dd) @@ -1120,9 +1129,12 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) { unsigned long flags; struct qib_devdata *dd; - int ret; + int ret, nports; - dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra); + /* extra is * number of ports */ + nports = extra / sizeof(struct qib_pportdata); + dd = (struct qib_devdata *)rvt_alloc_device(sizeof(*dd) + extra, + nports); if (!dd) return ERR_PTR(-ENOMEM); @@ -1171,7 +1183,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) bail: if (!list_empty(&dd->list)) list_del_init(&dd->list); - ib_dealloc_device(&dd->verbs_dev.ibdev); + ib_dealloc_device(&dd->verbs_dev.rdi.ibdev); return ERR_PTR(ret); } @@ -1421,7 +1433,6 @@ static void cleanup_device_data(struct qib_devdata *dd) } kfree(tmp); kfree(dd->boardname); - qib_cq_exit(dd); } /* 
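The qib_init.c hunk above drops the driver-private completion worker: the calls to qib_cq_init() and qib_cq_exit() disappear, and the removed qib_cq.c code earlier in this diff shows what they managed, namely completions queued to a per-device kthread_worker from interrupt context and dispatched from a dedicated kernel thread created on the device's NUMA node. A minimal sketch of that pattern, using the same older init_kthread_worker()/queue_kthread_work() spelling the removed code uses, might look roughly like this; the demo_* names are hypothetical stand-ins for qib_devdata/qib_cq, not part of the patch, and the kthread_bind() CPU pinning done by the original is omitted:

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/err.h>

/* Hypothetical stand-ins for qib_devdata / qib_cq (illustration only). */
struct demo_dev {
	struct kthread_worker *worker;	/* one completion worker per device */
	int node;			/* NUMA node to create the thread on */
	int unit;
};

struct demo_cq {
	struct demo_dev *dd;
	struct kthread_work comptask;	/* queued from interrupt context */
};

static void demo_send_complete(struct kthread_work *work)
{
	struct demo_cq *cq = container_of(work, struct demo_cq, comptask);

	/* Invoke the consumer's completion handler from thread context. */
	(void)cq;
}

static void demo_cq_setup(struct demo_cq *cq, struct demo_dev *dd)
{
	cq->dd = dd;
	init_kthread_work(&cq->comptask, demo_send_complete);
}

/* Called from the interrupt/completion path. */
static void demo_cq_trigger(struct demo_cq *cq)
{
	struct kthread_worker *worker;

	smp_rmb();			/* pairs with smp_wmb() in demo_worker_exit() */
	worker = cq->dd->worker;
	if (likely(worker))
		queue_kthread_work(worker, &cq->comptask);
}

static int demo_worker_init(struct demo_dev *dd)
{
	struct task_struct *task;

	dd->worker = kzalloc(sizeof(*dd->worker), GFP_KERNEL);
	if (!dd->worker)
		return -ENOMEM;
	init_kthread_worker(dd->worker);
	task = kthread_create_on_node(kthread_worker_fn, dd->worker,
				      dd->node, "demo_cq%d", dd->unit);
	if (IS_ERR(task)) {
		kfree(dd->worker);
		dd->worker = NULL;
		return PTR_ERR(task);
	}
	wake_up_process(task);
	return 0;
}

static void demo_worker_exit(struct demo_dev *dd)
{
	struct kthread_worker *worker = dd->worker;

	if (!worker)
		return;
	dd->worker = NULL;		/* block further queuing */
	smp_wmb();
	flush_kthread_worker(worker);	/* drain anything already queued */
	kthread_stop(worker->task);
	kfree(worker);
}

With rdmavt taking over CQ handling, this machinery no longer needs to live in the hardware driver, which is why the qib_cq code and its init/teardown calls are removed in this series.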
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c index 086616d07..a014fd4cd 100644 --- a/drivers/infiniband/hw/qib/qib_intr.c +++ b/drivers/infiniband/hw/qib/qib_intr.c @@ -74,7 +74,7 @@ static void signal_ib_event(struct qib_pportdata *ppd, enum ib_event_type ev) struct ib_event event; struct qib_devdata *dd = ppd->dd; - event.device = &dd->verbs_dev.ibdev; + event.device = &dd->verbs_dev.rdi.ibdev; event.element.port_num = ppd->port; event.event = ev; ib_dispatch_event(&event); diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c index d725c5655..2c3c93572 100644 --- a/drivers/infiniband/hw/qib/qib_keys.c +++ b/drivers/infiniband/hw/qib/qib_keys.c @@ -46,20 +46,20 @@ * */ -int qib_alloc_lkey(struct qib_mregion *mr, int dma_region) +int qib_alloc_lkey(struct rvt_mregion *mr, int dma_region) { unsigned long flags; u32 r; u32 n; int ret = 0; struct qib_ibdev *dev = to_idev(mr->pd->device); - struct qib_lkey_table *rkt = &dev->lk_table; + struct rvt_lkey_table *rkt = &dev->lk_table; spin_lock_irqsave(&rkt->lock, flags); /* special case for dma_mr lkey == 0 */ if (dma_region) { - struct qib_mregion *tmr; + struct rvt_mregion *tmr; tmr = rcu_access_pointer(dev->dma_mr); if (!tmr) { @@ -90,8 +90,8 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region) * bits are capped in qib_verbs.c to insure enough bits * for generation number */ - mr->lkey = (r << (32 - ib_qib_lkey_table_size)) | - ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen) + mr->lkey = (r << (32 - ib_rvt_lkey_table_size)) | + ((((1 << (24 - ib_rvt_lkey_table_size)) - 1) & rkt->gen) << 8); if (mr->lkey == 0) { mr->lkey |= 1 << 8; @@ -114,13 +114,13 @@ bail: * qib_free_lkey - free an lkey * @mr: mr to free from tables */ -void qib_free_lkey(struct qib_mregion *mr) +void qib_free_lkey(struct rvt_mregion *mr) { unsigned long flags; u32 lkey = mr->lkey; u32 r; struct qib_ibdev *dev = to_idev(mr->pd->device); - struct qib_lkey_table *rkt = &dev->lk_table; + struct rvt_lkey_table *rkt = &dev->lk_table; spin_lock_irqsave(&rkt->lock, flags); if (!mr->lkey_published) @@ -128,7 +128,7 @@ void qib_free_lkey(struct qib_mregion *mr) if (lkey == 0) RCU_INIT_POINTER(dev->dma_mr, NULL); else { - r = lkey >> (32 - ib_qib_lkey_table_size); + r = lkey >> (32 - ib_rvt_lkey_table_size); RCU_INIT_POINTER(rkt->table[r], NULL); } qib_put_mr(mr); @@ -138,105 +138,6 @@ out: } /** - * qib_lkey_ok - check IB SGE for validity and initialize - * @rkt: table containing lkey to check SGE against - * @pd: protection domain - * @isge: outgoing internal SGE - * @sge: SGE to check - * @acc: access flags - * - * Return 1 if valid and successful, otherwise returns 0. - * - * increments the reference count upon success - * - * Check the IB SGE for validity and initialize our internal version - * of it. - */ -int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, - struct qib_sge *isge, struct ib_sge *sge, int acc) -{ - struct qib_mregion *mr; - unsigned n, m; - size_t off; - - /* - * We use LKEY == zero for kernel virtual addresses - * (see qib_get_dma_mr and qib_dma.c). 
- */ - rcu_read_lock(); - if (sge->lkey == 0) { - struct qib_ibdev *dev = to_idev(pd->ibpd.device); - - if (pd->user) - goto bail; - mr = rcu_dereference(dev->dma_mr); - if (!mr) - goto bail; - if (unlikely(!atomic_inc_not_zero(&mr->refcount))) - goto bail; - rcu_read_unlock(); - - isge->mr = mr; - isge->vaddr = (void *) sge->addr; - isge->length = sge->length; - isge->sge_length = sge->length; - isge->m = 0; - isge->n = 0; - goto ok; - } - mr = rcu_dereference( - rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]); - if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd)) - goto bail; - - off = sge->addr - mr->user_base; - if (unlikely(sge->addr < mr->user_base || - off + sge->length > mr->length || - (mr->access_flags & acc) != acc)) - goto bail; - if (unlikely(!atomic_inc_not_zero(&mr->refcount))) - goto bail; - rcu_read_unlock(); - - off += mr->offset; - if (mr->page_shift) { - /* - page sizes are uniform power of 2 so no loop is necessary - entries_spanned_by_off is the number of times the loop below - would have executed. - */ - size_t entries_spanned_by_off; - - entries_spanned_by_off = off >> mr->page_shift; - off -= (entries_spanned_by_off << mr->page_shift); - m = entries_spanned_by_off/QIB_SEGSZ; - n = entries_spanned_by_off%QIB_SEGSZ; - } else { - m = 0; - n = 0; - while (off >= mr->map[m]->segs[n].length) { - off -= mr->map[m]->segs[n].length; - n++; - if (n >= QIB_SEGSZ) { - m++; - n = 0; - } - } - } - isge->mr = mr; - isge->vaddr = mr->map[m]->segs[n].vaddr + off; - isge->length = mr->map[m]->segs[n].length - off; - isge->sge_length = sge->length; - isge->m = m; - isge->n = n; -ok: - return 1; -bail: - rcu_read_unlock(); - return 0; -} - -/** * qib_rkey_ok - check the IB virtual address, length, and RKEY * @qp: qp for validation * @sge: SGE state @@ -249,11 +150,11 @@ bail: * * increments the reference count upon success */ -int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, +int qib_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge, u32 len, u64 vaddr, u32 rkey, int acc) { - struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; - struct qib_mregion *mr; + struct rvt_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; + struct rvt_mregion *mr; unsigned n, m; size_t off; @@ -263,7 +164,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, */ rcu_read_lock(); if (rkey == 0) { - struct qib_pd *pd = to_ipd(qp->ibqp.pd); + struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd); struct qib_ibdev *dev = to_idev(pd->ibpd.device); if (pd->user) @@ -285,7 +186,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, } mr = rcu_dereference( - rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]); + rkt->table[(rkey >> (32 - ib_rvt_lkey_table_size))]); if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) goto bail; @@ -308,15 +209,15 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, entries_spanned_by_off = off >> mr->page_shift; off -= (entries_spanned_by_off << mr->page_shift); - m = entries_spanned_by_off/QIB_SEGSZ; - n = entries_spanned_by_off%QIB_SEGSZ; + m = entries_spanned_by_off / RVT_SEGSZ; + n = entries_spanned_by_off % RVT_SEGSZ; } else { m = 0; n = 0; while (off >= mr->map[m]->segs[n].length) { off -= mr->map[m]->segs[n].length; n++; - if (n >= QIB_SEGSZ) { + if (n >= RVT_SEGSZ) { m++; n = 0; } @@ -335,58 +236,3 @@ bail: return 0; } -/* - * Initialize the memory region specified by the work request. 
- */ -int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr) -{ - struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; - struct qib_pd *pd = to_ipd(qp->ibqp.pd); - struct qib_mr *mr = to_imr(wr->mr); - struct qib_mregion *mrg; - u32 key = wr->key; - unsigned i, n, m; - int ret = -EINVAL; - unsigned long flags; - u64 *page_list; - size_t ps; - - spin_lock_irqsave(&rkt->lock, flags); - if (pd->user || key == 0) - goto bail; - - mrg = rcu_dereference_protected( - rkt->table[(key >> (32 - ib_qib_lkey_table_size))], - lockdep_is_held(&rkt->lock)); - if (unlikely(mrg == NULL || qp->ibqp.pd != mrg->pd)) - goto bail; - - if (mr->npages > mrg->max_segs) - goto bail; - - ps = mr->ibmr.page_size; - if (mr->ibmr.length > ps * mr->npages) - goto bail; - - mrg->user_base = mr->ibmr.iova; - mrg->iova = mr->ibmr.iova; - mrg->lkey = key; - mrg->length = mr->ibmr.length; - mrg->access_flags = wr->access; - page_list = mr->pages; - m = 0; - n = 0; - for (i = 0; i < mr->npages; i++) { - mrg->map[m]->segs[n].vaddr = (void *) page_list[i]; - mrg->map[m]->segs[n].length = ps; - if (++n == QIB_SEGSZ) { - m++; - n = 0; - } - } - - ret = 0; -bail: - spin_unlock_irqrestore(&rkt->lock, flags); - return ret; -} diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 9625e7c43..0bd18375d 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -70,7 +70,7 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) unsigned long flags; unsigned long timeout; - agent = ibp->send_agent; + agent = ibp->rvp.send_agent; if (!agent) return; @@ -79,7 +79,8 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) return; /* o14-2 */ - if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout)) + if (ibp->rvp.trap_timeout && + time_before(jiffies, ibp->rvp.trap_timeout)) return; send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR, @@ -93,42 +94,42 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; smp->class_version = 1; smp->method = IB_MGMT_METHOD_TRAP; - ibp->tid++; - smp->tid = cpu_to_be64(ibp->tid); + ibp->rvp.tid++; + smp->tid = cpu_to_be64(ibp->rvp.tid); smp->attr_id = IB_SMP_ATTR_NOTICE; /* o14-1: smp->mkey = 0; */ memcpy(smp->data, data, len); - spin_lock_irqsave(&ibp->lock, flags); - if (!ibp->sm_ah) { - if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) { + spin_lock_irqsave(&ibp->rvp.lock, flags); + if (!ibp->rvp.sm_ah) { + if (ibp->rvp.sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) { struct ib_ah *ah; - ah = qib_create_qp0_ah(ibp, ibp->sm_lid); + ah = qib_create_qp0_ah(ibp, ibp->rvp.sm_lid); if (IS_ERR(ah)) ret = PTR_ERR(ah); else { send_buf->ah = ah; - ibp->sm_ah = to_iah(ah); + ibp->rvp.sm_ah = ibah_to_rvtah(ah); ret = 0; } } else ret = -EINVAL; } else { - send_buf->ah = &ibp->sm_ah->ibah; + send_buf->ah = &ibp->rvp.sm_ah->ibah; ret = 0; } - spin_unlock_irqrestore(&ibp->lock, flags); + spin_unlock_irqrestore(&ibp->rvp.lock, flags); if (!ret) ret = ib_post_send_mad(send_buf, NULL); if (!ret) { /* 4.096 usec. 
*/ - timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000; - ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout); + timeout = (4096 * (1UL << ibp->rvp.subnet_timeout)) / 1000; + ibp->rvp.trap_timeout = jiffies + usecs_to_jiffies(timeout); } else { ib_free_send_mad(send_buf); - ibp->trap_timeout = 0; + ibp->rvp.trap_timeout = 0; } } @@ -141,10 +142,10 @@ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl, struct ib_mad_notice_attr data; if (trap_num == IB_NOTICE_TRAP_BAD_PKEY) - ibp->pkey_violations++; + ibp->rvp.pkey_violations++; else - ibp->qkey_violations++; - ibp->n_pkt_drops++; + ibp->rvp.qkey_violations++; + ibp->rvp.n_pkt_drops++; /* Send violation trap */ data.generic_type = IB_NOTICE_TYPE_SECURITY; @@ -205,8 +206,11 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp) /* * Send a Port Capability Mask Changed trap (ch. 14.3.11). */ -void qib_cap_mask_chg(struct qib_ibport *ibp) +void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num) { + struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi); + struct qib_devdata *dd = dd_from_dev(ibdev); + struct qib_ibport *ibp = &dd->pport[port_num - 1].ibport_data; struct ib_mad_notice_attr data; data.generic_type = IB_NOTICE_TYPE_INFO; @@ -217,8 +221,8 @@ void qib_cap_mask_chg(struct qib_ibport *ibp) data.toggle_count = 0; memset(&data.details, 0, sizeof(data.details)); data.details.ntc_144.lid = data.issuer_lid; - data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags); - + data.details.ntc_144.new_cap_mask = + cpu_to_be32(ibp->rvp.port_cap_flags); qib_send_trap(ibp, &data, sizeof(data)); } @@ -409,37 +413,38 @@ static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags) int ret = 0; /* Is the mkey in the process of expiring? */ - if (ibp->mkey_lease_timeout && - time_after_eq(jiffies, ibp->mkey_lease_timeout)) { + if (ibp->rvp.mkey_lease_timeout && + time_after_eq(jiffies, ibp->rvp.mkey_lease_timeout)) { /* Clear timeout and mkey protection field. */ - ibp->mkey_lease_timeout = 0; - ibp->mkeyprot = 0; + ibp->rvp.mkey_lease_timeout = 0; + ibp->rvp.mkeyprot = 0; } - if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 || - ibp->mkey == smp->mkey) + if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->rvp.mkey == 0 || + ibp->rvp.mkey == smp->mkey) valid_mkey = 1; /* Unset lease timeout on any valid Get/Set/TrapRepress */ - if (valid_mkey && ibp->mkey_lease_timeout && + if (valid_mkey && ibp->rvp.mkey_lease_timeout && (smp->method == IB_MGMT_METHOD_GET || smp->method == IB_MGMT_METHOD_SET || smp->method == IB_MGMT_METHOD_TRAP_REPRESS)) - ibp->mkey_lease_timeout = 0; + ibp->rvp.mkey_lease_timeout = 0; if (!valid_mkey) { switch (smp->method) { case IB_MGMT_METHOD_GET: /* Bad mkey not a violation below level 2 */ - if (ibp->mkeyprot < 2) + if (ibp->rvp.mkeyprot < 2) break; case IB_MGMT_METHOD_SET: case IB_MGMT_METHOD_TRAP_REPRESS: - if (ibp->mkey_violations != 0xFFFF) - ++ibp->mkey_violations; - if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period) - ibp->mkey_lease_timeout = jiffies + - ibp->mkey_lease_period * HZ; + if (ibp->rvp.mkey_violations != 0xFFFF) + ++ibp->rvp.mkey_violations; + if (!ibp->rvp.mkey_lease_timeout && + ibp->rvp.mkey_lease_period) + ibp->rvp.mkey_lease_timeout = jiffies + + ibp->rvp.mkey_lease_period * HZ; /* Generate a trap notice. */ qib_bad_mkey(ibp, smp); ret = 1; @@ -489,15 +494,15 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, /* Only return the mkey if the protection field allows it. 
*/ if (!(smp->method == IB_MGMT_METHOD_GET && - ibp->mkey != smp->mkey && - ibp->mkeyprot == 1)) - pip->mkey = ibp->mkey; - pip->gid_prefix = ibp->gid_prefix; + ibp->rvp.mkey != smp->mkey && + ibp->rvp.mkeyprot == 1)) + pip->mkey = ibp->rvp.mkey; + pip->gid_prefix = ibp->rvp.gid_prefix; pip->lid = cpu_to_be16(ppd->lid); - pip->sm_lid = cpu_to_be16(ibp->sm_lid); - pip->cap_mask = cpu_to_be32(ibp->port_cap_flags); + pip->sm_lid = cpu_to_be16(ibp->rvp.sm_lid); + pip->cap_mask = cpu_to_be32(ibp->rvp.port_cap_flags); /* pip->diag_code; */ - pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period); + pip->mkey_lease_period = cpu_to_be16(ibp->rvp.mkey_lease_period); pip->local_port_num = port; pip->link_width_enabled = ppd->link_width_enabled; pip->link_width_supported = ppd->link_width_supported; @@ -508,7 +513,7 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, pip->portphysstate_linkdown = (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) | (get_linkdowndefaultstate(ppd) ? 1 : 2); - pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc; + pip->mkeyprot_resv_lmc = (ibp->rvp.mkeyprot << 6) | ppd->lmc; pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) | ppd->link_speed_enabled; switch (ppd->ibmtu) { @@ -529,9 +534,9 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, mtu = IB_MTU_256; break; } - pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl; + pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->rvp.sm_sl; pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */ - pip->vl_high_limit = ibp->vl_high_limit; + pip->vl_high_limit = ibp->rvp.vl_high_limit; pip->vl_arb_high_cap = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP); pip->vl_arb_low_cap = @@ -542,20 +547,20 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, /* pip->vlstallcnt_hoqlife; */ pip->operationalvl_pei_peo_fpi_fpo = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4; - pip->mkey_violations = cpu_to_be16(ibp->mkey_violations); + pip->mkey_violations = cpu_to_be16(ibp->rvp.mkey_violations); /* P_KeyViolations are counted by hardware. */ - pip->pkey_violations = cpu_to_be16(ibp->pkey_violations); - pip->qkey_violations = cpu_to_be16(ibp->qkey_violations); + pip->pkey_violations = cpu_to_be16(ibp->rvp.pkey_violations); + pip->qkey_violations = cpu_to_be16(ibp->rvp.qkey_violations); /* Only the hardware GUID is supported for now */ pip->guid_cap = QIB_GUIDS_PER_PORT; - pip->clientrereg_resv_subnetto = ibp->subnet_timeout; + pip->clientrereg_resv_subnetto = ibp->rvp.subnet_timeout; /* 32.768 usec. response time (guessing) */ pip->resv_resptimevalue = 3; pip->localphyerrors_overrunerrors = (get_phyerrthreshold(ppd) << 4) | get_overrunthreshold(ppd); /* pip->max_credit_hint; */ - if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) { + if (ibp->rvp.port_cap_flags & IB_PORT_LINK_LATENCY_SUP) { u32 v; v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY); @@ -685,13 +690,13 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, event.device = ibdev; event.element.port_num = port; - ibp->mkey = pip->mkey; - ibp->gid_prefix = pip->gid_prefix; - ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period); + ibp->rvp.mkey = pip->mkey; + ibp->rvp.gid_prefix = pip->gid_prefix; + ibp->rvp.mkey_lease_period = be16_to_cpu(pip->mkey_lease_period); lid = be16_to_cpu(pip->lid); /* Must be a valid unicast LID address. 
*/ - if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE) + if (lid == 0 || lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) smp->status |= IB_SMP_INVALID_FIELD; else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) { if (ppd->lid != lid) @@ -706,21 +711,21 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, smlid = be16_to_cpu(pip->sm_lid); msl = pip->neighbormtu_mastersmsl & 0xF; /* Must be a valid unicast LID address. */ - if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE) + if (smlid == 0 || smlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) smp->status |= IB_SMP_INVALID_FIELD; - else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { - spin_lock_irqsave(&ibp->lock, flags); - if (ibp->sm_ah) { - if (smlid != ibp->sm_lid) - ibp->sm_ah->attr.dlid = smlid; - if (msl != ibp->sm_sl) - ibp->sm_ah->attr.sl = msl; + else if (smlid != ibp->rvp.sm_lid || msl != ibp->rvp.sm_sl) { + spin_lock_irqsave(&ibp->rvp.lock, flags); + if (ibp->rvp.sm_ah) { + if (smlid != ibp->rvp.sm_lid) + ibp->rvp.sm_ah->attr.dlid = smlid; + if (msl != ibp->rvp.sm_sl) + ibp->rvp.sm_ah->attr.sl = msl; } - spin_unlock_irqrestore(&ibp->lock, flags); - if (smlid != ibp->sm_lid) - ibp->sm_lid = smlid; - if (msl != ibp->sm_sl) - ibp->sm_sl = msl; + spin_unlock_irqrestore(&ibp->rvp.lock, flags); + if (smlid != ibp->rvp.sm_lid) + ibp->rvp.sm_lid = smlid; + if (msl != ibp->rvp.sm_sl) + ibp->rvp.sm_sl = msl; event.event = IB_EVENT_SM_CHANGE; ib_dispatch_event(&event); } @@ -768,10 +773,10 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, smp->status |= IB_SMP_INVALID_FIELD; } - ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6; - ibp->vl_high_limit = pip->vl_high_limit; + ibp->rvp.mkeyprot = pip->mkeyprot_resv_lmc >> 6; + ibp->rvp.vl_high_limit = pip->vl_high_limit; (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT, - ibp->vl_high_limit); + ibp->rvp.vl_high_limit); mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF); if (mtu == -1) @@ -789,13 +794,13 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, } if (pip->mkey_violations == 0) - ibp->mkey_violations = 0; + ibp->rvp.mkey_violations = 0; if (pip->pkey_violations == 0) - ibp->pkey_violations = 0; + ibp->rvp.pkey_violations = 0; if (pip->qkey_violations == 0) - ibp->qkey_violations = 0; + ibp->rvp.qkey_violations = 0; ore = pip->localphyerrors_overrunerrors; if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF)) @@ -804,7 +809,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, if (set_overrunthreshold(ppd, (ore & 0xF))) smp->status |= IB_SMP_INVALID_FIELD; - ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; + ibp->rvp.subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; /* * Do the port state change now that the other link parameters @@ -1028,7 +1033,7 @@ static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys) (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0); event.event = IB_EVENT_PKEY_CHANGE; - event.device = &dd->verbs_dev.ibdev; + event.device = &dd->verbs_dev.rdi.ibdev; event.element.port_num = port; ib_dispatch_event(&event); } @@ -1062,7 +1067,7 @@ static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, memset(smp->data, 0, sizeof(smp->data)); - if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) + if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP)) smp->status |= IB_SMP_UNSUP_METHOD; else for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2) @@ -1078,7 +1083,7 @@ static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device 
*ibdev, u8 *p = (u8 *) smp->data; unsigned i; - if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) { + if (!(ibp->rvp.port_cap_flags & IB_PORT_SL_MAP_SUP)) { smp->status |= IB_SMP_UNSUP_METHOD; return reply(smp); } @@ -1195,20 +1200,20 @@ static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp, pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; goto bail; } - spin_lock_irqsave(&ibp->lock, flags); + spin_lock_irqsave(&ibp->rvp.lock, flags); p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS); p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); p->counter_width = 4; /* 32 bit counters */ p->counter_mask0_9 = COUNTER_MASK0_9; - p->sample_start = cpu_to_be32(ibp->pma_sample_start); - p->sample_interval = cpu_to_be32(ibp->pma_sample_interval); - p->tag = cpu_to_be16(ibp->pma_tag); - p->counter_select[0] = ibp->pma_counter_select[0]; - p->counter_select[1] = ibp->pma_counter_select[1]; - p->counter_select[2] = ibp->pma_counter_select[2]; - p->counter_select[3] = ibp->pma_counter_select[3]; - p->counter_select[4] = ibp->pma_counter_select[4]; - spin_unlock_irqrestore(&ibp->lock, flags); + p->sample_start = cpu_to_be32(ibp->rvp.pma_sample_start); + p->sample_interval = cpu_to_be32(ibp->rvp.pma_sample_interval); + p->tag = cpu_to_be16(ibp->rvp.pma_tag); + p->counter_select[0] = ibp->rvp.pma_counter_select[0]; + p->counter_select[1] = ibp->rvp.pma_counter_select[1]; + p->counter_select[2] = ibp->rvp.pma_counter_select[2]; + p->counter_select[3] = ibp->rvp.pma_counter_select[3]; + p->counter_select[4] = ibp->rvp.pma_counter_select[4]; + spin_unlock_irqrestore(&ibp->rvp.lock, flags); bail: return reply((struct ib_smp *) pmp); @@ -1233,7 +1238,7 @@ static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp, goto bail; } - spin_lock_irqsave(&ibp->lock, flags); + spin_lock_irqsave(&ibp->rvp.lock, flags); /* Port Sampling code owns the PS* HW counters */ xmit_flags = ppd->cong_stats.flags; @@ -1242,18 +1247,18 @@ static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp, if (status == IB_PMA_SAMPLE_STATUS_DONE || (status == IB_PMA_SAMPLE_STATUS_RUNNING && xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) { - ibp->pma_sample_start = be32_to_cpu(p->sample_start); - ibp->pma_sample_interval = be32_to_cpu(p->sample_interval); - ibp->pma_tag = be16_to_cpu(p->tag); - ibp->pma_counter_select[0] = p->counter_select[0]; - ibp->pma_counter_select[1] = p->counter_select[1]; - ibp->pma_counter_select[2] = p->counter_select[2]; - ibp->pma_counter_select[3] = p->counter_select[3]; - ibp->pma_counter_select[4] = p->counter_select[4]; - dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval, - ibp->pma_sample_start); + ibp->rvp.pma_sample_start = be32_to_cpu(p->sample_start); + ibp->rvp.pma_sample_interval = be32_to_cpu(p->sample_interval); + ibp->rvp.pma_tag = be16_to_cpu(p->tag); + ibp->rvp.pma_counter_select[0] = p->counter_select[0]; + ibp->rvp.pma_counter_select[1] = p->counter_select[1]; + ibp->rvp.pma_counter_select[2] = p->counter_select[2]; + ibp->rvp.pma_counter_select[3] = p->counter_select[3]; + ibp->rvp.pma_counter_select[4] = p->counter_select[4]; + dd->f_set_cntr_sample(ppd, ibp->rvp.pma_sample_interval, + ibp->rvp.pma_sample_start); } - spin_unlock_irqrestore(&ibp->lock, flags); + spin_unlock_irqrestore(&ibp->rvp.lock, flags); ret = pma_get_portsamplescontrol(pmp, ibdev, port); @@ -1357,8 +1362,8 @@ static int pma_get_portsamplesresult(struct ib_pma_mad *pmp, int i; memset(pmp->data, 0, sizeof(pmp->data)); - spin_lock_irqsave(&ibp->lock, flags); - p->tag = cpu_to_be16(ibp->pma_tag); + 
spin_lock_irqsave(&ibp->rvp.lock, flags); + p->tag = cpu_to_be16(ibp->rvp.pma_tag); if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER) p->sample_status = IB_PMA_SAMPLE_STATUS_DONE; else { @@ -1373,11 +1378,11 @@ static int pma_get_portsamplesresult(struct ib_pma_mad *pmp, ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; } } - for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++) + for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++) p->counter[i] = cpu_to_be32( get_cache_hw_sample_counters( - ppd, ibp->pma_counter_select[i])); - spin_unlock_irqrestore(&ibp->lock, flags); + ppd, ibp->rvp.pma_counter_select[i])); + spin_unlock_irqrestore(&ibp->rvp.lock, flags); return reply((struct ib_smp *) pmp); } @@ -1397,8 +1402,8 @@ static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp, /* Port Sampling code owns the PS* HW counters */ memset(pmp->data, 0, sizeof(pmp->data)); - spin_lock_irqsave(&ibp->lock, flags); - p->tag = cpu_to_be16(ibp->pma_tag); + spin_lock_irqsave(&ibp->rvp.lock, flags); + p->tag = cpu_to_be16(ibp->rvp.pma_tag); if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER) p->sample_status = IB_PMA_SAMPLE_STATUS_DONE; else { @@ -1415,11 +1420,11 @@ static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp, ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; } } - for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++) + for (i = 0; i < ARRAY_SIZE(ibp->rvp.pma_counter_select); i++) p->counter[i] = cpu_to_be64( get_cache_hw_sample_counters( - ppd, ibp->pma_counter_select[i])); - spin_unlock_irqrestore(&ibp->lock, flags); + ppd, ibp->rvp.pma_counter_select[i])); + spin_unlock_irqrestore(&ibp->rvp.lock, flags); return reply((struct ib_smp *) pmp); } @@ -1453,7 +1458,7 @@ static int pma_get_portcounters(struct ib_pma_mad *pmp, cntrs.excessive_buffer_overrun_errors -= ibp->z_excessive_buffer_overrun_errors; cntrs.vl15_dropped -= ibp->z_vl15_dropped; - cntrs.vl15_dropped += ibp->n_vl15_dropped; + cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped; memset(pmp->data, 0, sizeof(pmp->data)); @@ -1546,9 +1551,9 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp, pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; qib_get_counters(ppd, &cntrs); - spin_lock_irqsave(&ppd->ibport_data.lock, flags); + spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags); xmit_wait_counter = xmit_wait_get_value_delta(ppd); - spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); + spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags); /* Adjust counters for any resets done. 
*/ cntrs.symbol_error_counter -= ibp->z_symbol_error_counter; @@ -1564,7 +1569,7 @@ static int pma_get_portcounters_cong(struct ib_pma_mad *pmp, cntrs.excessive_buffer_overrun_errors -= ibp->z_excessive_buffer_overrun_errors; cntrs.vl15_dropped -= ibp->z_vl15_dropped; - cntrs.vl15_dropped += ibp->n_vl15_dropped; + cntrs.vl15_dropped += ibp->rvp.n_vl15_dropped; cntrs.port_xmit_data -= ibp->z_port_xmit_data; cntrs.port_rcv_data -= ibp->z_port_rcv_data; cntrs.port_xmit_packets -= ibp->z_port_xmit_packets; @@ -1743,7 +1748,7 @@ static int pma_set_portcounters(struct ib_pma_mad *pmp, cntrs.excessive_buffer_overrun_errors; if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) { - ibp->n_vl15_dropped = 0; + ibp->rvp.n_vl15_dropped = 0; ibp->z_vl15_dropped = cntrs.vl15_dropped; } @@ -1778,11 +1783,11 @@ static int pma_set_portcounters_cong(struct ib_pma_mad *pmp, ret = pma_get_portcounters_cong(pmp, ibdev, port); if (counter_select & IB_PMA_SEL_CONG_XMIT) { - spin_lock_irqsave(&ppd->ibport_data.lock, flags); + spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags); ppd->cong_stats.counter = 0; dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0); - spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); + spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags); } if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) { ibp->z_port_xmit_data = cntrs.port_xmit_data; @@ -1806,7 +1811,7 @@ static int pma_set_portcounters_cong(struct ib_pma_mad *pmp, cntrs.local_link_integrity_errors; ibp->z_excessive_buffer_overrun_errors = cntrs.excessive_buffer_overrun_errors; - ibp->n_vl15_dropped = 0; + ibp->rvp.n_vl15_dropped = 0; ibp->z_vl15_dropped = cntrs.vl15_dropped; } @@ -1916,12 +1921,12 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, ret = subn_get_vl_arb(smp, ibdev, port); goto bail; case IB_SMP_ATTR_SM_INFO: - if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) { + if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) { ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; goto bail; } - if (ibp->port_cap_flags & IB_PORT_SM) { + if (ibp->rvp.port_cap_flags & IB_PORT_SM) { ret = IB_MAD_RESULT_SUCCESS; goto bail; } @@ -1950,12 +1955,12 @@ static int process_subn(struct ib_device *ibdev, int mad_flags, ret = subn_set_vl_arb(smp, ibdev, port); goto bail; case IB_SMP_ATTR_SM_INFO: - if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) { + if (ibp->rvp.port_cap_flags & IB_PORT_SM_DISABLED) { ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; goto bail; } - if (ibp->port_cap_flags & IB_PORT_SM) { + if (ibp->rvp.port_cap_flags & IB_PORT_SM) { ret = IB_MAD_RESULT_SUCCESS; goto bail; } @@ -2443,12 +2448,6 @@ bail: return ret; } -static void send_handler(struct ib_mad_agent *agent, - struct ib_mad_send_wc *mad_send_wc) -{ - ib_free_send_mad(mad_send_wc->send_buf); -} - static void xmit_wait_timer_func(unsigned long opaque) { struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; @@ -2456,7 +2455,7 @@ static void xmit_wait_timer_func(unsigned long opaque) unsigned long flags; u8 status; - spin_lock_irqsave(&ppd->ibport_data.lock, flags); + spin_lock_irqsave(&ppd->ibport_data.rvp.lock, flags); if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) { status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); if (status == IB_PMA_SAMPLE_STATUS_DONE) { @@ -2469,74 +2468,35 @@ static void xmit_wait_timer_func(unsigned long opaque) ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd); dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0); done: - spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); + 
spin_unlock_irqrestore(&ppd->ibport_data.rvp.lock, flags); mod_timer(&ppd->cong_stats.timer, jiffies + HZ); } -int qib_create_agents(struct qib_ibdev *dev) +void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx) { - struct qib_devdata *dd = dd_from_dev(dev); - struct ib_mad_agent *agent; - struct qib_ibport *ibp; - int p; - int ret; + struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi); + struct qib_devdata *dd = container_of(ibdev, + struct qib_devdata, verbs_dev); - for (p = 0; p < dd->num_pports; p++) { - ibp = &dd->pport[p].ibport_data; - agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI, - NULL, 0, send_handler, - NULL, NULL, 0); - if (IS_ERR(agent)) { - ret = PTR_ERR(agent); - goto err; - } - - /* Initialize xmit_wait structure */ - dd->pport[p].cong_stats.counter = 0; - init_timer(&dd->pport[p].cong_stats.timer); - dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func; - dd->pport[p].cong_stats.timer.data = - (unsigned long)(&dd->pport[p]); - dd->pport[p].cong_stats.timer.expires = 0; - add_timer(&dd->pport[p].cong_stats.timer); - - ibp->send_agent = agent; - } - - return 0; - -err: - for (p = 0; p < dd->num_pports; p++) { - ibp = &dd->pport[p].ibport_data; - if (ibp->send_agent) { - agent = ibp->send_agent; - ibp->send_agent = NULL; - ib_unregister_mad_agent(agent); - } - } - - return ret; + /* Initialize xmit_wait structure */ + dd->pport[port_idx].cong_stats.counter = 0; + init_timer(&dd->pport[port_idx].cong_stats.timer); + dd->pport[port_idx].cong_stats.timer.function = xmit_wait_timer_func; + dd->pport[port_idx].cong_stats.timer.data = + (unsigned long)(&dd->pport[port_idx]); + dd->pport[port_idx].cong_stats.timer.expires = 0; + add_timer(&dd->pport[port_idx].cong_stats.timer); } -void qib_free_agents(struct qib_ibdev *dev) +void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx) { - struct qib_devdata *dd = dd_from_dev(dev); - struct ib_mad_agent *agent; - struct qib_ibport *ibp; - int p; - - for (p = 0; p < dd->num_pports; p++) { - ibp = &dd->pport[p].ibport_data; - if (ibp->send_agent) { - agent = ibp->send_agent; - ibp->send_agent = NULL; - ib_unregister_mad_agent(agent); - } - if (ibp->sm_ah) { - ib_destroy_ah(&ibp->sm_ah->ibah); - ibp->sm_ah = NULL; - } - if (dd->pport[p].cong_stats.timer.data) - del_timer_sync(&dd->pport[p].cong_stats.timer); - } + struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi); + struct qib_devdata *dd = container_of(ibdev, + struct qib_devdata, verbs_dev); + + if (dd->pport[port_idx].cong_stats.timer.data) + del_timer_sync(&dd->pport[port_idx].cong_stats.timer); + + if (dd->pport[port_idx].ibport_data.smi_ah) + ib_destroy_ah(&dd->pport[port_idx].ibport_data.smi_ah->ibah); } diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c deleted file mode 100644 index 34927b700..000000000 --- a/drivers/infiniband/hw/qib/qib_mmap.c +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/vmalloc.h> -#include <linux/mm.h> -#include <linux/errno.h> -#include <asm/pgtable.h> - -#include "qib_verbs.h" - -/** - * qib_release_mmap_info - free mmap info structure - * @ref: a pointer to the kref within struct qib_mmap_info - */ -void qib_release_mmap_info(struct kref *ref) -{ - struct qib_mmap_info *ip = - container_of(ref, struct qib_mmap_info, ref); - struct qib_ibdev *dev = to_idev(ip->context->device); - - spin_lock_irq(&dev->pending_lock); - list_del(&ip->pending_mmaps); - spin_unlock_irq(&dev->pending_lock); - - vfree(ip->obj); - kfree(ip); -} - -/* - * open and close keep track of how many times the CQ is mapped, - * to avoid releasing it. - */ -static void qib_vma_open(struct vm_area_struct *vma) -{ - struct qib_mmap_info *ip = vma->vm_private_data; - - kref_get(&ip->ref); -} - -static void qib_vma_close(struct vm_area_struct *vma) -{ - struct qib_mmap_info *ip = vma->vm_private_data; - - kref_put(&ip->ref, qib_release_mmap_info); -} - -static const struct vm_operations_struct qib_vm_ops = { - .open = qib_vma_open, - .close = qib_vma_close, -}; - -/** - * qib_mmap - create a new mmap region - * @context: the IB user context of the process making the mmap() call - * @vma: the VMA to be initialized - * Return zero if the mmap is OK. Otherwise, return an errno. - */ -int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) -{ - struct qib_ibdev *dev = to_idev(context->device); - unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; - unsigned long size = vma->vm_end - vma->vm_start; - struct qib_mmap_info *ip, *pp; - int ret = -EINVAL; - - /* - * Search the device's list of objects waiting for a mmap call. - * Normally, this list is very short since a call to create a - * CQ, QP, or SRQ is soon followed by a call to mmap(). - */ - spin_lock_irq(&dev->pending_lock); - list_for_each_entry_safe(ip, pp, &dev->pending_mmaps, - pending_mmaps) { - /* Only the creator is allowed to mmap the object */ - if (context != ip->context || (__u64) offset != ip->offset) - continue; - /* Don't allow a mmap larger than the object. 
*/ - if (size > ip->size) - break; - - list_del_init(&ip->pending_mmaps); - spin_unlock_irq(&dev->pending_lock); - - ret = remap_vmalloc_range(vma, ip->obj, 0); - if (ret) - goto done; - vma->vm_ops = &qib_vm_ops; - vma->vm_private_data = ip; - qib_vma_open(vma); - goto done; - } - spin_unlock_irq(&dev->pending_lock); -done: - return ret; -} - -/* - * Allocate information for qib_mmap - */ -struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, - u32 size, - struct ib_ucontext *context, - void *obj) { - struct qib_mmap_info *ip; - - ip = kmalloc(sizeof(*ip), GFP_KERNEL); - if (!ip) - goto bail; - - size = PAGE_ALIGN(size); - - spin_lock_irq(&dev->mmap_offset_lock); - if (dev->mmap_offset == 0) - dev->mmap_offset = PAGE_SIZE; - ip->offset = dev->mmap_offset; - dev->mmap_offset += size; - spin_unlock_irq(&dev->mmap_offset_lock); - - INIT_LIST_HEAD(&ip->pending_mmaps); - ip->size = size; - ip->context = context; - ip->obj = obj; - kref_init(&ip->ref); - -bail: - return ip; -} - -void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip, - u32 size, void *obj) -{ - size = PAGE_ALIGN(size); - - spin_lock_irq(&dev->mmap_offset_lock); - if (dev->mmap_offset == 0) - dev->mmap_offset = PAGE_SIZE; - ip->offset = dev->mmap_offset; - dev->mmap_offset += size; - spin_unlock_irq(&dev->mmap_offset_lock); - - ip->size = size; - ip->obj = obj; -} diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c deleted file mode 100644 index 5f53304e8..000000000 --- a/drivers/infiniband/hw/qib/qib_mr.c +++ /dev/null @@ -1,490 +0,0 @@ -/* - * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. - * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include <rdma/ib_umem.h> -#include <rdma/ib_smi.h> - -#include "qib.h" - -/* Fast memory region */ -struct qib_fmr { - struct ib_fmr ibfmr; - struct qib_mregion mr; /* must be last */ -}; - -static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr) -{ - return container_of(ibfmr, struct qib_fmr, ibfmr); -} - -static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd, - int count) -{ - int m, i = 0; - int rval = 0; - - m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; - for (; i < m; i++) { - mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL); - if (!mr->map[i]) - goto bail; - } - mr->mapsz = m; - init_completion(&mr->comp); - /* count returning the ptr to user */ - atomic_set(&mr->refcount, 1); - mr->pd = pd; - mr->max_segs = count; -out: - return rval; -bail: - while (i) - kfree(mr->map[--i]); - rval = -ENOMEM; - goto out; -} - -static void deinit_qib_mregion(struct qib_mregion *mr) -{ - int i = mr->mapsz; - - mr->mapsz = 0; - while (i) - kfree(mr->map[--i]); -} - - -/** - * qib_get_dma_mr - get a DMA memory region - * @pd: protection domain for this memory region - * @acc: access flags - * - * Returns the memory region on success, otherwise returns an errno. - * Note that all DMA addresses should be created via the - * struct ib_dma_mapping_ops functions (see qib_dma.c). - */ -struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc) -{ - struct qib_mr *mr = NULL; - struct ib_mr *ret; - int rval; - - if (to_ipd(pd)->user) { - ret = ERR_PTR(-EPERM); - goto bail; - } - - mr = kzalloc(sizeof(*mr), GFP_KERNEL); - if (!mr) { - ret = ERR_PTR(-ENOMEM); - goto bail; - } - - rval = init_qib_mregion(&mr->mr, pd, 0); - if (rval) { - ret = ERR_PTR(rval); - goto bail; - } - - - rval = qib_alloc_lkey(&mr->mr, 1); - if (rval) { - ret = ERR_PTR(rval); - goto bail_mregion; - } - - mr->mr.access_flags = acc; - ret = &mr->ibmr; -done: - return ret; - -bail_mregion: - deinit_qib_mregion(&mr->mr); -bail: - kfree(mr); - goto done; -} - -static struct qib_mr *alloc_mr(int count, struct ib_pd *pd) -{ - struct qib_mr *mr; - int rval = -ENOMEM; - int m; - - /* Allocate struct plus pointers to first level page tables. */ - m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; - mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL); - if (!mr) - goto bail; - - rval = init_qib_mregion(&mr->mr, pd, count); - if (rval) - goto bail; - - rval = qib_alloc_lkey(&mr->mr, 0); - if (rval) - goto bail_mregion; - mr->ibmr.lkey = mr->mr.lkey; - mr->ibmr.rkey = mr->mr.lkey; -done: - return mr; - -bail_mregion: - deinit_qib_mregion(&mr->mr); -bail: - kfree(mr); - mr = ERR_PTR(rval); - goto done; -} - -/** - * qib_reg_user_mr - register a userspace memory region - * @pd: protection domain for this memory region - * @start: starting userspace address - * @length: length of region to register - * @mr_access_flags: access flags for this memory region - * @udata: unused by the QLogic_IB driver - * - * Returns the memory region on success, otherwise returns an errno. 
- */ -struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, - u64 virt_addr, int mr_access_flags, - struct ib_udata *udata) -{ - struct qib_mr *mr; - struct ib_umem *umem; - struct scatterlist *sg; - int n, m, entry; - struct ib_mr *ret; - - if (length == 0) { - ret = ERR_PTR(-EINVAL); - goto bail; - } - - umem = ib_umem_get(pd->uobject->context, start, length, - mr_access_flags, 0); - if (IS_ERR(umem)) - return (void *) umem; - - n = umem->nmap; - - mr = alloc_mr(n, pd); - if (IS_ERR(mr)) { - ret = (struct ib_mr *)mr; - ib_umem_release(umem); - goto bail; - } - - mr->mr.user_base = start; - mr->mr.iova = virt_addr; - mr->mr.length = length; - mr->mr.offset = ib_umem_offset(umem); - mr->mr.access_flags = mr_access_flags; - mr->umem = umem; - - if (is_power_of_2(umem->page_size)) - mr->mr.page_shift = ilog2(umem->page_size); - m = 0; - n = 0; - for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { - void *vaddr; - - vaddr = page_address(sg_page(sg)); - if (!vaddr) { - ret = ERR_PTR(-EINVAL); - goto bail; - } - mr->mr.map[m]->segs[n].vaddr = vaddr; - mr->mr.map[m]->segs[n].length = umem->page_size; - n++; - if (n == QIB_SEGSZ) { - m++; - n = 0; - } - } - ret = &mr->ibmr; - -bail: - return ret; -} - -/** - * qib_dereg_mr - unregister and free a memory region - * @ibmr: the memory region to free - * - * Returns 0 on success. - * - * Note that this is called to free MRs created by qib_get_dma_mr() - * or qib_reg_user_mr(). - */ -int qib_dereg_mr(struct ib_mr *ibmr) -{ - struct qib_mr *mr = to_imr(ibmr); - int ret = 0; - unsigned long timeout; - - kfree(mr->pages); - qib_free_lkey(&mr->mr); - - qib_put_mr(&mr->mr); /* will set completion if last */ - timeout = wait_for_completion_timeout(&mr->mr.comp, - 5 * HZ); - if (!timeout) { - qib_get_mr(&mr->mr); - ret = -EBUSY; - goto out; - } - deinit_qib_mregion(&mr->mr); - if (mr->umem) - ib_umem_release(mr->umem); - kfree(mr); -out: - return ret; -} - -/* - * Allocate a memory region usable with the - * IB_WR_REG_MR send work request. - * - * Return the memory region on success, otherwise return an errno. - */ -struct ib_mr *qib_alloc_mr(struct ib_pd *pd, - enum ib_mr_type mr_type, - u32 max_num_sg) -{ - struct qib_mr *mr; - - if (mr_type != IB_MR_TYPE_MEM_REG) - return ERR_PTR(-EINVAL); - - mr = alloc_mr(max_num_sg, pd); - if (IS_ERR(mr)) - return (struct ib_mr *)mr; - - mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL); - if (!mr->pages) - goto err; - - return &mr->ibmr; - -err: - qib_dereg_mr(&mr->ibmr); - return ERR_PTR(-ENOMEM); -} - -static int qib_set_page(struct ib_mr *ibmr, u64 addr) -{ - struct qib_mr *mr = to_imr(ibmr); - - if (unlikely(mr->npages == mr->mr.max_segs)) - return -ENOMEM; - - mr->pages[mr->npages++] = addr; - - return 0; -} - -int qib_map_mr_sg(struct ib_mr *ibmr, - struct scatterlist *sg, - int sg_nents) -{ - struct qib_mr *mr = to_imr(ibmr); - - mr->npages = 0; - - return ib_sg_to_pages(ibmr, sg, sg_nents, qib_set_page); -} - -/** - * qib_alloc_fmr - allocate a fast memory region - * @pd: the protection domain for this memory region - * @mr_access_flags: access flags for this memory region - * @fmr_attr: fast memory region attributes - * - * Returns the memory region on success, otherwise returns an errno. - */ -struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, - struct ib_fmr_attr *fmr_attr) -{ - struct qib_fmr *fmr; - int m; - struct ib_fmr *ret; - int rval = -ENOMEM; - - /* Allocate struct plus pointers to first level page tables. 
*/ - m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ; - fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL); - if (!fmr) - goto bail; - - rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages); - if (rval) - goto bail; - - /* - * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey & - * rkey. - */ - rval = qib_alloc_lkey(&fmr->mr, 0); - if (rval) - goto bail_mregion; - fmr->ibfmr.rkey = fmr->mr.lkey; - fmr->ibfmr.lkey = fmr->mr.lkey; - /* - * Resources are allocated but no valid mapping (RKEY can't be - * used). - */ - fmr->mr.access_flags = mr_access_flags; - fmr->mr.max_segs = fmr_attr->max_pages; - fmr->mr.page_shift = fmr_attr->page_shift; - - ret = &fmr->ibfmr; -done: - return ret; - -bail_mregion: - deinit_qib_mregion(&fmr->mr); -bail: - kfree(fmr); - ret = ERR_PTR(rval); - goto done; -} - -/** - * qib_map_phys_fmr - set up a fast memory region - * @ibmfr: the fast memory region to set up - * @page_list: the list of pages to associate with the fast memory region - * @list_len: the number of pages to associate with the fast memory region - * @iova: the virtual address of the start of the fast memory region - * - * This may be called from interrupt context. - */ - -int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, - int list_len, u64 iova) -{ - struct qib_fmr *fmr = to_ifmr(ibfmr); - struct qib_lkey_table *rkt; - unsigned long flags; - int m, n, i; - u32 ps; - int ret; - - i = atomic_read(&fmr->mr.refcount); - if (i > 2) - return -EBUSY; - - if (list_len > fmr->mr.max_segs) { - ret = -EINVAL; - goto bail; - } - rkt = &to_idev(ibfmr->device)->lk_table; - spin_lock_irqsave(&rkt->lock, flags); - fmr->mr.user_base = iova; - fmr->mr.iova = iova; - ps = 1 << fmr->mr.page_shift; - fmr->mr.length = list_len * ps; - m = 0; - n = 0; - for (i = 0; i < list_len; i++) { - fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; - fmr->mr.map[m]->segs[n].length = ps; - if (++n == QIB_SEGSZ) { - m++; - n = 0; - } - } - spin_unlock_irqrestore(&rkt->lock, flags); - ret = 0; - -bail: - return ret; -} - -/** - * qib_unmap_fmr - unmap fast memory regions - * @fmr_list: the list of fast memory regions to unmap - * - * Returns 0 on success. - */ -int qib_unmap_fmr(struct list_head *fmr_list) -{ - struct qib_fmr *fmr; - struct qib_lkey_table *rkt; - unsigned long flags; - - list_for_each_entry(fmr, fmr_list, ibfmr.list) { - rkt = &to_idev(fmr->ibfmr.device)->lk_table; - spin_lock_irqsave(&rkt->lock, flags); - fmr->mr.user_base = 0; - fmr->mr.iova = 0; - fmr->mr.length = 0; - spin_unlock_irqrestore(&rkt->lock, flags); - } - return 0; -} - -/** - * qib_dealloc_fmr - deallocate a fast memory region - * @ibfmr: the fast memory region to deallocate - * - * Returns 0 on success. 
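qib_map_phys_fmr() above drops list_len page addresses into the same two-level map, each covering 1 << page_shift bytes, with the second-level index wrapping at QIB_SEGSZ, so the mapped length ends up as list_len pages. A small self-contained sketch of just that fill loop; SEGSZ and the struct names are stand-ins, and the caller is assumed to have sized 'map' for list_len segments as in the earlier allocation:

#include <stddef.h>
#include <stdint.h>

#define SEGSZ 8		/* stand-in for QIB_SEGSZ */

struct seg     { uint64_t addr; size_t length; };
struct seg_map { struct seg segs[SEGSZ]; };

/* Walk the caller's page list, dropping each page into map[m].segs[n]
 * and wrapping n at SEGSZ; return the total mapped length. */
static size_t fill_fmr_map(struct seg_map *map, const uint64_t *page_list,
			   int list_len, unsigned page_shift)
{
	size_t ps = (size_t)1 << page_shift;
	int m = 0, n = 0, i;

	for (i = 0; i < list_len; i++) {
		map[m].segs[n].addr = page_list[i];
		map[m].segs[n].length = ps;
		if (++n == SEGSZ) {
			m++;
			n = 0;
		}
	}
	return (size_t)list_len * ps;
}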
- */ -int qib_dealloc_fmr(struct ib_fmr *ibfmr) -{ - struct qib_fmr *fmr = to_ifmr(ibfmr); - int ret = 0; - unsigned long timeout; - - qib_free_lkey(&fmr->mr); - qib_put_mr(&fmr->mr); /* will set completion if last */ - timeout = wait_for_completion_timeout(&fmr->mr.comp, - 5 * HZ); - if (!timeout) { - qib_get_mr(&fmr->mr); - ret = -EBUSY; - goto out; - } - deinit_qib_mregion(&fmr->mr); - kfree(fmr); -out: - return ret; -} - -void mr_rcu_callback(struct rcu_head *list) -{ - struct qib_mregion *mr = container_of(list, struct qib_mregion, list); - - complete(&mr->comp); -} diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 3eff35c2d..575b737d9 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -34,32 +34,38 @@ #include <linux/err.h> #include <linux/vmalloc.h> -#include <linux/jhash.h> +#include <rdma/rdma_vt.h> #ifdef CONFIG_DEBUG_FS #include <linux/seq_file.h> #endif #include "qib.h" -#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE) -#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1) +/* + * mask field which was present in now deleted qib_qpn_table + * is not present in rvt_qpn_table. Defining the same field + * as qpt_mask here instead of adding the mask field to + * rvt_qpn_table. + */ +u16 qpt_mask; -static inline unsigned mk_qpn(struct qib_qpn_table *qpt, - struct qpn_map *map, unsigned off) +static inline unsigned mk_qpn(struct rvt_qpn_table *qpt, + struct rvt_qpn_map *map, unsigned off) { - return (map - qpt->map) * BITS_PER_PAGE + off; + return (map - qpt->map) * RVT_BITS_PER_PAGE + off; } -static inline unsigned find_next_offset(struct qib_qpn_table *qpt, - struct qpn_map *map, unsigned off, +static inline unsigned find_next_offset(struct rvt_qpn_table *qpt, + struct rvt_qpn_map *map, unsigned off, unsigned n) { - if (qpt->mask) { + if (qpt_mask) { off++; - if (((off & qpt->mask) >> 1) >= n) - off = (off | qpt->mask) + 2; - } else - off = find_next_zero_bit(map->page, BITS_PER_PAGE, off); + if (((off & qpt_mask) >> 1) >= n) + off = (off | qpt_mask) + 2; + } else { + off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off); + } return off; } @@ -100,7 +106,7 @@ static u32 credit_table[31] = { 32768 /* 1E */ }; -static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map, +static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map, gfp_t gfp) { unsigned long page = get_zeroed_page(gfp); @@ -121,12 +127,15 @@ static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map, * Allocate the next available QPN or * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI. 
*/ -static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, - enum ib_qp_type type, u8 port, gfp_t gfp) +int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, + enum ib_qp_type type, u8 port, gfp_t gfp) { u32 i, offset, max_scan, qpn; - struct qpn_map *map; + struct rvt_qpn_map *map; u32 ret; + struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi); + struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata, + verbs_dev); if (type == IB_QPT_SMI || type == IB_QPT_GSI) { unsigned n; @@ -143,12 +152,12 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, } qpn = qpt->last + 2; - if (qpn >= QPN_MAX) + if (qpn >= RVT_QPN_MAX) qpn = 2; - if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues) - qpn = (qpn | qpt->mask) + 2; - offset = qpn & BITS_PER_PAGE_MASK; - map = &qpt->map[qpn / BITS_PER_PAGE]; + if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues) + qpn = (qpn | qpt_mask) + 2; + offset = qpn & RVT_BITS_PER_PAGE_MASK; + map = &qpt->map[qpn / RVT_BITS_PER_PAGE]; max_scan = qpt->nmaps - !offset; for (i = 0;;) { if (unlikely(!map->page)) { @@ -173,14 +182,14 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt, * We just need to be sure we don't loop * forever. */ - } while (offset < BITS_PER_PAGE && qpn < QPN_MAX); + } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX); /* * In order to keep the number of pages allocated to a * minimum, we scan the all existing pages before increasing * the size of the bitmap table. */ if (++i > max_scan) { - if (qpt->nmaps == QPNMAP_ENTRIES) + if (qpt->nmaps == RVT_QPNMAP_ENTRIES) break; map = &qpt->map[qpt->nmaps++]; offset = 0; @@ -200,706 +209,113 @@ bail: return ret; } -static void free_qpn(struct qib_qpn_table *qpt, u32 qpn) -{ - struct qpn_map *map; - - map = qpt->map + qpn / BITS_PER_PAGE; - if (map->page) - clear_bit(qpn & BITS_PER_PAGE_MASK, map->page); -} - -static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn) -{ - return jhash_1word(qpn, dev->qp_rnd) & - (dev->qp_table_size - 1); -} - - -/* - * Put the QP into the hash table. - * The hash table holds a reference to the QP. - */ -static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp) -{ - struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); - unsigned long flags; - unsigned n = qpn_hash(dev, qp->ibqp.qp_num); - - atomic_inc(&qp->refcount); - spin_lock_irqsave(&dev->qpt_lock, flags); - - if (qp->ibqp.qp_num == 0) - rcu_assign_pointer(ibp->qp0, qp); - else if (qp->ibqp.qp_num == 1) - rcu_assign_pointer(ibp->qp1, qp); - else { - qp->next = dev->qp_table[n]; - rcu_assign_pointer(dev->qp_table[n], qp); - } - - spin_unlock_irqrestore(&dev->qpt_lock, flags); -} - -/* - * Remove the QP from the table so it can't be found asynchronously by - * the receive interrupt routine. 
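The reworked qib_alloc_qpn() above is still, at heart, a bitmap allocator: start just past the last QPN handed out, scan for a clear bit, wrap once, and grow the table only when every existing page has been scanned. A deliberately simplified single-page model of that idea follows; the real code additionally reserves QPN 0/1 for the SMI/GSI QPs, strides by the hardware receive-context mask (qpt_mask), and uses atomic test_and_set_bit() on demand-allocated pages:

#define NBITS 4096	/* one bitmap "page" worth of QPNs for the sketch */
#define WBITS (8 * sizeof(unsigned long))

struct qpn_tbl {
	unsigned long bits[NBITS / WBITS];
	unsigned last;		/* last QPN handed out */
};

static int test_and_set(struct qpn_tbl *t, unsigned n)
{
	unsigned long mask = 1UL << (n % WBITS);
	unsigned long *w = &t->bits[n / WBITS];

	if (*w & mask)
		return 1;
	*w |= mask;
	return 0;
}

/* Hand out the next free QPN >= 2, wrapping once around the space;
 * return -1 if every QPN is in use. */
static int alloc_qpn_simple(struct qpn_tbl *t)
{
	unsigned qpn = t->last + 1, tries;

	for (tries = 0; tries < NBITS; tries++, qpn++) {
		if (qpn >= NBITS)
			qpn = 2;
		if (!test_and_set(t, qpn)) {
			t->last = qpn;
			return (int)qpn;
		}
	}
	return -1;
}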
- */ -static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) -{ - struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); - unsigned n = qpn_hash(dev, qp->ibqp.qp_num); - unsigned long flags; - int removed = 1; - - spin_lock_irqsave(&dev->qpt_lock, flags); - - if (rcu_dereference_protected(ibp->qp0, - lockdep_is_held(&dev->qpt_lock)) == qp) { - RCU_INIT_POINTER(ibp->qp0, NULL); - } else if (rcu_dereference_protected(ibp->qp1, - lockdep_is_held(&dev->qpt_lock)) == qp) { - RCU_INIT_POINTER(ibp->qp1, NULL); - } else { - struct qib_qp *q; - struct qib_qp __rcu **qpp; - - removed = 0; - qpp = &dev->qp_table[n]; - for (; (q = rcu_dereference_protected(*qpp, - lockdep_is_held(&dev->qpt_lock))) != NULL; - qpp = &q->next) - if (q == qp) { - RCU_INIT_POINTER(*qpp, - rcu_dereference_protected(qp->next, - lockdep_is_held(&dev->qpt_lock))); - removed = 1; - break; - } - } - - spin_unlock_irqrestore(&dev->qpt_lock, flags); - if (removed) { - synchronize_rcu(); - atomic_dec(&qp->refcount); - } -} - /** * qib_free_all_qps - check for QPs still in use - * @qpt: the QP table to empty - * - * There should not be any QPs still in use. - * Free memory for table. */ -unsigned qib_free_all_qps(struct qib_devdata *dd) +unsigned qib_free_all_qps(struct rvt_dev_info *rdi) { - struct qib_ibdev *dev = &dd->verbs_dev; - unsigned long flags; - struct qib_qp *qp; + struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, rdi); + struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata, + verbs_dev); unsigned n, qp_inuse = 0; for (n = 0; n < dd->num_pports; n++) { struct qib_ibport *ibp = &dd->pport[n].ibport_data; - if (!qib_mcast_tree_empty(ibp)) - qp_inuse++; rcu_read_lock(); - if (rcu_dereference(ibp->qp0)) + if (rcu_dereference(ibp->rvp.qp[0])) qp_inuse++; - if (rcu_dereference(ibp->qp1)) + if (rcu_dereference(ibp->rvp.qp[1])) qp_inuse++; rcu_read_unlock(); } - - spin_lock_irqsave(&dev->qpt_lock, flags); - for (n = 0; n < dev->qp_table_size; n++) { - qp = rcu_dereference_protected(dev->qp_table[n], - lockdep_is_held(&dev->qpt_lock)); - RCU_INIT_POINTER(dev->qp_table[n], NULL); - - for (; qp; qp = rcu_dereference_protected(qp->next, - lockdep_is_held(&dev->qpt_lock))) - qp_inuse++; - } - spin_unlock_irqrestore(&dev->qpt_lock, flags); - synchronize_rcu(); - return qp_inuse; } -/** - * qib_lookup_qpn - return the QP with the given QPN - * @qpt: the QP table - * @qpn: the QP number to look up - * - * The caller is responsible for decrementing the QP reference count - * when done. 
- */ -struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn) +void qib_notify_qp_reset(struct rvt_qp *qp) { - struct qib_qp *qp = NULL; - - rcu_read_lock(); - if (unlikely(qpn <= 1)) { - if (qpn == 0) - qp = rcu_dereference(ibp->qp0); - else - qp = rcu_dereference(ibp->qp1); - if (qp) - atomic_inc(&qp->refcount); - } else { - struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev; - unsigned n = qpn_hash(dev, qpn); - - for (qp = rcu_dereference(dev->qp_table[n]); qp; - qp = rcu_dereference(qp->next)) - if (qp->ibqp.qp_num == qpn) { - atomic_inc(&qp->refcount); - break; - } - } - rcu_read_unlock(); - return qp; -} - -/** - * qib_reset_qp - initialize the QP state to the reset state - * @qp: the QP to reset - * @type: the QP type - */ -static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type) -{ - qp->remote_qpn = 0; - qp->qkey = 0; - qp->qp_access_flags = 0; - atomic_set(&qp->s_dma_busy, 0); - qp->s_flags &= QIB_S_SIGNAL_REQ_WR; - qp->s_hdrwords = 0; - qp->s_wqe = NULL; - qp->s_draining = 0; - qp->s_next_psn = 0; - qp->s_last_psn = 0; - qp->s_sending_psn = 0; - qp->s_sending_hpsn = 0; - qp->s_psn = 0; - qp->r_psn = 0; - qp->r_msn = 0; - if (type == IB_QPT_RC) { - qp->s_state = IB_OPCODE_RC_SEND_LAST; - qp->r_state = IB_OPCODE_RC_SEND_LAST; - } else { - qp->s_state = IB_OPCODE_UC_SEND_LAST; - qp->r_state = IB_OPCODE_UC_SEND_LAST; - } - qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; - qp->r_nak_state = 0; - qp->r_aflags = 0; - qp->r_flags = 0; - qp->s_head = 0; - qp->s_tail = 0; - qp->s_cur = 0; - qp->s_acked = 0; - qp->s_last = 0; - qp->s_ssn = 1; - qp->s_lsn = 0; - qp->s_mig_state = IB_MIG_MIGRATED; - memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue)); - qp->r_head_ack_queue = 0; - qp->s_tail_ack_queue = 0; - qp->s_num_rd_atomic = 0; - if (qp->r_rq.wq) { - qp->r_rq.wq->head = 0; - qp->r_rq.wq->tail = 0; - } - qp->r_sge.num_sge = 0; -} - -static void clear_mr_refs(struct qib_qp *qp, int clr_sends) -{ - unsigned n; - - if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) - qib_put_ss(&qp->s_rdma_read_sge); - - qib_put_ss(&qp->r_sge); - - if (clr_sends) { - while (qp->s_last != qp->s_head) { - struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last); - unsigned i; - - for (i = 0; i < wqe->wr.num_sge; i++) { - struct qib_sge *sge = &wqe->sg_list[i]; - - qib_put_mr(sge->mr); - } - if (qp->ibqp.qp_type == IB_QPT_UD || - qp->ibqp.qp_type == IB_QPT_SMI || - qp->ibqp.qp_type == IB_QPT_GSI) - atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount); - if (++qp->s_last >= qp->s_size) - qp->s_last = 0; - } - if (qp->s_rdma_mr) { - qib_put_mr(qp->s_rdma_mr); - qp->s_rdma_mr = NULL; - } - } - - if (qp->ibqp.qp_type != IB_QPT_RC) - return; + struct qib_qp_priv *priv = qp->priv; - for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) { - struct qib_ack_entry *e = &qp->s_ack_queue[n]; - - if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST && - e->rdma_sge.mr) { - qib_put_mr(e->rdma_sge.mr); - e->rdma_sge.mr = NULL; - } - } + atomic_set(&priv->s_dma_busy, 0); } -/** - * qib_error_qp - put a QP into the error state - * @qp: the QP to put into the error state - * @err: the receive completion error to signal if a RWQE is active - * - * Flushes both send and receive work queues. - * Returns true if last WQE event should be generated. - * The QP r_lock and s_lock should be held and interrupts disabled. - * If we are already in error state, just return. 
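The deleted qib_lookup_qpn() above resolves QPN 0 and 1 to the per-port special QPs and hashes everything else into a chained table, returning with an extra reference that the caller must drop. A single-threaded model of that lookup, with fake_* types as stand-ins; the real lookup runs under rcu_read_lock(), hashes with jhash_1word(qpn, seed), and bumps an atomic refcount:

#include <stddef.h>
#include <stdint.h>

#define TBL_SIZE 256	/* a power of two, as in the driver's qp_table_size */

struct fake_qp {
	uint32_t qp_num;
	int refcount;
	struct fake_qp *next;	/* hash-chain link */
};

struct fake_dev {
	struct fake_qp *qp_table[TBL_SIZE];
	struct fake_qp *qp0, *qp1;	/* SMI/GSI QPs, kept out of the table */
};

/* Return the QP for 'qpn' with one extra reference taken, or NULL. */
static struct fake_qp *lookup_qpn(struct fake_dev *dev, uint32_t qpn)
{
	struct fake_qp *qp;

	if (qpn <= 1)
		qp = qpn ? dev->qp1 : dev->qp0;
	else
		for (qp = dev->qp_table[qpn & (TBL_SIZE - 1)]; qp; qp = qp->next)
			if (qp->qp_num == qpn)
				break;
	if (qp)
		qp->refcount++;
	return qp;
}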
- */ -int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err) +void qib_notify_error_qp(struct rvt_qp *qp) { + struct qib_qp_priv *priv = qp->priv; struct qib_ibdev *dev = to_idev(qp->ibqp.device); - struct ib_wc wc; - int ret = 0; - - if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET) - goto bail; - - qp->state = IB_QPS_ERR; - - if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) { - qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); - del_timer(&qp->s_timer); - } - - if (qp->s_flags & QIB_S_ANY_WAIT_SEND) - qp->s_flags &= ~QIB_S_ANY_WAIT_SEND; - spin_lock(&dev->pending_lock); - if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) { - qp->s_flags &= ~QIB_S_ANY_WAIT_IO; - list_del_init(&qp->iowait); + spin_lock(&dev->rdi.pending_lock); + if (!list_empty(&priv->iowait) && !(qp->s_flags & RVT_S_BUSY)) { + qp->s_flags &= ~RVT_S_ANY_WAIT_IO; + list_del_init(&priv->iowait); } - spin_unlock(&dev->pending_lock); + spin_unlock(&dev->rdi.pending_lock); - if (!(qp->s_flags & QIB_S_BUSY)) { + if (!(qp->s_flags & RVT_S_BUSY)) { qp->s_hdrwords = 0; if (qp->s_rdma_mr) { - qib_put_mr(qp->s_rdma_mr); + rvt_put_mr(qp->s_rdma_mr); qp->s_rdma_mr = NULL; } - if (qp->s_tx) { - qib_put_txreq(qp->s_tx); - qp->s_tx = NULL; + if (priv->s_tx) { + qib_put_txreq(priv->s_tx); + priv->s_tx = NULL; } } - - /* Schedule the sending tasklet to drain the send work queue. */ - if (qp->s_last != qp->s_head) - qib_schedule_send(qp); - - clear_mr_refs(qp, 0); - - memset(&wc, 0, sizeof(wc)); - wc.qp = &qp->ibqp; - wc.opcode = IB_WC_RECV; - - if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) { - wc.wr_id = qp->r_wr_id; - wc.status = err; - qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); - } - wc.status = IB_WC_WR_FLUSH_ERR; - - if (qp->r_rq.wq) { - struct qib_rwq *wq; - u32 head; - u32 tail; - - spin_lock(&qp->r_rq.lock); - - /* sanity check pointers before trusting them */ - wq = qp->r_rq.wq; - head = wq->head; - if (head >= qp->r_rq.size) - head = 0; - tail = wq->tail; - if (tail >= qp->r_rq.size) - tail = 0; - while (tail != head) { - wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; - if (++tail >= qp->r_rq.size) - tail = 0; - qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); - } - wq->tail = tail; - - spin_unlock(&qp->r_rq.lock); - } else if (qp->ibqp.event_handler) - ret = 1; - -bail: - return ret; } -/** - * qib_modify_qp - modify the attributes of a queue pair - * @ibqp: the queue pair who's attributes we're modifying - * @attr: the new attributes - * @attr_mask: the mask of attributes to modify - * @udata: user data for libibverbs.so - * - * Returns 0 on success, otherwise returns an errno. - */ -int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, - int attr_mask, struct ib_udata *udata) +static int mtu_to_enum(u32 mtu) { - struct qib_ibdev *dev = to_idev(ibqp->device); - struct qib_qp *qp = to_iqp(ibqp); - enum ib_qp_state cur_state, new_state; - struct ib_event ev; - int lastwqe = 0; - int mig = 0; - int ret; - u32 pmtu = 0; /* for gcc warning only */ - - spin_lock_irq(&qp->r_lock); - spin_lock(&qp->s_lock); - - cur_state = attr_mask & IB_QP_CUR_STATE ? - attr->cur_qp_state : qp->state; - new_state = attr_mask & IB_QP_STATE ? 
attr->qp_state : cur_state; - - if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, - attr_mask, IB_LINK_LAYER_UNSPECIFIED)) - goto inval; - - if (attr_mask & IB_QP_AV) { - if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE) - goto inval; - if (qib_check_ah(qp->ibqp.device, &attr->ah_attr)) - goto inval; - } - - if (attr_mask & IB_QP_ALT_PATH) { - if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE) - goto inval; - if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr)) - goto inval; - if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev))) - goto inval; - } - - if (attr_mask & IB_QP_PKEY_INDEX) - if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev))) - goto inval; - - if (attr_mask & IB_QP_MIN_RNR_TIMER) - if (attr->min_rnr_timer > 31) - goto inval; - - if (attr_mask & IB_QP_PORT) - if (qp->ibqp.qp_type == IB_QPT_SMI || - qp->ibqp.qp_type == IB_QPT_GSI || - attr->port_num == 0 || - attr->port_num > ibqp->device->phys_port_cnt) - goto inval; - - if (attr_mask & IB_QP_DEST_QPN) - if (attr->dest_qp_num > QIB_QPN_MASK) - goto inval; - - if (attr_mask & IB_QP_RETRY_CNT) - if (attr->retry_cnt > 7) - goto inval; - - if (attr_mask & IB_QP_RNR_RETRY) - if (attr->rnr_retry > 7) - goto inval; - - /* - * Don't allow invalid path_mtu values. OK to set greater - * than the active mtu (or even the max_cap, if we have tuned - * that to a small mtu. We'll set qp->path_mtu - * to the lesser of requested attribute mtu and active, - * for packetizing messages. - * Note that the QP port has to be set in INIT and MTU in RTR. - */ - if (attr_mask & IB_QP_PATH_MTU) { - struct qib_devdata *dd = dd_from_dev(dev); - int mtu, pidx = qp->port_num - 1; - - mtu = ib_mtu_enum_to_int(attr->path_mtu); - if (mtu == -1) - goto inval; - if (mtu > dd->pport[pidx].ibmtu) { - switch (dd->pport[pidx].ibmtu) { - case 4096: - pmtu = IB_MTU_4096; - break; - case 2048: - pmtu = IB_MTU_2048; - break; - case 1024: - pmtu = IB_MTU_1024; - break; - case 512: - pmtu = IB_MTU_512; - break; - case 256: - pmtu = IB_MTU_256; - break; - default: - pmtu = IB_MTU_2048; - } - } else - pmtu = attr->path_mtu; - } - - if (attr_mask & IB_QP_PATH_MIG_STATE) { - if (attr->path_mig_state == IB_MIG_REARM) { - if (qp->s_mig_state == IB_MIG_ARMED) - goto inval; - if (new_state != IB_QPS_RTS) - goto inval; - } else if (attr->path_mig_state == IB_MIG_MIGRATED) { - if (qp->s_mig_state == IB_MIG_REARM) - goto inval; - if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD) - goto inval; - if (qp->s_mig_state == IB_MIG_ARMED) - mig = 1; - } else - goto inval; - } - - if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) - if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC) - goto inval; + int enum_mtu; - switch (new_state) { - case IB_QPS_RESET: - if (qp->state != IB_QPS_RESET) { - qp->state = IB_QPS_RESET; - spin_lock(&dev->pending_lock); - if (!list_empty(&qp->iowait)) - list_del_init(&qp->iowait); - spin_unlock(&dev->pending_lock); - qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT); - spin_unlock(&qp->s_lock); - spin_unlock_irq(&qp->r_lock); - /* Stop the sending work queue and retry timer */ - cancel_work_sync(&qp->s_work); - del_timer_sync(&qp->s_timer); - wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); - if (qp->s_tx) { - qib_put_txreq(qp->s_tx); - qp->s_tx = NULL; - } - remove_qp(dev, qp); - wait_event(qp->wait, !atomic_read(&qp->refcount)); - spin_lock_irq(&qp->r_lock); - spin_lock(&qp->s_lock); - clear_mr_refs(qp, 1); - qib_reset_qp(qp, ibqp->qp_type); - } + switch (mtu) { + case 4096: + enum_mtu = IB_MTU_4096; break; - - case IB_QPS_RTR: - 
/* Allow event to retrigger if QP set to RTR more than once */ - qp->r_flags &= ~QIB_R_COMM_EST; - qp->state = new_state; + case 2048: + enum_mtu = IB_MTU_2048; break; - - case IB_QPS_SQD: - qp->s_draining = qp->s_last != qp->s_cur; - qp->state = new_state; + case 1024: + enum_mtu = IB_MTU_1024; break; - - case IB_QPS_SQE: - if (qp->ibqp.qp_type == IB_QPT_RC) - goto inval; - qp->state = new_state; + case 512: + enum_mtu = IB_MTU_512; break; - - case IB_QPS_ERR: - lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); + case 256: + enum_mtu = IB_MTU_256; break; - default: - qp->state = new_state; - break; - } - - if (attr_mask & IB_QP_PKEY_INDEX) - qp->s_pkey_index = attr->pkey_index; - - if (attr_mask & IB_QP_PORT) - qp->port_num = attr->port_num; - - if (attr_mask & IB_QP_DEST_QPN) - qp->remote_qpn = attr->dest_qp_num; - - if (attr_mask & IB_QP_SQ_PSN) { - qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK; - qp->s_psn = qp->s_next_psn; - qp->s_sending_psn = qp->s_next_psn; - qp->s_last_psn = qp->s_next_psn - 1; - qp->s_sending_hpsn = qp->s_last_psn; - } - - if (attr_mask & IB_QP_RQ_PSN) - qp->r_psn = attr->rq_psn & QIB_PSN_MASK; - - if (attr_mask & IB_QP_ACCESS_FLAGS) - qp->qp_access_flags = attr->qp_access_flags; - - if (attr_mask & IB_QP_AV) { - qp->remote_ah_attr = attr->ah_attr; - qp->s_srate = attr->ah_attr.static_rate; - } - - if (attr_mask & IB_QP_ALT_PATH) { - qp->alt_ah_attr = attr->alt_ah_attr; - qp->s_alt_pkey_index = attr->alt_pkey_index; - } - - if (attr_mask & IB_QP_PATH_MIG_STATE) { - qp->s_mig_state = attr->path_mig_state; - if (mig) { - qp->remote_ah_attr = qp->alt_ah_attr; - qp->port_num = qp->alt_ah_attr.port_num; - qp->s_pkey_index = qp->s_alt_pkey_index; - } - } - - if (attr_mask & IB_QP_PATH_MTU) { - qp->path_mtu = pmtu; - qp->pmtu = ib_mtu_enum_to_int(pmtu); - } - - if (attr_mask & IB_QP_RETRY_CNT) { - qp->s_retry_cnt = attr->retry_cnt; - qp->s_retry = attr->retry_cnt; - } - - if (attr_mask & IB_QP_RNR_RETRY) { - qp->s_rnr_retry_cnt = attr->rnr_retry; - qp->s_rnr_retry = attr->rnr_retry; + enum_mtu = IB_MTU_2048; } - - if (attr_mask & IB_QP_MIN_RNR_TIMER) - qp->r_min_rnr_timer = attr->min_rnr_timer; - - if (attr_mask & IB_QP_TIMEOUT) { - qp->timeout = attr->timeout; - qp->timeout_jiffies = - usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / - 1000UL); - } - - if (attr_mask & IB_QP_QKEY) - qp->qkey = attr->qkey; - - if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) - qp->r_max_rd_atomic = attr->max_dest_rd_atomic; - - if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) - qp->s_max_rd_atomic = attr->max_rd_atomic; - - spin_unlock(&qp->s_lock); - spin_unlock_irq(&qp->r_lock); - - if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) - insert_qp(dev, qp); - - if (lastwqe) { - ev.device = qp->ibqp.device; - ev.element.qp = &qp->ibqp; - ev.event = IB_EVENT_QP_LAST_WQE_REACHED; - qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); - } - if (mig) { - ev.device = qp->ibqp.device; - ev.element.qp = &qp->ibqp; - ev.event = IB_EVENT_PATH_MIG; - qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); - } - ret = 0; - goto bail; - -inval: - spin_unlock(&qp->s_lock); - spin_unlock_irq(&qp->r_lock); - ret = -EINVAL; - -bail: - return ret; + return enum_mtu; } -int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, - int attr_mask, struct ib_qp_init_attr *init_attr) +int qib_get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp, + struct ib_qp_attr *attr) { - struct qib_qp *qp = to_iqp(ibqp); + int mtu, pmtu, pidx = qp->port_num - 1; + struct qib_ibdev *verbs_dev = container_of(rdi, struct qib_ibdev, 
rdi); + struct qib_devdata *dd = container_of(verbs_dev, struct qib_devdata, + verbs_dev); + mtu = ib_mtu_enum_to_int(attr->path_mtu); + if (mtu == -1) + return -EINVAL; + + if (mtu > dd->pport[pidx].ibmtu) + pmtu = mtu_to_enum(dd->pport[pidx].ibmtu); + else + pmtu = attr->path_mtu; + return pmtu; +} - attr->qp_state = qp->state; - attr->cur_qp_state = attr->qp_state; - attr->path_mtu = qp->path_mtu; - attr->path_mig_state = qp->s_mig_state; - attr->qkey = qp->qkey; - attr->rq_psn = qp->r_psn & QIB_PSN_MASK; - attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK; - attr->dest_qp_num = qp->remote_qpn; - attr->qp_access_flags = qp->qp_access_flags; - attr->cap.max_send_wr = qp->s_size - 1; - attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1; - attr->cap.max_send_sge = qp->s_max_sge; - attr->cap.max_recv_sge = qp->r_rq.max_sge; - attr->cap.max_inline_data = 0; - attr->ah_attr = qp->remote_ah_attr; - attr->alt_ah_attr = qp->alt_ah_attr; - attr->pkey_index = qp->s_pkey_index; - attr->alt_pkey_index = qp->s_alt_pkey_index; - attr->en_sqd_async_notify = 0; - attr->sq_draining = qp->s_draining; - attr->max_rd_atomic = qp->s_max_rd_atomic; - attr->max_dest_rd_atomic = qp->r_max_rd_atomic; - attr->min_rnr_timer = qp->r_min_rnr_timer; - attr->port_num = qp->port_num; - attr->timeout = qp->timeout; - attr->retry_cnt = qp->s_retry_cnt; - attr->rnr_retry = qp->s_rnr_retry_cnt; - attr->alt_port_num = qp->alt_ah_attr.port_num; - attr->alt_timeout = qp->alt_timeout; +int qib_mtu_to_path_mtu(u32 mtu) +{ + return mtu_to_enum(mtu); +} - init_attr->event_handler = qp->ibqp.event_handler; - init_attr->qp_context = qp->ibqp.qp_context; - init_attr->send_cq = qp->ibqp.send_cq; - init_attr->recv_cq = qp->ibqp.recv_cq; - init_attr->srq = qp->ibqp.srq; - init_attr->cap = attr->cap; - if (qp->s_flags & QIB_S_SIGNAL_REQ_WR) - init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; - else - init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; - init_attr->qp_type = qp->ibqp.qp_type; - init_attr->port_num = qp->port_num; - return 0; +u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu) +{ + return ib_mtu_enum_to_int(pmtu); } /** @@ -908,7 +324,7 @@ int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, * * Returns the AETH. */ -__be32 qib_compute_aeth(struct qib_qp *qp) +__be32 qib_compute_aeth(struct rvt_qp *qp) { u32 aeth = qp->r_msn & QIB_MSN_MASK; @@ -921,7 +337,7 @@ __be32 qib_compute_aeth(struct qib_qp *qp) } else { u32 min, max, x; u32 credits; - struct qib_rwq *wq = qp->r_rq.wq; + struct rvt_rwq *wq = qp->r_rq.wq; u32 head; u32 tail; @@ -962,315 +378,63 @@ __be32 qib_compute_aeth(struct qib_qp *qp) return cpu_to_be32(aeth); } -/** - * qib_create_qp - create a queue pair for a device - * @ibpd: the protection domain who's device we create the queue pair for - * @init_attr: the attributes of the queue pair - * @udata: user data for libibverbs.so - * - * Returns the queue pair on success, otherwise returns an errno. - * - * Called by the ib_create_qp() core verbs function. 
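The new qib_get_pmtu_from_attr()/mtu_to_enum() pair above converts the requested path MTU to bytes and falls back to the port's active MTU whenever the request exceeds it. A compact sketch of that clamp, using the standard IBTA path-MTU enum values (IB_MTU_256..IB_MTU_4096 are 1..5); the helper names here are illustrative, not the driver's:

#include <errno.h>

enum { MTU_256 = 1, MTU_512, MTU_1024, MTU_2048, MTU_4096 };

static int mtu_enum_to_int(int e)	/* same mapping ib_mtu_enum_to_int() provides */
{
	return (e >= MTU_256 && e <= MTU_4096) ? 128 << e : -1;
}

static int mtu_to_enum_simple(int mtu)	/* same defaulting as mtu_to_enum() above */
{
	switch (mtu) {
	case 4096: return MTU_4096;
	case 2048: return MTU_2048;
	case 1024: return MTU_1024;
	case 512:  return MTU_512;
	case 256:  return MTU_256;
	default:   return MTU_2048;
	}
}

/* Accept the requested enum unless it exceeds the port's active MTU,
 * in which case drop back to the port value; -EINVAL for a bad enum. */
static int clamp_path_mtu(int requested_enum, int port_mtu_bytes)
{
	int mtu = mtu_enum_to_int(requested_enum);

	if (mtu == -1)
		return -EINVAL;
	return mtu > port_mtu_bytes ? mtu_to_enum_simple(port_mtu_bytes)
				    : requested_enum;
}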
- */ -struct ib_qp *qib_create_qp(struct ib_pd *ibpd, - struct ib_qp_init_attr *init_attr, - struct ib_udata *udata) +void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp) { - struct qib_qp *qp; - int err; - struct qib_swqe *swq = NULL; - struct qib_ibdev *dev; - struct qib_devdata *dd; - size_t sz; - size_t sg_list_sz; - struct ib_qp *ret; - gfp_t gfp; + struct qib_qp_priv *priv; + priv = kzalloc(sizeof(*priv), gfp); + if (!priv) + return ERR_PTR(-ENOMEM); + priv->owner = qp; - if (init_attr->cap.max_send_sge > ib_qib_max_sges || - init_attr->cap.max_send_wr > ib_qib_max_qp_wrs || - init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO)) - return ERR_PTR(-EINVAL); - - /* GFP_NOIO is applicable in RC QPs only */ - if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO && - init_attr->qp_type != IB_QPT_RC) - return ERR_PTR(-EINVAL); - - gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ? - GFP_NOIO : GFP_KERNEL; - - /* Check receive queue parameters if no SRQ is specified. */ - if (!init_attr->srq) { - if (init_attr->cap.max_recv_sge > ib_qib_max_sges || - init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) { - ret = ERR_PTR(-EINVAL); - goto bail; - } - if (init_attr->cap.max_send_sge + - init_attr->cap.max_send_wr + - init_attr->cap.max_recv_sge + - init_attr->cap.max_recv_wr == 0) { - ret = ERR_PTR(-EINVAL); - goto bail; - } + priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp); + if (!priv->s_hdr) { + kfree(priv); + return ERR_PTR(-ENOMEM); } + init_waitqueue_head(&priv->wait_dma); + INIT_WORK(&priv->s_work, _qib_do_send); + INIT_LIST_HEAD(&priv->iowait); - switch (init_attr->qp_type) { - case IB_QPT_SMI: - case IB_QPT_GSI: - if (init_attr->port_num == 0 || - init_attr->port_num > ibpd->device->phys_port_cnt) { - ret = ERR_PTR(-EINVAL); - goto bail; - } - case IB_QPT_UC: - case IB_QPT_RC: - case IB_QPT_UD: - sz = sizeof(struct qib_sge) * - init_attr->cap.max_send_sge + - sizeof(struct qib_swqe); - swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz, - gfp, PAGE_KERNEL); - if (swq == NULL) { - ret = ERR_PTR(-ENOMEM); - goto bail; - } - sz = sizeof(*qp); - sg_list_sz = 0; - if (init_attr->srq) { - struct qib_srq *srq = to_isrq(init_attr->srq); - - if (srq->rq.max_sge > 1) - sg_list_sz = sizeof(*qp->r_sg_list) * - (srq->rq.max_sge - 1); - } else if (init_attr->cap.max_recv_sge > 1) - sg_list_sz = sizeof(*qp->r_sg_list) * - (init_attr->cap.max_recv_sge - 1); - qp = kzalloc(sz + sg_list_sz, gfp); - if (!qp) { - ret = ERR_PTR(-ENOMEM); - goto bail_swq; - } - RCU_INIT_POINTER(qp->next, NULL); - qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp); - if (!qp->s_hdr) { - ret = ERR_PTR(-ENOMEM); - goto bail_qp; - } - qp->timeout_jiffies = - usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / - 1000UL); - if (init_attr->srq) - sz = 0; - else { - qp->r_rq.size = init_attr->cap.max_recv_wr + 1; - qp->r_rq.max_sge = init_attr->cap.max_recv_sge; - sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) + - sizeof(struct qib_rwqe); - if (gfp != GFP_NOIO) - qp->r_rq.wq = vmalloc_user( - sizeof(struct qib_rwq) + - qp->r_rq.size * sz); - else - qp->r_rq.wq = __vmalloc( - sizeof(struct qib_rwq) + - qp->r_rq.size * sz, - gfp, PAGE_KERNEL); - - if (!qp->r_rq.wq) { - ret = ERR_PTR(-ENOMEM); - goto bail_qp; - } - } - - /* - * ib_create_qp() will initialize qp->ibqp - * except for qp->ibqp.qp_num. 
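qib_qp_priv_alloc()/qib_qp_priv_free() above show the rdmavt split of QP state: the core owns struct rvt_qp and the driver attaches its own private block (work item, iowait list, cached send header) through qp->priv. The allocation itself is a plain two-step with unwind, sketched here with stand-in types:

#include <stdlib.h>

struct fake_hdr  { unsigned char bytes[128]; };	/* stand-in for the cached send header */
struct fake_priv { void *owner; struct fake_hdr *s_hdr; };

/* Allocate the wrapper, then the header buffer it carries, and undo the
 * first allocation if the second one fails, as the new code does. */
static struct fake_priv *priv_alloc(void *owner_qp)
{
	struct fake_priv *priv = calloc(1, sizeof(*priv));

	if (!priv)
		return NULL;
	priv->owner = owner_qp;
	priv->s_hdr = calloc(1, sizeof(*priv->s_hdr));
	if (!priv->s_hdr) {
		free(priv);
		return NULL;
	}
	return priv;
}

static void priv_free(struct fake_priv *priv)	/* mirrors qib_qp_priv_free() */
{
	free(priv->s_hdr);
	free(priv);
}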
- */ - spin_lock_init(&qp->r_lock); - spin_lock_init(&qp->s_lock); - spin_lock_init(&qp->r_rq.lock); - atomic_set(&qp->refcount, 0); - init_waitqueue_head(&qp->wait); - init_waitqueue_head(&qp->wait_dma); - init_timer(&qp->s_timer); - qp->s_timer.data = (unsigned long)qp; - INIT_WORK(&qp->s_work, qib_do_send); - INIT_LIST_HEAD(&qp->iowait); - INIT_LIST_HEAD(&qp->rspwait); - qp->state = IB_QPS_RESET; - qp->s_wq = swq; - qp->s_size = init_attr->cap.max_send_wr + 1; - qp->s_max_sge = init_attr->cap.max_send_sge; - if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR) - qp->s_flags = QIB_S_SIGNAL_REQ_WR; - dev = to_idev(ibpd->device); - dd = dd_from_dev(dev); - err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type, - init_attr->port_num, gfp); - if (err < 0) { - ret = ERR_PTR(err); - vfree(qp->r_rq.wq); - goto bail_qp; - } - qp->ibqp.qp_num = err; - qp->port_num = init_attr->port_num; - qib_reset_qp(qp, init_attr->qp_type); - break; - - default: - /* Don't support raw QPs */ - ret = ERR_PTR(-ENOSYS); - goto bail; - } - - init_attr->cap.max_inline_data = 0; - - /* - * Return the address of the RWQ as the offset to mmap. - * See qib_mmap() for details. - */ - if (udata && udata->outlen >= sizeof(__u64)) { - if (!qp->r_rq.wq) { - __u64 offset = 0; - - err = ib_copy_to_udata(udata, &offset, - sizeof(offset)); - if (err) { - ret = ERR_PTR(err); - goto bail_ip; - } - } else { - u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz; - - qp->ip = qib_create_mmap_info(dev, s, - ibpd->uobject->context, - qp->r_rq.wq); - if (!qp->ip) { - ret = ERR_PTR(-ENOMEM); - goto bail_ip; - } - - err = ib_copy_to_udata(udata, &(qp->ip->offset), - sizeof(qp->ip->offset)); - if (err) { - ret = ERR_PTR(err); - goto bail_ip; - } - } - } - - spin_lock(&dev->n_qps_lock); - if (dev->n_qps_allocated == ib_qib_max_qps) { - spin_unlock(&dev->n_qps_lock); - ret = ERR_PTR(-ENOMEM); - goto bail_ip; - } - - dev->n_qps_allocated++; - spin_unlock(&dev->n_qps_lock); - - if (qp->ip) { - spin_lock_irq(&dev->pending_lock); - list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps); - spin_unlock_irq(&dev->pending_lock); - } - - ret = &qp->ibqp; - goto bail; - -bail_ip: - if (qp->ip) - kref_put(&qp->ip->ref, qib_release_mmap_info); - else - vfree(qp->r_rq.wq); - free_qpn(&dev->qpn_table, qp->ibqp.qp_num); -bail_qp: - kfree(qp->s_hdr); - kfree(qp); -bail_swq: - vfree(swq); -bail: - return ret; + return priv; } -/** - * qib_destroy_qp - destroy a queue pair - * @ibqp: the queue pair to destroy - * - * Returns 0 on success. - * - * Note that this can be called while the QP is actively sending or - * receiving! - */ -int qib_destroy_qp(struct ib_qp *ibqp) +void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp) { - struct qib_qp *qp = to_iqp(ibqp); - struct qib_ibdev *dev = to_idev(ibqp->device); + struct qib_qp_priv *priv = qp->priv; - /* Make sure HW and driver activity is stopped. 
*/ - spin_lock_irq(&qp->s_lock); - if (qp->state != IB_QPS_RESET) { - qp->state = IB_QPS_RESET; - spin_lock(&dev->pending_lock); - if (!list_empty(&qp->iowait)) - list_del_init(&qp->iowait); - spin_unlock(&dev->pending_lock); - qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT); - spin_unlock_irq(&qp->s_lock); - cancel_work_sync(&qp->s_work); - del_timer_sync(&qp->s_timer); - wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); - if (qp->s_tx) { - qib_put_txreq(qp->s_tx); - qp->s_tx = NULL; - } - remove_qp(dev, qp); - wait_event(qp->wait, !atomic_read(&qp->refcount)); - clear_mr_refs(qp, 1); - } else - spin_unlock_irq(&qp->s_lock); + kfree(priv->s_hdr); + kfree(priv); +} - /* all user's cleaned up, mark it available */ - free_qpn(&dev->qpn_table, qp->ibqp.qp_num); - spin_lock(&dev->n_qps_lock); - dev->n_qps_allocated--; - spin_unlock(&dev->n_qps_lock); +void qib_stop_send_queue(struct rvt_qp *qp) +{ + struct qib_qp_priv *priv = qp->priv; - if (qp->ip) - kref_put(&qp->ip->ref, qib_release_mmap_info); - else - vfree(qp->r_rq.wq); - vfree(qp->s_wq); - kfree(qp->s_hdr); - kfree(qp); - return 0; + cancel_work_sync(&priv->s_work); + del_timer_sync(&qp->s_timer); } -/** - * qib_init_qpn_table - initialize the QP number table for a device - * @qpt: the QPN table - */ -void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt) +void qib_quiesce_qp(struct rvt_qp *qp) { - spin_lock_init(&qpt->lock); - qpt->last = 1; /* start with QPN 2 */ - qpt->nmaps = 1; - qpt->mask = dd->qpn_mask; + struct qib_qp_priv *priv = qp->priv; + + wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy)); + if (priv->s_tx) { + qib_put_txreq(priv->s_tx); + priv->s_tx = NULL; + } } -/** - * qib_free_qpn_table - free the QP number table for a device - * @qpt: the QPN table - */ -void qib_free_qpn_table(struct qib_qpn_table *qpt) +void qib_flush_qp_waiters(struct rvt_qp *qp) { - int i; + struct qib_qp_priv *priv = qp->priv; + struct qib_ibdev *dev = to_idev(qp->ibqp.device); - for (i = 0; i < ARRAY_SIZE(qpt->map); i++) - if (qpt->map[i].page) - free_page((unsigned long) qpt->map[i].page); + spin_lock(&dev->rdi.pending_lock); + if (!list_empty(&priv->iowait)) + list_del_init(&priv->iowait); + spin_unlock(&dev->rdi.pending_lock); } /** @@ -1280,7 +444,7 @@ void qib_free_qpn_table(struct qib_qpn_table *qpt) * * The QP s_lock should be held. */ -void qib_get_credit(struct qib_qp *qp, u32 aeth) +void qib_get_credit(struct rvt_qp *qp, u32 aeth) { u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK; @@ -1290,31 +454,70 @@ void qib_get_credit(struct qib_qp *qp, u32 aeth) * honor the credit field. 
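The credit handling that continues below, like most of the PSN logic in this driver, depends on comparing 24-bit sequence numbers that wrap. One way to write that wraparound-safe compare, plus the shape of the limit update qib_get_credit() performs, as a self-contained sketch (cmp24/update_lsn are illustrative names, and 'granted' stands for the (aeth + credit_table[credit]) value computed in the real code):

#include <stdint.h>

#define SEQ_MASK 0xFFFFFF	/* PSNs and MSNs are 24-bit circular counters */

/* Negative when a is behind b, positive when ahead, 0 when equal,
 * with wraparound handled by sign-extending the 24-bit difference. */
static int cmp24(uint32_t a, uint32_t b)
{
	int32_t d = (int32_t)((a - b) & SEQ_MASK);

	return (d & 0x800000) ? d - 0x1000000 : d;
}

/* Only advance the send limit (LSN) when the newly granted value is
 * ahead of the current one in 24-bit space. */
static void update_lsn(uint32_t *s_lsn, uint32_t granted)
{
	if (cmp24(granted, *s_lsn) > 0)
		*s_lsn = granted;
}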
*/ if (credit == QIB_AETH_CREDIT_INVAL) { - if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) { - qp->s_flags |= QIB_S_UNLIMITED_CREDIT; - if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) { - qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT; + if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) { + qp->s_flags |= RVT_S_UNLIMITED_CREDIT; + if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) { + qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT; qib_schedule_send(qp); } } - } else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) { + } else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) { /* Compute new LSN (i.e., MSN + credit) */ credit = (aeth + credit_table[credit]) & QIB_MSN_MASK; if (qib_cmp24(credit, qp->s_lsn) > 0) { qp->s_lsn = credit; - if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) { - qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT; + if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) { + qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT; qib_schedule_send(qp); } } } } +/** + * qib_check_send_wqe - validate wr/wqe + * @qp - The qp + * @wqe - The built wqe + * + * validate wr/wqe. This is called + * prior to inserting the wqe into + * the ring but after the wqe has been + * setup. + * + * Returns 1 to force direct progress, 0 otherwise, -EINVAL on failure + */ +int qib_check_send_wqe(struct rvt_qp *qp, + struct rvt_swqe *wqe) +{ + struct rvt_ah *ah; + int ret = 0; + + switch (qp->ibqp.qp_type) { + case IB_QPT_RC: + case IB_QPT_UC: + if (wqe->length > 0x80000000U) + return -EINVAL; + break; + case IB_QPT_SMI: + case IB_QPT_GSI: + case IB_QPT_UD: + ah = ibah_to_rvtah(wqe->ud_wr.ah); + if (wqe->length > (1 << ah->log_pmtu)) + return -EINVAL; + /* progress hint */ + ret = 1; + break; + default: + break; + } + return ret; +} + #ifdef CONFIG_DEBUG_FS struct qib_qp_iter { struct qib_ibdev *dev; - struct qib_qp *qp; + struct rvt_qp *qp; int n; }; @@ -1340,14 +543,14 @@ int qib_qp_iter_next(struct qib_qp_iter *iter) struct qib_ibdev *dev = iter->dev; int n = iter->n; int ret = 1; - struct qib_qp *pqp = iter->qp; - struct qib_qp *qp; + struct rvt_qp *pqp = iter->qp; + struct rvt_qp *qp; - for (; n < dev->qp_table_size; n++) { + for (; n < dev->rdi.qp_dev->qp_table_size; n++) { if (pqp) qp = rcu_dereference(pqp->next); else - qp = rcu_dereference(dev->qp_table[n]); + qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); pqp = qp; if (qp) { iter->qp = qp; @@ -1364,10 +567,11 @@ static const char * const qp_type_str[] = { void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter) { - struct qib_swqe *wqe; - struct qib_qp *qp = iter->qp; + struct rvt_swqe *wqe; + struct rvt_qp *qp = iter->qp; + struct qib_qp_priv *priv = qp->priv; - wqe = get_swqe_ptr(qp, qp->s_last); + wqe = rvt_get_swqe_ptr(qp, qp->s_last); seq_printf(s, "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n", iter->n, @@ -1377,8 +581,8 @@ void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter) wqe->wr.opcode, qp->s_hdrwords, qp->s_flags, - atomic_read(&qp->s_dma_busy), - !list_empty(&qp->iowait), + atomic_read(&priv->s_dma_busy), + !list_empty(&priv->iowait), qp->timeout, wqe->ssn, qp->s_lsn, diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index e6b7556d5..9088e26d3 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -40,7 +40,7 @@ static void rc_timeout(unsigned long arg); -static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe, +static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 psn, u32 pmtu) { u32 len; @@ -54,9 +54,9 @@ static u32 
restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe, return wqe->length - len; } -static void start_timer(struct qib_qp *qp) +static void start_timer(struct rvt_qp *qp) { - qp->s_flags |= QIB_S_TIMER; + qp->s_flags |= RVT_S_TIMER; qp->s_timer.function = rc_timeout; /* 4.096 usec. * (1 << qp->timeout) */ qp->s_timer.expires = jiffies + qp->timeout_jiffies; @@ -74,17 +74,17 @@ static void start_timer(struct qib_qp *qp) * Note that we are in the responder's side of the QP context. * Note the QP s_lock must be held. */ -static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, +static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp, struct qib_other_headers *ohdr, u32 pmtu) { - struct qib_ack_entry *e; + struct rvt_ack_entry *e; u32 hwords; u32 len; u32 bth0; u32 bth2; /* Don't send an ACK if we aren't supposed to. */ - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) goto bail; /* header size in 32-bit words LRH+BTH = (8+12)/4. */ @@ -95,7 +95,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, case OP(RDMA_READ_RESPONSE_ONLY): e = &qp->s_ack_queue[qp->s_tail_ack_queue]; if (e->rdma_sge.mr) { - qib_put_mr(e->rdma_sge.mr); + rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } /* FALLTHROUGH */ @@ -112,7 +112,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, case OP(ACKNOWLEDGE): /* Check for no next entry in the queue. */ if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { - if (qp->s_flags & QIB_S_ACK_PENDING) + if (qp->s_flags & RVT_S_ACK_PENDING) goto normal; goto bail; } @@ -133,7 +133,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, /* Copy SGE state in case we need to resend */ qp->s_rdma_mr = e->rdma_sge.mr; if (qp->s_rdma_mr) - qib_get_mr(qp->s_rdma_mr); + rvt_get_mr(qp->s_rdma_mr); qp->s_ack_rdma_sge.sge = e->rdma_sge; qp->s_ack_rdma_sge.num_sge = 1; qp->s_cur_sge = &qp->s_ack_rdma_sge; @@ -172,7 +172,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, qp->s_cur_sge = &qp->s_ack_rdma_sge; qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; if (qp->s_rdma_mr) - qib_get_mr(qp->s_rdma_mr); + rvt_get_mr(qp->s_rdma_mr); len = qp->s_ack_rdma_sge.sge.sge_length; if (len > pmtu) len = pmtu; @@ -196,7 +196,7 @@ normal: * (see above). */ qp->s_ack_state = OP(SEND_ONLY); - qp->s_flags &= ~QIB_S_ACK_PENDING; + qp->s_flags &= ~RVT_S_ACK_PENDING; qp->s_cur_sge = NULL; if (qp->s_nak_state) ohdr->u.aeth = @@ -218,7 +218,7 @@ normal: bail: qp->s_ack_state = OP(ACKNOWLEDGE); - qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING); + qp->s_flags &= ~(RVT_S_RESP_PENDING | RVT_S_ACK_PENDING); return 0; } @@ -226,63 +226,60 @@ bail: * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC) * @qp: a pointer to the QP * + * Assumes the s_lock is held. + * * Return 1 if constructed; otherwise, return 0. 
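restart_sge() above resumes a partially sent request: judging from the 'return wqe->length - len' kept in the hunk, it works out how many bytes the packets up to 'psn' already carried and reports what is left. A hedged sketch of that arithmetic (the byte-offset formula is inferred from the surrounding code; the real helper also repositions the SGE state to skip those bytes):

#include <stdint.h>

#define SEQ_MASK 0xFFFFFF

/* Bytes remaining when a request that started at first_psn is restarted
 * at psn: each earlier packet carried pmtu bytes, counted modulo 2^24. */
static uint32_t remaining_after_restart(uint32_t psn, uint32_t first_psn,
					uint32_t pmtu, uint32_t total_len)
{
	uint32_t done = ((psn - first_psn) & SEQ_MASK) * pmtu;

	return total_len - done;
}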
*/ -int qib_make_rc_req(struct qib_qp *qp) +int qib_make_rc_req(struct rvt_qp *qp) { + struct qib_qp_priv *priv = qp->priv; struct qib_ibdev *dev = to_idev(qp->ibqp.device); struct qib_other_headers *ohdr; - struct qib_sge_state *ss; - struct qib_swqe *wqe; + struct rvt_sge_state *ss; + struct rvt_swqe *wqe; u32 hwords; u32 len; u32 bth0; u32 bth2; u32 pmtu = qp->pmtu; char newreq; - unsigned long flags; int ret = 0; int delta; - ohdr = &qp->s_hdr->u.oth; + ohdr = &priv->s_hdr->u.oth; if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) - ohdr = &qp->s_hdr->u.l.oth; - - /* - * The lock is needed to synchronize between the sending tasklet, - * the receive interrupt handler, and timeout resends. - */ - spin_lock_irqsave(&qp->s_lock, flags); + ohdr = &priv->s_hdr->u.l.oth; /* Sending responses has higher priority over sending requests. */ - if ((qp->s_flags & QIB_S_RESP_PENDING) && + if ((qp->s_flags & RVT_S_RESP_PENDING) && qib_make_rc_ack(dev, qp, ohdr, pmtu)) goto done; - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) { - if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { + if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. */ - if (qp->s_last == qp->s_head) + smp_read_barrier_depends(); /* see post_one_send() */ + if (qp->s_last == ACCESS_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ - if (atomic_read(&qp->s_dma_busy)) { - qp->s_flags |= QIB_S_WAIT_DMA; + if (atomic_read(&priv->s_dma_busy)) { + qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } - wqe = get_swqe_ptr(qp, qp->s_last); + wqe = rvt_get_swqe_ptr(qp, qp->s_last); qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ? IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR); /* will get called again */ goto done; } - if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK)) + if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK)) goto bail; if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) { if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) { - qp->s_flags |= QIB_S_WAIT_PSN; + qp->s_flags |= RVT_S_WAIT_PSN; goto bail; } qp->s_sending_psn = qp->s_psn; @@ -294,10 +291,10 @@ int qib_make_rc_req(struct qib_qp *qp) bth0 = 0; /* Send a request. */ - wqe = get_swqe_ptr(qp, qp->s_cur); + wqe = rvt_get_swqe_ptr(qp, qp->s_cur); switch (qp->s_state) { default: - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) goto bail; /* * Resend an old request or start a new one. @@ -317,11 +314,11 @@ int qib_make_rc_req(struct qib_qp *qp) */ if ((wqe->wr.send_flags & IB_SEND_FENCE) && qp->s_num_rd_atomic) { - qp->s_flags |= QIB_S_WAIT_FENCE; + qp->s_flags |= RVT_S_WAIT_FENCE; goto bail; } - wqe->psn = qp->s_next_psn; newreq = 1; + qp->s_psn = wqe->psn; } /* * Note that we have to be careful not to modify the @@ -335,14 +332,12 @@ int qib_make_rc_req(struct qib_qp *qp) case IB_WR_SEND: case IB_WR_SEND_WITH_IMM: /* If no credit, return. 
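The lpsn bookkeeping being removed above ('wqe->lpsn += (len - 1) / pmtu') encodes how many PSNs one work request spans: a message of len bytes at path MTU pmtu needs (len - 1)/pmtu extra packets beyond the first. That bookkeeping disappears from the request builder in this patch; the arithmetic itself is tiny, as in this sketch:

#include <stdint.h>

#define SEQ_MASK 0xFFFFFF

/* Last PSN of a request that starts at first_psn and carries len bytes
 * in pmtu-sized packets, staying within the 24-bit PSN space. */
static uint32_t last_psn_of_request(uint32_t first_psn, uint32_t len,
				    uint32_t pmtu)
{
	uint32_t extra = len ? (len - 1) / pmtu : 0;

	return (first_psn + extra) & SEQ_MASK;
}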
*/ - if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) && + if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { - qp->s_flags |= QIB_S_WAIT_SSN_CREDIT; + qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; goto bail; } - wqe->lpsn = wqe->psn; if (len > pmtu) { - wqe->lpsn += (len - 1) / pmtu; qp->s_state = OP(SEND_FIRST); len = pmtu; break; @@ -363,14 +358,14 @@ int qib_make_rc_req(struct qib_qp *qp) break; case IB_WR_RDMA_WRITE: - if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) + if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; /* FALLTHROUGH */ case IB_WR_RDMA_WRITE_WITH_IMM: /* If no credit, return. */ - if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) && + if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) && qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { - qp->s_flags |= QIB_S_WAIT_SSN_CREDIT; + qp->s_flags |= RVT_S_WAIT_SSN_CREDIT; goto bail; } @@ -380,9 +375,7 @@ int qib_make_rc_req(struct qib_qp *qp) cpu_to_be32(wqe->rdma_wr.rkey); ohdr->u.rc.reth.length = cpu_to_be32(len); hwords += sizeof(struct ib_reth) / sizeof(u32); - wqe->lpsn = wqe->psn; if (len > pmtu) { - wqe->lpsn += (len - 1) / pmtu; qp->s_state = OP(RDMA_WRITE_FIRST); len = pmtu; break; @@ -411,19 +404,12 @@ int qib_make_rc_req(struct qib_qp *qp) if (newreq) { if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) { - qp->s_flags |= QIB_S_WAIT_RDMAR; + qp->s_flags |= RVT_S_WAIT_RDMAR; goto bail; } qp->s_num_rd_atomic++; - if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) + if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; - /* - * Adjust s_next_psn to count the - * expected number of responses. - */ - if (len > pmtu) - qp->s_next_psn += (len - 1) / pmtu; - wqe->lpsn = qp->s_next_psn++; } ohdr->u.rc.reth.vaddr = @@ -449,13 +435,12 @@ int qib_make_rc_req(struct qib_qp *qp) if (newreq) { if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) { - qp->s_flags |= QIB_S_WAIT_RDMAR; + qp->s_flags |= RVT_S_WAIT_RDMAR; goto bail; } qp->s_num_rd_atomic++; - if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) + if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) qp->s_lsn++; - wqe->lpsn = wqe->psn; } if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { qp->s_state = OP(COMPARE_SWAP); @@ -498,11 +483,8 @@ int qib_make_rc_req(struct qib_qp *qp) } if (wqe->wr.opcode == IB_WR_RDMA_READ) qp->s_psn = wqe->lpsn + 1; - else { + else qp->s_psn++; - if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) - qp->s_next_psn = qp->s_psn; - } break; case OP(RDMA_READ_RESPONSE_FIRST): @@ -522,8 +504,6 @@ int qib_make_rc_req(struct qib_qp *qp) /* FALLTHROUGH */ case OP(SEND_MIDDLE): bth2 = qp->s_psn++ & QIB_PSN_MASK; - if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) - qp->s_next_psn = qp->s_psn; ss = &qp->s_sge; len = qp->s_len; if (len > pmtu) { @@ -563,8 +543,6 @@ int qib_make_rc_req(struct qib_qp *qp) /* FALLTHROUGH */ case OP(RDMA_WRITE_MIDDLE): bth2 = qp->s_psn++ & QIB_PSN_MASK; - if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) - qp->s_next_psn = qp->s_psn; ss = &qp->s_sge; len = qp->s_len; if (len > pmtu) { @@ -618,9 +596,9 @@ int qib_make_rc_req(struct qib_qp *qp) delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8; if (delta && delta % QIB_PSN_CREDIT == 0) bth2 |= IB_BTH_REQ_ACK; - if (qp->s_flags & QIB_S_SEND_ONE) { - qp->s_flags &= ~QIB_S_SEND_ONE; - qp->s_flags |= QIB_S_WAIT_ACK; + if (qp->s_flags & RVT_S_SEND_ONE) { + qp->s_flags &= ~RVT_S_SEND_ONE; + qp->s_flags |= RVT_S_WAIT_ACK; bth2 |= IB_BTH_REQ_ACK; } qp->s_len -= len; @@ -629,13 +607,9 @@ int qib_make_rc_req(struct qib_qp *qp) qp->s_cur_size = len; qib_make_ruc_header(qp, ohdr, bth0 | 
(qp->s_state << 24), bth2); done: - ret = 1; - goto unlock; - + return 1; bail: - qp->s_flags &= ~QIB_S_BUSY; -unlock: - spin_unlock_irqrestore(&qp->s_lock, flags); + qp->s_flags &= ~RVT_S_BUSY; return ret; } @@ -647,7 +621,7 @@ unlock: * Note that RDMA reads and atomics are handled in the * send side QP state and tasklet. */ -void qib_send_rc_ack(struct qib_qp *qp) +void qib_send_rc_ack(struct rvt_qp *qp) { struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); @@ -665,11 +639,11 @@ void qib_send_rc_ack(struct qib_qp *qp) spin_lock_irqsave(&qp->s_lock, flags); - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) goto unlock; /* Don't send ACK or NAK if a RDMA read or atomic is pending. */ - if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt) + if ((qp->s_flags & RVT_S_RESP_PENDING) || qp->s_rdma_ack_cnt) goto queue_ack; /* Construct the header with s_lock held so APM doesn't change it. */ @@ -758,9 +732,9 @@ void qib_send_rc_ack(struct qib_qp *qp) goto done; queue_ack: - if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { - ibp->n_rc_qacks++; - qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING; + if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { + this_cpu_inc(*ibp->rvp.rc_qacks); + qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING; qp->s_nak_state = qp->r_nak_state; qp->s_ack_psn = qp->r_ack_psn; @@ -782,10 +756,10 @@ done: * for the given QP. * Called at interrupt level with the QP s_lock held. */ -static void reset_psn(struct qib_qp *qp, u32 psn) +static void reset_psn(struct rvt_qp *qp, u32 psn) { u32 n = qp->s_acked; - struct qib_swqe *wqe = get_swqe_ptr(qp, n); + struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n); u32 opcode; qp->s_cur = n; @@ -808,7 +782,7 @@ static void reset_psn(struct qib_qp *qp, u32 psn) n = 0; if (n == qp->s_tail) break; - wqe = get_swqe_ptr(qp, n); + wqe = rvt_get_swqe_ptr(qp, n); diff = qib_cmp24(psn, wqe->psn); if (diff < 0) break; @@ -854,22 +828,22 @@ static void reset_psn(struct qib_qp *qp, u32 psn) done: qp->s_psn = psn; /* - * Set QIB_S_WAIT_PSN as qib_rc_complete() may start the timer + * Set RVT_S_WAIT_PSN as qib_rc_complete() may start the timer * asynchronously before the send tasklet can get scheduled. * Doing it in qib_make_rc_req() is too late. */ if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) && (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) - qp->s_flags |= QIB_S_WAIT_PSN; + qp->s_flags |= RVT_S_WAIT_PSN; } /* * Back up requester to resend the last un-ACKed request. * The QP r_lock and s_lock should be held and interrupts disabled. 
*/ -static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait) +static void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait) { - struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked); + struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked); struct qib_ibport *ibp; if (qp->s_retry == 0) { @@ -878,7 +852,7 @@ static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait) qp->s_retry = qp->s_retry_cnt; } else if (qp->s_last == qp->s_acked) { qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); - qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); + rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); return; } else /* XXX need to handle delayed completion */ return; @@ -887,15 +861,15 @@ static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait) ibp = to_iport(qp->ibqp.device, qp->port_num); if (wqe->wr.opcode == IB_WR_RDMA_READ) - ibp->n_rc_resends++; + ibp->rvp.n_rc_resends++; else - ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK; + ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK; - qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | - QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN | - QIB_S_WAIT_ACK); + qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | + RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN | + RVT_S_WAIT_ACK); if (wait) - qp->s_flags |= QIB_S_SEND_ONE; + qp->s_flags |= RVT_S_SEND_ONE; reset_psn(qp, psn); } @@ -904,16 +878,16 @@ static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait) */ static void rc_timeout(unsigned long arg) { - struct qib_qp *qp = (struct qib_qp *)arg; + struct rvt_qp *qp = (struct rvt_qp *)arg; struct qib_ibport *ibp; unsigned long flags; spin_lock_irqsave(&qp->r_lock, flags); spin_lock(&qp->s_lock); - if (qp->s_flags & QIB_S_TIMER) { + if (qp->s_flags & RVT_S_TIMER) { ibp = to_iport(qp->ibqp.device, qp->port_num); - ibp->n_rc_timeouts++; - qp->s_flags &= ~QIB_S_TIMER; + ibp->rvp.n_rc_timeouts++; + qp->s_flags &= ~RVT_S_TIMER; del_timer(&qp->s_timer); qib_restart_rc(qp, qp->s_last_psn + 1, 1); qib_schedule_send(qp); @@ -927,12 +901,12 @@ static void rc_timeout(unsigned long arg) */ void qib_rc_rnr_retry(unsigned long arg) { - struct qib_qp *qp = (struct qib_qp *)arg; + struct rvt_qp *qp = (struct rvt_qp *)arg; unsigned long flags; spin_lock_irqsave(&qp->s_lock, flags); - if (qp->s_flags & QIB_S_WAIT_RNR) { - qp->s_flags &= ~QIB_S_WAIT_RNR; + if (qp->s_flags & RVT_S_WAIT_RNR) { + qp->s_flags &= ~RVT_S_WAIT_RNR; del_timer(&qp->s_timer); qib_schedule_send(qp); } @@ -943,14 +917,14 @@ void qib_rc_rnr_retry(unsigned long arg) * Set qp->s_sending_psn to the next PSN after the given one. * This would be psn+1 except when RDMA reads are present. */ -static void reset_sending_psn(struct qib_qp *qp, u32 psn) +static void reset_sending_psn(struct rvt_qp *qp, u32 psn) { - struct qib_swqe *wqe; + struct rvt_swqe *wqe; u32 n = qp->s_last; /* Find the work request corresponding to the given PSN. */ for (;;) { - wqe = get_swqe_ptr(qp, n); + wqe = rvt_get_swqe_ptr(qp, n); if (qib_cmp24(psn, wqe->lpsn) <= 0) { if (wqe->wr.opcode == IB_WR_RDMA_READ) qp->s_sending_psn = wqe->lpsn + 1; @@ -968,16 +942,16 @@ static void reset_sending_psn(struct qib_qp *qp, u32 psn) /* * This should be called with the QP s_lock held and interrupts disabled. 
*/ -void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) +void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr) { struct qib_other_headers *ohdr; - struct qib_swqe *wqe; + struct rvt_swqe *wqe; struct ib_wc wc; unsigned i; u32 opcode; u32 psn; - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND)) return; /* Find out where the BTH is */ @@ -1002,22 +976,30 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) * there are still requests that haven't been acked. */ if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && - !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) && - (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) && + (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) start_timer(qp); while (qp->s_last != qp->s_acked) { - wqe = get_swqe_ptr(qp, qp->s_last); + u32 s_last; + + wqe = rvt_get_swqe_ptr(qp, qp->s_last); if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 && qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) break; + s_last = qp->s_last; + if (++s_last >= qp->s_size) + s_last = 0; + qp->s_last = s_last; + /* see post_send() */ + barrier(); for (i = 0; i < wqe->wr.num_sge; i++) { - struct qib_sge *sge = &wqe->sg_list[i]; + struct rvt_sge *sge = &wqe->sg_list[i]; - qib_put_mr(sge->mr); + rvt_put_mr(sge->mr); } /* Post a send completion queue entry if requested. */ - if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || + if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) || (wqe->wr.send_flags & IB_SEND_SIGNALED)) { memset(&wc, 0, sizeof(wc)); wc.wr_id = wqe->wr.wr_id; @@ -1025,25 +1007,23 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; wc.byte_len = wqe->length; wc.qp = &qp->ibqp; - qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0); } - if (++qp->s_last >= qp->s_size) - qp->s_last = 0; } /* * If we were waiting for sends to complete before resending, * and they are now complete, restart sending. */ - if (qp->s_flags & QIB_S_WAIT_PSN && + if (qp->s_flags & RVT_S_WAIT_PSN && qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { - qp->s_flags &= ~QIB_S_WAIT_PSN; + qp->s_flags &= ~RVT_S_WAIT_PSN; qp->s_sending_psn = qp->s_psn; qp->s_sending_hpsn = qp->s_psn - 1; qib_schedule_send(qp); } } -static inline void update_last_psn(struct qib_qp *qp, u32 psn) +static inline void update_last_psn(struct rvt_qp *qp, u32 psn) { qp->s_last_psn = psn; } @@ -1053,8 +1033,8 @@ static inline void update_last_psn(struct qib_qp *qp, u32 psn) * This is similar to qib_send_complete but has to check to be sure * that the SGEs are not being referenced if the SWQE is being resent. 
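Two small rules are visible in the rewritten completion loop above: the retired-slot index is computed locally, wrapped at the ring size and published before the entry's resources are released (hence the barrier() comment referring to post_send()), and a completion entry is only generated for an all-signaled QP or for a work request that asked to be signaled. Both fit in a few lines of stand-in C (the flag names here are illustrative):

#include <stdint.h>
#include <stdbool.h>

#define F_SIGNAL_REQ_WR  0x1	/* QP created with selective signaling */
#define F_SEND_SIGNALED  0x2	/* work request posted with IB_SEND_SIGNALED */

/* Advance a circular ring index, wrapping at ring_size. */
static uint32_t advance_ring_index(uint32_t idx, uint32_t ring_size)
{
	if (++idx >= ring_size)
		idx = 0;
	return idx;
}

/* Generate a CQE unless the QP uses selective signaling and this
 * particular work request did not ask for one. */
static bool should_generate_cqe(uint32_t qp_flags, uint32_t wr_flags)
{
	return !(qp_flags & F_SIGNAL_REQ_WR) || (wr_flags & F_SEND_SIGNALED);
}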
*/ -static struct qib_swqe *do_rc_completion(struct qib_qp *qp, - struct qib_swqe *wqe, +static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp, + struct rvt_swqe *wqe, struct qib_ibport *ibp) { struct ib_wc wc; @@ -1067,13 +1047,21 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp, */ if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 || qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { + u32 s_last; + for (i = 0; i < wqe->wr.num_sge; i++) { - struct qib_sge *sge = &wqe->sg_list[i]; + struct rvt_sge *sge = &wqe->sg_list[i]; - qib_put_mr(sge->mr); + rvt_put_mr(sge->mr); } + s_last = qp->s_last; + if (++s_last >= qp->s_size) + s_last = 0; + qp->s_last = s_last; + /* see post_send() */ + barrier(); /* Post a send completion queue entry if requested. */ - if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || + if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) || (wqe->wr.send_flags & IB_SEND_SIGNALED)) { memset(&wc, 0, sizeof(wc)); wc.wr_id = wqe->wr.wr_id; @@ -1081,12 +1069,10 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp, wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; wc.byte_len = wqe->length; wc.qp = &qp->ibqp; - qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, 0); } - if (++qp->s_last >= qp->s_size) - qp->s_last = 0; } else - ibp->n_rc_delayed_comp++; + this_cpu_inc(*ibp->rvp.rc_delayed_comp); qp->s_retry = qp->s_retry_cnt; update_last_psn(qp, wqe->lpsn); @@ -1100,7 +1086,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp, if (++qp->s_cur >= qp->s_size) qp->s_cur = 0; qp->s_acked = qp->s_cur; - wqe = get_swqe_ptr(qp, qp->s_cur); + wqe = rvt_get_swqe_ptr(qp, qp->s_cur); if (qp->s_acked != qp->s_tail) { qp->s_state = OP(SEND_LAST); qp->s_psn = wqe->psn; @@ -1110,7 +1096,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp, qp->s_acked = 0; if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur) qp->s_draining = 0; - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); } return wqe; } @@ -1126,19 +1112,19 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp, * Called at interrupt level with the QP s_lock held. * Returns 1 if OK, 0 if current operation should be aborted (NAK). */ -static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, +static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode, u64 val, struct qib_ctxtdata *rcd) { struct qib_ibport *ibp; enum ib_wc_status status; - struct qib_swqe *wqe; + struct rvt_swqe *wqe; int ret = 0; u32 ack_psn; int diff; /* Remove QP from retry timer */ - if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) { - qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); + if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { + qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); del_timer(&qp->s_timer); } @@ -1151,7 +1137,7 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, ack_psn = psn; if (aeth >> 29) ack_psn--; - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); ibp = to_iport(qp->ibqp.device, qp->port_num); /* @@ -1186,11 +1172,11 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) && (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) { /* Retry this request. 
*/ - if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) { - qp->r_flags |= QIB_R_RDMAR_SEQ; + if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) { + qp->r_flags |= RVT_R_RDMAR_SEQ; qib_restart_rc(qp, qp->s_last_psn + 1, 0); if (list_empty(&qp->rspwait)) { - qp->r_flags |= QIB_R_RSP_SEND; + qp->r_flags |= RVT_R_RSP_SEND; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); @@ -1213,14 +1199,14 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) { qp->s_num_rd_atomic--; /* Restart sending task if fence is complete */ - if ((qp->s_flags & QIB_S_WAIT_FENCE) && + if ((qp->s_flags & RVT_S_WAIT_FENCE) && !qp->s_num_rd_atomic) { - qp->s_flags &= ~(QIB_S_WAIT_FENCE | - QIB_S_WAIT_ACK); + qp->s_flags &= ~(RVT_S_WAIT_FENCE | + RVT_S_WAIT_ACK); qib_schedule_send(qp); - } else if (qp->s_flags & QIB_S_WAIT_RDMAR) { - qp->s_flags &= ~(QIB_S_WAIT_RDMAR | - QIB_S_WAIT_ACK); + } else if (qp->s_flags & RVT_S_WAIT_RDMAR) { + qp->s_flags &= ~(RVT_S_WAIT_RDMAR | + RVT_S_WAIT_ACK); qib_schedule_send(qp); } } @@ -1231,7 +1217,7 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, switch (aeth >> 29) { case 0: /* ACK */ - ibp->n_rc_acks++; + this_cpu_inc(*ibp->rvp.rc_acks); if (qp->s_acked != qp->s_tail) { /* * We are expecting more ACKs so @@ -1248,8 +1234,8 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, qp->s_state = OP(SEND_LAST); qp->s_psn = psn + 1; } - if (qp->s_flags & QIB_S_WAIT_ACK) { - qp->s_flags &= ~QIB_S_WAIT_ACK; + if (qp->s_flags & RVT_S_WAIT_ACK) { + qp->s_flags &= ~RVT_S_WAIT_ACK; qib_schedule_send(qp); } qib_get_credit(qp, aeth); @@ -1260,10 +1246,10 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, goto bail; case 1: /* RNR NAK */ - ibp->n_rnr_naks++; + ibp->rvp.n_rnr_naks++; if (qp->s_acked == qp->s_tail) goto bail; - if (qp->s_flags & QIB_S_WAIT_RNR) + if (qp->s_flags & RVT_S_WAIT_RNR) goto bail; if (qp->s_rnr_retry == 0) { status = IB_WC_RNR_RETRY_EXC_ERR; @@ -1275,12 +1261,12 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, /* The last valid PSN is the previous PSN. */ update_last_psn(qp, psn - 1); - ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK; + ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK; reset_psn(qp, psn); - qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK); - qp->s_flags |= QIB_S_WAIT_RNR; + qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK); + qp->s_flags |= RVT_S_WAIT_RNR; qp->s_timer.function = qib_rc_rnr_retry; qp->s_timer.expires = jiffies + usecs_to_jiffies( ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) & @@ -1296,7 +1282,7 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, switch ((aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK) { case 0: /* PSN sequence error */ - ibp->n_seq_naks++; + ibp->rvp.n_seq_naks++; /* * Back up to the responder's expected PSN. 
* Note that we might get a NAK in the middle of an @@ -1309,21 +1295,21 @@ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, case 1: /* Invalid Request */ status = IB_WC_REM_INV_REQ_ERR; - ibp->n_other_naks++; + ibp->rvp.n_other_naks++; goto class_b; case 2: /* Remote Access Error */ status = IB_WC_REM_ACCESS_ERR; - ibp->n_other_naks++; + ibp->rvp.n_other_naks++; goto class_b; case 3: /* Remote Operation Error */ status = IB_WC_REM_OP_ERR; - ibp->n_other_naks++; + ibp->rvp.n_other_naks++; class_b: if (qp->s_last == qp->s_acked) { qib_send_complete(qp, wqe, status); - qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); + rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); } break; @@ -1349,18 +1335,18 @@ bail: * We have seen an out of sequence RDMA read middle or last packet. * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE. */ -static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn, +static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn, struct qib_ctxtdata *rcd) { - struct qib_swqe *wqe; + struct rvt_swqe *wqe; /* Remove QP from retry timer */ - if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) { - qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); + if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) { + qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR); del_timer(&qp->s_timer); } - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); while (qib_cmp24(psn, wqe->lpsn) > 0) { if (wqe->wr.opcode == IB_WR_RDMA_READ || @@ -1370,11 +1356,11 @@ static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn, wqe = do_rc_completion(qp, wqe, ibp); } - ibp->n_rdma_seq++; - qp->r_flags |= QIB_R_RDMAR_SEQ; + ibp->rvp.n_rdma_seq++; + qp->r_flags |= RVT_R_RDMAR_SEQ; qib_restart_rc(qp, qp->s_last_psn + 1, 0); if (list_empty(&qp->rspwait)) { - qp->r_flags |= QIB_R_RSP_SEND; + qp->r_flags |= RVT_R_RSP_SEND; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } @@ -1399,12 +1385,12 @@ static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn, static void qib_rc_rcv_resp(struct qib_ibport *ibp, struct qib_other_headers *ohdr, void *data, u32 tlen, - struct qib_qp *qp, + struct rvt_qp *qp, u32 opcode, u32 psn, u32 hdrsize, u32 pmtu, struct qib_ctxtdata *rcd) { - struct qib_swqe *wqe; + struct rvt_swqe *wqe; struct qib_pportdata *ppd = ppd_from_ibp(ibp); enum ib_wc_status status; unsigned long flags; @@ -1425,7 +1411,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, * If send tasklet not running attempt to progress * SDMA queue. */ - if (!(qp->s_flags & QIB_S_BUSY)) { + if (!(qp->s_flags & RVT_S_BUSY)) { /* Acquire SDMA Lock */ spin_lock_irqsave(&ppd->sdma_lock, flags); /* Invoke sdma make progress */ @@ -1437,11 +1423,12 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, } spin_lock_irqsave(&qp->s_lock, flags); - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) goto ack_done; /* Ignore invalid responses. */ - if (qib_cmp24(psn, qp->s_next_psn) >= 0) + smp_read_barrier_depends(); /* see post_one_send */ + if (qib_cmp24(psn, ACCESS_ONCE(qp->s_next_psn)) >= 0) goto ack_done; /* Ignore duplicate responses. */ @@ -1460,15 +1447,15 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, * Skip everything other than the PSN we expect, if we are waiting * for a reply to a restarted RDMA read or atomic op. 
*/ - if (qp->r_flags & QIB_R_RDMAR_SEQ) { + if (qp->r_flags & RVT_R_RDMAR_SEQ) { if (qib_cmp24(psn, qp->s_last_psn + 1) != 0) goto ack_done; - qp->r_flags &= ~QIB_R_RDMAR_SEQ; + qp->r_flags &= ~RVT_R_RDMAR_SEQ; } if (unlikely(qp->s_acked == qp->s_tail)) goto ack_done; - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); status = IB_WC_SUCCESS; switch (opcode) { @@ -1487,7 +1474,7 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, opcode != OP(RDMA_READ_RESPONSE_FIRST)) goto ack_done; hdrsize += 4; - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) goto ack_op_err; /* @@ -1515,10 +1502,10 @@ read_middle: * We got a response so update the timeout. * 4.096 usec. * (1 << qp->timeout) */ - qp->s_flags |= QIB_S_TIMER; + qp->s_flags |= RVT_S_TIMER; mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies); - if (qp->s_flags & QIB_S_WAIT_ACK) { - qp->s_flags &= ~QIB_S_WAIT_ACK; + if (qp->s_flags & RVT_S_WAIT_ACK) { + qp->s_flags &= ~RVT_S_WAIT_ACK; qib_schedule_send(qp); } @@ -1553,7 +1540,7 @@ read_middle: * have to be careful to copy the data to the right * location. */ - wqe = get_swqe_ptr(qp, qp->s_acked); + wqe = rvt_get_swqe_ptr(qp, qp->s_acked); qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, wqe, psn, pmtu); goto read_last; @@ -1598,7 +1585,7 @@ ack_len_err: ack_err: if (qp->s_last == qp->s_acked) { qib_send_complete(qp, wqe, status); - qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); + rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR); } ack_done: spin_unlock_irqrestore(&qp->s_lock, flags); @@ -1623,14 +1610,14 @@ bail: */ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, void *data, - struct qib_qp *qp, + struct rvt_qp *qp, u32 opcode, u32 psn, int diff, struct qib_ctxtdata *rcd) { struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); - struct qib_ack_entry *e; + struct rvt_ack_entry *e; unsigned long flags; u8 i, prev; int old_req; @@ -1642,7 +1629,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, * Don't queue the NAK if we already sent one. */ if (!qp->r_nak_state) { - ibp->n_rc_seqnak++; + ibp->rvp.n_rc_seqnak++; qp->r_nak_state = IB_NAK_PSN_ERROR; /* Use the expected PSN. */ qp->r_ack_psn = qp->r_psn; @@ -1652,7 +1639,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, * Otherwise, we end up propagating congestion. */ if (list_empty(&qp->rspwait)) { - qp->r_flags |= QIB_R_RSP_NAK; + qp->r_flags |= RVT_R_RSP_NAK; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } @@ -1678,7 +1665,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, */ e = NULL; old_req = 1; - ibp->n_rc_dupreq++; + ibp->rvp.n_rc_dupreq++; spin_lock_irqsave(&qp->s_lock, flags); @@ -1732,7 +1719,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, if (unlikely(offset + len != e->rdma_sge.sge_length)) goto unlock_done; if (e->rdma_sge.mr) { - qib_put_mr(e->rdma_sge.mr); + rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } if (len != 0) { @@ -1740,7 +1727,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, u64 vaddr = be64_to_cpu(reth->vaddr); int ok; - ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, + ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, IB_ACCESS_REMOTE_READ); if (unlikely(!ok)) goto unlock_done; @@ -1791,7 +1778,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, * which doesn't accept a RDMA read response or atomic * response as an ACK for earlier SENDs or RDMA writes. 
*/ - if (!(qp->s_flags & QIB_S_RESP_PENDING)) { + if (!(qp->s_flags & RVT_S_RESP_PENDING)) { spin_unlock_irqrestore(&qp->s_lock, flags); qp->r_nak_state = 0; qp->r_ack_psn = qp->s_ack_queue[i].psn - 1; @@ -1805,7 +1792,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, break; } qp->s_ack_state = OP(ACKNOWLEDGE); - qp->s_flags |= QIB_S_RESP_PENDING; + qp->s_flags |= RVT_S_RESP_PENDING; qp->r_nak_state = 0; qib_schedule_send(qp); @@ -1818,13 +1805,13 @@ send_ack: return 0; } -void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err) +void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err) { unsigned long flags; int lastwqe; spin_lock_irqsave(&qp->s_lock, flags); - lastwqe = qib_error_qp(qp, err); + lastwqe = rvt_error_qp(qp, err); spin_unlock_irqrestore(&qp->s_lock, flags); if (lastwqe) { @@ -1837,7 +1824,7 @@ void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err) } } -static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n) +static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n) { unsigned next; @@ -1862,7 +1849,7 @@ static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n) * Called at interrupt level. */ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, - int has_grh, void *data, u32 tlen, struct qib_qp *qp) + int has_grh, void *data, u32 tlen, struct rvt_qp *qp) { struct qib_ibport *ibp = &rcd->ppd->ibport_data; struct qib_other_headers *ohdr; @@ -1948,8 +1935,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, break; } - if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) { - qp->r_flags |= QIB_R_COMM_EST; + if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) { + qp->r_flags |= RVT_R_COMM_EST; if (qp->ibqp.event_handler) { struct ib_event ev; @@ -2026,9 +2013,9 @@ send_last: if (unlikely(wc.byte_len > qp->r_len)) goto nack_inv; qib_copy_sge(&qp->r_sge, data, tlen, 1); - qib_put_ss(&qp->r_sge); + rvt_put_ss(&qp->r_sge); qp->r_msn++; - if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) + if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) break; wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; @@ -2047,7 +2034,7 @@ send_last: wc.dlid_path_bits = 0; wc.port_num = 0; /* Signal completion event if the solicited bit is set. */ - qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, (ohdr->bth[0] & cpu_to_be32(IB_BTH_SOLICITED)) != 0); break; @@ -2069,7 +2056,7 @@ send_last: int ok; /* Check rkey & NAK */ - ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr, + ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr, rkey, IB_ACCESS_REMOTE_WRITE); if (unlikely(!ok)) goto nack_acc; @@ -2096,7 +2083,7 @@ send_last: goto send_last; case OP(RDMA_READ_REQUEST): { - struct qib_ack_entry *e; + struct rvt_ack_entry *e; u32 len; u8 next; @@ -2114,7 +2101,7 @@ send_last: } e = &qp->s_ack_queue[qp->r_head_ack_queue]; if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { - qib_put_mr(e->rdma_sge.mr); + rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } reth = &ohdr->u.rc.reth; @@ -2125,7 +2112,7 @@ send_last: int ok; /* Check rkey & NAK */ - ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, + ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, IB_ACCESS_REMOTE_READ); if (unlikely(!ok)) goto nack_acc_unlck; @@ -2157,7 +2144,7 @@ send_last: qp->r_head_ack_queue = next; /* Schedule the send tasklet. 
*/ - qp->s_flags |= QIB_S_RESP_PENDING; + qp->s_flags |= RVT_S_RESP_PENDING; qib_schedule_send(qp); goto sunlock; @@ -2166,7 +2153,7 @@ send_last: case OP(COMPARE_SWAP): case OP(FETCH_ADD): { struct ib_atomic_eth *ateth; - struct qib_ack_entry *e; + struct rvt_ack_entry *e; u64 vaddr; atomic64_t *maddr; u64 sdata; @@ -2186,7 +2173,7 @@ send_last: } e = &qp->s_ack_queue[qp->r_head_ack_queue]; if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { - qib_put_mr(e->rdma_sge.mr); + rvt_put_mr(e->rdma_sge.mr); e->rdma_sge.mr = NULL; } ateth = &ohdr->u.atomic_eth; @@ -2196,7 +2183,7 @@ send_last: goto nack_inv_unlck; rkey = be32_to_cpu(ateth->rkey); /* Check rkey & NAK */ - if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), + if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), vaddr, rkey, IB_ACCESS_REMOTE_ATOMIC))) goto nack_acc_unlck; @@ -2208,7 +2195,7 @@ send_last: (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, be64_to_cpu(ateth->compare_data), sdata); - qib_put_mr(qp->r_sge.sge.mr); + rvt_put_mr(qp->r_sge.sge.mr); qp->r_sge.num_sge = 0; e->opcode = opcode; e->sent = 0; @@ -2221,7 +2208,7 @@ send_last: qp->r_head_ack_queue = next; /* Schedule the send tasklet. */ - qp->s_flags |= QIB_S_RESP_PENDING; + qp->s_flags |= RVT_S_RESP_PENDING; qib_schedule_send(qp); goto sunlock; @@ -2245,7 +2232,7 @@ rnr_nak: qp->r_ack_psn = qp->r_psn; /* Queue RNR NAK for later */ if (list_empty(&qp->rspwait)) { - qp->r_flags |= QIB_R_RSP_NAK; + qp->r_flags |= RVT_R_RSP_NAK; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } @@ -2257,7 +2244,7 @@ nack_op_err: qp->r_ack_psn = qp->r_psn; /* Queue NAK for later */ if (list_empty(&qp->rspwait)) { - qp->r_flags |= QIB_R_RSP_NAK; + qp->r_flags |= RVT_R_RSP_NAK; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } @@ -2271,7 +2258,7 @@ nack_inv: qp->r_ack_psn = qp->r_psn; /* Queue NAK for later */ if (list_empty(&qp->rspwait)) { - qp->r_flags |= QIB_R_RSP_NAK; + qp->r_flags |= RVT_R_RSP_NAK; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index b1aa21bdd..a5f07a64b 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c @@ -79,16 +79,16 @@ const u32 ib_qib_rnr_table[32] = { * Validate a RWQE and fill in the SGE state. * Return 1 if OK. */ -static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe) +static int qib_init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe) { int i, j, ret; struct ib_wc wc; - struct qib_lkey_table *rkt; - struct qib_pd *pd; - struct qib_sge_state *ss; + struct rvt_lkey_table *rkt; + struct rvt_pd *pd; + struct rvt_sge_state *ss; - rkt = &to_idev(qp->ibqp.device)->lk_table; - pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd); + rkt = &to_idev(qp->ibqp.device)->rdi.lkey_table; + pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd); ss = &qp->r_sge; ss->sg_list = qp->r_sg_list; qp->r_len = 0; @@ -96,7 +96,7 @@ static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe) if (wqe->sg_list[i].length == 0) continue; /* Check LKEY */ - if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge, + if (!rvt_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge, &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE)) goto bad_lkey; qp->r_len += wqe->sg_list[i].length; @@ -109,9 +109,9 @@ static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe) bad_lkey: while (j) { - struct qib_sge *sge = --j ? 
&ss->sg_list[j - 1] : &ss->sge; + struct rvt_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge; - qib_put_mr(sge->mr); + rvt_put_mr(sge->mr); } ss->num_sge = 0; memset(&wc, 0, sizeof(wc)); @@ -120,7 +120,7 @@ bad_lkey: wc.opcode = IB_WC_RECV; wc.qp = &qp->ibqp; /* Signal solicited completion event. */ - qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1); ret = 0; bail: return ret; @@ -136,19 +136,19 @@ bail: * * Can be called from interrupt level. */ -int qib_get_rwqe(struct qib_qp *qp, int wr_id_only) +int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only) { unsigned long flags; - struct qib_rq *rq; - struct qib_rwq *wq; - struct qib_srq *srq; - struct qib_rwqe *wqe; + struct rvt_rq *rq; + struct rvt_rwq *wq; + struct rvt_srq *srq; + struct rvt_rwqe *wqe; void (*handler)(struct ib_event *, void *); u32 tail; int ret; if (qp->ibqp.srq) { - srq = to_isrq(qp->ibqp.srq); + srq = ibsrq_to_rvtsrq(qp->ibqp.srq); handler = srq->ibsrq.event_handler; rq = &srq->rq; } else { @@ -158,7 +158,7 @@ int qib_get_rwqe(struct qib_qp *qp, int wr_id_only) } spin_lock_irqsave(&rq->lock, flags); - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { ret = 0; goto unlock; } @@ -174,7 +174,7 @@ int qib_get_rwqe(struct qib_qp *qp, int wr_id_only) } /* Make sure entry is read after head index is read. */ smp_rmb(); - wqe = get_rwqe_ptr(rq, tail); + wqe = rvt_get_rwqe_ptr(rq, tail); /* * Even though we update the tail index in memory, the verbs * consumer is not supposed to post more entries until a @@ -190,7 +190,7 @@ int qib_get_rwqe(struct qib_qp *qp, int wr_id_only) qp->r_wr_id = wqe->wr_id; ret = 1; - set_bit(QIB_R_WRID_VALID, &qp->r_aflags); + set_bit(RVT_R_WRID_VALID, &qp->r_aflags); if (handler) { u32 n; @@ -227,7 +227,7 @@ bail: * Switch to alternate path. * The QP s_lock should be held and interrupts disabled. */ -void qib_migrate_qp(struct qib_qp *qp) +void qib_migrate_qp(struct rvt_qp *qp) { struct ib_event ev; @@ -266,7 +266,7 @@ static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id) * The s_lock will be acquired around the qib_migrate_qp() call. */ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, - int has_grh, struct qib_qp *qp, u32 bth0) + int has_grh, struct rvt_qp *qp, u32 bth0) { __be64 guid; unsigned long flags; @@ -279,7 +279,8 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH)) goto err; guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index); - if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid)) + if (!gid_ok(&hdr->u.l.grh.dgid, + ibp->rvp.gid_prefix, guid)) goto err; if (!gid_ok(&hdr->u.l.grh.sgid, qp->alt_ah_attr.grh.dgid.global.subnet_prefix, @@ -311,7 +312,8 @@ int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, goto err; guid = get_sguid(ibp, qp->remote_ah_attr.grh.sgid_index); - if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid)) + if (!gid_ok(&hdr->u.l.grh.dgid, + ibp->rvp.gid_prefix, guid)) goto err; if (!gid_ok(&hdr->u.l.grh.sgid, qp->remote_ah_attr.grh.dgid.global.subnet_prefix, @@ -353,12 +355,15 @@ err: * receive interrupts since this is a connected protocol and all packets * will pass through here. 
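The qib_get_rwqe() hunk above keeps the existing publication protocol: the poster fills the receive WQE, issues smp_wmb(), then advances head, while the consumer reads head, issues smp_rmb(), and only then reads the entry (the producer half is visible again in the qib_srq.c code removed further down). A small single-producer/single-consumer model of that protocol; the 8-entry ring is invented and C11 acquire/release takes the place of the kernel barriers.

#include <stdatomic.h>
#include <stdio.h>

struct rwqe { unsigned long wr_id; };

struct rq {
	struct rwqe ring[8];
	_Atomic unsigned head;	/* written by the poster */
	unsigned tail;		/* owned by the consumer */
};

/* Poster: fill the entry, then publish head (smp_wmb() in the driver). */
static void post_one(struct rq *rq, unsigned long wr_id)
{
	unsigned head = atomic_load_explicit(&rq->head, memory_order_relaxed);

	rq->ring[head].wr_id = wr_id;	/* full-ring check omitted for brevity */
	atomic_store_explicit(&rq->head, (head + 1) % 8, memory_order_release);
}

/* Consumer: read head first, then the entry (smp_rmb() in the driver). */
static int get_one(struct rq *rq, unsigned long *wr_id)
{
	unsigned head = atomic_load_explicit(&rq->head, memory_order_acquire);

	if (rq->tail == head)
		return 0;			/* ring empty */
	*wr_id = rq->ring[rq->tail].wr_id;	/* entry was written before head */
	rq->tail = (rq->tail + 1) % 8;
	return 1;
}

int main(void)
{
	struct rq rq = { .tail = 0 };
	unsigned long id;

	post_one(&rq, 42);
	if (get_one(&rq, &id))
		printf("got wr_id %lu\n", id);
	return 0;
}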
*/ -static void qib_ruc_loopback(struct qib_qp *sqp) +static void qib_ruc_loopback(struct rvt_qp *sqp) { struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num); - struct qib_qp *qp; - struct qib_swqe *wqe; - struct qib_sge *sge; + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + struct qib_devdata *dd = ppd->dd; + struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; + struct rvt_qp *qp; + struct rvt_swqe *wqe; + struct rvt_sge *sge; unsigned long flags; struct ib_wc wc; u64 sdata; @@ -367,29 +372,33 @@ static void qib_ruc_loopback(struct qib_qp *sqp) int release; int ret; + rcu_read_lock(); /* * Note that we check the responder QP state after * checking the requester's state. */ - qp = qib_lookup_qpn(ibp, sqp->remote_qpn); + qp = rvt_lookup_qpn(rdi, &ibp->rvp, sqp->remote_qpn); + if (!qp) + goto done; spin_lock_irqsave(&sqp->s_lock, flags); /* Return if we are already busy processing a work request. */ - if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) || - !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND)) + if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) || + !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND)) goto unlock; - sqp->s_flags |= QIB_S_BUSY; + sqp->s_flags |= RVT_S_BUSY; again: - if (sqp->s_last == sqp->s_head) + smp_read_barrier_depends(); /* see post_one_send() */ + if (sqp->s_last == ACCESS_ONCE(sqp->s_head)) goto clr_busy; - wqe = get_swqe_ptr(sqp, sqp->s_last); + wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); /* Return if it is not OK to start a new work reqeust. */ - if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) { - if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND)) + if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) { + if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND)) goto clr_busy; /* We are in the error state, flush the work request. */ send_status = IB_WC_WR_FLUSH_ERR; @@ -407,9 +416,9 @@ again: } spin_unlock_irqrestore(&sqp->s_lock, flags); - if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) || + if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) || qp->ibqp.qp_type != sqp->ibqp.qp_type) { - ibp->n_pkt_drops++; + ibp->rvp.n_pkt_drops++; /* * For RC, the requester would timeout and retry so * shortcut the timeouts and just signal too many retries. 
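The qib_ruc_loopback() conversion above drops the old lookup-takes-a-reference pattern (and the matching atomic_dec_and_test()/wake_up() on exit) and instead runs the whole loopback inside rcu_read_lock()/rcu_read_unlock(), with rvt_lookup_qpn() doing the QPN lookup; qib_ud_loopback() later in this patch gets the same treatment. The sketch below only models the structural change: a pthread read lock is used as a stand-in for the RCU read-side critical section, and the four-entry QPN table and all names are invented.

#include <pthread.h>
#include <stdio.h>

struct qp { int qpn; };

/* toy QPN table; the driver uses the rdmavt QPN table via rvt_lookup_qpn() */
static struct qp *qpn_table[4];
static pthread_rwlock_t tbl_lock = PTHREAD_RWLOCK_INITIALIZER;

static struct qp *lookup_qpn(int qpn)
{
	return (qpn >= 0 && qpn < 4) ? qpn_table[qpn] : NULL;
}

static void loopback(int remote_qpn)
{
	struct qp *qp;

	pthread_rwlock_rdlock(&tbl_lock);	/* rcu_read_lock() in the driver */
	qp = lookup_qpn(remote_qpn);
	if (qp)
		printf("deliver to qp %d\n", qp->qpn);
	/* no refcount to drop, no waiter to wake */
	pthread_rwlock_unlock(&tbl_lock);	/* rcu_read_unlock() */
}

int main(void)
{
	static struct qp q1 = { .qpn = 1 };

	qpn_table[1] = &q1;
	loopback(1);
	loopback(2);	/* not found, silently skipped */
	return 0;
}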
@@ -458,7 +467,7 @@ again: goto inv_err; if (wqe->length == 0) break; - if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length, + if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length, wqe->rdma_wr.remote_addr, wqe->rdma_wr.rkey, IB_ACCESS_REMOTE_WRITE))) @@ -471,7 +480,7 @@ again: case IB_WR_RDMA_READ: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) goto inv_err; - if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, + if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length, wqe->rdma_wr.remote_addr, wqe->rdma_wr.rkey, IB_ACCESS_REMOTE_READ))) @@ -489,7 +498,7 @@ again: case IB_WR_ATOMIC_FETCH_AND_ADD: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) goto inv_err; - if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), + if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), wqe->atomic_wr.remote_addr, wqe->atomic_wr.rkey, IB_ACCESS_REMOTE_ATOMIC))) @@ -502,7 +511,7 @@ again: (u64) atomic64_add_return(sdata, maddr) - sdata : (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, sdata, wqe->atomic_wr.swap); - qib_put_mr(qp->r_sge.sge.mr); + rvt_put_mr(qp->r_sge.sge.mr); qp->r_sge.num_sge = 0; goto send_comp; @@ -526,11 +535,11 @@ again: sge->sge_length -= len; if (sge->sge_length == 0) { if (!release) - qib_put_mr(sge->mr); + rvt_put_mr(sge->mr); if (--sqp->s_sge.num_sge) *sge = *sqp->s_sge.sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { - if (++sge->n >= QIB_SEGSZ) { + if (++sge->n >= RVT_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; @@ -543,9 +552,9 @@ again: sqp->s_len -= len; } if (release) - qib_put_ss(&qp->r_sge); + rvt_put_ss(&qp->r_sge); - if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) + if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) goto send_comp; if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM) @@ -561,12 +570,12 @@ again: wc.sl = qp->remote_ah_attr.sl; wc.port_num = 1; /* Signal completion event if the solicited bit is set. */ - qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, - wqe->wr.send_flags & IB_SEND_SOLICITED); + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, + wqe->wr.send_flags & IB_SEND_SOLICITED); send_comp: spin_lock_irqsave(&sqp->s_lock, flags); - ibp->n_loop_pkts++; + ibp->rvp.n_loop_pkts++; flush_send: sqp->s_rnr_retry = sqp->s_rnr_retry_cnt; qib_send_complete(sqp, wqe, send_status); @@ -576,7 +585,7 @@ rnr_nak: /* Handle RNR NAK */ if (qp->ibqp.qp_type == IB_QPT_UC) goto send_comp; - ibp->n_rnr_naks++; + ibp->rvp.n_rnr_naks++; /* * Note: we don't need the s_lock held since the BUSY flag * makes this single threaded. 
@@ -588,9 +597,9 @@ rnr_nak: if (sqp->s_rnr_retry_cnt < 7) sqp->s_rnr_retry--; spin_lock_irqsave(&sqp->s_lock, flags); - if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK)) + if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK)) goto clr_busy; - sqp->s_flags |= QIB_S_WAIT_RNR; + sqp->s_flags |= RVT_S_WAIT_RNR; sqp->s_timer.function = qib_rc_rnr_retry; sqp->s_timer.expires = jiffies + usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]); @@ -618,9 +627,9 @@ serr: spin_lock_irqsave(&sqp->s_lock, flags); qib_send_complete(sqp, wqe, send_status); if (sqp->ibqp.qp_type == IB_QPT_RC) { - int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR); + int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR); - sqp->s_flags &= ~QIB_S_BUSY; + sqp->s_flags &= ~RVT_S_BUSY; spin_unlock_irqrestore(&sqp->s_lock, flags); if (lastwqe) { struct ib_event ev; @@ -633,12 +642,11 @@ serr: goto done; } clr_busy: - sqp->s_flags &= ~QIB_S_BUSY; + sqp->s_flags &= ~RVT_S_BUSY; unlock: spin_unlock_irqrestore(&sqp->s_lock, flags); done: - if (qp && atomic_dec_and_test(&qp->refcount)) - wake_up(&qp->wait); + rcu_read_unlock(); } /** @@ -663,7 +671,7 @@ u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr, hdr->next_hdr = IB_GRH_NEXT_HDR; hdr->hop_limit = grh->hop_limit; /* The SGID is 32-bit aligned. */ - hdr->sgid.global.subnet_prefix = ibp->gid_prefix; + hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix; hdr->sgid.global.interface_id = grh->sgid_index ? ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid; hdr->dgid = grh->dgid; @@ -672,9 +680,10 @@ u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr, return sizeof(struct ib_grh) / sizeof(u32); } -void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, +void qib_make_ruc_header(struct rvt_qp *qp, struct qib_other_headers *ohdr, u32 bth0, u32 bth2) { + struct qib_qp_priv *priv = qp->priv; struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); u16 lrh0; u32 nwords; @@ -685,17 +694,18 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, nwords = (qp->s_cur_size + extra_bytes) >> 2; lrh0 = QIB_LRH_BTH; if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { - qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh, + qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh, &qp->remote_ah_attr.grh, qp->s_hdrwords, nwords); lrh0 = QIB_LRH_GRH; } lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 | qp->remote_ah_attr.sl << 4; - qp->s_hdr->lrh[0] = cpu_to_be16(lrh0); - qp->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); - qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); - qp->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid | + priv->s_hdr->lrh[0] = cpu_to_be16(lrh0); + priv->s_hdr->lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); + priv->s_hdr->lrh[2] = + cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); + priv->s_hdr->lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid | qp->remote_ah_attr.src_path_bits); bth0 |= qib_get_pkey(ibp, qp->s_pkey_index); bth0 |= extra_bytes << 20; @@ -707,20 +717,29 @@ void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, this_cpu_inc(ibp->pmastats->n_unicast_xmit); } +void _qib_do_send(struct work_struct *work) +{ + struct qib_qp_priv *priv = container_of(work, struct qib_qp_priv, + s_work); + struct rvt_qp *qp = priv->owner; + + qib_do_send(qp); +} + /** * qib_do_send - perform a send on a QP - * @work: contains a pointer to the QP + * @qp: pointer to the QP * * Process entries in the send work queue until credit 
or queue is * exhausted. Only allow one CPU to send a packet per QP (tasklet). * Otherwise, two threads could send packets out of order. */ -void qib_do_send(struct work_struct *work) +void qib_do_send(struct rvt_qp *qp) { - struct qib_qp *qp = container_of(work, struct qib_qp, s_work); + struct qib_qp_priv *priv = qp->priv; struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct qib_pportdata *ppd = ppd_from_ibp(ibp); - int (*make_req)(struct qib_qp *qp); + int (*make_req)(struct rvt_qp *qp); unsigned long flags; if ((qp->ibqp.qp_type == IB_QPT_RC || @@ -745,50 +764,59 @@ void qib_do_send(struct work_struct *work) return; } - qp->s_flags |= QIB_S_BUSY; - - spin_unlock_irqrestore(&qp->s_lock, flags); + qp->s_flags |= RVT_S_BUSY; do { /* Check for a constructed packet to be sent. */ if (qp->s_hdrwords != 0) { + spin_unlock_irqrestore(&qp->s_lock, flags); /* * If the packet cannot be sent now, return and * the send tasklet will be woken up later. */ - if (qib_verbs_send(qp, qp->s_hdr, qp->s_hdrwords, + if (qib_verbs_send(qp, priv->s_hdr, qp->s_hdrwords, qp->s_cur_sge, qp->s_cur_size)) - break; + return; /* Record that s_hdr is empty. */ qp->s_hdrwords = 0; + spin_lock_irqsave(&qp->s_lock, flags); } } while (make_req(qp)); + + spin_unlock_irqrestore(&qp->s_lock, flags); } /* * This should be called with s_lock held. */ -void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, +void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, enum ib_wc_status status) { u32 old_last, last; unsigned i; - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND)) return; + last = qp->s_last; + old_last = last; + if (++last >= qp->s_size) + last = 0; + qp->s_last = last; + /* See post_send() */ + barrier(); for (i = 0; i < wqe->wr.num_sge; i++) { - struct qib_sge *sge = &wqe->sg_list[i]; + struct rvt_sge *sge = &wqe->sg_list[i]; - qib_put_mr(sge->mr); + rvt_put_mr(sge->mr); } if (qp->ibqp.qp_type == IB_QPT_UD || qp->ibqp.qp_type == IB_QPT_SMI || qp->ibqp.qp_type == IB_QPT_GSI) - atomic_dec(&to_iah(wqe->ud_wr.ah)->refcount); + atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount); /* See ch. 
11.2.4.1 and 10.7.3.1 */ - if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || + if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) || (wqe->wr.send_flags & IB_SEND_SIGNALED) || status != IB_WC_SUCCESS) { struct ib_wc wc; @@ -800,15 +828,10 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, wc.qp = &qp->ibqp; if (status == IB_WC_SUCCESS) wc.byte_len = wqe->length; - qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, status != IB_WC_SUCCESS); } - last = qp->s_last; - old_last = last; - if (++last >= qp->s_size) - last = 0; - qp->s_last = last; if (qp->s_acked == old_last) qp->s_acked = last; if (qp->s_cur == old_last) diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c index c6d6a54d2..891873b38 100644 --- a/drivers/infiniband/hw/qib/qib_sdma.c +++ b/drivers/infiniband/hw/qib/qib_sdma.c @@ -513,7 +513,9 @@ int qib_sdma_running(struct qib_pportdata *ppd) static void complete_sdma_err_req(struct qib_pportdata *ppd, struct qib_verbs_txreq *tx) { - atomic_inc(&tx->qp->s_dma_busy); + struct qib_qp_priv *priv = tx->qp->priv; + + atomic_inc(&priv->s_dma_busy); /* no sdma descriptors, so no unmap_desc */ tx->txreq.start_idx = 0; tx->txreq.next_descq_idx = 0; @@ -531,18 +533,19 @@ static void complete_sdma_err_req(struct qib_pportdata *ppd, * 3) The SGE addresses are suitable for passing to dma_map_single(). */ int qib_sdma_verbs_send(struct qib_pportdata *ppd, - struct qib_sge_state *ss, u32 dwords, + struct rvt_sge_state *ss, u32 dwords, struct qib_verbs_txreq *tx) { unsigned long flags; - struct qib_sge *sge; - struct qib_qp *qp; + struct rvt_sge *sge; + struct rvt_qp *qp; int ret = 0; u16 tail; __le64 *descqp; u64 sdmadesc[2]; u32 dwoffset; dma_addr_t addr; + struct qib_qp_priv *priv; spin_lock_irqsave(&ppd->sdma_lock, flags); @@ -621,7 +624,7 @@ retry: if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { - if (++sge->n >= QIB_SEGSZ) { + if (++sge->n >= RVT_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; @@ -644,8 +647,8 @@ retry: descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD); if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ) descqp[0] |= cpu_to_le64(SDMA_DESC_INTR); - - atomic_inc(&tx->qp->s_dma_busy); + priv = tx->qp->priv; + atomic_inc(&priv->s_dma_busy); tx->txreq.next_descq_idx = tail; ppd->dd->f_sdma_update_tail(ppd, tail); ppd->sdma_descq_added += tx->txreq.sg_count; @@ -663,13 +666,14 @@ unmap: unmap_desc(ppd, tail); } qp = tx->qp; + priv = qp->priv; qib_put_txreq(tx); spin_lock(&qp->r_lock); spin_lock(&qp->s_lock); if (qp->ibqp.qp_type == IB_QPT_RC) { /* XXX what about error sending RDMA read responses? 
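In the qib_sdma.c hunks above, fields such as s_dma_busy now live in a qib-private structure reached through qp->priv rather than directly in the (now shared) rvt_qp. A trimmed-down user-space illustration of that split; both structures here carry only a couple of stand-in fields and the allocation helper is invented.

#include <stdlib.h>

/* common QP state owned by the shared rdmavt layer (heavily trimmed) */
struct rvt_qp {
	unsigned s_flags;
	void *priv;		/* driver-private part */
};

/* qib-specific per-QP state, reached only through qp->priv */
struct qib_qp_priv {
	int s_dma_busy;
	struct rvt_qp *owner;
};

static struct rvt_qp *alloc_qp(void)
{
	struct rvt_qp *qp = calloc(1, sizeof(*qp));
	struct qib_qp_priv *priv = calloc(1, sizeof(*priv));

	if (!qp || !priv) {
		free(qp);
		free(priv);
		return NULL;
	}
	priv->owner = qp;
	qp->priv = priv;
	return qp;
}

int main(void)
{
	struct rvt_qp *qp = alloc_qp();

	if (qp) {
		struct qib_qp_priv *priv = qp->priv;

		priv->s_dma_busy++;	/* was a field of the QP itself before the split */
		free(qp->priv);
		free(qp);
	}
	return 0;
}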
*/ - if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) - qib_error_qp(qp, IB_WC_GENERAL_ERR); + if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) + rvt_error_qp(qp, IB_WC_GENERAL_ERR); } else if (qp->s_wqe) qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR); spin_unlock(&qp->s_lock); @@ -679,8 +683,9 @@ unmap: busy: qp = tx->qp; + priv = qp->priv; spin_lock(&qp->s_lock); - if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { + if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { struct qib_ibdev *dev; /* @@ -690,19 +695,19 @@ busy: */ tx->ss = ss; tx->dwords = dwords; - qp->s_tx = tx; + priv->s_tx = tx; dev = &ppd->dd->verbs_dev; - spin_lock(&dev->pending_lock); - if (list_empty(&qp->iowait)) { + spin_lock(&dev->rdi.pending_lock); + if (list_empty(&priv->iowait)) { struct qib_ibport *ibp; ibp = &ppd->ibport_data; - ibp->n_dmawait++; - qp->s_flags |= QIB_S_WAIT_DMA_DESC; - list_add_tail(&qp->iowait, &dev->dmawait); + ibp->rvp.n_dmawait++; + qp->s_flags |= RVT_S_WAIT_DMA_DESC; + list_add_tail(&priv->iowait, &dev->dmawait); } - spin_unlock(&dev->pending_lock); - qp->s_flags &= ~QIB_S_BUSY; + spin_unlock(&dev->rdi.pending_lock); + qp->s_flags &= ~RVT_S_BUSY; spin_unlock(&qp->s_lock); ret = -EBUSY; } else { diff --git a/drivers/infiniband/hw/qib/qib_srq.c b/drivers/infiniband/hw/qib/qib_srq.c deleted file mode 100644 index d6235931a..000000000 --- a/drivers/infiniband/hw/qib/qib_srq.c +++ /dev/null @@ -1,380 +0,0 @@ -/* - * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. - * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include <linux/err.h> -#include <linux/slab.h> -#include <linux/vmalloc.h> - -#include "qib_verbs.h" - -/** - * qib_post_srq_receive - post a receive on a shared receive queue - * @ibsrq: the SRQ to post the receive on - * @wr: the list of work requests to post - * @bad_wr: A pointer to the first WR to cause a problem is put here - * - * This may be called from interrupt context. 
- */ -int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, - struct ib_recv_wr **bad_wr) -{ - struct qib_srq *srq = to_isrq(ibsrq); - struct qib_rwq *wq; - unsigned long flags; - int ret; - - for (; wr; wr = wr->next) { - struct qib_rwqe *wqe; - u32 next; - int i; - - if ((unsigned) wr->num_sge > srq->rq.max_sge) { - *bad_wr = wr; - ret = -EINVAL; - goto bail; - } - - spin_lock_irqsave(&srq->rq.lock, flags); - wq = srq->rq.wq; - next = wq->head + 1; - if (next >= srq->rq.size) - next = 0; - if (next == wq->tail) { - spin_unlock_irqrestore(&srq->rq.lock, flags); - *bad_wr = wr; - ret = -ENOMEM; - goto bail; - } - - wqe = get_rwqe_ptr(&srq->rq, wq->head); - wqe->wr_id = wr->wr_id; - wqe->num_sge = wr->num_sge; - for (i = 0; i < wr->num_sge; i++) - wqe->sg_list[i] = wr->sg_list[i]; - /* Make sure queue entry is written before the head index. */ - smp_wmb(); - wq->head = next; - spin_unlock_irqrestore(&srq->rq.lock, flags); - } - ret = 0; - -bail: - return ret; -} - -/** - * qib_create_srq - create a shared receive queue - * @ibpd: the protection domain of the SRQ to create - * @srq_init_attr: the attributes of the SRQ - * @udata: data from libibverbs when creating a user SRQ - */ -struct ib_srq *qib_create_srq(struct ib_pd *ibpd, - struct ib_srq_init_attr *srq_init_attr, - struct ib_udata *udata) -{ - struct qib_ibdev *dev = to_idev(ibpd->device); - struct qib_srq *srq; - u32 sz; - struct ib_srq *ret; - - if (srq_init_attr->srq_type != IB_SRQT_BASIC) { - ret = ERR_PTR(-ENOSYS); - goto done; - } - - if (srq_init_attr->attr.max_sge == 0 || - srq_init_attr->attr.max_sge > ib_qib_max_srq_sges || - srq_init_attr->attr.max_wr == 0 || - srq_init_attr->attr.max_wr > ib_qib_max_srq_wrs) { - ret = ERR_PTR(-EINVAL); - goto done; - } - - srq = kmalloc(sizeof(*srq), GFP_KERNEL); - if (!srq) { - ret = ERR_PTR(-ENOMEM); - goto done; - } - - /* - * Need to use vmalloc() if we want to support large #s of entries. - */ - srq->rq.size = srq_init_attr->attr.max_wr + 1; - srq->rq.max_sge = srq_init_attr->attr.max_sge; - sz = sizeof(struct ib_sge) * srq->rq.max_sge + - sizeof(struct qib_rwqe); - srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz); - if (!srq->rq.wq) { - ret = ERR_PTR(-ENOMEM); - goto bail_srq; - } - - /* - * Return the address of the RWQ as the offset to mmap. - * See qib_mmap() for details. - */ - if (udata && udata->outlen >= sizeof(__u64)) { - int err; - u32 s = sizeof(struct qib_rwq) + srq->rq.size * sz; - - srq->ip = - qib_create_mmap_info(dev, s, ibpd->uobject->context, - srq->rq.wq); - if (!srq->ip) { - ret = ERR_PTR(-ENOMEM); - goto bail_wq; - } - - err = ib_copy_to_udata(udata, &srq->ip->offset, - sizeof(srq->ip->offset)); - if (err) { - ret = ERR_PTR(err); - goto bail_ip; - } - } else - srq->ip = NULL; - - /* - * ib_create_srq() will initialize srq->ibsrq. 
- */ - spin_lock_init(&srq->rq.lock); - srq->rq.wq->head = 0; - srq->rq.wq->tail = 0; - srq->limit = srq_init_attr->attr.srq_limit; - - spin_lock(&dev->n_srqs_lock); - if (dev->n_srqs_allocated == ib_qib_max_srqs) { - spin_unlock(&dev->n_srqs_lock); - ret = ERR_PTR(-ENOMEM); - goto bail_ip; - } - - dev->n_srqs_allocated++; - spin_unlock(&dev->n_srqs_lock); - - if (srq->ip) { - spin_lock_irq(&dev->pending_lock); - list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps); - spin_unlock_irq(&dev->pending_lock); - } - - ret = &srq->ibsrq; - goto done; - -bail_ip: - kfree(srq->ip); -bail_wq: - vfree(srq->rq.wq); -bail_srq: - kfree(srq); -done: - return ret; -} - -/** - * qib_modify_srq - modify a shared receive queue - * @ibsrq: the SRQ to modify - * @attr: the new attributes of the SRQ - * @attr_mask: indicates which attributes to modify - * @udata: user data for libibverbs.so - */ -int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, - enum ib_srq_attr_mask attr_mask, - struct ib_udata *udata) -{ - struct qib_srq *srq = to_isrq(ibsrq); - struct qib_rwq *wq; - int ret = 0; - - if (attr_mask & IB_SRQ_MAX_WR) { - struct qib_rwq *owq; - struct qib_rwqe *p; - u32 sz, size, n, head, tail; - - /* Check that the requested sizes are below the limits. */ - if ((attr->max_wr > ib_qib_max_srq_wrs) || - ((attr_mask & IB_SRQ_LIMIT) ? - attr->srq_limit : srq->limit) > attr->max_wr) { - ret = -EINVAL; - goto bail; - } - - sz = sizeof(struct qib_rwqe) + - srq->rq.max_sge * sizeof(struct ib_sge); - size = attr->max_wr + 1; - wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz); - if (!wq) { - ret = -ENOMEM; - goto bail; - } - - /* Check that we can write the offset to mmap. */ - if (udata && udata->inlen >= sizeof(__u64)) { - __u64 offset_addr; - __u64 offset = 0; - - ret = ib_copy_from_udata(&offset_addr, udata, - sizeof(offset_addr)); - if (ret) - goto bail_free; - udata->outbuf = - (void __user *) (unsigned long) offset_addr; - ret = ib_copy_to_udata(udata, &offset, - sizeof(offset)); - if (ret) - goto bail_free; - } - - spin_lock_irq(&srq->rq.lock); - /* - * validate head and tail pointer values and compute - * the number of remaining WQEs. - */ - owq = srq->rq.wq; - head = owq->head; - tail = owq->tail; - if (head >= srq->rq.size || tail >= srq->rq.size) { - ret = -EINVAL; - goto bail_unlock; - } - n = head; - if (n < tail) - n += srq->rq.size - tail; - else - n -= tail; - if (size <= n) { - ret = -EINVAL; - goto bail_unlock; - } - n = 0; - p = wq->wq; - while (tail != head) { - struct qib_rwqe *wqe; - int i; - - wqe = get_rwqe_ptr(&srq->rq, tail); - p->wr_id = wqe->wr_id; - p->num_sge = wqe->num_sge; - for (i = 0; i < wqe->num_sge; i++) - p->sg_list[i] = wqe->sg_list[i]; - n++; - p = (struct qib_rwqe *)((char *) p + sz); - if (++tail >= srq->rq.size) - tail = 0; - } - srq->rq.wq = wq; - srq->rq.size = size; - wq->head = n; - wq->tail = 0; - if (attr_mask & IB_SRQ_LIMIT) - srq->limit = attr->srq_limit; - spin_unlock_irq(&srq->rq.lock); - - vfree(owq); - - if (srq->ip) { - struct qib_mmap_info *ip = srq->ip; - struct qib_ibdev *dev = to_idev(srq->ibsrq.device); - u32 s = sizeof(struct qib_rwq) + size * sz; - - qib_update_mmap_info(dev, ip, s, wq); - - /* - * Return the offset to mmap. - * See qib_mmap() for details. - */ - if (udata && udata->inlen >= sizeof(__u64)) { - ret = ib_copy_to_udata(udata, &ip->offset, - sizeof(ip->offset)); - if (ret) - goto bail; - } - - /* - * Put user mapping info onto the pending list - * unless it already is on the list. 
- */ - spin_lock_irq(&dev->pending_lock); - if (list_empty(&ip->pending_mmaps)) - list_add(&ip->pending_mmaps, - &dev->pending_mmaps); - spin_unlock_irq(&dev->pending_lock); - } - } else if (attr_mask & IB_SRQ_LIMIT) { - spin_lock_irq(&srq->rq.lock); - if (attr->srq_limit >= srq->rq.size) - ret = -EINVAL; - else - srq->limit = attr->srq_limit; - spin_unlock_irq(&srq->rq.lock); - } - goto bail; - -bail_unlock: - spin_unlock_irq(&srq->rq.lock); -bail_free: - vfree(wq); -bail: - return ret; -} - -int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) -{ - struct qib_srq *srq = to_isrq(ibsrq); - - attr->max_wr = srq->rq.size - 1; - attr->max_sge = srq->rq.max_sge; - attr->srq_limit = srq->limit; - return 0; -} - -/** - * qib_destroy_srq - destroy a shared receive queue - * @ibsrq: the SRQ to destroy - */ -int qib_destroy_srq(struct ib_srq *ibsrq) -{ - struct qib_srq *srq = to_isrq(ibsrq); - struct qib_ibdev *dev = to_idev(ibsrq->device); - - spin_lock(&dev->n_srqs_lock); - dev->n_srqs_allocated--; - spin_unlock(&dev->n_srqs_lock); - if (srq->ip) - kref_put(&srq->ip->ref, qib_release_mmap_info); - else - vfree(srq->rq.wq); - kfree(srq); - - return 0; -} diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index 81f56cdff..fe4cf5e4a 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -406,7 +406,13 @@ static struct kobj_type qib_sl2vl_ktype = { #define QIB_DIAGC_ATTR(N) \ static struct qib_diagc_attr qib_diagc_attr_##N = { \ .attr = { .name = __stringify(N), .mode = 0664 }, \ - .counter = offsetof(struct qib_ibport, n_##N) \ + .counter = offsetof(struct qib_ibport, rvp.n_##N) \ + } + +#define QIB_DIAGC_ATTR_PER_CPU(N) \ + static struct qib_diagc_attr qib_diagc_attr_##N = { \ + .attr = { .name = __stringify(N), .mode = 0664 }, \ + .counter = offsetof(struct qib_ibport, rvp.z_##N) \ } struct qib_diagc_attr { @@ -414,10 +420,11 @@ struct qib_diagc_attr { size_t counter; }; +QIB_DIAGC_ATTR_PER_CPU(rc_acks); +QIB_DIAGC_ATTR_PER_CPU(rc_qacks); +QIB_DIAGC_ATTR_PER_CPU(rc_delayed_comp); + QIB_DIAGC_ATTR(rc_resends); -QIB_DIAGC_ATTR(rc_acks); -QIB_DIAGC_ATTR(rc_qacks); -QIB_DIAGC_ATTR(rc_delayed_comp); QIB_DIAGC_ATTR(seq_naks); QIB_DIAGC_ATTR(rdma_seq); QIB_DIAGC_ATTR(rnr_naks); @@ -449,6 +456,35 @@ static struct attribute *diagc_default_attributes[] = { NULL }; +static u64 get_all_cpu_total(u64 __percpu *cntr) +{ + int cpu; + u64 counter = 0; + + for_each_possible_cpu(cpu) + counter += *per_cpu_ptr(cntr, cpu); + return counter; +} + +#define def_write_per_cpu(cntr) \ +static void write_per_cpu_##cntr(struct qib_pportdata *ppd, u32 data) \ +{ \ + struct qib_devdata *dd = ppd->dd; \ + struct qib_ibport *qibp = &ppd->ibport_data; \ + /* A write can only zero the counter */ \ + if (data == 0) \ + qibp->rvp.z_##cntr = get_all_cpu_total(qibp->rvp.cntr); \ + else \ + qib_dev_err(dd, "Per CPU cntrs can only be zeroed"); \ +} + +def_write_per_cpu(rc_acks) +def_write_per_cpu(rc_qacks) +def_write_per_cpu(rc_delayed_comp) + +#define READ_PER_CPU_CNTR(cntr) (get_all_cpu_total(qibp->rvp.cntr) - \ + qibp->rvp.z_##cntr) + static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { @@ -458,7 +494,16 @@ static ssize_t diagc_attr_show(struct kobject *kobj, struct attribute *attr, container_of(kobj, struct qib_pportdata, diagc_kobj); struct qib_ibport *qibp = &ppd->ibport_data; - return sprintf(buf, "%u\n", *(u32 *)((char *)qibp + dattr->counter)); + if (!strncmp(dattr->attr.name, "rc_acks", 7)) + 
return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_acks)); + else if (!strncmp(dattr->attr.name, "rc_qacks", 8)) + return sprintf(buf, "%llu\n", READ_PER_CPU_CNTR(rc_qacks)); + else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15)) + return sprintf(buf, "%llu\n", + READ_PER_CPU_CNTR(rc_delayed_comp)); + else + return sprintf(buf, "%u\n", + *(u32 *)((char *)qibp + dattr->counter)); } static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr, @@ -475,7 +520,15 @@ static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr, ret = kstrtou32(buf, 0, &val); if (ret) return ret; - *(u32 *)((char *) qibp + dattr->counter) = val; + + if (!strncmp(dattr->attr.name, "rc_acks", 7)) + write_per_cpu_rc_acks(ppd, val); + else if (!strncmp(dattr->attr.name, "rc_qacks", 8)) + write_per_cpu_rc_qacks(ppd, val); + else if (!strncmp(dattr->attr.name, "rc_delayed_comp", 15)) + write_per_cpu_rc_delayed_comp(ppd, val); + else + *(u32 *)((char *)qibp + dattr->counter) = val; return size; } @@ -502,7 +555,7 @@ static ssize_t show_rev(struct device *device, struct device_attribute *attr, char *buf) { struct qib_ibdev *dev = - container_of(device, struct qib_ibdev, ibdev.dev); + container_of(device, struct qib_ibdev, rdi.ibdev.dev); return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev); } @@ -511,7 +564,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr, char *buf) { struct qib_ibdev *dev = - container_of(device, struct qib_ibdev, ibdev.dev); + container_of(device, struct qib_ibdev, rdi.ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); int ret; @@ -533,7 +586,7 @@ static ssize_t show_boardversion(struct device *device, struct device_attribute *attr, char *buf) { struct qib_ibdev *dev = - container_of(device, struct qib_ibdev, ibdev.dev); + container_of(device, struct qib_ibdev, rdi.ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); /* The string printed here is already newline-terminated. */ @@ -545,7 +598,7 @@ static ssize_t show_localbus_info(struct device *device, struct device_attribute *attr, char *buf) { struct qib_ibdev *dev = - container_of(device, struct qib_ibdev, ibdev.dev); + container_of(device, struct qib_ibdev, rdi.ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); /* The string printed here is already newline-terminated. */ @@ -557,7 +610,7 @@ static ssize_t show_nctxts(struct device *device, struct device_attribute *attr, char *buf) { struct qib_ibdev *dev = - container_of(device, struct qib_ibdev, ibdev.dev); + container_of(device, struct qib_ibdev, rdi.ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); /* Return the number of user ports (contexts) available. */ @@ -572,7 +625,7 @@ static ssize_t show_nfreectxts(struct device *device, struct device_attribute *attr, char *buf) { struct qib_ibdev *dev = - container_of(device, struct qib_ibdev, ibdev.dev); + container_of(device, struct qib_ibdev, rdi.ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); /* Return the number of free user ports (contexts) available. 
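The qib_sysfs.c diagc changes above turn rc_acks, rc_qacks and rc_delayed_comp into per-CPU counters: a read sums every CPU's slot and subtracts a zeroing baseline (READ_PER_CPU_CNTR), and a sysfs write may only reset that baseline, never set an arbitrary value. A small user-space model of the same read/zero scheme, with a fixed four-"CPU" array standing in for alloc_percpu() storage.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4	/* stand-in for for_each_possible_cpu() */

static uint64_t rc_acks[NR_CPUS];	/* one slot per CPU */
static uint64_t z_rc_acks;		/* baseline captured at the last zeroing */

static uint64_t get_all_cpu_total(const uint64_t *cntr)
{
	uint64_t sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += cntr[cpu];
	return sum;
}

/* reads subtract the baseline; a "write" may only move the baseline */
static uint64_t read_counter(void)
{
	return get_all_cpu_total(rc_acks) - z_rc_acks;
}

static void zero_counter(void)
{
	z_rc_acks = get_all_cpu_total(rc_acks);
}

int main(void)
{
	rc_acks[0] = 5;
	rc_acks[3] = 7;
	printf("%llu\n", (unsigned long long)read_counter());	/* 12 */

	zero_counter();
	rc_acks[1] = 2;
	printf("%llu\n", (unsigned long long)read_counter());	/* 2 */
	return 0;
}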
*/ @@ -583,7 +636,7 @@ static ssize_t show_serial(struct device *device, struct device_attribute *attr, char *buf) { struct qib_ibdev *dev = - container_of(device, struct qib_ibdev, ibdev.dev); + container_of(device, struct qib_ibdev, rdi.ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); buf[sizeof(dd->serial)] = '\0'; @@ -597,7 +650,7 @@ static ssize_t store_chip_reset(struct device *device, size_t count) { struct qib_ibdev *dev = - container_of(device, struct qib_ibdev, ibdev.dev); + container_of(device, struct qib_ibdev, rdi.ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); int ret; @@ -618,7 +671,7 @@ static ssize_t show_tempsense(struct device *device, struct device_attribute *attr, char *buf) { struct qib_ibdev *dev = - container_of(device, struct qib_ibdev, ibdev.dev); + container_of(device, struct qib_ibdev, rdi.ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); int ret; int idx; @@ -778,7 +831,7 @@ bail: */ int qib_verbs_register_sysfs(struct qib_devdata *dd) { - struct ib_device *dev = &dd->verbs_dev.ibdev; + struct ib_device *dev = &dd->verbs_dev.rdi.ibdev; int i, ret; for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) { diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index 06a564589..7bdbc79ce 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c @@ -41,61 +41,62 @@ * qib_make_uc_req - construct a request packet (SEND, RDMA write) * @qp: a pointer to the QP * + * Assumes the s_lock is held. + * * Return 1 if constructed; otherwise, return 0. */ -int qib_make_uc_req(struct qib_qp *qp) +int qib_make_uc_req(struct rvt_qp *qp) { + struct qib_qp_priv *priv = qp->priv; struct qib_other_headers *ohdr; - struct qib_swqe *wqe; - unsigned long flags; + struct rvt_swqe *wqe; u32 hwords; u32 bth0; u32 len; u32 pmtu = qp->pmtu; int ret = 0; - spin_lock_irqsave(&qp->s_lock, flags); - - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) { - if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) { + if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. */ - if (qp->s_last == qp->s_head) + smp_read_barrier_depends(); /* see post_one_send() */ + if (qp->s_last == ACCESS_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ - if (atomic_read(&qp->s_dma_busy)) { - qp->s_flags |= QIB_S_WAIT_DMA; + if (atomic_read(&priv->s_dma_busy)) { + qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } - wqe = get_swqe_ptr(qp, qp->s_last); + wqe = rvt_get_swqe_ptr(qp, qp->s_last); qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); goto done; } - ohdr = &qp->s_hdr->u.oth; + ohdr = &priv->s_hdr->u.oth; if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) - ohdr = &qp->s_hdr->u.l.oth; + ohdr = &priv->s_hdr->u.l.oth; /* header size in 32-bit words LRH+BTH = (8+12)/4. */ hwords = 5; bth0 = 0; /* Get the next send request. */ - wqe = get_swqe_ptr(qp, qp->s_cur); + wqe = rvt_get_swqe_ptr(qp, qp->s_cur); qp->s_wqe = NULL; switch (qp->s_state) { default: - if (!(ib_qib_state_ops[qp->state] & - QIB_PROCESS_NEXT_SEND_OK)) + if (!(ib_rvt_state_ops[qp->state] & + RVT_PROCESS_NEXT_SEND_OK)) goto bail; /* Check if send work queue is empty. */ - if (qp->s_cur == qp->s_head) + smp_read_barrier_depends(); /* see post_one_send() */ + if (qp->s_cur == ACCESS_ONCE(qp->s_head)) goto bail; /* * Start a new request. 
*/ - wqe->psn = qp->s_next_psn; - qp->s_psn = qp->s_next_psn; + qp->s_psn = wqe->psn; qp->s_sge.sge = wqe->sg_list[0]; qp->s_sge.sg_list = wqe->sg_list + 1; qp->s_sge.num_sge = wqe->wr.num_sge; @@ -214,15 +215,11 @@ int qib_make_uc_req(struct qib_qp *qp) qp->s_cur_sge = &qp->s_sge; qp->s_cur_size = len; qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), - qp->s_next_psn++ & QIB_PSN_MASK); + qp->s_psn++ & QIB_PSN_MASK); done: - ret = 1; - goto unlock; - + return 1; bail: - qp->s_flags &= ~QIB_S_BUSY; -unlock: - spin_unlock_irqrestore(&qp->s_lock, flags); + qp->s_flags &= ~RVT_S_BUSY; return ret; } @@ -240,7 +237,7 @@ unlock: * Called at interrupt level. */ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, - int has_grh, void *data, u32 tlen, struct qib_qp *qp) + int has_grh, void *data, u32 tlen, struct rvt_qp *qp) { struct qib_other_headers *ohdr; u32 opcode; @@ -278,10 +275,10 @@ void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, inv: if (qp->r_state == OP(SEND_FIRST) || qp->r_state == OP(SEND_MIDDLE)) { - set_bit(QIB_R_REWIND_SGE, &qp->r_aflags); + set_bit(RVT_R_REWIND_SGE, &qp->r_aflags); qp->r_sge.num_sge = 0; } else - qib_put_ss(&qp->r_sge); + rvt_put_ss(&qp->r_sge); qp->r_state = OP(SEND_LAST); switch (opcode) { case OP(SEND_FIRST): @@ -328,8 +325,8 @@ inv: goto inv; } - if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) { - qp->r_flags |= QIB_R_COMM_EST; + if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST)) { + qp->r_flags |= RVT_R_COMM_EST; if (qp->ibqp.event_handler) { struct ib_event ev; @@ -346,7 +343,7 @@ inv: case OP(SEND_ONLY): case OP(SEND_ONLY_WITH_IMMEDIATE): send_first: - if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) + if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) qp->r_sge = qp->s_rdma_read_sge; else { ret = qib_get_rwqe(qp, 0); @@ -400,7 +397,7 @@ send_last: goto rewind; wc.opcode = IB_WC_RECV; qib_copy_sge(&qp->r_sge, data, tlen, 0); - qib_put_ss(&qp->s_rdma_read_sge); + rvt_put_ss(&qp->s_rdma_read_sge); last_imm: wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; @@ -414,7 +411,7 @@ last_imm: wc.dlid_path_bits = 0; wc.port_num = 0; /* Signal completion event if the solicited bit is set. 
*/ - qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, (ohdr->bth[0] & cpu_to_be32(IB_BTH_SOLICITED)) != 0); break; @@ -438,7 +435,7 @@ rdma_first: int ok; /* Check rkey */ - ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, + ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr, rkey, IB_ACCESS_REMOTE_WRITE); if (unlikely(!ok)) goto drop; @@ -483,8 +480,8 @@ rdma_last_imm: tlen -= (hdrsize + pad + 4); if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) goto drop; - if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) - qib_put_ss(&qp->s_rdma_read_sge); + if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) + rvt_put_ss(&qp->s_rdma_read_sge); else { ret = qib_get_rwqe(qp, 1); if (ret < 0) @@ -495,7 +492,7 @@ rdma_last_imm: wc.byte_len = qp->r_len; wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; qib_copy_sge(&qp->r_sge, data, tlen, 1); - qib_put_ss(&qp->r_sge); + rvt_put_ss(&qp->r_sge); goto last_imm; case OP(RDMA_WRITE_LAST): @@ -511,7 +508,7 @@ rdma_last: if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) goto drop; qib_copy_sge(&qp->r_sge, data, tlen, 1); - qib_put_ss(&qp->r_sge); + rvt_put_ss(&qp->r_sge); break; default: @@ -523,10 +520,10 @@ rdma_last: return; rewind: - set_bit(QIB_R_REWIND_SGE, &qp->r_aflags); + set_bit(RVT_R_REWIND_SGE, &qp->r_aflags); qp->r_sge.num_sge = 0; drop: - ibp->n_pkt_drops++; + ibp->rvp.n_pkt_drops++; return; op_err: diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index 59193f67e..d9502137d 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c @@ -32,6 +32,7 @@ */ #include <rdma/ib_smi.h> +#include <rdma/ib_verbs.h> #include "qib.h" #include "qib_mad.h" @@ -46,22 +47,26 @@ * Note that the receive interrupt handler may be calling qib_ud_rcv() * while this is being called. */ -static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) +static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe) { struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num); - struct qib_pportdata *ppd; - struct qib_qp *qp; + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + struct qib_devdata *dd = ppd->dd; + struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; + struct rvt_qp *qp; struct ib_ah_attr *ah_attr; unsigned long flags; - struct qib_sge_state ssge; - struct qib_sge *sge; + struct rvt_sge_state ssge; + struct rvt_sge *sge; struct ib_wc wc; u32 length; enum ib_qp_type sqptype, dqptype; - qp = qib_lookup_qpn(ibp, swqe->ud_wr.remote_qpn); + rcu_read_lock(); + qp = rvt_lookup_qpn(rdi, &ibp->rvp, swqe->ud_wr.remote_qpn); if (!qp) { - ibp->n_pkt_drops++; + ibp->rvp.n_pkt_drops++; + rcu_read_unlock(); return; } @@ -71,12 +76,12 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) IB_QPT_UD : qp->ibqp.qp_type; if (dqptype != sqptype || - !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { - ibp->n_pkt_drops++; + !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { + ibp->rvp.n_pkt_drops++; goto drop; } - ah_attr = &to_iah(swqe->ud_wr.ah)->attr; + ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr; ppd = ppd_from_ibp(ibp); if (qp->ibqp.qp_num > 1) { @@ -140,8 +145,8 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) /* * Get the next work request entry to find where to put the data. 
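qib_ud_loopback() now resolves the destination QP with rvt_lookup_qpn() inside an RCU read-side section instead of taking a reference through the old qib_lookup_qpn(); the rcu_read_unlock() in the drop path replaces the old atomic_dec_and_test()/wake_up() pair. A generic sketch of that lookup pattern follows, assuming the table entries are published with rcu_assign_pointer() and freed only after a grace period; the sketch_* names are hypothetical.

static void sketch_handle_packet(struct rvt_qp *qp, void *pkt);	/* hypothetical */

struct sketch_qp_table {
	struct rvt_qp __rcu *slots[256];	/* stand-in for the rdmavt QPN table */
};

static void sketch_deliver(struct sketch_qp_table *t, u32 qpn, void *pkt)
{
	struct rvt_qp *qp;

	rcu_read_lock();
	qp = rcu_dereference(t->slots[qpn & 255]);
	if (qp) {
		/* qp stays valid until rcu_read_unlock(); no per-lookup
		 * refcount is needed as long as the owner defers freeing
		 * with kfree_rcu()/synchronize_rcu(). */
		sketch_handle_packet(qp, pkt);
	}
	rcu_read_unlock();
}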
*/ - if (qp->r_flags & QIB_R_REUSE_SGE) - qp->r_flags &= ~QIB_R_REUSE_SGE; + if (qp->r_flags & RVT_R_REUSE_SGE) + qp->r_flags &= ~RVT_R_REUSE_SGE; else { int ret; @@ -152,14 +157,14 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) } if (!ret) { if (qp->ibqp.qp_num == 0) - ibp->n_vl15_dropped++; + ibp->rvp.n_vl15_dropped++; goto bail_unlock; } } /* Silently drop packets which are too big. */ if (unlikely(wc.byte_len > qp->r_len)) { - qp->r_flags |= QIB_R_REUSE_SGE; - ibp->n_pkt_drops++; + qp->r_flags |= RVT_R_REUSE_SGE; + ibp->rvp.n_pkt_drops++; goto bail_unlock; } @@ -189,7 +194,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) if (--ssge.num_sge) *sge = *ssge.sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { - if (++sge->n >= QIB_SEGSZ) { + if (++sge->n >= RVT_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; @@ -201,8 +206,8 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) } length -= len; } - qib_put_ss(&qp->r_sge); - if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) + rvt_put_ss(&qp->r_sge); + if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) goto bail_unlock; wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; @@ -216,30 +221,31 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1); wc.port_num = qp->port_num; /* Signal completion event if the solicited bit is set. */ - qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, swqe->wr.send_flags & IB_SEND_SOLICITED); - ibp->n_loop_pkts++; + ibp->rvp.n_loop_pkts++; bail_unlock: spin_unlock_irqrestore(&qp->r_lock, flags); drop: - if (atomic_dec_and_test(&qp->refcount)) - wake_up(&qp->wait); + rcu_read_unlock(); } /** * qib_make_ud_req - construct a UD request packet * @qp: the QP * + * Assumes the s_lock is held. + * * Return 1 if constructed; otherwise, return 0. */ -int qib_make_ud_req(struct qib_qp *qp) +int qib_make_ud_req(struct rvt_qp *qp) { + struct qib_qp_priv *priv = qp->priv; struct qib_other_headers *ohdr; struct ib_ah_attr *ah_attr; struct qib_pportdata *ppd; struct qib_ibport *ibp; - struct qib_swqe *wqe; - unsigned long flags; + struct rvt_swqe *wqe; u32 nwords; u32 extra_bytes; u32 bth0; @@ -248,28 +254,29 @@ int qib_make_ud_req(struct qib_qp *qp) int ret = 0; int next_cur; - spin_lock_irqsave(&qp->s_lock, flags); - - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) { - if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND)) + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) { + if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. */ - if (qp->s_last == qp->s_head) + smp_read_barrier_depends(); /* see post_one_send */ + if (qp->s_last == ACCESS_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. 
*/ - if (atomic_read(&qp->s_dma_busy)) { - qp->s_flags |= QIB_S_WAIT_DMA; + if (atomic_read(&priv->s_dma_busy)) { + qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } - wqe = get_swqe_ptr(qp, qp->s_last); + wqe = rvt_get_swqe_ptr(qp, qp->s_last); qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); goto done; } - if (qp->s_cur == qp->s_head) + /* see post_one_send() */ + smp_read_barrier_depends(); + if (qp->s_cur == ACCESS_ONCE(qp->s_head)) goto bail; - wqe = get_swqe_ptr(qp, qp->s_cur); + wqe = rvt_get_swqe_ptr(qp, qp->s_cur); next_cur = qp->s_cur + 1; if (next_cur >= qp->s_size) next_cur = 0; @@ -277,9 +284,9 @@ int qib_make_ud_req(struct qib_qp *qp) /* Construct the header. */ ibp = to_iport(qp->ibqp.device, qp->port_num); ppd = ppd_from_ibp(ibp); - ah_attr = &to_iah(wqe->ud_wr.ah)->attr; - if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) { - if (ah_attr->dlid != QIB_PERMISSIVE_LID) + ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr; + if (ah_attr->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) { + if (ah_attr->dlid != be16_to_cpu(IB_LID_PERMISSIVE)) this_cpu_inc(ibp->pmastats->n_multicast_xmit); else this_cpu_inc(ibp->pmastats->n_unicast_xmit); @@ -287,6 +294,7 @@ int qib_make_ud_req(struct qib_qp *qp) this_cpu_inc(ibp->pmastats->n_unicast_xmit); lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1); if (unlikely(lid == ppd->lid)) { + unsigned long flags; /* * If DMAs are in progress, we can't generate * a completion for the loopback packet since @@ -294,11 +302,12 @@ int qib_make_ud_req(struct qib_qp *qp) * XXX Instead of waiting, we could queue a * zero length descriptor so we get a callback. */ - if (atomic_read(&qp->s_dma_busy)) { - qp->s_flags |= QIB_S_WAIT_DMA; + if (atomic_read(&priv->s_dma_busy)) { + qp->s_flags |= RVT_S_WAIT_DMA; goto bail; } qp->s_cur = next_cur; + local_irq_save(flags); spin_unlock_irqrestore(&qp->s_lock, flags); qib_ud_loopback(qp, wqe); spin_lock_irqsave(&qp->s_lock, flags); @@ -324,11 +333,11 @@ int qib_make_ud_req(struct qib_qp *qp) if (ah_attr->ah_flags & IB_AH_GRH) { /* Header size in 32-bit words. */ - qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr->u.l.grh, + qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh, &ah_attr->grh, qp->s_hdrwords, nwords); lrh0 = QIB_LRH_GRH; - ohdr = &qp->s_hdr->u.l.oth; + ohdr = &priv->s_hdr->u.l.oth; /* * Don't worry about sending to locally attached multicast * QPs. It is unspecified by the spec. what happens. @@ -336,7 +345,7 @@ int qib_make_ud_req(struct qib_qp *qp) } else { /* Header size in 32-bit words. */ lrh0 = QIB_LRH_BTH; - ohdr = &qp->s_hdr->u.oth; + ohdr = &priv->s_hdr->u.oth; } if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) { qp->s_hdrwords++; @@ -349,15 +358,16 @@ int qib_make_ud_req(struct qib_qp *qp) lrh0 |= 0xF000; /* Set VL (see ch. 
13.5.3.1) */ else lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12; - qp->s_hdr->lrh[0] = cpu_to_be16(lrh0); - qp->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ - qp->s_hdr->lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); + priv->s_hdr->lrh[0] = cpu_to_be16(lrh0); + priv->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */ + priv->s_hdr->lrh[2] = + cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC); lid = ppd->lid; if (lid) { lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1); - qp->s_hdr->lrh[3] = cpu_to_be16(lid); + priv->s_hdr->lrh[3] = cpu_to_be16(lid); } else - qp->s_hdr->lrh[3] = IB_LID_PERMISSIVE; + priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE; if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= IB_BTH_SOLICITED; bth0 |= extra_bytes << 20; @@ -368,11 +378,11 @@ int qib_make_ud_req(struct qib_qp *qp) /* * Use the multicast QP if the destination LID is a multicast LID. */ - ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE && - ah_attr->dlid != QIB_PERMISSIVE_LID ? + ohdr->bth[1] = ah_attr->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) && + ah_attr->dlid != be16_to_cpu(IB_LID_PERMISSIVE) ? cpu_to_be32(QIB_MULTICAST_QPN) : cpu_to_be32(wqe->ud_wr.remote_qpn); - ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK); + ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK); /* * Qkeys with the high order bit set mean use the * qkey from the QP context instead of the WR (see 10.2.5). @@ -382,13 +392,9 @@ int qib_make_ud_req(struct qib_qp *qp) ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); done: - ret = 1; - goto unlock; - + return 1; bail: - qp->s_flags &= ~QIB_S_BUSY; -unlock: - spin_unlock_irqrestore(&qp->s_lock, flags); + qp->s_flags &= ~RVT_S_BUSY; return ret; } @@ -426,7 +432,7 @@ static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey) * Called at interrupt level. */ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, - int has_grh, void *data, u32 tlen, struct qib_qp *qp) + int has_grh, void *data, u32 tlen, struct rvt_qp *qp) { struct qib_other_headers *ohdr; int opcode; @@ -446,7 +452,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */ } qkey = be32_to_cpu(ohdr->u.ud.deth[0]); - src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK; + src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK; /* * Get the number of bytes the message was padded by @@ -531,8 +537,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, /* * Get the next work request entry to find where to put the data. */ - if (qp->r_flags & QIB_R_REUSE_SGE) - qp->r_flags &= ~QIB_R_REUSE_SGE; + if (qp->r_flags & RVT_R_REUSE_SGE) + qp->r_flags &= ~RVT_R_REUSE_SGE; else { int ret; @@ -543,13 +549,13 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, } if (!ret) { if (qp->ibqp.qp_num == 0) - ibp->n_vl15_dropped++; + ibp->rvp.n_vl15_dropped++; return; } } /* Silently drop packets which are too big. 
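The driver-private QIB_MULTICAST_LID_BASE/QIB_PERMISSIVE_LID constants give way to the core definitions from <rdma/ib_verbs.h>, which are stored in network byte order, hence the be16_to_cpu() conversions around every comparison against a host-order LID. A small helper showing the classification this patch performs in several places; the sketch_ name is hypothetical, the constants and conversions are the ones the patch uses.

#include <rdma/ib_verbs.h>

/* @lid is in host byte order, as ah_attr->dlid is and as hdr->lrh[1] is
 * after be16_to_cpu(); the core constants are __be16, so convert them. */
static inline bool sketch_lid_is_multicast(u16 lid)
{
	return lid >= be16_to_cpu(IB_MULTICAST_LID_BASE) &&
	       lid != be16_to_cpu(IB_LID_PERMISSIVE);
}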
*/ if (unlikely(wc.byte_len > qp->r_len)) { - qp->r_flags |= QIB_R_REUSE_SGE; + qp->r_flags |= RVT_R_REUSE_SGE; goto drop; } if (has_grh) { @@ -559,8 +565,8 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, } else qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1); - qib_put_ss(&qp->r_sge); - if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) + rvt_put_ss(&qp->r_sge); + if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) return; wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; @@ -576,15 +582,15 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, /* * Save the LMC lower bits if the destination LID is a unicast LID. */ - wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 : + wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 : dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1); wc.port_num = qp->port_num; /* Signal completion event if the solicited bit is set. */ - qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, + rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, (ohdr->bth[0] & cpu_to_be32(IB_BTH_SOLICITED)) != 0); return; drop: - ibp->n_pkt_drops++; + ibp->rvp.n_pkt_drops++; } diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index 74f90b261..2d2b94fd3 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c @@ -66,8 +66,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages, } for (got = 0; got < num_pages; got += ret) { - ret = get_user_pages(current, current->mm, - start_page + got * PAGE_SIZE, + ret = get_user_pages(start_page + got * PAGE_SIZE, num_pages - got, 1, 1, p + got, NULL); if (ret < 0) diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index baf1e42b6..cbf6200e6 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -41,6 +41,7 @@ #include <linux/mm.h> #include <linux/random.h> #include <linux/vmalloc.h> +#include <rdma/rdma_vt.h> #include "qib.h" #include "qib_common.h" @@ -49,8 +50,8 @@ static unsigned int ib_qib_qp_table_size = 256; module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO); MODULE_PARM_DESC(qp_table_size, "QP table size"); -unsigned int ib_qib_lkey_table_size = 16; -module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint, +static unsigned int qib_lkey_table_size = 16; +module_param_named(lkey_table_size, qib_lkey_table_size, uint, S_IRUGO); MODULE_PARM_DESC(lkey_table_size, "LKEY table size in bits (2^n, 1 <= n <= 23)"); @@ -113,36 +114,6 @@ module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(disable_sma, "Disable the SMA"); /* - * Note that it is OK to post send work requests in the SQE and ERR - * states; qib_do_send() will process them and generate error - * completions as per IB 1.2 C10-96. 
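The qib_user_pages.c hunk tracks a kernel-wide API change: get_user_pages() now operates implicitly on current and current->mm, so callers stop passing the task and mm explicitly. Below is a sketch of the updated pinning loop, assuming (as the surrounding driver code is believed to do) that the caller already holds current->mm->mmap_sem and unpins any already-pinned pages on error; the sketch_ name and simplified error handling are mine.

static int sketch_pin_pages(unsigned long start, size_t npages,
			    struct page **pages)
{
	size_t got;
	int ret;

	for (got = 0; got < npages; got += ret) {
		ret = get_user_pages(start + got * PAGE_SIZE,
				     npages - got,
				     1, 1,		/* write, force */
				     pages + got, NULL);
		if (ret < 0)
			return ret;	/* caller releases pages[0..got) */
	}
	return 0;
}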
- */ -const int ib_qib_state_ops[IB_QPS_ERR + 1] = { - [IB_QPS_RESET] = 0, - [IB_QPS_INIT] = QIB_POST_RECV_OK, - [IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK, - [IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | - QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK | - QIB_PROCESS_NEXT_SEND_OK, - [IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | - QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK, - [IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | - QIB_POST_SEND_OK | QIB_FLUSH_SEND, - [IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV | - QIB_POST_SEND_OK | QIB_FLUSH_SEND, -}; - -struct qib_ucontext { - struct ib_ucontext ibucontext; -}; - -static inline struct qib_ucontext *to_iucontext(struct ib_ucontext - *ibucontext) -{ - return container_of(ibucontext, struct qib_ucontext, ibucontext); -} - -/* * Translate ib_wr_opcode into ib_wc_opcode. */ const enum ib_wc_opcode ib_qib_wc_opcode[] = { @@ -166,9 +137,9 @@ __be64 ib_qib_sys_image_guid; * @data: the data to copy * @length: the length of the data */ -void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release) +void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release) { - struct qib_sge *sge = &ss->sge; + struct rvt_sge *sge = &ss->sge; while (length) { u32 len = sge->length; @@ -184,11 +155,11 @@ void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release) sge->sge_length -= len; if (sge->sge_length == 0) { if (release) - qib_put_mr(sge->mr); + rvt_put_mr(sge->mr); if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { - if (++sge->n >= QIB_SEGSZ) { + if (++sge->n >= RVT_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; @@ -208,9 +179,9 @@ void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release) * @ss: the SGE state * @length: the number of bytes to skip */ -void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release) +void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release) { - struct qib_sge *sge = &ss->sge; + struct rvt_sge *sge = &ss->sge; while (length) { u32 len = sge->length; @@ -225,11 +196,11 @@ void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release) sge->sge_length -= len; if (sge->sge_length == 0) { if (release) - qib_put_mr(sge->mr); + rvt_put_mr(sge->mr); if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { - if (++sge->n >= QIB_SEGSZ) { + if (++sge->n >= RVT_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; @@ -248,10 +219,10 @@ void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release) * Don't modify the qib_sge_state to get the count. * Return zero if any of the segments is not aligned. */ -static u32 qib_count_sge(struct qib_sge_state *ss, u32 length) +static u32 qib_count_sge(struct rvt_sge_state *ss, u32 length) { - struct qib_sge *sg_list = ss->sg_list; - struct qib_sge sge = ss->sge; + struct rvt_sge *sg_list = ss->sg_list; + struct rvt_sge sge = ss->sge; u8 num_sge = ss->num_sge; u32 ndesc = 1; /* count the header */ @@ -276,7 +247,7 @@ static u32 qib_count_sge(struct qib_sge_state *ss, u32 length) if (--num_sge) sge = *sg_list++; } else if (sge.length == 0 && sge.mr->lkey) { - if (++sge.n >= QIB_SEGSZ) { + if (++sge.n >= RVT_SEGSZ) { if (++sge.m >= sge.mr->mapsz) break; sge.n = 0; @@ -294,9 +265,9 @@ static u32 qib_count_sge(struct qib_sge_state *ss, u32 length) /* * Copy from the SGEs to the data buffer. 
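qib_copy_sge(), qib_skip_sge() and qib_count_sge() all repeat the same cursor-advance step over an rvt_sge: consume from the current segment, then move to the next SGE of the request or to the next chunk of the MR map when a segment runs out. The sketch below pulls that step out on its own; the rvt_sge/rvt_sge_state fields and RVT_SEGSZ are the ones the patch already uses, only the helper name is hypothetical.

static void sketch_advance_sge(struct rvt_sge_state *ss, u32 len, int release)
{
	struct rvt_sge *sge = &ss->sge;

	sge->vaddr += len;
	sge->length -= len;
	sge->sge_length -= len;

	if (sge->sge_length == 0) {
		if (release)
			rvt_put_mr(sge->mr);
		if (--ss->num_sge)
			*sge = *ss->sg_list++;	/* next SGE of the WQE */
	} else if (sge->length == 0 && sge->mr->lkey) {
		if (++sge->n >= RVT_SEGSZ) {	/* next chunk of the MR map */
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}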
*/ -static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length) +static void qib_copy_from_sge(void *data, struct rvt_sge_state *ss, u32 length) { - struct qib_sge *sge = &ss->sge; + struct rvt_sge *sge = &ss->sge; while (length) { u32 len = sge->length; @@ -314,7 +285,7 @@ static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length) if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { - if (++sge->n >= QIB_SEGSZ) { + if (++sge->n >= RVT_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; @@ -330,242 +301,6 @@ static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length) } /** - * qib_post_one_send - post one RC, UC, or UD send work request - * @qp: the QP to post on - * @wr: the work request to send - */ -static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr, - int *scheduled) -{ - struct qib_swqe *wqe; - u32 next; - int i; - int j; - int acc; - int ret; - unsigned long flags; - struct qib_lkey_table *rkt; - struct qib_pd *pd; - int avoid_schedule = 0; - - spin_lock_irqsave(&qp->s_lock, flags); - - /* Check that state is OK to post send. */ - if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK))) - goto bail_inval; - - /* IB spec says that num_sge == 0 is OK. */ - if (wr->num_sge > qp->s_max_sge) - goto bail_inval; - - /* - * Don't allow RDMA reads or atomic operations on UC or - * undefined operations. - * Make sure buffer is large enough to hold the result for atomics. - */ - if (wr->opcode == IB_WR_REG_MR) { - if (qib_reg_mr(qp, reg_wr(wr))) - goto bail_inval; - } else if (qp->ibqp.qp_type == IB_QPT_UC) { - if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) - goto bail_inval; - } else if (qp->ibqp.qp_type != IB_QPT_RC) { - /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */ - if (wr->opcode != IB_WR_SEND && - wr->opcode != IB_WR_SEND_WITH_IMM) - goto bail_inval; - /* Check UD destination address PD */ - if (qp->ibqp.pd != ud_wr(wr)->ah->pd) - goto bail_inval; - } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) - goto bail_inval; - else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP && - (wr->num_sge == 0 || - wr->sg_list[0].length < sizeof(u64) || - wr->sg_list[0].addr & (sizeof(u64) - 1))) - goto bail_inval; - else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) - goto bail_inval; - - next = qp->s_head + 1; - if (next >= qp->s_size) - next = 0; - if (next == qp->s_last) { - ret = -ENOMEM; - goto bail; - } - - rkt = &to_idev(qp->ibqp.device)->lk_table; - pd = to_ipd(qp->ibqp.pd); - wqe = get_swqe_ptr(qp, qp->s_head); - - if (qp->ibqp.qp_type != IB_QPT_UC && - qp->ibqp.qp_type != IB_QPT_RC) - memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr)); - else if (wr->opcode == IB_WR_REG_MR) - memcpy(&wqe->reg_wr, reg_wr(wr), - sizeof(wqe->reg_wr)); - else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM || - wr->opcode == IB_WR_RDMA_WRITE || - wr->opcode == IB_WR_RDMA_READ) - memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr)); - else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || - wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) - memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr)); - else - memcpy(&wqe->wr, wr, sizeof(wqe->wr)); - - wqe->length = 0; - j = 0; - if (wr->num_sge) { - acc = wr->opcode >= IB_WR_RDMA_READ ? 
- IB_ACCESS_LOCAL_WRITE : 0; - for (i = 0; i < wr->num_sge; i++) { - u32 length = wr->sg_list[i].length; - int ok; - - if (length == 0) - continue; - ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j], - &wr->sg_list[i], acc); - if (!ok) - goto bail_inval_free; - wqe->length += length; - j++; - } - wqe->wr.num_sge = j; - } - if (qp->ibqp.qp_type == IB_QPT_UC || - qp->ibqp.qp_type == IB_QPT_RC) { - if (wqe->length > 0x80000000U) - goto bail_inval_free; - if (wqe->length <= qp->pmtu) - avoid_schedule = 1; - } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport + - qp->port_num - 1)->ibmtu) { - goto bail_inval_free; - } else { - atomic_inc(&to_iah(ud_wr(wr)->ah)->refcount); - avoid_schedule = 1; - } - wqe->ssn = qp->s_ssn++; - qp->s_head = next; - - ret = 0; - goto bail; - -bail_inval_free: - while (j) { - struct qib_sge *sge = &wqe->sg_list[--j]; - - qib_put_mr(sge->mr); - } -bail_inval: - ret = -EINVAL; -bail: - if (!ret && !wr->next && !avoid_schedule && - !qib_sdma_empty( - dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) { - qib_schedule_send(qp); - *scheduled = 1; - } - spin_unlock_irqrestore(&qp->s_lock, flags); - return ret; -} - -/** - * qib_post_send - post a send on a QP - * @ibqp: the QP to post the send on - * @wr: the list of work requests to post - * @bad_wr: the first bad WR is put here - * - * This may be called from interrupt context. - */ -static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, - struct ib_send_wr **bad_wr) -{ - struct qib_qp *qp = to_iqp(ibqp); - int err = 0; - int scheduled = 0; - - for (; wr; wr = wr->next) { - err = qib_post_one_send(qp, wr, &scheduled); - if (err) { - *bad_wr = wr; - goto bail; - } - } - - /* Try to do the send work in the caller's context. */ - if (!scheduled) - qib_do_send(&qp->s_work); - -bail: - return err; -} - -/** - * qib_post_receive - post a receive on a QP - * @ibqp: the QP to post the receive on - * @wr: the WR to post - * @bad_wr: the first bad WR is put here - * - * This may be called from interrupt context. - */ -static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, - struct ib_recv_wr **bad_wr) -{ - struct qib_qp *qp = to_iqp(ibqp); - struct qib_rwq *wq = qp->r_rq.wq; - unsigned long flags; - int ret; - - /* Check that state is OK to post receive. */ - if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) { - *bad_wr = wr; - ret = -EINVAL; - goto bail; - } - - for (; wr; wr = wr->next) { - struct qib_rwqe *wqe; - u32 next; - int i; - - if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { - *bad_wr = wr; - ret = -EINVAL; - goto bail; - } - - spin_lock_irqsave(&qp->r_rq.lock, flags); - next = wq->head + 1; - if (next >= qp->r_rq.size) - next = 0; - if (next == wq->tail) { - spin_unlock_irqrestore(&qp->r_rq.lock, flags); - *bad_wr = wr; - ret = -ENOMEM; - goto bail; - } - - wqe = get_rwqe_ptr(&qp->r_rq, wq->head); - wqe->wr_id = wr->wr_id; - wqe->num_sge = wr->num_sge; - for (i = 0; i < wr->num_sge; i++) - wqe->sg_list[i] = wr->sg_list[i]; - /* Make sure queue entry is written before the head index. */ - smp_wmb(); - wq->head = next; - spin_unlock_irqrestore(&qp->r_rq.lock, flags); - } - ret = 0; - -bail: - return ret; -} - -/** * qib_qp_rcv - processing an incoming packet on a QP * @rcd: the context pointer * @hdr: the packet header @@ -579,15 +314,15 @@ bail: * Called at interrupt level. 
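The qib_post_receive() being deleted here (its generic replacement lives in rdmavt) is the producer half of the barrier pairing noted earlier: the receive WQE is written first, then smp_wmb(), then the head index, so a consumer that observes the new head also sees a fully written entry. A stripped-down sketch of that publish step, with the r_rq.lock locking of the real code omitted; the queue layout and sketch_* names are hypothetical, the barrier placement is the one the deleted code used.

struct sketch_rwq {
	u32 head;	/* next slot to fill, published last          */
	u32 tail;	/* next slot the receive path will consume    */
	u32 size;
	struct sketch_rwqe {
		u64 wr_id;
		u8  num_sge;
	} wq[];		/* flexible array, sized at allocation time   */
};

static int sketch_post_recv(struct sketch_rwq *rq, u64 wr_id)
{
	u32 next = rq->head + 1;

	if (next >= rq->size)
		next = 0;
	if (next == rq->tail)
		return -ENOMEM;			/* ring full */

	rq->wq[rq->head].wr_id = wr_id;
	rq->wq[rq->head].num_sge = 0;
	smp_wmb();	/* entry before head index, as in the deleted code */
	rq->head = next;
	return 0;
}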
*/ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, - int has_grh, void *data, u32 tlen, struct qib_qp *qp) + int has_grh, void *data, u32 tlen, struct rvt_qp *qp) { struct qib_ibport *ibp = &rcd->ppd->ibport_data; spin_lock(&qp->r_lock); /* Check for valid receive state. */ - if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) { - ibp->n_pkt_drops++; + if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) { + ibp->rvp.n_pkt_drops++; goto unlock; } @@ -632,8 +367,10 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) struct qib_pportdata *ppd = rcd->ppd; struct qib_ibport *ibp = &ppd->ibport_data; struct qib_ib_header *hdr = rhdr; + struct qib_devdata *dd = ppd->dd; + struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; struct qib_other_headers *ohdr; - struct qib_qp *qp; + struct rvt_qp *qp; u32 qp_num; int lnh; u8 opcode; @@ -645,7 +382,7 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) /* Check for a valid destination LID (see ch. 7.11.1). */ lid = be16_to_cpu(hdr->lrh[1]); - if (lid < QIB_MULTICAST_LID_BASE) { + if (lid < be16_to_cpu(IB_MULTICAST_LID_BASE)) { lid &= ~((1 << ppd->lmc) - 1); if (unlikely(lid != ppd->lid)) goto drop; @@ -674,50 +411,40 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen) #endif /* Get the destination QP number. */ - qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK; + qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; if (qp_num == QIB_MULTICAST_QPN) { - struct qib_mcast *mcast; - struct qib_mcast_qp *p; + struct rvt_mcast *mcast; + struct rvt_mcast_qp *p; if (lnh != QIB_LRH_GRH) goto drop; - mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid); + mcast = rvt_mcast_find(&ibp->rvp, &hdr->u.l.grh.dgid); if (mcast == NULL) goto drop; this_cpu_inc(ibp->pmastats->n_multicast_rcv); list_for_each_entry_rcu(p, &mcast->qp_list, list) qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp); /* - * Notify qib_multicast_detach() if it is waiting for us + * Notify rvt_multicast_detach() if it is waiting for us * to finish. 
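Receive demultiplexing in qib_ib_rcv() keeps its shape but moves to the rdmavt types: the destination QPN is masked out of BTH dword 1 with RVT_QPN_MASK, multicast packets fan out over an rvt_mcast group, and unicast packets go through the RCU lookup shown earlier instead of the removed lookaside cache. A compressed sketch of that dispatch skeleton; LNH/GRH validation, statistics and error handling are omitted, and sketch_deliver_one() is hypothetical.

static void sketch_deliver_one(struct rvt_qp *qp, void *data, u32 tlen);	/* hypothetical */

static void sketch_dispatch(struct rvt_dev_info *rdi, struct rvt_ibport *rvp,
			    struct qib_other_headers *ohdr, void *data, u32 tlen)
{
	u32 qp_num = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;

	if (qp_num == QIB_MULTICAST_QPN) {
		/* multicast: rvt_mcast_find() on the DGID, then one
		 * delivery per QP on the group's qp_list (not shown) */
		return;
	}

	rcu_read_lock();
	{
		struct rvt_qp *qp = rvt_lookup_qpn(rdi, rvp, qp_num);

		if (qp)
			sketch_deliver_one(qp, data, tlen);
	}
	rcu_read_unlock();
}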
*/ if (atomic_dec_return(&mcast->refcount) <= 1) wake_up(&mcast->wait); } else { - if (rcd->lookaside_qp) { - if (rcd->lookaside_qpn != qp_num) { - if (atomic_dec_and_test( - &rcd->lookaside_qp->refcount)) - wake_up( - &rcd->lookaside_qp->wait); - rcd->lookaside_qp = NULL; - } + rcu_read_lock(); + qp = rvt_lookup_qpn(rdi, &ibp->rvp, qp_num); + if (!qp) { + rcu_read_unlock(); + goto drop; } - if (!rcd->lookaside_qp) { - qp = qib_lookup_qpn(ibp, qp_num); - if (!qp) - goto drop; - rcd->lookaside_qp = qp; - rcd->lookaside_qpn = qp_num; - } else - qp = rcd->lookaside_qp; this_cpu_inc(ibp->pmastats->n_unicast_rcv); qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp); + rcu_read_unlock(); } return; drop: - ibp->n_pkt_drops++; + ibp->rvp.n_pkt_drops++; } /* @@ -728,23 +455,25 @@ static void mem_timer(unsigned long data) { struct qib_ibdev *dev = (struct qib_ibdev *) data; struct list_head *list = &dev->memwait; - struct qib_qp *qp = NULL; + struct rvt_qp *qp = NULL; + struct qib_qp_priv *priv = NULL; unsigned long flags; - spin_lock_irqsave(&dev->pending_lock, flags); + spin_lock_irqsave(&dev->rdi.pending_lock, flags); if (!list_empty(list)) { - qp = list_entry(list->next, struct qib_qp, iowait); - list_del_init(&qp->iowait); + priv = list_entry(list->next, struct qib_qp_priv, iowait); + qp = priv->owner; + list_del_init(&priv->iowait); atomic_inc(&qp->refcount); if (!list_empty(list)) mod_timer(&dev->mem_timer, jiffies + 1); } - spin_unlock_irqrestore(&dev->pending_lock, flags); + spin_unlock_irqrestore(&dev->rdi.pending_lock, flags); if (qp) { spin_lock_irqsave(&qp->s_lock, flags); - if (qp->s_flags & QIB_S_WAIT_KMEM) { - qp->s_flags &= ~QIB_S_WAIT_KMEM; + if (qp->s_flags & RVT_S_WAIT_KMEM) { + qp->s_flags &= ~RVT_S_WAIT_KMEM; qib_schedule_send(qp); } spin_unlock_irqrestore(&qp->s_lock, flags); @@ -753,9 +482,9 @@ static void mem_timer(unsigned long data) } } -static void update_sge(struct qib_sge_state *ss, u32 length) +static void update_sge(struct rvt_sge_state *ss, u32 length) { - struct qib_sge *sge = &ss->sge; + struct rvt_sge *sge = &ss->sge; sge->vaddr += length; sge->length -= length; @@ -764,7 +493,7 @@ static void update_sge(struct qib_sge_state *ss, u32 length) if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { - if (++sge->n >= QIB_SEGSZ) { + if (++sge->n >= RVT_SEGSZ) { if (++sge->m >= sge->mr->mapsz) return; sge->n = 0; @@ -810,7 +539,7 @@ static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) } #endif -static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss, +static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss, u32 length, unsigned flush_wc) { u32 extra = 0; @@ -947,30 +676,31 @@ static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss, } static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev, - struct qib_qp *qp) + struct rvt_qp *qp) { + struct qib_qp_priv *priv = qp->priv; struct qib_verbs_txreq *tx; unsigned long flags; spin_lock_irqsave(&qp->s_lock, flags); - spin_lock(&dev->pending_lock); + spin_lock(&dev->rdi.pending_lock); if (!list_empty(&dev->txreq_free)) { struct list_head *l = dev->txreq_free.next; list_del(l); - spin_unlock(&dev->pending_lock); + spin_unlock(&dev->rdi.pending_lock); spin_unlock_irqrestore(&qp->s_lock, flags); tx = list_entry(l, struct qib_verbs_txreq, txreq.list); } else { - if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK && - list_empty(&qp->iowait)) { + if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK && + list_empty(&priv->iowait)) { 
dev->n_txwait++; - qp->s_flags |= QIB_S_WAIT_TX; - list_add_tail(&qp->iowait, &dev->txwait); + qp->s_flags |= RVT_S_WAIT_TX; + list_add_tail(&priv->iowait, &dev->txwait); } - qp->s_flags &= ~QIB_S_BUSY; - spin_unlock(&dev->pending_lock); + qp->s_flags &= ~RVT_S_BUSY; + spin_unlock(&dev->rdi.pending_lock); spin_unlock_irqrestore(&qp->s_lock, flags); tx = ERR_PTR(-EBUSY); } @@ -978,22 +708,22 @@ static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev, } static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev, - struct qib_qp *qp) + struct rvt_qp *qp) { struct qib_verbs_txreq *tx; unsigned long flags; - spin_lock_irqsave(&dev->pending_lock, flags); + spin_lock_irqsave(&dev->rdi.pending_lock, flags); /* assume the list non empty */ if (likely(!list_empty(&dev->txreq_free))) { struct list_head *l = dev->txreq_free.next; list_del(l); - spin_unlock_irqrestore(&dev->pending_lock, flags); + spin_unlock_irqrestore(&dev->rdi.pending_lock, flags); tx = list_entry(l, struct qib_verbs_txreq, txreq.list); } else { /* call slow path to get the extra lock */ - spin_unlock_irqrestore(&dev->pending_lock, flags); + spin_unlock_irqrestore(&dev->rdi.pending_lock, flags); tx = __get_txreq(dev, qp); } return tx; @@ -1002,16 +732,15 @@ static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev, void qib_put_txreq(struct qib_verbs_txreq *tx) { struct qib_ibdev *dev; - struct qib_qp *qp; + struct rvt_qp *qp; + struct qib_qp_priv *priv; unsigned long flags; qp = tx->qp; dev = to_idev(qp->ibqp.device); - if (atomic_dec_and_test(&qp->refcount)) - wake_up(&qp->wait); if (tx->mr) { - qib_put_mr(tx->mr); + rvt_put_mr(tx->mr); tx->mr = NULL; } if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) { @@ -1022,21 +751,23 @@ void qib_put_txreq(struct qib_verbs_txreq *tx) kfree(tx->align_buf); } - spin_lock_irqsave(&dev->pending_lock, flags); + spin_lock_irqsave(&dev->rdi.pending_lock, flags); /* Put struct back on free list */ list_add(&tx->txreq.list, &dev->txreq_free); if (!list_empty(&dev->txwait)) { /* Wake up first QP wanting a free struct */ - qp = list_entry(dev->txwait.next, struct qib_qp, iowait); - list_del_init(&qp->iowait); + priv = list_entry(dev->txwait.next, struct qib_qp_priv, + iowait); + qp = priv->owner; + list_del_init(&priv->iowait); atomic_inc(&qp->refcount); - spin_unlock_irqrestore(&dev->pending_lock, flags); + spin_unlock_irqrestore(&dev->rdi.pending_lock, flags); spin_lock_irqsave(&qp->s_lock, flags); - if (qp->s_flags & QIB_S_WAIT_TX) { - qp->s_flags &= ~QIB_S_WAIT_TX; + if (qp->s_flags & RVT_S_WAIT_TX) { + qp->s_flags &= ~RVT_S_WAIT_TX; qib_schedule_send(qp); } spin_unlock_irqrestore(&qp->s_lock, flags); @@ -1044,7 +775,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx) if (atomic_dec_and_test(&qp->refcount)) wake_up(&qp->wait); } else - spin_unlock_irqrestore(&dev->pending_lock, flags); + spin_unlock_irqrestore(&dev->rdi.pending_lock, flags); } /* @@ -1055,36 +786,39 @@ void qib_put_txreq(struct qib_verbs_txreq *tx) */ void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail) { - struct qib_qp *qp, *nqp; - struct qib_qp *qps[20]; + struct rvt_qp *qp, *nqp; + struct qib_qp_priv *qpp, *nqpp; + struct rvt_qp *qps[20]; struct qib_ibdev *dev; unsigned i, n; n = 0; dev = &ppd->dd->verbs_dev; - spin_lock(&dev->pending_lock); + spin_lock(&dev->rdi.pending_lock); /* Search wait list for first QP wanting DMA descriptors. 
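Descriptor allocation keeps its two-tier shape, but the wait list now strings together qib_qp_priv::iowait entries and the list lock moves into the shared rdi. The point of the split is lock ordering: the common case takes only the pending lock, while the empty-pool case backs out, takes s_lock first (so the s_flags update is safe) and only then re-takes the pending lock and parks the QP. A condensed sketch of that fast/slow pattern, assuming kernel includes; the real slow path also re-checks the free list and the QP state before giving up, which is elided here.

static struct qib_verbs_txreq *sketch_get_txreq(struct qib_ibdev *dev,
						struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_verbs_txreq *tx = NULL;
	unsigned long flags;

	/* Fast path: pool lock only. */
	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	if (!list_empty(&dev->txreq_free)) {
		tx = list_first_entry(&dev->txreq_free,
				      struct qib_verbs_txreq, txreq.list);
		list_del(&tx->txreq.list);
	}
	spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
	if (tx)
		return tx;

	/* Slow path: s_lock, then pool lock, then park the QP. */
	spin_lock_irqsave(&qp->s_lock, flags);
	spin_lock(&dev->rdi.pending_lock);
	if (list_empty(&priv->iowait)) {
		qp->s_flags |= RVT_S_WAIT_TX;
		list_add_tail(&priv->iowait, &dev->txwait);
	}
	qp->s_flags &= ~RVT_S_BUSY;
	spin_unlock(&dev->rdi.pending_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ERR_PTR(-EBUSY);
}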
*/ - list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) { + list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) { + qp = qpp->owner; + nqp = nqpp->owner; if (qp->port_num != ppd->port) continue; if (n == ARRAY_SIZE(qps)) break; - if (qp->s_tx->txreq.sg_count > avail) + if (qpp->s_tx->txreq.sg_count > avail) break; - avail -= qp->s_tx->txreq.sg_count; - list_del_init(&qp->iowait); + avail -= qpp->s_tx->txreq.sg_count; + list_del_init(&qpp->iowait); atomic_inc(&qp->refcount); qps[n++] = qp; } - spin_unlock(&dev->pending_lock); + spin_unlock(&dev->rdi.pending_lock); for (i = 0; i < n; i++) { qp = qps[i]; spin_lock(&qp->s_lock); - if (qp->s_flags & QIB_S_WAIT_DMA_DESC) { - qp->s_flags &= ~QIB_S_WAIT_DMA_DESC; + if (qp->s_flags & RVT_S_WAIT_DMA_DESC) { + qp->s_flags &= ~RVT_S_WAIT_DMA_DESC; qib_schedule_send(qp); } spin_unlock(&qp->s_lock); @@ -1100,7 +834,8 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status) { struct qib_verbs_txreq *tx = container_of(cookie, struct qib_verbs_txreq, txreq); - struct qib_qp *qp = tx->qp; + struct rvt_qp *qp = tx->qp; + struct qib_qp_priv *priv = qp->priv; spin_lock(&qp->s_lock); if (tx->wqe) @@ -1117,11 +852,11 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status) } qib_rc_send_complete(qp, hdr); } - if (atomic_dec_and_test(&qp->s_dma_busy)) { + if (atomic_dec_and_test(&priv->s_dma_busy)) { if (qp->state == IB_QPS_RESET) - wake_up(&qp->wait_dma); - else if (qp->s_flags & QIB_S_WAIT_DMA) { - qp->s_flags &= ~QIB_S_WAIT_DMA; + wake_up(&priv->wait_dma); + else if (qp->s_flags & RVT_S_WAIT_DMA) { + qp->s_flags &= ~RVT_S_WAIT_DMA; qib_schedule_send(qp); } } @@ -1130,22 +865,23 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status) qib_put_txreq(tx); } -static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp) +static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp) { + struct qib_qp_priv *priv = qp->priv; unsigned long flags; int ret = 0; spin_lock_irqsave(&qp->s_lock, flags); - if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { - spin_lock(&dev->pending_lock); - if (list_empty(&qp->iowait)) { + if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { + spin_lock(&dev->rdi.pending_lock); + if (list_empty(&priv->iowait)) { if (list_empty(&dev->memwait)) mod_timer(&dev->mem_timer, jiffies + 1); - qp->s_flags |= QIB_S_WAIT_KMEM; - list_add_tail(&qp->iowait, &dev->memwait); + qp->s_flags |= RVT_S_WAIT_KMEM; + list_add_tail(&priv->iowait, &dev->memwait); } - spin_unlock(&dev->pending_lock); - qp->s_flags &= ~QIB_S_BUSY; + spin_unlock(&dev->rdi.pending_lock); + qp->s_flags &= ~RVT_S_BUSY; ret = -EBUSY; } spin_unlock_irqrestore(&qp->s_lock, flags); @@ -1153,10 +889,11 @@ static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp) return ret; } -static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr, - u32 hdrwords, struct qib_sge_state *ss, u32 len, +static int qib_verbs_send_dma(struct rvt_qp *qp, struct qib_ib_header *hdr, + u32 hdrwords, struct rvt_sge_state *ss, u32 len, u32 plen, u32 dwords) { + struct qib_qp_priv *priv = qp->priv; struct qib_ibdev *dev = to_idev(qp->ibqp.device); struct qib_devdata *dd = dd_from_dev(dev); struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); @@ -1167,9 +904,9 @@ static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr, u32 ndesc; int ret; - tx = qp->s_tx; + tx = priv->s_tx; if (tx) { - qp->s_tx = NULL; + priv->s_tx = NULL; /* resend previously constructed packet */ ret = qib_sdma_verbs_send(ppd, 
tx->ss, tx->dwords, tx); goto bail; @@ -1182,7 +919,6 @@ static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr, control = dd->f_setpbc_control(ppd, plen, qp->s_srate, be16_to_cpu(hdr->lrh[0]) >> 12); tx->qp = qp; - atomic_inc(&qp->refcount); tx->wqe = qp->s_wqe; tx->mr = qp->s_rdma_mr; if (qp->s_rdma_mr) @@ -1245,7 +981,7 @@ err_tx: qib_put_txreq(tx); ret = wait_kmem(dev, qp); unaligned: - ibp->n_unaligned++; + ibp->rvp.n_unaligned++; bail: return ret; bail_tx: @@ -1257,8 +993,9 @@ bail_tx: * If we are now in the error state, return zero to flush the * send work request. */ -static int no_bufs_available(struct qib_qp *qp) +static int no_bufs_available(struct rvt_qp *qp) { + struct qib_qp_priv *priv = qp->priv; struct qib_ibdev *dev = to_idev(qp->ibqp.device); struct qib_devdata *dd; unsigned long flags; @@ -1271,25 +1008,25 @@ static int no_bufs_available(struct qib_qp *qp) * enabling the PIO avail interrupt. */ spin_lock_irqsave(&qp->s_lock, flags); - if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { - spin_lock(&dev->pending_lock); - if (list_empty(&qp->iowait)) { + if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) { + spin_lock(&dev->rdi.pending_lock); + if (list_empty(&priv->iowait)) { dev->n_piowait++; - qp->s_flags |= QIB_S_WAIT_PIO; - list_add_tail(&qp->iowait, &dev->piowait); + qp->s_flags |= RVT_S_WAIT_PIO; + list_add_tail(&priv->iowait, &dev->piowait); dd = dd_from_dev(dev); dd->f_wantpiobuf_intr(dd, 1); } - spin_unlock(&dev->pending_lock); - qp->s_flags &= ~QIB_S_BUSY; + spin_unlock(&dev->rdi.pending_lock); + qp->s_flags &= ~RVT_S_BUSY; ret = -EBUSY; } spin_unlock_irqrestore(&qp->s_lock, flags); return ret; } -static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr, - u32 hdrwords, struct qib_sge_state *ss, u32 len, +static int qib_verbs_send_pio(struct rvt_qp *qp, struct qib_ib_header *ibhdr, + u32 hdrwords, struct rvt_sge_state *ss, u32 len, u32 plen, u32 dwords) { struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); @@ -1370,7 +1107,7 @@ done: } qib_sendbuf_done(dd, pbufn); if (qp->s_rdma_mr) { - qib_put_mr(qp->s_rdma_mr); + rvt_put_mr(qp->s_rdma_mr); qp->s_rdma_mr = NULL; } if (qp->s_wqe) { @@ -1394,10 +1131,10 @@ done: * @len: the length of the packet in bytes * * Return zero if packet is sent or queued OK. - * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise. + * Return non-zero and clear qp->s_flags RVT_S_BUSY otherwise. */ -int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr, - u32 hdrwords, struct qib_sge_state *ss, u32 len) +int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr, + u32 hdrwords, struct rvt_sge_state *ss, u32 len) { struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); u32 plen; @@ -1529,10 +1266,11 @@ void qib_ib_piobufavail(struct qib_devdata *dd) { struct qib_ibdev *dev = &dd->verbs_dev; struct list_head *list; - struct qib_qp *qps[5]; - struct qib_qp *qp; + struct rvt_qp *qps[5]; + struct rvt_qp *qp; unsigned long flags; unsigned i, n; + struct qib_qp_priv *priv; list = &dev->piowait; n = 0; @@ -1543,25 +1281,26 @@ void qib_ib_piobufavail(struct qib_devdata *dd) * could end up with QPs on the wait list with the interrupt * disabled. 
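wait_kmem(), no_bufs_available() and the txreq slow path all follow the same flow-control idiom after this patch: when a resource is unavailable the QP puts its priv->iowait on the relevant device list, sets an RVT_S_WAIT_* flag, clears RVT_S_BUSY and returns -EBUSY; the matching availability callback (mem_timer(), qib_verbs_sdma_desc_avail(), qib_ib_piobufavail()) later removes it, clears the flag and reschedules the send engine. Below is a sketch of that wake-up half, essentially a generalised mem_timer(); the helper name and the single-waiter simplification are mine.

static void sketch_resource_available(struct qib_ibdev *dev,
				      struct list_head *wait_list, u32 flag)
{
	struct qib_qp_priv *priv;
	struct rvt_qp *qp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	if (!list_empty(wait_list)) {
		priv = list_first_entry(wait_list, struct qib_qp_priv, iowait);
		qp = priv->owner;
		list_del_init(&priv->iowait);
		atomic_inc(&qp->refcount);	/* hold qp across the unlock */
	}
	spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);

	if (!qp)
		return;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		qib_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}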
*/ - spin_lock_irqsave(&dev->pending_lock, flags); + spin_lock_irqsave(&dev->rdi.pending_lock, flags); while (!list_empty(list)) { if (n == ARRAY_SIZE(qps)) goto full; - qp = list_entry(list->next, struct qib_qp, iowait); - list_del_init(&qp->iowait); + priv = list_entry(list->next, struct qib_qp_priv, iowait); + qp = priv->owner; + list_del_init(&priv->iowait); atomic_inc(&qp->refcount); qps[n++] = qp; } dd->f_wantpiobuf_intr(dd, 0); full: - spin_unlock_irqrestore(&dev->pending_lock, flags); + spin_unlock_irqrestore(&dev->rdi.pending_lock, flags); for (i = 0; i < n; i++) { qp = qps[i]; spin_lock_irqsave(&qp->s_lock, flags); - if (qp->s_flags & QIB_S_WAIT_PIO) { - qp->s_flags &= ~QIB_S_WAIT_PIO; + if (qp->s_flags & RVT_S_WAIT_PIO) { + qp->s_flags &= ~RVT_S_WAIT_PIO; qib_schedule_send(qp); } spin_unlock_irqrestore(&qp->s_lock, flags); @@ -1572,82 +1311,24 @@ full: } } -static int qib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, - struct ib_udata *uhw) -{ - struct qib_devdata *dd = dd_from_ibdev(ibdev); - struct qib_ibdev *dev = to_idev(ibdev); - - if (uhw->inlen || uhw->outlen) - return -EINVAL; - memset(props, 0, sizeof(*props)); - - props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | - IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | - IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN | - IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE; - props->page_size_cap = PAGE_SIZE; - props->vendor_id = - QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3; - props->vendor_part_id = dd->deviceid; - props->hw_ver = dd->minrev; - props->sys_image_guid = ib_qib_sys_image_guid; - props->max_mr_size = ~0ULL; - props->max_qp = ib_qib_max_qps; - props->max_qp_wr = ib_qib_max_qp_wrs; - props->max_sge = ib_qib_max_sges; - props->max_sge_rd = ib_qib_max_sges; - props->max_cq = ib_qib_max_cqs; - props->max_ah = ib_qib_max_ahs; - props->max_cqe = ib_qib_max_cqes; - props->max_mr = dev->lk_table.max; - props->max_fmr = dev->lk_table.max; - props->max_map_per_fmr = 32767; - props->max_pd = ib_qib_max_pds; - props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC; - props->max_qp_init_rd_atom = 255; - /* props->max_res_rd_atom */ - props->max_srq = ib_qib_max_srqs; - props->max_srq_wr = ib_qib_max_srq_wrs; - props->max_srq_sge = ib_qib_max_srq_sges; - /* props->local_ca_ack_delay */ - props->atomic_cap = IB_ATOMIC_GLOB; - props->max_pkeys = qib_get_npkeys(dd); - props->max_mcast_grp = ib_qib_max_mcast_grps; - props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached; - props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * - props->max_mcast_grp; - - return 0; -} - -static int qib_query_port(struct ib_device *ibdev, u8 port, +static int qib_query_port(struct rvt_dev_info *rdi, u8 port_num, struct ib_port_attr *props) { - struct qib_devdata *dd = dd_from_ibdev(ibdev); - struct qib_ibport *ibp = to_iport(ibdev, port); - struct qib_pportdata *ppd = ppd_from_ibp(ibp); + struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi); + struct qib_devdata *dd = dd_from_dev(ibdev); + struct qib_pportdata *ppd = &dd->pport[port_num - 1]; enum ib_mtu mtu; u16 lid = ppd->lid; - memset(props, 0, sizeof(*props)); props->lid = lid ? 
lid : be16_to_cpu(IB_LID_PERMISSIVE); props->lmc = ppd->lmc; - props->sm_lid = ibp->sm_lid; - props->sm_sl = ibp->sm_sl; props->state = dd->f_iblink_state(ppd->lastibcstat); props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat); - props->port_cap_flags = ibp->port_cap_flags; props->gid_tbl_len = QIB_GUIDS_PER_PORT; - props->max_msg_sz = 0x80000000; - props->pkey_tbl_len = qib_get_npkeys(dd); - props->bad_pkey_cntr = ibp->pkey_violations; - props->qkey_viol_cntr = ibp->qkey_violations; props->active_width = ppd->link_width_active; /* See rate_show() */ props->active_speed = ppd->link_speed_active; props->max_vl_num = qib_num_vls(ppd->vls_supported); - props->init_type_reply = 0; props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096; switch (ppd->ibmtu) { @@ -1670,7 +1351,6 @@ static int qib_query_port(struct ib_device *ibdev, u8 port, mtu = IB_MTU_2048; } props->active_mtu = mtu; - props->subnet_timeout = ibp->subnet_timeout; return 0; } @@ -1714,185 +1394,70 @@ bail: return ret; } -static int qib_modify_port(struct ib_device *ibdev, u8 port, - int port_modify_mask, struct ib_port_modify *props) +static int qib_shut_down_port(struct rvt_dev_info *rdi, u8 port_num) { - struct qib_ibport *ibp = to_iport(ibdev, port); - struct qib_pportdata *ppd = ppd_from_ibp(ibp); - - ibp->port_cap_flags |= props->set_port_cap_mask; - ibp->port_cap_flags &= ~props->clr_port_cap_mask; - if (props->set_port_cap_mask || props->clr_port_cap_mask) - qib_cap_mask_chg(ibp); - if (port_modify_mask & IB_PORT_SHUTDOWN) - qib_set_linkstate(ppd, QIB_IB_LINKDOWN); - if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR) - ibp->qkey_violations = 0; - return 0; -} - -static int qib_query_gid(struct ib_device *ibdev, u8 port, - int index, union ib_gid *gid) -{ - struct qib_devdata *dd = dd_from_ibdev(ibdev); - int ret = 0; - - if (!port || port > dd->num_pports) - ret = -EINVAL; - else { - struct qib_ibport *ibp = to_iport(ibdev, port); - struct qib_pportdata *ppd = ppd_from_ibp(ibp); - - gid->global.subnet_prefix = ibp->gid_prefix; - if (index == 0) - gid->global.interface_id = ppd->guid; - else if (index < QIB_GUIDS_PER_PORT) - gid->global.interface_id = ibp->guids[index - 1]; - else - ret = -EINVAL; - } - - return ret; -} + struct qib_ibdev *ibdev = container_of(rdi, struct qib_ibdev, rdi); + struct qib_devdata *dd = dd_from_dev(ibdev); + struct qib_pportdata *ppd = &dd->pport[port_num - 1]; -static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev, - struct ib_ucontext *context, - struct ib_udata *udata) -{ - struct qib_ibdev *dev = to_idev(ibdev); - struct qib_pd *pd; - struct ib_pd *ret; + qib_set_linkstate(ppd, QIB_IB_LINKDOWN); - /* - * This is actually totally arbitrary. Some correctness tests - * assume there's a maximum number of PDs that can be allocated. - * We don't actually have this limit, but we fail the test if - * we allow allocations of more than we report for this value. - */ - - pd = kmalloc(sizeof(*pd), GFP_KERNEL); - if (!pd) { - ret = ERR_PTR(-ENOMEM); - goto bail; - } - - spin_lock(&dev->n_pds_lock); - if (dev->n_pds_allocated == ib_qib_max_pds) { - spin_unlock(&dev->n_pds_lock); - kfree(pd); - ret = ERR_PTR(-ENOMEM); - goto bail; - } - - dev->n_pds_allocated++; - spin_unlock(&dev->n_pds_lock); - - /* ib_alloc_pd() will initialize pd->ibpd. 
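With rdmavt in the middle, per-driver callbacks such as qib_query_port() and qib_shut_down_port() receive a struct rvt_dev_info and recover the driver structures through container_of(); this works because qib_ibdev embeds the rvt_dev_info, which in turn embeds the ib_device, and it is the same reason the sysfs hunks switch to rdi.ibdev.dev. A sketch of that embedding and the recovery chain, with the member layout simplified and the _sketch names hypothetical.

struct qib_ibdev_sketch {
	struct rvt_dev_info rdi;	/* contains rdi.ibdev (struct ib_device) */
	/* ... driver-private members ... */
};

static struct qib_ibdev_sketch *sketch_from_rdi(struct rvt_dev_info *rdi)
{
	return container_of(rdi, struct qib_ibdev_sketch, rdi);
}

static struct qib_ibdev_sketch *sketch_from_ibdev(struct ib_device *ibdev)
{
	/* two steps: ib_device -> rvt_dev_info -> driver device */
	struct rvt_dev_info *rdi = container_of(ibdev, struct rvt_dev_info, ibdev);

	return container_of(rdi, struct qib_ibdev_sketch, rdi);
}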
*/ - pd->user = udata != NULL; - - ret = &pd->ibpd; - -bail: - return ret; + return 0; } -static int qib_dealloc_pd(struct ib_pd *ibpd) +static int qib_get_guid_be(struct rvt_dev_info *rdi, struct rvt_ibport *rvp, + int guid_index, __be64 *guid) { - struct qib_pd *pd = to_ipd(ibpd); - struct qib_ibdev *dev = to_idev(ibpd->device); - - spin_lock(&dev->n_pds_lock); - dev->n_pds_allocated--; - spin_unlock(&dev->n_pds_lock); + struct qib_ibport *ibp = container_of(rvp, struct qib_ibport, rvp); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); - kfree(pd); + if (guid_index == 0) + *guid = ppd->guid; + else if (guid_index < QIB_GUIDS_PER_PORT) + *guid = ibp->guids[guid_index - 1]; + else + return -EINVAL; return 0; } int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr) { - /* A multicast address requires a GRH (see ch. 8.4.1). */ - if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE && - ah_attr->dlid != QIB_PERMISSIVE_LID && - !(ah_attr->ah_flags & IB_AH_GRH)) - goto bail; - if ((ah_attr->ah_flags & IB_AH_GRH) && - ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT) - goto bail; - if (ah_attr->dlid == 0) - goto bail; - if (ah_attr->port_num < 1 || - ah_attr->port_num > ibdev->phys_port_cnt) - goto bail; - if (ah_attr->static_rate != IB_RATE_PORT_CURRENT && - ib_rate_to_mult(ah_attr->static_rate) < 0) - goto bail; if (ah_attr->sl > 15) - goto bail; + return -EINVAL; + return 0; -bail: - return -EINVAL; } -/** - * qib_create_ah - create an address handle - * @pd: the protection domain - * @ah_attr: the attributes of the AH - * - * This may be called from interrupt context. - */ -static struct ib_ah *qib_create_ah(struct ib_pd *pd, - struct ib_ah_attr *ah_attr) +static void qib_notify_new_ah(struct ib_device *ibdev, + struct ib_ah_attr *ah_attr, + struct rvt_ah *ah) { - struct qib_ah *ah; - struct ib_ah *ret; - struct qib_ibdev *dev = to_idev(pd->device); - unsigned long flags; + struct qib_ibport *ibp; + struct qib_pportdata *ppd; - if (qib_check_ah(pd->device, ah_attr)) { - ret = ERR_PTR(-EINVAL); - goto bail; - } - - ah = kmalloc(sizeof(*ah), GFP_ATOMIC); - if (!ah) { - ret = ERR_PTR(-ENOMEM); - goto bail; - } - - spin_lock_irqsave(&dev->n_ahs_lock, flags); - if (dev->n_ahs_allocated == ib_qib_max_ahs) { - spin_unlock_irqrestore(&dev->n_ahs_lock, flags); - kfree(ah); - ret = ERR_PTR(-ENOMEM); - goto bail; - } - - dev->n_ahs_allocated++; - spin_unlock_irqrestore(&dev->n_ahs_lock, flags); - - /* ib_create_ah() will initialize ah->ibah. */ - ah->attr = *ah_attr; - atomic_set(&ah->refcount, 0); - - ret = &ah->ibah; + /* + * Do not trust reading anything from rvt_ah at this point as it is not + * done being setup. We can however modify things which we need to set. + */ -bail: - return ret; + ibp = to_iport(ibdev, ah_attr->port_num); + ppd = ppd_from_ibp(ibp); + ah->vl = ibp->sl_to_vl[ah->attr.sl]; + ah->log_pmtu = ilog2(ppd->ibmtu); } struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid) { struct ib_ah_attr attr; struct ib_ah *ah = ERR_PTR(-EINVAL); - struct qib_qp *qp0; + struct rvt_qp *qp0; memset(&attr, 0, sizeof(attr)); attr.dlid = dlid; attr.port_num = ppd_from_ibp(ibp)->port; rcu_read_lock(); - qp0 = rcu_dereference(ibp->qp0); + qp0 = rcu_dereference(ibp->rvp.qp[0]); if (qp0) ah = ib_create_ah(qp0->ibqp.pd, &attr); rcu_read_unlock(); @@ -1900,51 +1465,6 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid) } /** - * qib_destroy_ah - destroy an address handle - * @ibah: the AH to destroy - * - * This may be called from interrupt context. 
- */ -static int qib_destroy_ah(struct ib_ah *ibah) -{ - struct qib_ibdev *dev = to_idev(ibah->device); - struct qib_ah *ah = to_iah(ibah); - unsigned long flags; - - if (atomic_read(&ah->refcount) != 0) - return -EBUSY; - - spin_lock_irqsave(&dev->n_ahs_lock, flags); - dev->n_ahs_allocated--; - spin_unlock_irqrestore(&dev->n_ahs_lock, flags); - - kfree(ah); - - return 0; -} - -static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) -{ - struct qib_ah *ah = to_iah(ibah); - - if (qib_check_ah(ibah->device, ah_attr)) - return -EINVAL; - - ah->attr = *ah_attr; - - return 0; -} - -static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) -{ - struct qib_ah *ah = to_iah(ibah); - - *ah_attr = ah->attr; - - return 0; -} - -/** * qib_get_npkeys - return the size of the PKEY table for context 0 * @dd: the qlogic_ib device */ @@ -1973,75 +1493,27 @@ unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index) return ret; } -static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, - u16 *pkey) -{ - struct qib_devdata *dd = dd_from_ibdev(ibdev); - int ret; - - if (index >= qib_get_npkeys(dd)) { - ret = -EINVAL; - goto bail; - } - - *pkey = qib_get_pkey(to_iport(ibdev, port), index); - ret = 0; - -bail: - return ret; -} - -/** - * qib_alloc_ucontext - allocate a ucontest - * @ibdev: the infiniband device - * @udata: not used by the QLogic_IB driver - */ - -static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev, - struct ib_udata *udata) -{ - struct qib_ucontext *context; - struct ib_ucontext *ret; - - context = kmalloc(sizeof(*context), GFP_KERNEL); - if (!context) { - ret = ERR_PTR(-ENOMEM); - goto bail; - } - - ret = &context->ibucontext; - -bail: - return ret; -} - -static int qib_dealloc_ucontext(struct ib_ucontext *context) -{ - kfree(to_iucontext(context)); - return 0; -} - static void init_ibport(struct qib_pportdata *ppd) { struct qib_verbs_counters cntrs; struct qib_ibport *ibp = &ppd->ibport_data; - spin_lock_init(&ibp->lock); + spin_lock_init(&ibp->rvp.lock); /* Set the prefix to the default value (see ch. 4.1.1) */ - ibp->gid_prefix = IB_DEFAULT_GID_PREFIX; - ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE); - ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP | + ibp->rvp.gid_prefix = IB_DEFAULT_GID_PREFIX; + ibp->rvp.sm_lid = be16_to_cpu(IB_LID_PERMISSIVE); + ibp->rvp.port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP | IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP | IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP | IB_PORT_OTHER_LOCAL_CHANGES_SUP; if (ppd->dd->flags & QIB_HAS_LINK_LATENCY) - ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP; - ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; - ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; - ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; - ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; - ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; + ibp->rvp.port_cap_flags |= IB_PORT_LINK_LATENCY_SUP; + ibp->rvp.pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA; + ibp->rvp.pma_counter_select[1] = IB_PMA_PORT_RCV_DATA; + ibp->rvp.pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS; + ibp->rvp.pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS; + ibp->rvp.pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT; /* Snapshot current HW counters to "clear" them. 
*/ qib_get_counters(ppd, &cntrs); @@ -2061,26 +1533,55 @@ static void init_ibport(struct qib_pportdata *ppd) ibp->z_excessive_buffer_overrun_errors = cntrs.excessive_buffer_overrun_errors; ibp->z_vl15_dropped = cntrs.vl15_dropped; - RCU_INIT_POINTER(ibp->qp0, NULL); - RCU_INIT_POINTER(ibp->qp1, NULL); + RCU_INIT_POINTER(ibp->rvp.qp[0], NULL); + RCU_INIT_POINTER(ibp->rvp.qp[1], NULL); } -static int qib_port_immutable(struct ib_device *ibdev, u8 port_num, - struct ib_port_immutable *immutable) +/** + * qib_fill_device_attr - Fill in rvt dev info device attributes. + * @dd: the device data structure + */ +static void qib_fill_device_attr(struct qib_devdata *dd) { - struct ib_port_attr attr; - int err; - - err = qib_query_port(ibdev, port_num, &attr); - if (err) - return err; + struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; - immutable->pkey_tbl_len = attr.pkey_tbl_len; - immutable->gid_tbl_len = attr.gid_tbl_len; - immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; - immutable->max_mad_size = IB_MGMT_MAD_SIZE; + memset(&rdi->dparms.props, 0, sizeof(rdi->dparms.props)); - return 0; + rdi->dparms.props.max_pd = ib_qib_max_pds; + rdi->dparms.props.max_ah = ib_qib_max_ahs; + rdi->dparms.props.device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR | + IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | + IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN | + IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE; + rdi->dparms.props.page_size_cap = PAGE_SIZE; + rdi->dparms.props.vendor_id = + QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3; + rdi->dparms.props.vendor_part_id = dd->deviceid; + rdi->dparms.props.hw_ver = dd->minrev; + rdi->dparms.props.sys_image_guid = ib_qib_sys_image_guid; + rdi->dparms.props.max_mr_size = ~0ULL; + rdi->dparms.props.max_qp = ib_qib_max_qps; + rdi->dparms.props.max_qp_wr = ib_qib_max_qp_wrs; + rdi->dparms.props.max_sge = ib_qib_max_sges; + rdi->dparms.props.max_sge_rd = ib_qib_max_sges; + rdi->dparms.props.max_cq = ib_qib_max_cqs; + rdi->dparms.props.max_cqe = ib_qib_max_cqes; + rdi->dparms.props.max_ah = ib_qib_max_ahs; + rdi->dparms.props.max_mr = rdi->lkey_table.max; + rdi->dparms.props.max_fmr = rdi->lkey_table.max; + rdi->dparms.props.max_map_per_fmr = 32767; + rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC; + rdi->dparms.props.max_qp_init_rd_atom = 255; + rdi->dparms.props.max_srq = ib_qib_max_srqs; + rdi->dparms.props.max_srq_wr = ib_qib_max_srq_wrs; + rdi->dparms.props.max_srq_sge = ib_qib_max_srq_sges; + rdi->dparms.props.atomic_cap = IB_ATOMIC_GLOB; + rdi->dparms.props.max_pkeys = qib_get_npkeys(dd); + rdi->dparms.props.max_mcast_grp = ib_qib_max_mcast_grps; + rdi->dparms.props.max_mcast_qp_attach = ib_qib_max_mcast_qp_attached; + rdi->dparms.props.max_total_mcast_qp_attach = + rdi->dparms.props.max_mcast_qp_attach * + rdi->dparms.props.max_mcast_grp; } /** @@ -2091,68 +1592,20 @@ static int qib_port_immutable(struct ib_device *ibdev, u8 port_num, int qib_register_ib_device(struct qib_devdata *dd) { struct qib_ibdev *dev = &dd->verbs_dev; - struct ib_device *ibdev = &dev->ibdev; + struct ib_device *ibdev = &dev->rdi.ibdev; struct qib_pportdata *ppd = dd->pport; - unsigned i, lk_tab_size; + unsigned i, ctxt; int ret; - dev->qp_table_size = ib_qib_qp_table_size; get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd)); - dev->qp_table = kmalloc_array( - dev->qp_table_size, - sizeof(*dev->qp_table), - GFP_KERNEL); - if (!dev->qp_table) { - ret = -ENOMEM; - goto err_qpt; - } - for (i = 0; i < dev->qp_table_size; i++) - RCU_INIT_POINTER(dev->qp_table[i], NULL); - 
for (i = 0; i < dd->num_pports; i++) init_ibport(ppd + i); /* Only need to initialize non-zero fields. */ - spin_lock_init(&dev->qpt_lock); - spin_lock_init(&dev->n_pds_lock); - spin_lock_init(&dev->n_ahs_lock); - spin_lock_init(&dev->n_cqs_lock); - spin_lock_init(&dev->n_qps_lock); - spin_lock_init(&dev->n_srqs_lock); - spin_lock_init(&dev->n_mcast_grps_lock); - init_timer(&dev->mem_timer); - dev->mem_timer.function = mem_timer; - dev->mem_timer.data = (unsigned long) dev; - - qib_init_qpn_table(dd, &dev->qpn_table); + setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev); + + qpt_mask = dd->qpn_mask; - /* - * The top ib_qib_lkey_table_size bits are used to index the - * table. The lower 8 bits can be owned by the user (copied from - * the LKEY). The remaining bits act as a generation number or tag. - */ - spin_lock_init(&dev->lk_table.lock); - /* insure generation is at least 4 bits see keys.c */ - if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) { - qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n", - ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS); - ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS; - } - dev->lk_table.max = 1 << ib_qib_lkey_table_size; - lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); - dev->lk_table.table = (struct qib_mregion __rcu **) - vmalloc(lk_tab_size); - if (dev->lk_table.table == NULL) { - ret = -ENOMEM; - goto err_lk; - } - RCU_INIT_POINTER(dev->dma_mr, NULL); - for (i = 0; i < dev->lk_table.max; i++) - RCU_INIT_POINTER(dev->lk_table.table[i], NULL); - INIT_LIST_HEAD(&dev->pending_mmaps); - spin_lock_init(&dev->pending_lock); - dev->mmap_offset = PAGE_SIZE; - spin_lock_init(&dev->mmap_offset_lock); INIT_LIST_HEAD(&dev->piowait); INIT_LIST_HEAD(&dev->dmawait); INIT_LIST_HEAD(&dev->txwait); @@ -2194,110 +1647,91 @@ int qib_register_ib_device(struct qib_devdata *dd) strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX); ibdev->owner = THIS_MODULE; ibdev->node_guid = ppd->guid; - ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION; - ibdev->uverbs_cmd_mask = - (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | - (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | - (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | - (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | - (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | - (1ull << IB_USER_VERBS_CMD_CREATE_AH) | - (1ull << IB_USER_VERBS_CMD_MODIFY_AH) | - (1ull << IB_USER_VERBS_CMD_QUERY_AH) | - (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | - (1ull << IB_USER_VERBS_CMD_REG_MR) | - (1ull << IB_USER_VERBS_CMD_DEREG_MR) | - (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | - (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | - (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | - (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | - (1ull << IB_USER_VERBS_CMD_POLL_CQ) | - (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | - (1ull << IB_USER_VERBS_CMD_CREATE_QP) | - (1ull << IB_USER_VERBS_CMD_QUERY_QP) | - (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | - (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | - (1ull << IB_USER_VERBS_CMD_POST_SEND) | - (1ull << IB_USER_VERBS_CMD_POST_RECV) | - (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | - (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | - (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | - (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | - (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | - (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | - (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV); - ibdev->node_type = RDMA_NODE_IB_CA; ibdev->phys_port_cnt = dd->num_pports; - ibdev->num_comp_vectors = 1; ibdev->dma_device = &dd->pcidev->dev; - ibdev->query_device = qib_query_device; ibdev->modify_device = 
qib_modify_device; - ibdev->query_port = qib_query_port; - ibdev->modify_port = qib_modify_port; - ibdev->query_pkey = qib_query_pkey; - ibdev->query_gid = qib_query_gid; - ibdev->alloc_ucontext = qib_alloc_ucontext; - ibdev->dealloc_ucontext = qib_dealloc_ucontext; - ibdev->alloc_pd = qib_alloc_pd; - ibdev->dealloc_pd = qib_dealloc_pd; - ibdev->create_ah = qib_create_ah; - ibdev->destroy_ah = qib_destroy_ah; - ibdev->modify_ah = qib_modify_ah; - ibdev->query_ah = qib_query_ah; - ibdev->create_srq = qib_create_srq; - ibdev->modify_srq = qib_modify_srq; - ibdev->query_srq = qib_query_srq; - ibdev->destroy_srq = qib_destroy_srq; - ibdev->create_qp = qib_create_qp; - ibdev->modify_qp = qib_modify_qp; - ibdev->query_qp = qib_query_qp; - ibdev->destroy_qp = qib_destroy_qp; - ibdev->post_send = qib_post_send; - ibdev->post_recv = qib_post_receive; - ibdev->post_srq_recv = qib_post_srq_receive; - ibdev->create_cq = qib_create_cq; - ibdev->destroy_cq = qib_destroy_cq; - ibdev->resize_cq = qib_resize_cq; - ibdev->poll_cq = qib_poll_cq; - ibdev->req_notify_cq = qib_req_notify_cq; - ibdev->get_dma_mr = qib_get_dma_mr; - ibdev->reg_user_mr = qib_reg_user_mr; - ibdev->dereg_mr = qib_dereg_mr; - ibdev->alloc_mr = qib_alloc_mr; - ibdev->map_mr_sg = qib_map_mr_sg; - ibdev->alloc_fmr = qib_alloc_fmr; - ibdev->map_phys_fmr = qib_map_phys_fmr; - ibdev->unmap_fmr = qib_unmap_fmr; - ibdev->dealloc_fmr = qib_dealloc_fmr; - ibdev->attach_mcast = qib_multicast_attach; - ibdev->detach_mcast = qib_multicast_detach; ibdev->process_mad = qib_process_mad; - ibdev->mmap = qib_mmap; - ibdev->dma_ops = &qib_dma_mapping_ops; - ibdev->get_port_immutable = qib_port_immutable; snprintf(ibdev->node_desc, sizeof(ibdev->node_desc), "Intel Infiniband HCA %s", init_utsname()->nodename); - ret = ib_register_device(ibdev, qib_create_port_files); - if (ret) - goto err_reg; + /* + * Fill in rvt info object. 
+ */ + dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files; + dd->verbs_dev.rdi.driver_f.get_card_name = qib_get_card_name; + dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev; + dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah; + dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe; + dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah; + dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn; + dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc; + dd->verbs_dev.rdi.driver_f.qp_priv_free = qib_qp_priv_free; + dd->verbs_dev.rdi.driver_f.free_all_qps = qib_free_all_qps; + dd->verbs_dev.rdi.driver_f.notify_qp_reset = qib_notify_qp_reset; + dd->verbs_dev.rdi.driver_f.do_send = qib_do_send; + dd->verbs_dev.rdi.driver_f.schedule_send = qib_schedule_send; + dd->verbs_dev.rdi.driver_f.quiesce_qp = qib_quiesce_qp; + dd->verbs_dev.rdi.driver_f.stop_send_queue = qib_stop_send_queue; + dd->verbs_dev.rdi.driver_f.flush_qp_waiters = qib_flush_qp_waiters; + dd->verbs_dev.rdi.driver_f.notify_error_qp = qib_notify_error_qp; + dd->verbs_dev.rdi.driver_f.mtu_to_path_mtu = qib_mtu_to_path_mtu; + dd->verbs_dev.rdi.driver_f.mtu_from_qp = qib_mtu_from_qp; + dd->verbs_dev.rdi.driver_f.get_pmtu_from_attr = qib_get_pmtu_from_attr; + dd->verbs_dev.rdi.driver_f.schedule_send_no_lock = _qib_schedule_send; + dd->verbs_dev.rdi.driver_f.query_port_state = qib_query_port; + dd->verbs_dev.rdi.driver_f.shut_down_port = qib_shut_down_port; + dd->verbs_dev.rdi.driver_f.cap_mask_chg = qib_cap_mask_chg; + dd->verbs_dev.rdi.driver_f.notify_create_mad_agent = + qib_notify_create_mad_agent; + dd->verbs_dev.rdi.driver_f.notify_free_mad_agent = + qib_notify_free_mad_agent; + + dd->verbs_dev.rdi.dparms.max_rdma_atomic = QIB_MAX_RDMA_ATOMIC; + dd->verbs_dev.rdi.driver_f.get_guid_be = qib_get_guid_be; + dd->verbs_dev.rdi.dparms.lkey_table_size = qib_lkey_table_size; + dd->verbs_dev.rdi.dparms.qp_table_size = ib_qib_qp_table_size; + dd->verbs_dev.rdi.dparms.qpn_start = 1; + dd->verbs_dev.rdi.dparms.qpn_res_start = QIB_KD_QP; + dd->verbs_dev.rdi.dparms.qpn_res_end = QIB_KD_QP; /* Reserve one QP */ + dd->verbs_dev.rdi.dparms.qpn_inc = 1; + dd->verbs_dev.rdi.dparms.qos_shift = 1; + dd->verbs_dev.rdi.dparms.psn_mask = QIB_PSN_MASK; + dd->verbs_dev.rdi.dparms.psn_shift = QIB_PSN_SHIFT; + dd->verbs_dev.rdi.dparms.psn_modify_mask = QIB_PSN_MASK; + dd->verbs_dev.rdi.dparms.nports = dd->num_pports; + dd->verbs_dev.rdi.dparms.npkeys = qib_get_npkeys(dd); + dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id; + dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB; + dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE; + + snprintf(dd->verbs_dev.rdi.dparms.cq_name, + sizeof(dd->verbs_dev.rdi.dparms.cq_name), + "qib_cq%d", dd->unit); + + qib_fill_device_attr(dd); + + ppd = dd->pport; + for (i = 0; i < dd->num_pports; i++, ppd++) { + ctxt = ppd->hw_pidx; + rvt_init_port(&dd->verbs_dev.rdi, + &ppd->ibport_data.rvp, + i, + dd->rcd[ctxt]->pkeys); + } - ret = qib_create_agents(dev); + ret = rvt_register_device(&dd->verbs_dev.rdi); if (ret) - goto err_agents; + goto err_tx; ret = qib_verbs_register_sysfs(dd); if (ret) goto err_class; - goto bail; + return ret; err_class: - qib_free_agents(dev); -err_agents: - ib_unregister_device(ibdev); -err_reg: + rvt_unregister_device(&dd->verbs_dev.rdi); err_tx: while (!list_empty(&dev->txreq_free)) { struct list_head *l = dev->txreq_free.next; @@ -2313,27 +1747,17 @@ err_tx: sizeof(struct qib_pio_header), dev->pio_hdrs, dev->pio_hdrs_phys); err_hdrs: 
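[Annotation] With the verbs objects handled by rdmavt, the registration path above collapses into four steps: populate rdi->driver_f with the callbacks rdmavt uses to call back into qib, describe the hardware limits in rdi->dparms, initialize each physical port with rvt_init_port(), and hand the device to rvt_register_device(). A trimmed skeleton of that flow, using only calls that appear in this hunk (the tx/pio error unwinding is elided):

static int example_register(struct qib_devdata *dd)
{
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
	struct qib_pportdata *ppd = dd->pport;
	unsigned int i;
	int ret;

	/* callbacks rdmavt invokes for driver-specific work */
	rdi->driver_f.qp_priv_alloc = qib_qp_priv_alloc;
	rdi->driver_f.qp_priv_free  = qib_qp_priv_free;
	rdi->driver_f.do_send       = qib_do_send;
	rdi->driver_f.schedule_send = qib_schedule_send;

	/* sizing knobs rdmavt uses for its own tables */
	rdi->dparms.qp_table_size   = ib_qib_qp_table_size;
	rdi->dparms.lkey_table_size = qib_lkey_table_size;
	rdi->dparms.nports          = dd->num_pports;

	/* one rvt_ibport per physical port, with its pkey table */
	for (i = 0; i < dd->num_pports; i++, ppd++)
		rvt_init_port(rdi, &ppd->ibport_data.rvp, i,
			      dd->rcd[ppd->hw_pidx]->pkeys);

	ret = rvt_register_device(rdi);
	if (ret)
		return ret;

	ret = qib_verbs_register_sysfs(dd);
	if (ret)
		rvt_unregister_device(rdi);
	return ret;
}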
- vfree(dev->lk_table.table); -err_lk: - kfree(dev->qp_table); -err_qpt: qib_dev_err(dd, "cannot register verbs: %d!\n", -ret); -bail: return ret; } void qib_unregister_ib_device(struct qib_devdata *dd) { struct qib_ibdev *dev = &dd->verbs_dev; - struct ib_device *ibdev = &dev->ibdev; - u32 qps_inuse; - unsigned lk_tab_size; qib_verbs_unregister_sysfs(dd); - qib_free_agents(dev); - - ib_unregister_device(ibdev); + rvt_unregister_device(&dd->verbs_dev.rdi); if (!list_empty(&dev->piowait)) qib_dev_err(dd, "piowait list not empty!\n"); @@ -2343,16 +1767,8 @@ void qib_unregister_ib_device(struct qib_devdata *dd) qib_dev_err(dd, "txwait list not empty!\n"); if (!list_empty(&dev->memwait)) qib_dev_err(dd, "memwait list not empty!\n"); - if (dev->dma_mr) - qib_dev_err(dd, "DMA MR not NULL!\n"); - - qps_inuse = qib_free_all_qps(dd); - if (qps_inuse) - qib_dev_err(dd, "QP memory leak! %u still in use\n", - qps_inuse); del_timer_sync(&dev->mem_timer); - qib_free_qpn_table(&dev->qpn_table); while (!list_empty(&dev->txreq_free)) { struct list_head *l = dev->txreq_free.next; struct qib_verbs_txreq *tx; @@ -2366,21 +1782,36 @@ void qib_unregister_ib_device(struct qib_devdata *dd) dd->pport->sdma_descq_cnt * sizeof(struct qib_pio_header), dev->pio_hdrs, dev->pio_hdrs_phys); - lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); - vfree(dev->lk_table.table); - kfree(dev->qp_table); } -/* - * This must be called with s_lock held. +/** + * _qib_schedule_send - schedule progress + * @qp - the qp + * + * This schedules progress w/o regard to the s_flags. + * + * It is only used in post send, which doesn't hold + * the s_lock. */ -void qib_schedule_send(struct qib_qp *qp) +void _qib_schedule_send(struct rvt_qp *qp) { - if (qib_send_ok(qp)) { - struct qib_ibport *ibp = - to_iport(qp->ibqp.device, qp->port_num); - struct qib_pportdata *ppd = ppd_from_ibp(ibp); + struct qib_ibport *ibp = + to_iport(qp->ibqp.device, qp->port_num); + struct qib_pportdata *ppd = ppd_from_ibp(ibp); + struct qib_qp_priv *priv = qp->priv; - queue_work(ppd->qib_wq, &qp->s_work); - } + queue_work(ppd->qib_wq, &priv->s_work); +} + +/** + * qib_schedule_send - schedule progress + * @qp - the qp + * + * This schedules qp progress. The s_lock + * should be held. + */ +void qib_schedule_send(struct rvt_qp *qp) +{ + if (qib_send_ok(qp)) + _qib_schedule_send(qp); } diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index 6c5e77753..4b76a8d59 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -45,6 +45,8 @@ #include <linux/completion.h> #include <rdma/ib_pack.h> #include <rdma/ib_user_verbs.h> +#include <rdma/rdma_vt.h> +#include <rdma/rdmavt_cq.h> struct qib_ctxtdata; struct qib_pportdata; @@ -53,9 +55,7 @@ struct qib_verbs_txreq; #define QIB_MAX_RDMA_ATOMIC 16 #define QIB_GUIDS_PER_PORT 5 - -#define QPN_MAX (1 << 24) -#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE) +#define QIB_PSN_SHIFT 8 /* * Increment this value if any changes that break userspace ABI @@ -63,12 +63,6 @@ struct qib_verbs_txreq; */ #define QIB_UVERBS_ABI_VERSION 2 -/* - * Define an ib_cq_notify value that is not valid so we know when CQ - * notifications are armed. 
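[Annotation] The send-path split above reflects the new per-QP private data: the work item that drives progress now lives in struct qib_qp_priv (hung off qp->priv and defined in qib_verbs.h below), so _qib_schedule_send() can queue priv->s_work without taking s_lock, while qib_schedule_send() keeps the qib_send_ok() check for callers that already hold the lock. Below is a hedged sketch of the matching qp_priv_alloc hook whose prototype appears later in the header; the body is illustrative only, not the actual qib_qp.c implementation:

void *example_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
{
	struct qib_qp_priv *priv;

	priv = kzalloc(sizeof(*priv), gfp);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->owner = qp;			/* back-pointer used by the work handler */
	INIT_WORK(&priv->s_work, _qib_do_send);	/* progress runs from ppd->qib_wq */
	INIT_LIST_HEAD(&priv->iowait);
	/* the real allocator presumably also sets up priv->s_hdr; omitted here */

	return priv;
}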
- */ -#define IB_CQ_NONE (IB_CQ_NEXT_COMP + 1) - #define IB_SEQ_NAK (3 << 29) /* AETH NAK opcode values */ @@ -79,17 +73,6 @@ struct qib_verbs_txreq; #define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63 #define IB_NAK_INVALID_RD_REQUEST 0x64 -/* Flags for checking QP state (see ib_qib_state_ops[]) */ -#define QIB_POST_SEND_OK 0x01 -#define QIB_POST_RECV_OK 0x02 -#define QIB_PROCESS_RECV_OK 0x04 -#define QIB_PROCESS_SEND_OK 0x08 -#define QIB_PROCESS_NEXT_SEND_OK 0x10 -#define QIB_FLUSH_SEND 0x20 -#define QIB_FLUSH_RECV 0x40 -#define QIB_PROCESS_OR_FLUSH_SEND \ - (QIB_PROCESS_SEND_OK | QIB_FLUSH_SEND) - /* IB Performance Manager status values */ #define IB_PMA_SAMPLE_STATUS_DONE 0x00 #define IB_PMA_SAMPLE_STATUS_STARTED 0x01 @@ -203,468 +186,21 @@ struct qib_pio_header { } __packed; /* - * There is one struct qib_mcast for each multicast GID. - * All attached QPs are then stored as a list of - * struct qib_mcast_qp. + * qib specific data structure that will be hidden from rvt after the queue pair + * is made common. */ -struct qib_mcast_qp { - struct list_head list; - struct qib_qp *qp; -}; - -struct qib_mcast { - struct rb_node rb_node; - union ib_gid mgid; - struct list_head qp_list; - wait_queue_head_t wait; - atomic_t refcount; - int n_attached; -}; - -/* Protection domain */ -struct qib_pd { - struct ib_pd ibpd; - int user; /* non-zero if created from user space */ -}; - -/* Address Handle */ -struct qib_ah { - struct ib_ah ibah; - struct ib_ah_attr attr; - atomic_t refcount; -}; - -/* - * This structure is used by qib_mmap() to validate an offset - * when an mmap() request is made. The vm_area_struct then uses - * this as its vm_private_data. - */ -struct qib_mmap_info { - struct list_head pending_mmaps; - struct ib_ucontext *context; - void *obj; - __u64 offset; - struct kref ref; - unsigned size; -}; - -/* - * This structure is used to contain the head pointer, tail pointer, - * and completion queue entries as a single memory allocation so - * it can be mmap'ed into user space. - */ -struct qib_cq_wc { - u32 head; /* index of next entry to fill */ - u32 tail; /* index of next ib_poll_cq() entry */ - union { - /* these are actually size ibcq.cqe + 1 */ - struct ib_uverbs_wc uqueue[0]; - struct ib_wc kqueue[0]; - }; -}; - -/* - * The completion queue structure. - */ -struct qib_cq { - struct ib_cq ibcq; - struct kthread_work comptask; - struct qib_devdata *dd; - spinlock_t lock; /* protect changes in this struct */ - u8 notify; - u8 triggered; - struct qib_cq_wc *queue; - struct qib_mmap_info *ip; -}; - -/* - * A segment is a linear region of low physical memory. - * XXX Maybe we should use phys addr here and kmap()/kunmap(). - * Used by the verbs layer. - */ -struct qib_seg { - void *vaddr; - size_t length; -}; - -/* The number of qib_segs that fit in a page. 
*/ -#define QIB_SEGSZ (PAGE_SIZE / sizeof(struct qib_seg)) - -struct qib_segarray { - struct qib_seg segs[QIB_SEGSZ]; -}; - -struct qib_mregion { - struct ib_pd *pd; /* shares refcnt of ibmr.pd */ - u64 user_base; /* User's address for this region */ - u64 iova; /* IB start address of this region */ - size_t length; - u32 lkey; - u32 offset; /* offset (bytes) to start of region */ - int access_flags; - u32 max_segs; /* number of qib_segs in all the arrays */ - u32 mapsz; /* size of the map array */ - u8 page_shift; /* 0 - non unform/non powerof2 sizes */ - u8 lkey_published; /* in global table */ - struct completion comp; /* complete when refcount goes to zero */ - struct rcu_head list; - atomic_t refcount; - struct qib_segarray *map[0]; /* the segments */ -}; - -/* - * These keep track of the copy progress within a memory region. - * Used by the verbs layer. - */ -struct qib_sge { - struct qib_mregion *mr; - void *vaddr; /* kernel virtual address of segment */ - u32 sge_length; /* length of the SGE */ - u32 length; /* remaining length of the segment */ - u16 m; /* current index: mr->map[m] */ - u16 n; /* current index: mr->map[m]->segs[n] */ -}; - -/* Memory region */ -struct qib_mr { - struct ib_mr ibmr; - struct ib_umem *umem; - u64 *pages; - u32 npages; - struct qib_mregion mr; /* must be last */ -}; - -/* - * Send work request queue entry. - * The size of the sg_list is determined when the QP is created and stored - * in qp->s_max_sge. - */ -struct qib_swqe { - union { - struct ib_send_wr wr; /* don't use wr.sg_list */ - struct ib_ud_wr ud_wr; - struct ib_reg_wr reg_wr; - struct ib_rdma_wr rdma_wr; - struct ib_atomic_wr atomic_wr; - }; - u32 psn; /* first packet sequence number */ - u32 lpsn; /* last packet sequence number */ - u32 ssn; /* send sequence number */ - u32 length; /* total length of data in sg_list */ - struct qib_sge sg_list[0]; -}; - -/* - * Receive work request queue entry. - * The size of the sg_list is determined when the QP (or SRQ) is created - * and stored in qp->r_rq.max_sge (or srq->rq.max_sge). - */ -struct qib_rwqe { - u64 wr_id; - u8 num_sge; - struct ib_sge sg_list[0]; -}; - -/* - * This structure is used to contain the head pointer, tail pointer, - * and receive work queue entries as a single memory allocation so - * it can be mmap'ed into user space. - * Note that the wq array elements are variable size so you can't - * just index into the array to get the N'th element; - * use get_rwqe_ptr() instead. - */ -struct qib_rwq { - u32 head; /* new work requests posted to the head */ - u32 tail; /* receives pull requests from here. */ - struct qib_rwqe wq[0]; -}; - -struct qib_rq { - struct qib_rwq *wq; - u32 size; /* size of RWQE array */ - u8 max_sge; - spinlock_t lock /* protect changes in this struct */ - ____cacheline_aligned_in_smp; -}; - -struct qib_srq { - struct ib_srq ibsrq; - struct qib_rq rq; - struct qib_mmap_info *ip; - /* send signal when number of RWQEs < limit */ - u32 limit; -}; - -struct qib_sge_state { - struct qib_sge *sg_list; /* next SGE to be used if any */ - struct qib_sge sge; /* progress state for the current SGE */ - u32 total_len; - u8 num_sge; -}; - -/* - * This structure holds the information that the send tasklet needs - * to send a RDMA read response or atomic operation. - */ -struct qib_ack_entry { - u8 opcode; - u8 sent; - u32 psn; - u32 lpsn; - union { - struct qib_sge rdma_sge; - u64 atomic_data; - }; -}; - -/* - * Variables prefixed with s_ are for the requester (sender). 
- * Variables prefixed with r_ are for the responder (receiver). - * Variables prefixed with ack_ are for responder replies. - * - * Common variables are protected by both r_rq.lock and s_lock in that order - * which only happens in modify_qp() or changing the QP 'state'. - */ -struct qib_qp { - struct ib_qp ibqp; - /* read mostly fields above and below */ - struct ib_ah_attr remote_ah_attr; - struct ib_ah_attr alt_ah_attr; - struct qib_qp __rcu *next; /* link list for QPN hash table */ - struct qib_swqe *s_wq; /* send work queue */ - struct qib_mmap_info *ip; - struct qib_ib_header *s_hdr; /* next packet header to send */ - unsigned long timeout_jiffies; /* computed from timeout */ - - enum ib_mtu path_mtu; - u32 remote_qpn; - u32 pmtu; /* decoded from path_mtu */ - u32 qkey; /* QKEY for this QP (for UD or RD) */ - u32 s_size; /* send work queue size */ - u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */ - - u8 state; /* QP state */ - u8 qp_access_flags; - u8 alt_timeout; /* Alternate path timeout for this QP */ - u8 timeout; /* Timeout for this QP */ - u8 s_srate; - u8 s_mig_state; - u8 port_num; - u8 s_pkey_index; /* PKEY index to use */ - u8 s_alt_pkey_index; /* Alternate path PKEY index to use */ - u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */ - u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */ - u8 s_retry_cnt; /* number of times to retry */ - u8 s_rnr_retry_cnt; - u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */ - u8 s_max_sge; /* size of s_wq->sg_list */ - u8 s_draining; - - /* start of read/write fields */ - - atomic_t refcount ____cacheline_aligned_in_smp; - wait_queue_head_t wait; - - - struct qib_ack_entry s_ack_queue[QIB_MAX_RDMA_ATOMIC + 1] - ____cacheline_aligned_in_smp; - struct qib_sge_state s_rdma_read_sge; - - spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */ - unsigned long r_aflags; - u64 r_wr_id; /* ID for current receive WQE */ - u32 r_ack_psn; /* PSN for next ACK or atomic ACK */ - u32 r_len; /* total length of r_sge */ - u32 r_rcv_len; /* receive data len processed */ - u32 r_psn; /* expected rcv packet sequence number */ - u32 r_msn; /* message sequence number */ - - u8 r_state; /* opcode of last packet received */ - u8 r_flags; - u8 r_head_ack_queue; /* index into s_ack_queue[] */ - - struct list_head rspwait; /* link for waititing to respond */ - - struct qib_sge_state r_sge; /* current receive data */ - struct qib_rq r_rq; /* receive work queue */ - - spinlock_t s_lock ____cacheline_aligned_in_smp; - struct qib_sge_state *s_cur_sge; - u32 s_flags; - struct qib_verbs_txreq *s_tx; - struct qib_swqe *s_wqe; - struct qib_sge_state s_sge; /* current send request data */ - struct qib_mregion *s_rdma_mr; - atomic_t s_dma_busy; - u32 s_cur_size; /* size of send packet in bytes */ - u32 s_len; /* total length of s_sge */ - u32 s_rdma_read_len; /* total length of s_rdma_read_sge */ - u32 s_next_psn; /* PSN for next request */ - u32 s_last_psn; /* last response PSN processed */ - u32 s_sending_psn; /* lowest PSN that is being sent */ - u32 s_sending_hpsn; /* highest PSN that is being sent */ - u32 s_psn; /* current packet sequence number */ - u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */ - u32 s_ack_psn; /* PSN for acking sends and RDMA writes */ - u32 s_head; /* new entries added here */ - u32 s_tail; /* next entry to process */ - u32 s_cur; /* current work queue entry */ - u32 s_acked; /* last un-ACK'ed entry */ - u32 s_last; /* last completed entry */ - u32 s_ssn; /* SSN of 
tail entry */ - u32 s_lsn; /* limit sequence number (credit) */ - u16 s_hdrwords; /* size of s_hdr in 32 bit words */ - u16 s_rdma_ack_cnt; - u8 s_state; /* opcode of last packet sent */ - u8 s_ack_state; /* opcode of packet to ACK */ - u8 s_nak_state; /* non-zero if NAK is pending */ - u8 r_nak_state; /* non-zero if NAK is pending */ - u8 s_retry; /* requester retry counter */ - u8 s_rnr_retry; /* requester RNR retry counter */ - u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */ - u8 s_tail_ack_queue; /* index into s_ack_queue[] */ - - struct qib_sge_state s_ack_rdma_sge; - struct timer_list s_timer; +struct qib_qp_priv { + struct qib_ib_header *s_hdr; /* next packet header to send */ struct list_head iowait; /* link for wait PIO buf */ - + atomic_t s_dma_busy; + struct qib_verbs_txreq *s_tx; struct work_struct s_work; - wait_queue_head_t wait_dma; - - struct qib_sge r_sg_list[0] /* verified SGEs */ - ____cacheline_aligned_in_smp; + struct rvt_qp *owner; }; -/* - * Atomic bit definitions for r_aflags. - */ -#define QIB_R_WRID_VALID 0 -#define QIB_R_REWIND_SGE 1 - -/* - * Bit definitions for r_flags. - */ -#define QIB_R_REUSE_SGE 0x01 -#define QIB_R_RDMAR_SEQ 0x02 -#define QIB_R_RSP_NAK 0x04 -#define QIB_R_RSP_SEND 0x08 -#define QIB_R_COMM_EST 0x10 - -/* - * Bit definitions for s_flags. - * - * QIB_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled - * QIB_S_BUSY - send tasklet is processing the QP - * QIB_S_TIMER - the RC retry timer is active - * QIB_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics - * QIB_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs - * before processing the next SWQE - * QIB_S_WAIT_RDMAR - waiting for a RDMA read or atomic SWQE to complete - * before processing the next SWQE - * QIB_S_WAIT_RNR - waiting for RNR timeout - * QIB_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE - * QIB_S_WAIT_DMA - waiting for send DMA queue to drain before generating - * next send completion entry not via send DMA - * QIB_S_WAIT_PIO - waiting for a send buffer to be available - * QIB_S_WAIT_TX - waiting for a struct qib_verbs_txreq to be available - * QIB_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available - * QIB_S_WAIT_KMEM - waiting for kernel memory to be available - * QIB_S_WAIT_PSN - waiting for a packet to exit the send DMA queue - * QIB_S_WAIT_ACK - waiting for an ACK packet before sending more requests - * QIB_S_SEND_ONE - send one packet, request ACK, then wait for ACK - */ -#define QIB_S_SIGNAL_REQ_WR 0x0001 -#define QIB_S_BUSY 0x0002 -#define QIB_S_TIMER 0x0004 -#define QIB_S_RESP_PENDING 0x0008 -#define QIB_S_ACK_PENDING 0x0010 -#define QIB_S_WAIT_FENCE 0x0020 -#define QIB_S_WAIT_RDMAR 0x0040 -#define QIB_S_WAIT_RNR 0x0080 -#define QIB_S_WAIT_SSN_CREDIT 0x0100 -#define QIB_S_WAIT_DMA 0x0200 -#define QIB_S_WAIT_PIO 0x0400 -#define QIB_S_WAIT_TX 0x0800 -#define QIB_S_WAIT_DMA_DESC 0x1000 -#define QIB_S_WAIT_KMEM 0x2000 -#define QIB_S_WAIT_PSN 0x4000 -#define QIB_S_WAIT_ACK 0x8000 -#define QIB_S_SEND_ONE 0x10000 -#define QIB_S_UNLIMITED_CREDIT 0x20000 - -/* - * Wait flags that would prevent any packet type from being sent. - */ -#define QIB_S_ANY_WAIT_IO (QIB_S_WAIT_PIO | QIB_S_WAIT_TX | \ - QIB_S_WAIT_DMA_DESC | QIB_S_WAIT_KMEM) - -/* - * Wait flags that would prevent send work requests from making progress. 
- */ -#define QIB_S_ANY_WAIT_SEND (QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | \ - QIB_S_WAIT_RNR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_DMA | \ - QIB_S_WAIT_PSN | QIB_S_WAIT_ACK) - -#define QIB_S_ANY_WAIT (QIB_S_ANY_WAIT_IO | QIB_S_ANY_WAIT_SEND) - #define QIB_PSN_CREDIT 16 -/* - * Since struct qib_swqe is not a fixed size, we can't simply index into - * struct qib_qp.s_wq. This function does the array index computation. - */ -static inline struct qib_swqe *get_swqe_ptr(struct qib_qp *qp, - unsigned n) -{ - return (struct qib_swqe *)((char *)qp->s_wq + - (sizeof(struct qib_swqe) + - qp->s_max_sge * - sizeof(struct qib_sge)) * n); -} - -/* - * Since struct qib_rwqe is not a fixed size, we can't simply index into - * struct qib_rwq.wq. This function does the array index computation. - */ -static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n) -{ - return (struct qib_rwqe *) - ((char *) rq->wq->wq + - (sizeof(struct qib_rwqe) + - rq->max_sge * sizeof(struct ib_sge)) * n); -} - -/* - * QPN-map pages start out as NULL, they get allocated upon - * first use and are never deallocated. This way, - * large bitmaps are not allocated unless large numbers of QPs are used. - */ -struct qpn_map { - void *page; -}; - -struct qib_qpn_table { - spinlock_t lock; /* protect changes in this struct */ - unsigned flags; /* flags for QP0/1 allocated for each port */ - u32 last; /* last QP number allocated */ - u32 nmaps; /* size of the map table */ - u16 limit; - u16 mask; - /* bit map of free QP numbers other than 0/1 */ - struct qpn_map map[QPNMAP_ENTRIES]; -}; - -#define MAX_LKEY_TABLE_BITS 23 - -struct qib_lkey_table { - spinlock_t lock; /* protect changes in this struct */ - u32 next; /* next unused index (speeds search) */ - u32 gen; /* generation count */ - u32 max; /* size of the table */ - struct qib_mregion __rcu **table; -}; - struct qib_opcode_stats { u64 n_packets; /* number of packets */ u64 n_bytes; /* total number of bytes */ @@ -682,21 +218,9 @@ struct qib_pma_counters { }; struct qib_ibport { - struct qib_qp __rcu *qp0; - struct qib_qp __rcu *qp1; - struct ib_mad_agent *send_agent; /* agent for SMI (traps) */ - struct qib_ah *sm_ah; - struct qib_ah *smi_ah; - struct rb_root mcast_tree; - spinlock_t lock; /* protect changes in this struct */ - - /* non-zero when timer is set */ - unsigned long mkey_lease_timeout; - unsigned long trap_timeout; - __be64 gid_prefix; /* in network order */ - __be64 mkey; + struct rvt_ibport rvp; + struct rvt_ah *smi_ah; __be64 guids[QIB_GUIDS_PER_PORT - 1]; /* writable GUIDs */ - u64 tid; /* TID for traps */ struct qib_pma_counters __percpu *pmastats; u64 z_unicast_xmit; /* starting count for PMA */ u64 z_unicast_rcv; /* starting count for PMA */ @@ -715,82 +239,25 @@ struct qib_ibport { u32 z_local_link_integrity_errors; /* starting count for PMA */ u32 z_excessive_buffer_overrun_errors; /* starting count for PMA */ u32 z_vl15_dropped; /* starting count for PMA */ - u32 n_rc_resends; - u32 n_rc_acks; - u32 n_rc_qacks; - u32 n_rc_delayed_comp; - u32 n_seq_naks; - u32 n_rdma_seq; - u32 n_rnr_naks; - u32 n_other_naks; - u32 n_loop_pkts; - u32 n_pkt_drops; - u32 n_vl15_dropped; - u32 n_rc_timeouts; - u32 n_dmawait; - u32 n_unaligned; - u32 n_rc_dupreq; - u32 n_rc_seqnak; - u32 port_cap_flags; - u32 pma_sample_start; - u32 pma_sample_interval; - __be16 pma_counter_select[5]; - u16 pma_tag; - u16 pkey_violations; - u16 qkey_violations; - u16 mkey_violations; - u16 mkey_lease_period; - u16 sm_lid; - u16 repress_traps; - u8 sm_sl; - u8 mkeyprot; - u8 
subnet_timeout; - u8 vl_high_limit; u8 sl_to_vl[16]; - }; - struct qib_ibdev { - struct ib_device ibdev; - struct list_head pending_mmaps; - spinlock_t mmap_offset_lock; /* protect mmap_offset */ - u32 mmap_offset; - struct qib_mregion __rcu *dma_mr; - - /* QP numbers are shared by all IB ports */ - struct qib_qpn_table qpn_table; - struct qib_lkey_table lk_table; + struct rvt_dev_info rdi; + struct list_head piowait; /* list for wait PIO buf */ struct list_head dmawait; /* list for wait DMA */ struct list_head txwait; /* list for wait qib_verbs_txreq */ struct list_head memwait; /* list for wait kernel memory */ struct list_head txreq_free; struct timer_list mem_timer; - struct qib_qp __rcu **qp_table; struct qib_pio_header *pio_hdrs; dma_addr_t pio_hdrs_phys; - /* list of QPs waiting for RNR timer */ - spinlock_t pending_lock; /* protect wait lists, PMA counters, etc. */ - u32 qp_table_size; /* size of the hash table */ u32 qp_rnd; /* random bytes for hash */ - spinlock_t qpt_lock; u32 n_piowait; u32 n_txwait; - u32 n_pds_allocated; /* number of PDs allocated for device */ - spinlock_t n_pds_lock; - u32 n_ahs_allocated; /* number of AHs allocated for device */ - spinlock_t n_ahs_lock; - u32 n_cqs_allocated; /* number of CQs allocated for device */ - spinlock_t n_cqs_lock; - u32 n_qps_allocated; /* number of QPs allocated for device */ - spinlock_t n_qps_lock; - u32 n_srqs_allocated; /* number of SRQs allocated for device */ - spinlock_t n_srqs_lock; - u32 n_mcast_grps_allocated; /* number of mcast groups allocated */ - spinlock_t n_mcast_grps_lock; #ifdef CONFIG_DEBUG_FS /* per HCA debugfs */ struct dentry *qib_ibdev_dbg; @@ -813,56 +280,27 @@ struct qib_verbs_counters { u32 vl15_dropped; }; -static inline struct qib_mr *to_imr(struct ib_mr *ibmr) -{ - return container_of(ibmr, struct qib_mr, ibmr); -} - -static inline struct qib_pd *to_ipd(struct ib_pd *ibpd) -{ - return container_of(ibpd, struct qib_pd, ibpd); -} - -static inline struct qib_ah *to_iah(struct ib_ah *ibah) -{ - return container_of(ibah, struct qib_ah, ibah); -} - -static inline struct qib_cq *to_icq(struct ib_cq *ibcq) -{ - return container_of(ibcq, struct qib_cq, ibcq); -} - -static inline struct qib_srq *to_isrq(struct ib_srq *ibsrq) -{ - return container_of(ibsrq, struct qib_srq, ibsrq); -} - -static inline struct qib_qp *to_iqp(struct ib_qp *ibqp) -{ - return container_of(ibqp, struct qib_qp, ibqp); -} - static inline struct qib_ibdev *to_idev(struct ib_device *ibdev) { - return container_of(ibdev, struct qib_ibdev, ibdev); + struct rvt_dev_info *rdi; + + rdi = container_of(ibdev, struct rvt_dev_info, ibdev); + return container_of(rdi, struct qib_ibdev, rdi); } /* * Send if not busy or waiting for I/O and either * a RC response is pending or we can process send work requests. */ -static inline int qib_send_ok(struct qib_qp *qp) +static inline int qib_send_ok(struct rvt_qp *qp) { - return !(qp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT_IO)) && - (qp->s_hdrwords || (qp->s_flags & QIB_S_RESP_PENDING) || - !(qp->s_flags & QIB_S_ANY_WAIT_SEND)); + return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) && + (qp->s_hdrwords || (qp->s_flags & RVT_S_RESP_PENDING) || + !(qp->s_flags & RVT_S_ANY_WAIT_SEND)); } -/* - * This must be called with s_lock held. 
- */ -void qib_schedule_send(struct qib_qp *qp); +void _qib_schedule_send(struct rvt_qp *qp); +void qib_schedule_send(struct rvt_qp *qp); static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) { @@ -878,7 +316,7 @@ static inline int qib_pkey_ok(u16 pkey1, u16 pkey2) void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl, u32 qp1, u32 qp2, __be16 lid1, __be16 lid2); -void qib_cap_mask_chg(struct qib_ibport *ibp); +void qib_cap_mask_chg(struct rvt_dev_info *rdi, u8 port_num); void qib_sys_guid_chg(struct qib_ibport *ibp); void qib_node_desc_chg(struct qib_ibport *ibp); int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, @@ -886,8 +324,8 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, const struct ib_mad_hdr *in, size_t in_mad_size, struct ib_mad_hdr *out, size_t *out_mad_size, u16 *out_mad_pkey_index); -int qib_create_agents(struct qib_ibdev *dev); -void qib_free_agents(struct qib_ibdev *dev); +void qib_notify_create_mad_agent(struct rvt_dev_info *rdi, int port_idx); +void qib_notify_free_mad_agent(struct rvt_dev_info *rdi, int port_idx); /* * Compare the lower 24 bits of the two values. @@ -898,8 +336,6 @@ static inline int qib_cmp24(u32 a, u32 b) return (((int) a) - ((int) b)) << 8; } -struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid); - int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords, u64 *rwords, u64 *spkts, u64 *rpkts, u64 *xmit_wait); @@ -907,35 +343,17 @@ int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords, int qib_get_counters(struct qib_pportdata *ppd, struct qib_verbs_counters *cntrs); -int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); - -int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); - -int qib_mcast_tree_empty(struct qib_ibport *ibp); - -__be32 qib_compute_aeth(struct qib_qp *qp); - -struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn); - -struct ib_qp *qib_create_qp(struct ib_pd *ibpd, - struct ib_qp_init_attr *init_attr, - struct ib_udata *udata); - -int qib_destroy_qp(struct ib_qp *ibqp); - -int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err); - -int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, - int attr_mask, struct ib_udata *udata); - -int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, - int attr_mask, struct ib_qp_init_attr *init_attr); - -unsigned qib_free_all_qps(struct qib_devdata *dd); +__be32 qib_compute_aeth(struct rvt_qp *qp); -void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt); - -void qib_free_qpn_table(struct qib_qpn_table *qpt); +/* + * Functions provided by qib driver for rdmavt to use + */ +unsigned qib_free_all_qps(struct rvt_dev_info *rdi); +void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp); +void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp); +void qib_notify_qp_reset(struct rvt_qp *qp); +int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, + enum ib_qp_type type, u8 port, gfp_t gfp); #ifdef CONFIG_DEBUG_FS @@ -949,7 +367,7 @@ void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter); #endif -void qib_get_credit(struct qib_qp *qp, u32 aeth); +void qib_get_credit(struct rvt_qp *qp, u32 aeth); unsigned qib_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult); @@ -957,166 +375,66 @@ void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail); void qib_put_txreq(struct qib_verbs_txreq *tx); -int qib_verbs_send(struct qib_qp *qp, struct 
qib_ib_header *hdr, - u32 hdrwords, struct qib_sge_state *ss, u32 len); +int qib_verbs_send(struct rvt_qp *qp, struct qib_ib_header *hdr, + u32 hdrwords, struct rvt_sge_state *ss, u32 len); -void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, +void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release); -void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release); +void qib_skip_sge(struct rvt_sge_state *ss, u32 length, int release); void qib_uc_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, - int has_grh, void *data, u32 tlen, struct qib_qp *qp); + int has_grh, void *data, u32 tlen, struct rvt_qp *qp); void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, - int has_grh, void *data, u32 tlen, struct qib_qp *qp); + int has_grh, void *data, u32 tlen, struct rvt_qp *qp); int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr); +int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe); + struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid); void qib_rc_rnr_retry(unsigned long arg); -void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr); +void qib_rc_send_complete(struct rvt_qp *qp, struct qib_ib_header *hdr); -void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err); +void qib_rc_error(struct rvt_qp *qp, enum ib_wc_status err); -int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr); +int qib_post_ud_send(struct rvt_qp *qp, struct ib_send_wr *wr); void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, - int has_grh, void *data, u32 tlen, struct qib_qp *qp); - -int qib_alloc_lkey(struct qib_mregion *mr, int dma_region); - -void qib_free_lkey(struct qib_mregion *mr); - -int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, - struct qib_sge *isge, struct ib_sge *sge, int acc); - -int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, - u32 len, u64 vaddr, u32 rkey, int acc); - -int qib_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr, - struct ib_recv_wr **bad_wr); - -struct ib_srq *qib_create_srq(struct ib_pd *ibpd, - struct ib_srq_init_attr *srq_init_attr, - struct ib_udata *udata); - -int qib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr, - enum ib_srq_attr_mask attr_mask, - struct ib_udata *udata); - -int qib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr); - -int qib_destroy_srq(struct ib_srq *ibsrq); - -int qib_cq_init(struct qib_devdata *dd); - -void qib_cq_exit(struct qib_devdata *dd); - -void qib_cq_enter(struct qib_cq *cq, struct ib_wc *entry, int sig); - -int qib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); - -struct ib_cq *qib_create_cq(struct ib_device *ibdev, - const struct ib_cq_init_attr *attr, - struct ib_ucontext *context, - struct ib_udata *udata); - -int qib_destroy_cq(struct ib_cq *ibcq); - -int qib_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags); - -int qib_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata); - -struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc); - -struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, - u64 virt_addr, int mr_access_flags, - struct ib_udata *udata); - -int qib_dereg_mr(struct ib_mr *ibmr); - -struct ib_mr *qib_alloc_mr(struct ib_pd *pd, - enum ib_mr_type mr_type, - u32 max_entries); - -int qib_map_mr_sg(struct ib_mr *ibmr, - struct scatterlist *sg, - int sg_nents); - -int qib_reg_mr(struct qib_qp *qp, struct ib_reg_wr *wr); - -struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int 
mr_access_flags, - struct ib_fmr_attr *fmr_attr); - -int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, - int list_len, u64 iova); - -int qib_unmap_fmr(struct list_head *fmr_list); - -int qib_dealloc_fmr(struct ib_fmr *ibfmr); - -static inline void qib_get_mr(struct qib_mregion *mr) -{ - atomic_inc(&mr->refcount); -} + int has_grh, void *data, u32 tlen, struct rvt_qp *qp); void mr_rcu_callback(struct rcu_head *list); -static inline void qib_put_mr(struct qib_mregion *mr) -{ - if (unlikely(atomic_dec_and_test(&mr->refcount))) - call_rcu(&mr->list, mr_rcu_callback); -} - -static inline void qib_put_ss(struct qib_sge_state *ss) -{ - while (ss->num_sge) { - qib_put_mr(ss->sge.mr); - if (--ss->num_sge) - ss->sge = *ss->sg_list++; - } -} - - -void qib_release_mmap_info(struct kref *ref); +int qib_get_rwqe(struct rvt_qp *qp, int wr_id_only); -struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size, - struct ib_ucontext *context, - void *obj); - -void qib_update_mmap_info(struct qib_ibdev *dev, struct qib_mmap_info *ip, - u32 size, void *obj); - -int qib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma); - -int qib_get_rwqe(struct qib_qp *qp, int wr_id_only); - -void qib_migrate_qp(struct qib_qp *qp); +void qib_migrate_qp(struct rvt_qp *qp); int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr, - int has_grh, struct qib_qp *qp, u32 bth0); + int has_grh, struct rvt_qp *qp, u32 bth0); u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr, struct ib_global_route *grh, u32 hwords, u32 nwords); -void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr, +void qib_make_ruc_header(struct rvt_qp *qp, struct qib_other_headers *ohdr, u32 bth0, u32 bth2); -void qib_do_send(struct work_struct *work); +void _qib_do_send(struct work_struct *work); + +void qib_do_send(struct rvt_qp *qp); -void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, +void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe, enum ib_wc_status status); -void qib_send_rc_ack(struct qib_qp *qp); +void qib_send_rc_ack(struct rvt_qp *qp); -int qib_make_rc_req(struct qib_qp *qp); +int qib_make_rc_req(struct rvt_qp *qp); -int qib_make_uc_req(struct qib_qp *qp); +int qib_make_uc_req(struct rvt_qp *qp); -int qib_make_ud_req(struct qib_qp *qp); +int qib_make_ud_req(struct rvt_qp *qp); int qib_register_ib_device(struct qib_devdata *); @@ -1150,11 +468,11 @@ extern const enum ib_wc_opcode ib_qib_wc_opcode[]; #define IB_PHYSPORTSTATE_CFG_ENH 0x10 #define IB_PHYSPORTSTATE_CFG_WAIT_ENH 0x13 -extern const int ib_qib_state_ops[]; +extern const int ib_rvt_state_ops[]; extern __be64 ib_qib_sys_image_guid; /* in network order */ -extern unsigned int ib_qib_lkey_table_size; +extern unsigned int ib_rvt_lkey_table_size; extern unsigned int ib_qib_max_cqes; @@ -1178,6 +496,4 @@ extern unsigned int ib_qib_max_srq_wrs; extern const u32 ib_qib_rnr_table[]; -extern struct ib_dma_mapping_ops qib_dma_mapping_ops; - #endif /* QIB_VERBS_H */ diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c deleted file mode 100644 index b2fb5286d..000000000 --- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c +++ /dev/null @@ -1,363 +0,0 @@ -/* - * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. - * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#include <linux/rculist.h> - -#include "qib.h" - -/** - * qib_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct - * @qp: the QP to link - */ -static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp) -{ - struct qib_mcast_qp *mqp; - - mqp = kmalloc(sizeof(*mqp), GFP_KERNEL); - if (!mqp) - goto bail; - - mqp->qp = qp; - atomic_inc(&qp->refcount); - -bail: - return mqp; -} - -static void qib_mcast_qp_free(struct qib_mcast_qp *mqp) -{ - struct qib_qp *qp = mqp->qp; - - /* Notify qib_destroy_qp() if it is waiting. */ - if (atomic_dec_and_test(&qp->refcount)) - wake_up(&qp->wait); - - kfree(mqp); -} - -/** - * qib_mcast_alloc - allocate the multicast GID structure - * @mgid: the multicast GID - * - * A list of QPs will be attached to this structure. - */ -static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid) -{ - struct qib_mcast *mcast; - - mcast = kmalloc(sizeof(*mcast), GFP_KERNEL); - if (!mcast) - goto bail; - - mcast->mgid = *mgid; - INIT_LIST_HEAD(&mcast->qp_list); - init_waitqueue_head(&mcast->wait); - atomic_set(&mcast->refcount, 0); - mcast->n_attached = 0; - -bail: - return mcast; -} - -static void qib_mcast_free(struct qib_mcast *mcast) -{ - struct qib_mcast_qp *p, *tmp; - - list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) - qib_mcast_qp_free(p); - - kfree(mcast); -} - -/** - * qib_mcast_find - search the global table for the given multicast GID - * @ibp: the IB port structure - * @mgid: the multicast GID to search for - * - * Returns NULL if not found. - * - * The caller is responsible for decrementing the reference count if found. 
- */ -struct qib_mcast *qib_mcast_find(struct qib_ibport *ibp, union ib_gid *mgid) -{ - struct rb_node *n; - unsigned long flags; - struct qib_mcast *mcast; - - spin_lock_irqsave(&ibp->lock, flags); - n = ibp->mcast_tree.rb_node; - while (n) { - int ret; - - mcast = rb_entry(n, struct qib_mcast, rb_node); - - ret = memcmp(mgid->raw, mcast->mgid.raw, - sizeof(union ib_gid)); - if (ret < 0) - n = n->rb_left; - else if (ret > 0) - n = n->rb_right; - else { - atomic_inc(&mcast->refcount); - spin_unlock_irqrestore(&ibp->lock, flags); - goto bail; - } - } - spin_unlock_irqrestore(&ibp->lock, flags); - - mcast = NULL; - -bail: - return mcast; -} - -/** - * qib_mcast_add - insert mcast GID into table and attach QP struct - * @mcast: the mcast GID table - * @mqp: the QP to attach - * - * Return zero if both were added. Return EEXIST if the GID was already in - * the table but the QP was added. Return ESRCH if the QP was already - * attached and neither structure was added. - */ -static int qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp, - struct qib_mcast *mcast, struct qib_mcast_qp *mqp) -{ - struct rb_node **n = &ibp->mcast_tree.rb_node; - struct rb_node *pn = NULL; - int ret; - - spin_lock_irq(&ibp->lock); - - while (*n) { - struct qib_mcast *tmcast; - struct qib_mcast_qp *p; - - pn = *n; - tmcast = rb_entry(pn, struct qib_mcast, rb_node); - - ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw, - sizeof(union ib_gid)); - if (ret < 0) { - n = &pn->rb_left; - continue; - } - if (ret > 0) { - n = &pn->rb_right; - continue; - } - - /* Search the QP list to see if this is already there. */ - list_for_each_entry_rcu(p, &tmcast->qp_list, list) { - if (p->qp == mqp->qp) { - ret = ESRCH; - goto bail; - } - } - if (tmcast->n_attached == ib_qib_max_mcast_qp_attached) { - ret = ENOMEM; - goto bail; - } - - tmcast->n_attached++; - - list_add_tail_rcu(&mqp->list, &tmcast->qp_list); - ret = EEXIST; - goto bail; - } - - spin_lock(&dev->n_mcast_grps_lock); - if (dev->n_mcast_grps_allocated == ib_qib_max_mcast_grps) { - spin_unlock(&dev->n_mcast_grps_lock); - ret = ENOMEM; - goto bail; - } - - dev->n_mcast_grps_allocated++; - spin_unlock(&dev->n_mcast_grps_lock); - - mcast->n_attached++; - - list_add_tail_rcu(&mqp->list, &mcast->qp_list); - - atomic_inc(&mcast->refcount); - rb_link_node(&mcast->rb_node, pn, n); - rb_insert_color(&mcast->rb_node, &ibp->mcast_tree); - - ret = 0; - -bail: - spin_unlock_irq(&ibp->lock); - - return ret; -} - -int qib_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) -{ - struct qib_qp *qp = to_iqp(ibqp); - struct qib_ibdev *dev = to_idev(ibqp->device); - struct qib_ibport *ibp; - struct qib_mcast *mcast; - struct qib_mcast_qp *mqp; - int ret; - - if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) { - ret = -EINVAL; - goto bail; - } - - /* - * Allocate data structures since its better to do this outside of - * spin locks and it will most likely be needed. - */ - mcast = qib_mcast_alloc(gid); - if (mcast == NULL) { - ret = -ENOMEM; - goto bail; - } - mqp = qib_mcast_qp_alloc(qp); - if (mqp == NULL) { - qib_mcast_free(mcast); - ret = -ENOMEM; - goto bail; - } - ibp = to_iport(ibqp->device, qp->port_num); - switch (qib_mcast_add(dev, ibp, mcast, mqp)) { - case ESRCH: - /* Neither was used: OK to attach the same QP twice. */ - qib_mcast_qp_free(mqp); - qib_mcast_free(mcast); - break; - - case EEXIST: /* The mcast wasn't used */ - qib_mcast_free(mcast); - break; - - case ENOMEM: - /* Exceeded the maximum number of mcast groups. 
*/ - qib_mcast_qp_free(mqp); - qib_mcast_free(mcast); - ret = -ENOMEM; - goto bail; - - default: - break; - } - - ret = 0; - -bail: - return ret; -} - -int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) -{ - struct qib_qp *qp = to_iqp(ibqp); - struct qib_ibdev *dev = to_idev(ibqp->device); - struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num); - struct qib_mcast *mcast = NULL; - struct qib_mcast_qp *p, *tmp, *delp = NULL; - struct rb_node *n; - int last = 0; - int ret; - - if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) - return -EINVAL; - - spin_lock_irq(&ibp->lock); - - /* Find the GID in the mcast table. */ - n = ibp->mcast_tree.rb_node; - while (1) { - if (n == NULL) { - spin_unlock_irq(&ibp->lock); - return -EINVAL; - } - - mcast = rb_entry(n, struct qib_mcast, rb_node); - ret = memcmp(gid->raw, mcast->mgid.raw, - sizeof(union ib_gid)); - if (ret < 0) - n = n->rb_left; - else if (ret > 0) - n = n->rb_right; - else - break; - } - - /* Search the QP list. */ - list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) { - if (p->qp != qp) - continue; - /* - * We found it, so remove it, but don't poison the forward - * link until we are sure there are no list walkers. - */ - list_del_rcu(&p->list); - mcast->n_attached--; - delp = p; - - /* If this was the last attached QP, remove the GID too. */ - if (list_empty(&mcast->qp_list)) { - rb_erase(&mcast->rb_node, &ibp->mcast_tree); - last = 1; - } - break; - } - - spin_unlock_irq(&ibp->lock); - /* QP not attached */ - if (!delp) - return -EINVAL; - /* - * Wait for any list walkers to finish before freeing the - * list element. - */ - wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1); - qib_mcast_qp_free(delp); - - if (last) { - atomic_dec(&mcast->refcount); - wait_event(mcast->wait, !atomic_read(&mcast->refcount)); - qib_mcast_free(mcast); - spin_lock_irq(&dev->n_mcast_grps_lock); - dev->n_mcast_grps_allocated--; - spin_unlock_irq(&dev->n_mcast_grps_lock); - } - return 0; -} - -int qib_mcast_tree_empty(struct qib_ibport *ibp) -{ - return ibp->mcast_tree.rb_node == NULL; -} diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c index 5f44b66cc..5b0248adf 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c @@ -64,7 +64,7 @@ const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state) case IB_QPS_ERR: return "ERR"; default: - return "UNKOWN STATE"; + return "UNKNOWN STATE"; } } diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 6cdb4d23f..a5bfbba6b 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -269,7 +269,6 @@ int usnic_ib_query_device(struct ib_device *ibdev, struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); union ib_gid gid; struct ethtool_drvinfo info; - struct ethtool_cmd cmd; int qp_per_vf; usnic_dbg("\n"); @@ -278,7 +277,6 @@ int usnic_ib_query_device(struct ib_device *ibdev, mutex_lock(&us_ibdev->usdev_lock); us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info); - us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd); memset(props, 0, sizeof(*props)); usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr, &gid.raw[0]); @@ -326,12 +324,12 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); - struct ethtool_cmd cmd; + struct 
ethtool_link_ksettings cmd; usnic_dbg("\n"); mutex_lock(&us_ibdev->usdev_lock); - us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd); + __ethtool_get_link_ksettings(us_ibdev->netdev, &cmd); memset(props, 0, sizeof(*props)); props->lid = 0; @@ -355,8 +353,8 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port, props->pkey_tbl_len = 1; props->bad_pkey_cntr = 0; props->qkey_viol_cntr = 0; - eth_speed_to_ib_speed(cmd.speed, &props->active_speed, - &props->active_width); + eth_speed_to_ib_speed(cmd.base.speed, &props->active_speed, + &props->active_width); props->max_mtu = IB_MTU_4096; props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu); /* Userspace will adjust for hdrs */ diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 645a5f6e6..7209fbc03 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -144,7 +144,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, ret = 0; while (npages) { - ret = get_user_pages(current, current->mm, cur_base, + ret = get_user_pages(cur_base, min_t(unsigned long, npages, PAGE_SIZE / sizeof(struct page *)), 1, !writable, page_list, NULL); |
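[Annotation] The usnic hunks track two unrelated core-API changes: usnic_ib_query_port() now reads the link speed through the ethtool_link_ksettings interface instead of the older get_settings/ethtool_cmd pair, and get_user_pages() drops its task/mm arguments (the call now operates on current implicitly). A minimal sketch of the new speed query, mirroring the usnic_ib_query_port() hunk above (locking and error handling around __ethtool_get_link_ksettings() are omitted, as they are in the hunk):

static u32 example_port_speed(struct net_device *netdev)
{
	struct ethtool_link_ksettings cmd;

	/* fills cmd.base.speed (Mb/s), cmd.base.duplex, etc. */
	__ethtool_get_link_ksettings(netdev, &cmd);

	return cmd.base.speed;	/* e.g. 10000 for a 10G uplink */
}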