| author | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-10-20 00:10:27 -0300 |
|---|---|---|
| committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-10-20 00:10:27 -0300 |
| commit | d0b2f91bede3bd5e3d24dd6803e56eee959c1797 (patch) | |
| tree | 7fee4ab0509879c373c4f2cbd5b8a5be5b4041ee /drivers/infiniband/hw/cxgb4/cm.c | |
| parent | e914f8eb445e8f74b00303c19c2ffceaedd16a05 (diff) | |
Linux-libre 4.8.2-gnupck-4.8.2-gnu
Diffstat (limited to 'drivers/infiniband/hw/cxgb4/cm.c')
-rw-r--r-- | drivers/infiniband/hw/cxgb4/cm.c | 193
1 file changed, 133 insertions(+), 60 deletions(-)
```diff
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index be00c4bc0..80f988984 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -294,6 +294,25 @@ static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
 	return;
 }
 
+static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
+{
+	struct sk_buff *skb;
+	unsigned int i;
+	size_t len;
+
+	len = roundup(sizeof(union cpl_wr_size), 16);
+	for (i = 0; i < size; i++) {
+		skb = alloc_skb(len, GFP_KERNEL);
+		if (!skb)
+			goto fail;
+		skb_queue_tail(ep_skb_list, skb);
+	}
+	return 0;
+fail:
+	skb_queue_purge(ep_skb_list);
+	return -ENOMEM;
+}
+
 static void *alloc_ep(int size, gfp_t gfp)
 {
 	struct c4iw_ep_common *epc;
@@ -314,6 +333,8 @@ static void remove_ep_tid(struct c4iw_ep *ep)
 	spin_lock_irqsave(&ep->com.dev->lock, flags);
 	_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
+	if (idr_is_empty(&ep->com.dev->hwtid_idr))
+		wake_up(&ep->com.dev->wait);
 	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
 }
 
@@ -384,6 +405,8 @@ void _c4iw_free_ep(struct kref *kref)
 		if (ep->mpa_skb)
 			kfree_skb(ep->mpa_skb);
 	}
+	if (!skb_queue_empty(&ep->com.ep_skb_list))
+		skb_queue_purge(&ep->com.ep_skb_list);
 	kfree(ep);
 }
 
@@ -620,25 +643,27 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
 	}
 }
 
-static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
+static int send_flowc(struct c4iw_ep *ep)
 {
-	unsigned int flowclen = 80;
 	struct fw_flowc_wr *flowc;
+	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
 	int i;
 	u16 vlan = ep->l2t->vlan;
 	int nparams;
 
+	if (WARN_ON(!skb))
+		return -ENOMEM;
+
 	if (vlan == CPL_L2T_VLAN_NONE)
 		nparams = 8;
 	else
 		nparams = 9;
 
-	skb = get_skb(skb, flowclen, GFP_KERNEL);
-	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
+	flowc = (struct fw_flowc_wr *)__skb_put(skb, FLOWC_LEN);
 
 	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
 					   FW_FLOWC_WR_NPARAMS_V(nparams));
-	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
+	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(FLOWC_LEN,
 					  16)) | FW_WR_FLOWID_V(ep->hwtid));
 
 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
@@ -679,18 +704,16 @@ static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
 	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
 }
 
-static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
+static int send_halfclose(struct c4iw_ep *ep)
 {
 	struct cpl_close_con_req *req;
-	struct sk_buff *skb;
+	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
 	int wrlen = roundup(sizeof *req, 16);
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	skb = get_skb(NULL, wrlen, gfp);
-	if (!skb) {
-		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
+	if (WARN_ON(!skb))
 		return -ENOMEM;
-	}
+
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
 	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
@@ -701,26 +724,24 @@ static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
-static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
+static int send_abort(struct c4iw_ep *ep)
 {
 	struct cpl_abort_req *req;
 	int wrlen = roundup(sizeof *req, 16);
+	struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	skb = get_skb(skb, wrlen, gfp);
-	if (!skb) {
-		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
-		       __func__);
+	if (WARN_ON(!req_skb))
 		return -ENOMEM;
-	}
-	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
-	t4_set_arp_err_handler(skb, ep, abort_arp_failure);
-	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
+
+	set_wr_txq(req_skb, CPL_PRIORITY_DATA, ep->txq_idx);
+	t4_set_arp_err_handler(req_skb, ep, abort_arp_failure);
+	req = (struct cpl_abort_req *)skb_put(req_skb, wrlen);
 	memset(req, 0, wrlen);
 	INIT_TP_WR(req, ep->hwtid);
 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
 	req->cmd = CPL_ABORT_SEND_RST;
-	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+	return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
 }
 
 static void best_mtu(const unsigned short *mtus, unsigned short mtu,
@@ -992,9 +1013,19 @@ static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	mpa = (struct mpa_message *)(req + 1);
 	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
-	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
-		     (markers_enabled ? MPA_MARKERS : 0) |
-		     (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
+
+	mpa->flags = 0;
+	if (crc_enabled)
+		mpa->flags |= MPA_CRC;
+	if (markers_enabled) {
+		mpa->flags |= MPA_MARKERS;
+		ep->mpa_attr.recv_marker_enabled = 1;
+	} else {
+		ep->mpa_attr.recv_marker_enabled = 0;
+	}
+	if (mpa_rev_to_use == 2)
+		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
+
 	mpa->private_data_size = htons(ep->plen);
 	mpa->revision = mpa_rev_to_use;
 	if (mpa_rev_to_use == 1) {
@@ -1169,8 +1200,11 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	mpa = (struct mpa_message *)(req + 1);
 	memset(mpa, 0, sizeof(*mpa));
 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
-	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
-		     (markers_enabled ? MPA_MARKERS : 0);
+	mpa->flags = 0;
+	if (ep->mpa_attr.crc_enabled)
+		mpa->flags |= MPA_CRC;
+	if (ep->mpa_attr.recv_marker_enabled)
+		mpa->flags |= MPA_MARKERS;
 	mpa->revision = ep->mpa_attr.version;
 	mpa->private_data_size = htons(plen);
 
@@ -1248,7 +1282,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	set_bit(ACT_ESTAB, &ep->com.history);
 
 	/* start MPA negotiation */
-	ret = send_flowc(ep, NULL);
+	ret = send_flowc(ep);
 	if (ret)
 		goto err;
 	if (ep->retry_with_mpa_v1)
@@ -1555,7 +1589,6 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 */
 	__state_set(&ep->com, FPDU_MODE);
 	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
-	ep->mpa_attr.recv_marker_enabled = markers_enabled;
 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
 	ep->mpa_attr.version = mpa->revision;
 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
@@ -1796,8 +1829,12 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 				(ep->mpa_pkt + sizeof(*mpa));
 			ep->ird = ntohs(mpa_v2_params->ird) &
 				MPA_V2_IRD_ORD_MASK;
+			ep->ird = min_t(u32, ep->ird,
+					cur_max_read_depth(ep->com.dev));
 			ep->ord = ntohs(mpa_v2_params->ord) &
 				MPA_V2_IRD_ORD_MASK;
+			ep->ord = min_t(u32, ep->ord,
+					cur_max_read_depth(ep->com.dev));
 			PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
 			     ep->ord);
 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
@@ -2004,12 +2041,17 @@ static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 }
 
 /*
- * Return whether a failed active open has allocated a TID
+ * Some of the error codes above implicitly indicate that there is no TID
+ * allocated with the result of an ACT_OPEN. We use this predicate to make
+ * that explicit.
  */
 static inline int act_open_has_tid(int status)
 {
-	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
-	       status != CPL_ERR_ARP_MISS;
+	return (status != CPL_ERR_TCAM_PARITY &&
+		status != CPL_ERR_TCAM_MISS &&
+		status != CPL_ERR_TCAM_FULL &&
+		status != CPL_ERR_CONN_EXIST_SYNRECV &&
+		status != CPL_ERR_CONN_EXIST);
 }
 
 /* Returns whether a CPL status conveys negative advice.
@@ -2077,8 +2119,10 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
 		}
 		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
 					n, pdev, rt_tos2priority(tos));
-		if (!ep->l2t)
+		if (!ep->l2t) {
+			dev_put(pdev);
 			goto out;
+		}
 		ep->mtu = pdev->mtu;
 		ep->tx_chan = cxgb4_port_chan(pdev);
 		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
@@ -2130,6 +2174,7 @@ out:
 static int c4iw_reconnect(struct c4iw_ep *ep)
 {
 	int err = 0;
+	int size = 0;
 	struct sockaddr_in *laddr = (struct sockaddr_in *)
 				    &ep->com.cm_id->m_local_addr;
 	struct sockaddr_in *raddr = (struct sockaddr_in *)
@@ -2145,6 +2190,21 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 	init_timer(&ep->timer);
 	c4iw_init_wr_wait(&ep->com.wr_wait);
 
+	/* When MPA revision is different on nodes, the node with MPA_rev=2
+	 * tries to reconnect with MPA_rev 1 for the same EP through
+	 * c4iw_reconnect(), where the same EP is assigned with new tid for
+	 * further connection establishment. As we are using the same EP pointer
+	 * for reconnect, few skbs are used during the previous c4iw_connect(),
+	 * which leaves the EP with inadequate skbs for further
+	 * c4iw_reconnect(), Further causing an assert BUG_ON() due to empty
+	 * skb_list() during peer_abort(). Allocate skbs which is already used.
+	 */
+	size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
+	if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
+		err = -ENOMEM;
+		goto fail1;
+	}
+
	/*
	 * Allocate an active TID to initiate a TCP connection.
	 */
@@ -2210,6 +2270,7 @@ fail2:
 	 * response of 1st connect request.
 	 */
 	connect_reply_upcall(ep, -ECONNRESET);
+fail1:
 	c4iw_put_ep(&ep->com);
 out:
 	return err;
@@ -2576,6 +2637,10 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
 		child_ep->mtu = peer_mss + hdrs;
 
+	skb_queue_head_init(&child_ep->com.ep_skb_list);
+	if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF))
+		goto fail;
+
 	state_set(&child_ep->com, CONNECTING);
 	child_ep->com.dev = dev;
 	child_ep->com.cm_id = NULL;
@@ -2640,6 +2705,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 			       (const u32 *)&sin6->sin6_addr.s6_addr, 1);
 	}
 	goto out;
+fail:
+	c4iw_put_ep(&child_ep->com);
 reject:
 	reject_cr(dev, hwtid, skb);
 	if (parent_ep)
@@ -2670,7 +2737,7 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep->com.state = MPA_REQ_WAIT;
 	start_ep_timer(ep);
 	set_bit(PASS_ESTAB, &ep->com.history);
-	ret = send_flowc(ep, skb);
+	ret = send_flowc(ep);
 	mutex_unlock(&ep->com.mutex);
 	if (ret)
 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
@@ -2871,10 +2938,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	}
 	mutex_unlock(&ep->com.mutex);
 
-	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
-	if (!rpl_skb) {
-		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
-		       __func__);
+	rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
+	if (WARN_ON(!rpl_skb)) {
 		release = 1;
 		goto out;
 	}
@@ -3025,9 +3090,9 @@ out:
 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 {
-	int err = 0;
-	int disconnect = 0;
+	int abort;
 	struct c4iw_ep *ep = to_ep(cm_id);
+
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
 	mutex_lock(&ep->com.mutex);
@@ -3038,16 +3103,13 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 	}
 	set_bit(ULP_REJECT, &ep->com.history);
 	if (mpa_rev == 0)
-		disconnect = 2;
-	else {
-		err = send_mpa_reject(ep, pdata, pdata_len);
-		disconnect = 1;
-	}
+		abort = 1;
+	else
+		abort = send_mpa_reject(ep, pdata, pdata_len);
 	mutex_unlock(&ep->com.mutex);
-	if (disconnect) {
-		stop_ep_timer(ep);
-		err = c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
-	}
+
+	stop_ep_timer(ep);
+	c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
 	c4iw_put_ep(&ep->com);
 	return 0;
 }
@@ -3082,7 +3144,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
 		if (conn_param->ord > ep->ird) {
 			if (RELAXED_IRD_NEGOTIATION) {
-				ep->ord = ep->ird;
+				conn_param->ord = ep->ird;
 			} else {
 				ep->ird = conn_param->ird;
 				ep->ord = conn_param->ord;
@@ -3248,6 +3310,13 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		err = -ENOMEM;
 		goto out;
 	}
+
+	skb_queue_head_init(&ep->com.ep_skb_list);
+	if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) {
+		err = -ENOMEM;
+		goto fail1;
+	}
+
 	init_timer(&ep->timer);
 	ep->plen = conn_param->private_data_len;
 	if (ep->plen)
@@ -3266,7 +3335,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	if (!ep->com.qp) {
 		PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
 		err = -EINVAL;
-		goto fail1;
+		goto fail2;
 	}
 	ref_qp(ep);
 	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
@@ -3279,7 +3348,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	if (ep->atid == -1) {
 		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
 		err = -ENOMEM;
-		goto fail1;
+		goto fail2;
 	}
 	insert_handle(dev, &dev->atid_idr, ep, ep->atid);
 
@@ -3303,7 +3372,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
 			err = pick_local_ipaddrs(dev, cm_id);
 			if (err)
-				goto fail1;
+				goto fail2;
 		}
 
 		/* find a route */
@@ -3323,7 +3392,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
 			err = pick_local_ip6addrs(dev, cm_id);
 			if (err)
-				goto fail1;
+				goto fail2;
 		}
 
 		/* find a route */
@@ -3339,14 +3408,14 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	if (!ep->dst) {
 		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
 		err = -EHOSTUNREACH;
-		goto fail2;
+		goto fail3;
 	}
 
 	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
 			ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
 	if (err) {
 		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
-		goto fail3;
+		goto fail4;
 	}
 
 	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
@@ -3362,13 +3431,15 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		goto out;
 
 	cxgb4_l2t_release(ep->l2t);
-fail3:
+fail4:
 	dst_release(ep->dst);
-fail2:
+fail3:
 	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
 	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
-fail1:
+fail2:
+	skb_queue_purge(&ep->com.ep_skb_list);
 	deref_cm_id(&ep->com);
+fail1:
 	c4iw_put_ep(&ep->com);
 out:
 	return err;
@@ -3461,6 +3532,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 		err = -ENOMEM;
 		goto fail1;
 	}
+	skb_queue_head_init(&ep->com.ep_skb_list);
 	PDBG("%s ep %p\n", __func__, ep);
 	ep->com.cm_id = cm_id;
 	ref_cm_id(&ep->com);
@@ -3577,6 +3649,7 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 	case MPA_REQ_RCVD:
 	case MPA_REP_SENT:
 	case FPDU_MODE:
+	case CONNECTING:
 		close = 1;
 		if (abrupt)
 			ep->com.state = ABORTING;
@@ -3621,10 +3694,10 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 		if (abrupt) {
 			set_bit(EP_DISC_ABORT, &ep->com.history);
 			close_complete_upcall(ep, -ECONNRESET);
-			ret = send_abort(ep, NULL, gfp);
+			ret = send_abort(ep);
 		} else {
 			set_bit(EP_DISC_CLOSE, &ep->com.history);
-			ret = send_halfclose(ep, gfp);
+			ret = send_halfclose(ep);
 		}
 		if (ret) {
 			set_bit(EP_DISC_FAIL, &ep->com.history);
```
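The recurring change in this diff is a low-resource hardening pattern: when an endpoint is created (c4iw_connect(), pass_accept_req(), c4iw_reconnect()), a fixed number of skbs (CN_MAX_CON_BUF) is pre-allocated into ep->com.ep_skb_list, and the send paths for the critical control messages (send_flowc(), send_halfclose(), send_abort(), the abort reply in peer_abort()) dequeue from that list instead of allocating at send time. Allocation can then only fail at connection setup, where returning -ENOMEM is easy to handle, never at teardown, where it is not. Below is a minimal userspace sketch of the same idea; the buf/buf_list types, sizes, and pool depth are illustrative stand-ins, not the kernel's sk_buff API:

```c
/* Hypothetical sketch of the pre-allocation pattern in this patch;
 * the names and sizes here are illustrative, not kernel API. */
#include <stdio.h>
#include <stdlib.h>

struct buf {
	struct buf *next;
	char data[128];			/* stand-in for the rounded-up WR size */
};

struct buf_list {
	struct buf *head;
};

/* Fill the pool up front, while failure is still cheap to report. */
static int alloc_buf_list(struct buf_list *l, int n)
{
	for (int i = 0; i < n; i++) {
		struct buf *b = malloc(sizeof(*b));

		if (!b) {
			while (l->head) {	/* purge on failure, like skb_queue_purge() */
				struct buf *t = l->head;

				l->head = t->next;
				free(t);
			}
			return -1;
		}
		b->next = l->head;
		l->head = b;
	}
	return 0;
}

/* At send time just dequeue; this can no longer fail with out-of-memory. */
static struct buf *dequeue_buf(struct buf_list *l)
{
	struct buf *b = l->head;

	if (b)
		l->head = b->next;
	return b;
}

int main(void)
{
	struct buf_list pool = { NULL };

	if (alloc_buf_list(&pool, 4))	/* 4 plays the role of CN_MAX_CON_BUF */
		return 1;
	printf("dequeued %p\n", (void *)dequeue_buf(&pool));
	return 0;			/* pool intentionally leaked in this short sketch */
}
```

This also explains the c4iw_reconnect() hunk: the reconnect path only tops the pool back up (CN_MAX_CON_BUF minus skb_queue_len()) because some of the pre-allocated skbs may already have been consumed by the first connect attempt.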
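The fail1..fail4 relabeling at the end of c4iw_connect() is the kernel's ordered goto-unwind idiom: a failure at step N jumps to the label that releases everything acquired in steps 1 through N-1, with the labels falling through each other in reverse acquisition order. Pre-allocating the skb list adds a new first resource, which is why every later label shifts by one. A hedged sketch of the idiom, with malloc() standing in for the real acquisitions (skb list, atid, route, L2T entry):

```c
#include <stdlib.h>

struct conn { void *skbs, *atid, *route, *l2e; };

static int connect_sketch(struct conn *c)
{
	c->skbs = malloc(16);		/* step 1: pre-allocated skb list */
	if (!c->skbs)
		goto fail1;
	c->atid = malloc(16);		/* step 2: active TID */
	if (!c->atid)
		goto fail2;
	c->route = malloc(16);		/* step 3: route */
	if (!c->route)
		goto fail3;
	c->l2e = malloc(16);		/* step 4: L2 entry */
	if (!c->l2e)
		goto fail4;
	return 0;			/* success: caller owns everything */

fail4:					/* labels fall through in reverse order */
	free(c->route);
fail3:
	free(c->atid);
fail2:
	free(c->skbs);
fail1:
	return -1;
}

int main(void)
{
	struct conn c = { 0 };

	return connect_sketch(&c) ? 1 : 0;	/* resources leak at exit in this sketch */
}
```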