From 863981e96738983919de841ec669e157e6bdaeb0 Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Sun, 11 Sep 2016 04:34:46 -0300
Subject: Linux-libre 4.7.1-gnu

---
 drivers/infiniband/hw/cxgb4/cm.c       | 611 ++++++++++++++++++++++-----------
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h |  14 +-
 drivers/infiniband/hw/cxgb4/mem.c      |  12 +-
 drivers/infiniband/hw/cxgb4/provider.c |  58 +++-
 4 files changed, 476 insertions(+), 219 deletions(-)

diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 651711370..a3a67216b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -119,7 +119,7 @@ MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
 static int mpa_rev = 2;
 module_param(mpa_rev, int, 0644);
 MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
-		"1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft"
+		"1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
 		" compliant (default=2)");

 static int markers_enabled;
@@ -145,19 +145,35 @@ static struct sk_buff_head rxq;
 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
 static void ep_timeout(unsigned long arg);
 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
+static int sched(struct c4iw_dev *dev, struct sk_buff *skb);

 static LIST_HEAD(timeout_list);
 static spinlock_t timeout_lock;

+static void deref_cm_id(struct c4iw_ep_common *epc)
+{
+	epc->cm_id->rem_ref(epc->cm_id);
+	epc->cm_id = NULL;
+	set_bit(CM_ID_DEREFED, &epc->history);
+}
+
+static void ref_cm_id(struct c4iw_ep_common *epc)
+{
+	set_bit(CM_ID_REFED, &epc->history);
+	epc->cm_id->add_ref(epc->cm_id);
+}
+
 static void deref_qp(struct c4iw_ep *ep)
 {
 	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
 	clear_bit(QP_REFERENCED, &ep->com.flags);
+	set_bit(QP_DEREFED, &ep->com.history);
 }

 static void ref_qp(struct c4iw_ep *ep)
 {
 	set_bit(QP_REFERENCED, &ep->com.flags);
+	set_bit(QP_REFED, &ep->com.history);
 	c4iw_qp_add_ref(&ep->com.qp->ibqp);
 }

@@ -201,6 +217,8 @@ static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
 	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
 	if (error < 0)
 		kfree_skb(skb);
+	else if (error == NET_XMIT_DROP)
+		return -ENOMEM;
 	return error < 0 ? error : 0;
 }

@@ -290,12 +308,63 @@ static void *alloc_ep(int size, gfp_t gfp)
 	return epc;
 }

+static void remove_ep_tid(struct c4iw_ep *ep)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ep->com.dev->lock, flags);
+	_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
+	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
+}
+
+static void insert_ep_tid(struct c4iw_ep *ep)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ep->com.dev->lock, flags);
+	_insert_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep, ep->hwtid, 0);
+	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
+}
+
+/*
+ * Atomically lookup the ep ptr given the tid and grab a reference on the ep.
+ */
+static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
+{
+	struct c4iw_ep *ep;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	ep = idr_find(&dev->hwtid_idr, tid);
+	if (ep)
+		c4iw_get_ep(&ep->com);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return ep;
+}
+
+/*
+ * Atomically lookup the ep ptr given the stid and grab a reference on the ep.
+ */
+static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
+					       unsigned int stid)
+{
+	struct c4iw_listen_ep *ep;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	ep = idr_find(&dev->stid_idr, stid);
+	if (ep)
+		c4iw_get_ep(&ep->com);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return ep;
+}
+
 void _c4iw_free_ep(struct kref *kref)
 {
 	struct c4iw_ep *ep;

 	ep = container_of(kref, struct c4iw_ep, com.kref);
-	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
+	PDBG("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
 	if (test_bit(QP_REFERENCED, &ep->com.flags))
 		deref_qp(ep);
 	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
@@ -309,10 +378,11 @@ void _c4iw_free_ep(struct kref *kref)
 			(const u32 *)&sin6->sin6_addr.s6_addr, 1);
 		}
-		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
 		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
 		dst_release(ep->dst);
 		cxgb4_l2t_release(ep->l2t);
+		if (ep->mpa_skb)
+			kfree_skb(ep->mpa_skb);
 	}
 	kfree(ep);
 }

@@ -320,6 +390,15 @@ void _c4iw_free_ep(struct kref *kref)
 static void release_ep_resources(struct c4iw_ep *ep)
 {
 	set_bit(RELEASE_RESOURCES, &ep->com.flags);
+
+	/*
+	 * If we have a hwtid, then remove it from the idr table
+	 * so lookups will no longer find this endpoint. Otherwise
+	 * we have a race where one thread finds the ep ptr just
+	 * before the other thread is freeing the ep memory.
+	 */
+	if (ep->hwtid != -1)
+		remove_ep_tid(ep);
 	c4iw_put_ep(&ep->com);
 }

@@ -432,10 +511,74 @@ static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,

 static void arp_failure_discard(void *handle, struct sk_buff *skb)
 {
-	PDBG("%s c4iw_dev %p\n", __func__, handle);
+	pr_err(MOD "ARP failure\n");
 	kfree_skb(skb);
 }

+static void mpa_start_arp_failure(void *handle, struct sk_buff *skb)
+{
+	pr_err("ARP failure during MPA Negotiation - Closing Connection\n");
+}
+
+enum {
+	NUM_FAKE_CPLS = 2,
+	FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
+	FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
+};
+
+static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+	struct c4iw_ep *ep;
+
+	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
+	release_ep_resources(ep);
+	return 0;
+}
+
+static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+	struct c4iw_ep *ep;
+
+	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
+	c4iw_put_ep(&ep->parent_ep->com);
+	release_ep_resources(ep);
+	return 0;
+}
+
+/*
+ * Fake up a special CPL opcode and call sched() so process_work() will call
+ * _put_ep_safe() in a safe context to free the ep resources. This is needed
+ * because ARP error handlers are called in an ATOMIC context, and
+ * _c4iw_free_ep() needs to block.
+ */
+static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
+				  int cpl)
+{
+	struct cpl_act_establish *rpl = cplhdr(skb);
+
+	/* Set our special ARP_FAILURE opcode */
+	rpl->ot.opcode = cpl;
+
+	/*
+	 * Save ep in the skb->cb area, after where sched() will save the dev
+	 * ptr.
+	 */
+	*((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
+	sched(ep->com.dev, skb);
+}
+
+/* Handle an ARP failure for an accept */
+static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
+{
+	struct c4iw_ep *ep = handle;
+
+	pr_err(MOD "ARP failure during accept - tid %u -dropping connection\n",
+	       ep->hwtid);
+
+	__state_set(&ep->com, DEAD);
+	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
+}
+
 /*
  * Handle an ARP failure for an active open.
 */
@@ -444,9 +587,8 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
 	struct c4iw_ep *ep = handle;

 	printk(KERN_ERR MOD "ARP failure during connect\n");
-	kfree_skb(skb);
 	connect_reply_upcall(ep, -EHOSTUNREACH);
-	state_set(&ep->com, DEAD);
+	__state_set(&ep->com, DEAD);
 	if (ep->com.remote_addr.ss_family == AF_INET6) {
 		struct sockaddr_in6 *sin6 =
 			(struct sockaddr_in6 *)&ep->com.local_addr;
@@ -455,9 +597,7 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
 	}
 	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
 	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
-	dst_release(ep->dst);
-	cxgb4_l2t_release(ep->l2t);
-	c4iw_put_ep(&ep->com);
+	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
 }

 /*
@@ -466,15 +606,21 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
  */
 static void abort_arp_failure(void *handle, struct sk_buff *skb)
 {
-	struct c4iw_rdev *rdev = handle;
+	int ret;
+	struct c4iw_ep *ep = handle;
+	struct c4iw_rdev *rdev = &ep->com.dev->rdev;
 	struct cpl_abort_req *req = cplhdr(skb);

 	PDBG("%s rdev %p\n", __func__, rdev);
 	req->cmd = CPL_ABORT_NO_RST;
-	c4iw_ofld_send(rdev, skb);
+	ret = c4iw_ofld_send(rdev, skb);
+	if (ret) {
+		__state_set(&ep->com, DEAD);
+		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
+	}
 }

-static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
+static int send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
 {
 	unsigned int flowclen = 80;
 	struct fw_flowc_wr *flowc;
@@ -530,7 +676,7 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
 	}

 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
-	c4iw_ofld_send(&ep->com.dev->rdev, skb);
+	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
 }

 static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
@@ -568,7 +714,7 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 		return -ENOMEM;
 	}
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
-	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
+	t4_set_arp_err_handler(skb, ep, abort_arp_failure);
 	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
 	memset(req, 0, wrlen);
 	INIT_TP_WR(req, ep->hwtid);
@@ -807,10 +953,10 @@ clip_release:
 	return ret;
 }

-static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
-		u8 mpa_rev_to_use)
+static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
+			u8 mpa_rev_to_use)
 {
-	int mpalen, wrlen;
+	int mpalen, wrlen, ret;
 	struct fw_ofld_tx_data_wr *req;
 	struct mpa_message *mpa;
 	struct mpa_v2_conn_params mpa_v2_params;
@@ -826,7 +972,7 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	skb = get_skb(skb, wrlen, GFP_KERNEL);
 	if (!skb) {
 		connect_reply_upcall(ep, -ENOMEM);
-		return;
+		return -ENOMEM;
 	}
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
@@ -894,12 +1040,14 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
 	BUG_ON(ep->mpa_skb);
 	ep->mpa_skb = skb;
-	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+	if (ret)
+		return ret;
 	start_ep_timer(ep);
 	__state_set(&ep->com, MPA_REQ_SENT);
 	ep->mpa_attr.initiator = 1;
 	ep->snd_seq += mpalen;
-	return;
+	return ret;
 }

 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
@@ -975,7 +1123,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	 */
 	skb_get(skb);
 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
-	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
 	BUG_ON(ep->mpa_skb);
 	ep->mpa_skb = skb;
 	ep->snd_seq += mpalen;
@@ -1060,7 +1208,7 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	 * Function fw4_ack() will deref it.
 	 */
 	skb_get(skb);
-	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
+	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
 	ep->mpa_skb = skb;
 	__state_set(&ep->com, MPA_REP_SENT);
 	ep->snd_seq += mpalen;
@@ -1074,6 +1222,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	unsigned int tid = GET_TID(req);
 	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
 	struct tid_info *t = dev->rdev.lldi.tids;
+	int ret;

 	ep = lookup_atid(t, atid);

@@ -1086,7 +1235,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	/* setup the hwtid for this connection */
 	ep->hwtid = tid;
 	cxgb4_insert_tid(t, ep, tid);
-	insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
+	insert_ep_tid(ep);

 	ep->snd_seq = be32_to_cpu(req->snd_isn);
 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
@@ -1099,13 +1248,22 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	set_bit(ACT_ESTAB, &ep->com.history);

 	/* start MPA negotiation */
-	send_flowc(ep, NULL);
+	ret = send_flowc(ep, NULL);
+	if (ret)
+		goto err;
 	if (ep->retry_with_mpa_v1)
-		send_mpa_req(ep, skb, 1);
+		ret = send_mpa_req(ep, skb, 1);
 	else
-		send_mpa_req(ep, skb, mpa_rev);
+		ret = send_mpa_req(ep, skb, mpa_rev);
+	if (ret)
+		goto err;
 	mutex_unlock(&ep->com.mutex);
 	return 0;
+err:
+	mutex_unlock(&ep->com.mutex);
+	connect_reply_upcall(ep, -ENOMEM);
+	c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+	return 0;
 }

 static void close_complete_upcall(struct c4iw_ep *ep, int status)
@@ -1120,20 +1278,11 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
 		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
 		     ep, ep->com.cm_id, ep->hwtid);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
-		ep->com.cm_id->rem_ref(ep->com.cm_id);
-		ep->com.cm_id = NULL;
+		deref_cm_id(&ep->com);
 		set_bit(CLOSE_UPCALL, &ep->com.history);
 	}
 }

-static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
-{
-	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	__state_set(&ep->com, ABORTING);
-	set_bit(ABORT_CONN, &ep->com.history);
-	return send_abort(ep, skb, gfp);
-}
-
 static void peer_close_upcall(struct c4iw_ep *ep)
 {
 	struct iw_cm_event event;
@@ -1161,8 +1310,7 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
 		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
 		     ep->com.cm_id, ep->hwtid);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
-		ep->com.cm_id->rem_ref(ep->com.cm_id);
-		ep->com.cm_id = NULL;
+		deref_cm_id(&ep->com);
 		set_bit(ABORT_UPCALL, &ep->com.history);
 	}
 }
@@ -1205,10 +1353,8 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
 	set_bit(CONN_RPL_UPCALL, &ep->com.history);
 	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

-	if (status < 0) {
-		ep->com.cm_id->rem_ref(ep->com.cm_id);
-		ep->com.cm_id = NULL;
-	}
+	if (status < 0)
+		deref_cm_id(&ep->com);
 }

 static int connect_request_upcall(struct c4iw_ep *ep)
@@ -1301,6 +1447,18 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)

 #define RELAXED_IRD_NEGOTIATION 1

+/*
+ * process_mpa_reply - process streaming mode MPA reply
+ *
+ * Returns:
+ *
+ * 0 upon success indicating a connect request was delivered to the ULP
+ * or the mpa request is incomplete but valid so far.
+ *
+ * 1 if a failure requires the caller to close the connection.
+ *
+ * 2 if a failure requires the caller to abort the connection.
+ */
 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 {
 	struct mpa_message *mpa;
@@ -1315,21 +1473,13 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)

 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

-	/*
-	 * Stop mpa timer. If it expired, then
-	 * we ignore the MPA reply. process_timeout()
-	 * will abort the connection.
-	 */
-	if (stop_ep_timer(ep))
-		return 0;
-
 	/*
 	 * If we get more than the supported amount of private data
 	 * then we must fail this connection.
 	 */
 	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
 		err = -EINVAL;
-		goto err;
+		goto err_stop_timer;
 	}

 	/*
@@ -1351,11 +1501,11 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
 		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
 		err = -EPROTO;
-		goto err;
+		goto err_stop_timer;
 	}
 	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
 		err = -EPROTO;
-		goto err;
+		goto err_stop_timer;
 	}

 	plen = ntohs(mpa->private_data_size);
@@ -1365,7 +1515,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 */
 	if (plen > MPA_MAX_PRIVATE_DATA) {
 		err = -EPROTO;
-		goto err;
+		goto err_stop_timer;
 	}

 	/*
@@ -1373,7 +1523,7 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 */
 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
 		err = -EPROTO;
-		goto err;
+		goto err_stop_timer;
 	}

 	ep->plen = (u8) plen;
@@ -1387,9 +1537,17 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)

 	if (mpa->flags & MPA_REJECT) {
 		err = -ECONNREFUSED;
-		goto err;
+		goto err_stop_timer;
 	}

+	/*
+	 * Stop mpa timer. If it expired, then
+	 * we ignore the MPA reply. process_timeout()
+	 * will abort the connection.
+	 */
+	if (stop_ep_timer(ep))
+		return 0;
+
 	/*
 	 * If we get here we have accumulated the entire mpa
 	 * start reply message including private data. And
@@ -1529,15 +1687,28 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 			goto out;
 	}
 	goto out;
+err_stop_timer:
+	stop_ep_timer(ep);
 err:
-	__state_set(&ep->com, ABORTING);
-	send_abort(ep, skb, GFP_KERNEL);
+	disconnect = 2;
 out:
 	connect_reply_upcall(ep, err);
 	return disconnect;
 }

-static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
+/*
+ * process_mpa_request - process streaming mode MPA request
+ *
+ * Returns:
+ *
+ * 0 upon success indicating a connect request was delivered to the ULP
+ * or the mpa request is incomplete but valid so far.
+ *
+ * 1 if a failure requires the caller to close the connection.
+ *
+ * 2 if a failure requires the caller to abort the connection.
+ */
+static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 {
 	struct mpa_message *mpa;
 	struct mpa_v2_conn_params *mpa_v2_params;
@@ -1549,11 +1720,8 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * If we get more than the supported amount of private data
 	 * then we must fail this connection.
 	 */
-	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
-		(void)stop_ep_timer(ep);
-		abort_connection(ep, skb, GFP_KERNEL);
-		return;
-	}
+	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
+		goto err_stop_timer;

 	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);

@@ -1569,7 +1737,7 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * We'll continue process when more data arrives.
 	 */
 	if (ep->mpa_pkt_len < sizeof(*mpa))
-		return;
+		return 0;

 	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
 	mpa = (struct mpa_message *) ep->mpa_pkt;
@@ -1580,43 +1748,32 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	if (mpa->revision > mpa_rev) {
 		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
 		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
-		(void)stop_ep_timer(ep);
-		abort_connection(ep, skb, GFP_KERNEL);
-		return;
+		goto err_stop_timer;
 	}

-	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
-		(void)stop_ep_timer(ep);
-		abort_connection(ep, skb, GFP_KERNEL);
-		return;
-	}
+	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
+		goto err_stop_timer;

 	plen = ntohs(mpa->private_data_size);

 	/*
 	 * Fail if there's too much private data.
 	 */
-	if (plen > MPA_MAX_PRIVATE_DATA) {
-		(void)stop_ep_timer(ep);
-		abort_connection(ep, skb, GFP_KERNEL);
-		return;
-	}
+	if (plen > MPA_MAX_PRIVATE_DATA)
+		goto err_stop_timer;

 	/*
 	 * If plen does not account for pkt size
 	 */
-	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
-		(void)stop_ep_timer(ep);
-		abort_connection(ep, skb, GFP_KERNEL);
-		return;
-	}
+	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
+		goto err_stop_timer;
 	ep->plen = (u8) plen;

 	/*
 	 * If we don't have all the pdata yet, then bail.
 	 */
 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
-		return;
+		return 0;

 	/*
 	 * If we get here we have accumulated the entire mpa
@@ -1665,26 +1822,26 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	       ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
 	       ep->mpa_attr.p2p_type);

-	/*
-	 * If the endpoint timer already expired, then we ignore
-	 * the start request. process_timeout() will abort
-	 * the connection.
-	 */
-	if (!stop_ep_timer(ep)) {
-		__state_set(&ep->com, MPA_REQ_RCVD);
-
-		/* drive upcall */
-		mutex_lock_nested(&ep->parent_ep->com.mutex,
-				  SINGLE_DEPTH_NESTING);
-		if (ep->parent_ep->com.state != DEAD) {
-			if (connect_request_upcall(ep))
-				abort_connection(ep, skb, GFP_KERNEL);
-		} else {
-			abort_connection(ep, skb, GFP_KERNEL);
-		}
-		mutex_unlock(&ep->parent_ep->com.mutex);
+	__state_set(&ep->com, MPA_REQ_RCVD);
+
+	/* drive upcall */
+	mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
+	if (ep->parent_ep->com.state != DEAD) {
+		if (connect_request_upcall(ep))
+			goto err_unlock_parent;
+	} else {
+		goto err_unlock_parent;
 	}
-	return;
+	mutex_unlock(&ep->parent_ep->com.mutex);
+	return 0;
+
+err_unlock_parent:
+	mutex_unlock(&ep->parent_ep->com.mutex);
+	goto err_out;
+err_stop_timer:
+	(void)stop_ep_timer(ep);
+err_out:
+	return 2;
 }

 static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
@@ -1693,11 +1850,10 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct cpl_rx_data *hdr = cplhdr(skb);
 	unsigned int dlen = ntohs(hdr->len);
 	unsigned int tid = GET_TID(hdr);
-	struct tid_info *t = dev->rdev.lldi.tids;
 	__u8 status = hdr->status;
 	int disconnect = 0;

-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
 	if (!ep)
 		return 0;
 	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
@@ -1715,7 +1871,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 		break;
 	case MPA_REQ_WAIT:
 		ep->rcv_seq += dlen;
-		process_mpa_request(ep, skb);
+		disconnect = process_mpa_request(ep, skb);
 		break;
 	case FPDU_MODE: {
 		struct c4iw_qp_attributes attrs;
@@ -1736,7 +1892,8 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	}
 	mutex_unlock(&ep->com.mutex);
 	if (disconnect)
-		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
+	c4iw_put_ep(&ep->com);
 	return 0;
 }

@@ -1746,9 +1903,8 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
 	int release = 0;
 	unsigned int tid = GET_TID(rpl);
-	struct tid_info *t = dev->rdev.lldi.tids;

-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
 	if (!ep) {
 		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
 		return 0;
@@ -1770,10 +1926,11 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)

 	if (release)
 		release_ep_resources(ep);
+	c4iw_put_ep(&ep->com);
 	return 0;
 }

-static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
+static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 {
 	struct sk_buff *skb;
 	struct fw_ofld_connection_wr *req;
@@ -1843,7 +2000,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
 	set_bit(ACT_OFLD_CONN, &ep->com.history);
-	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }

 /*
@@ -1986,6 +2143,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)

 	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
 	init_timer(&ep->timer);
+	c4iw_init_wr_wait(&ep->com.wr_wait);

 	/*
 	 * Allocate an active TID to initiate a TCP connection.
@@ -2069,6 +2227,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct sockaddr_in *ra;
 	struct sockaddr_in6 *la6;
 	struct sockaddr_in6 *ra6;
+	int ret = 0;

 	ep = lookup_atid(t, atid);
 	la = (struct sockaddr_in *)&ep->com.local_addr;
@@ -2104,9 +2263,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		mutex_unlock(&dev->rdev.stats.lock);
 		if (ep->com.local_addr.ss_family == AF_INET &&
 		    dev->rdev.lldi.enable_fw_ofld_conn) {
-			send_fw_act_open_req(ep,
-					     TID_TID_G(AOPEN_ATID_G(
-					     ntohl(rpl->atid_status))));
+			ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G(
+						   ntohl(rpl->atid_status))));
+			if (ret)
+				goto fail;
 			return 0;
 		}
 		break;
@@ -2146,6 +2306,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		break;
 	}

+fail:
 	connect_reply_upcall(ep, status2errno(status));
 	state_set(&ep->com, DEAD);

@@ -2170,9 +2331,8 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
-	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int stid = GET_TID(rpl);
-	struct c4iw_listen_ep *ep = lookup_stid(t, stid);
+	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);

 	if (!ep) {
 		PDBG("%s stid %d lookup failure!\n", __func__, stid);
@@ -2181,7 +2341,7 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	PDBG("%s ep %p status %d error %d\n", __func__, ep,
 	     rpl->status, status2errno(rpl->status));
 	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
-
+	c4iw_put_ep(&ep->com);
 out:
 	return 0;
 }
@@ -2189,17 +2349,17 @@ out:
 static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
-	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int stid = GET_TID(rpl);
-	struct c4iw_listen_ep *ep = lookup_stid(t, stid);
+	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);

 	PDBG("%s ep %p\n", __func__, ep);
 	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
+	c4iw_put_ep(&ep->com);
 	return 0;
 }

-static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
-		      struct cpl_pass_accept_req *req)
+static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
+		     struct cpl_pass_accept_req *req)
 {
 	struct cpl_pass_accept_rpl *rpl;
 	unsigned int mtu_idx;
@@ -2287,10 +2447,9 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 	rpl->opt0 = cpu_to_be64(opt0);
 	rpl->opt2 = cpu_to_be32(opt2);
 	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
-	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
-	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+	t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);

-	return;
+	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }

 static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
@@ -2355,7 +2514,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	unsigned short hdrs;
 	u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));

-	parent_ep = lookup_stid(t, stid);
+	parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
 	if (!parent_ep) {
 		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
 		goto reject;
@@ -2468,9 +2627,13 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)

 	init_timer(&child_ep->timer);
 	cxgb4_insert_tid(t, child_ep, hwtid);
-	insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
-	accept_cr(child_ep, skb, req);
-	set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
+	insert_ep_tid(child_ep);
+	if (accept_cr(child_ep, skb, req)) {
+		c4iw_put_ep(&parent_ep->com);
+		release_ep_resources(child_ep);
+	} else {
+		set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
+	}
 	if (iptype == 6) {
 		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
 		cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
@@ -2479,6 +2642,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	goto out;
 reject:
 	reject_cr(dev, hwtid, skb);
+	if (parent_ep)
+		c4iw_put_ep(&parent_ep->com);
 out:
 	return 0;
 }
@@ -2487,10 +2652,10 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct c4iw_ep *ep;
 	struct cpl_pass_establish *req = cplhdr(skb);
-	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(req);
+	int ret;

-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	ep->snd_seq = be32_to_cpu(req->snd_isn);
 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
@@ -2501,10 +2666,15 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	set_emss(ep, ntohs(req->tcp_opt));

 	dst_confirm(ep->dst);
-	state_set(&ep->com, MPA_REQ_WAIT);
+	mutex_lock(&ep->com.mutex);
+	ep->com.state = MPA_REQ_WAIT;
 	start_ep_timer(ep);
-	send_flowc(ep, skb);
 	set_bit(PASS_ESTAB, &ep->com.history);
+	ret = send_flowc(ep, skb);
+	mutex_unlock(&ep->com.mutex);
+	if (ret)
+		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
+	c4iw_put_ep(&ep->com);

 	return 0;
 }
@@ -2516,11 +2686,13 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_qp_attributes attrs;
 	int disconnect = 1;
 	int release = 0;
-	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(hdr);
 	int ret;

-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
+	if (!ep)
+		return 0;
+
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	dst_confirm(ep->dst);

@@ -2592,6 +2764,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
 	if (release)
 		release_ep_resources(ep);
+	c4iw_put_ep(&ep->com);
 	return 0;
 }

@@ -2604,10 +2777,12 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_qp_attributes attrs;
 	int ret;
 	int release = 0;
-	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(req);

-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
+	if (!ep)
+		return 0;
+
 	if (is_neg_adv(req->status)) {
 		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
 		     __func__, ep->hwtid, req->status,
@@ -2616,7 +2791,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 		mutex_lock(&dev->rdev.stats.lock);
 		dev->rdev.stats.neg_adv++;
 		mutex_unlock(&dev->rdev.stats.lock);
-		return 0;
+		goto deref_ep;
 	}
 	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
 	     ep->com.state);
@@ -2633,6 +2808,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
 	case CONNECTING:
+		c4iw_put_ep(&ep->parent_ep->com);
 		break;
 	case MPA_REQ_WAIT:
 		(void)stop_ep_timer(ep);
@@ -2681,7 +2857,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	case DEAD:
 		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
 		mutex_unlock(&ep->com.mutex);
-		return 0;
+		goto deref_ep;
 	default:
 		BUG_ON(1);
 		break;
@@ -2728,6 +2904,10 @@ out:
 			c4iw_reconnect(ep);
 	}

+deref_ep:
+	c4iw_put_ep(&ep->com);
+	/* Dereferencing ep, referenced in peer_abort_intr() */
+	c4iw_put_ep(&ep->com);
 	return 0;
 }

@@ -2737,16 +2917,18 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_qp_attributes attrs;
 	struct cpl_close_con_rpl *rpl = cplhdr(skb);
 	int release = 0;
-	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(rpl);

-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
+	if (!ep)
+		return 0;

 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	BUG_ON(!ep);

 	/* The cm_id may be null if we failed to connect */
 	mutex_lock(&ep->com.mutex);
+	set_bit(CLOSE_CON_RPL, &ep->com.history);
 	switch (ep->com.state) {
 	case CLOSING:
 		__state_set(&ep->com, MORIBUND);
@@ -2774,18 +2956,18 @@ static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	mutex_unlock(&ep->com.mutex);
 	if (release)
 		release_ep_resources(ep);
+	c4iw_put_ep(&ep->com);
 	return 0;
 }

 static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct cpl_rdma_terminate *rpl = cplhdr(skb);
-	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(rpl);
 	struct c4iw_ep *ep;
 	struct c4iw_qp_attributes attrs;

-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
 	BUG_ON(!ep);

 	if (ep && ep->com.qp) {
@@ -2796,6 +2978,7 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
 			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 	} else
 		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
+	c4iw_put_ep(&ep->com);

 	return 0;
 }
@@ -2811,15 +2994,16 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct cpl_fw4_ack *hdr = cplhdr(skb);
 	u8 credits = hdr->credits;
 	unsigned int tid = GET_TID(hdr);
-	struct tid_info *t = dev->rdev.lldi.tids;

-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
+	if (!ep)
+		return 0;
 	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
 	if (credits == 0) {
 		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
 		     __func__, ep, ep->hwtid, state_read(&ep->com));
-		return 0;
+		goto out;
 	}

 	dst_confirm(ep->dst);
@@ -2829,7 +3013,13 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
 		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
 		kfree_skb(ep->mpa_skb);
 		ep->mpa_skb = NULL;
+		mutex_lock(&ep->com.mutex);
+		if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
+			stop_ep_timer(ep);
+		mutex_unlock(&ep->com.mutex);
 	}
+out:
+	c4iw_put_ep(&ep->com);
 	return 0;
 }

@@ -2841,22 +3031,23 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

 	mutex_lock(&ep->com.mutex);
-	if (ep->com.state == DEAD) {
+	if (ep->com.state != MPA_REQ_RCVD) {
 		mutex_unlock(&ep->com.mutex);
 		c4iw_put_ep(&ep->com);
 		return -ECONNRESET;
 	}
 	set_bit(ULP_REJECT, &ep->com.history);
-	BUG_ON(ep->com.state != MPA_REQ_RCVD);
 	if (mpa_rev == 0)
-		abort_connection(ep, NULL, GFP_KERNEL);
+		disconnect = 2;
 	else {
 		err = send_mpa_reject(ep, pdata, pdata_len);
 		disconnect = 1;
 	}
 	mutex_unlock(&ep->com.mutex);
-	if (disconnect)
-		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
+	if (disconnect) {
+		stop_ep_timer(ep);
+		err = c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
+	}
 	c4iw_put_ep(&ep->com);
 	return 0;
 }
@@ -2869,24 +3060,23 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	struct c4iw_ep *ep = to_ep(cm_id);
 	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
 	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
+	int abort = 0;

 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

 	mutex_lock(&ep->com.mutex);
-	if (ep->com.state == DEAD) {
+	if (ep->com.state != MPA_REQ_RCVD) {
 		err = -ECONNRESET;
-		goto err;
+		goto err_out;
 	}

-	BUG_ON(ep->com.state != MPA_REQ_RCVD);
 	BUG_ON(!qp);

 	set_bit(ULP_ACCEPT, &ep->com.history);
 	if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
 	    (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
-		abort_connection(ep, NULL, GFP_KERNEL);
 		err = -EINVAL;
-		goto err;
+		goto err_abort;
 	}

 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
@@ -2898,9 +3088,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 			ep->ord = conn_param->ord;
 			send_mpa_reject(ep, conn_param->private_data,
 					conn_param->private_data_len);
-			abort_connection(ep, NULL, GFP_KERNEL);
 			err = -ENOMEM;
-			goto err;
+			goto err_abort;
 		}
 	}
 	if (conn_param->ird < ep->ord) {
@@ -2908,9 +3097,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		    ep->ord <= h->rdev.lldi.max_ordird_qp) {
 			conn_param->ird = ep->ord;
 		} else {
-			abort_connection(ep, NULL, GFP_KERNEL);
 			err = -ENOMEM;
-			goto err;
+			goto err_abort;
 		}
 	}
 }
@@ -2929,8 +3117,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__,
 	     ep->ird, ep->ord);

-	cm_id->add_ref(cm_id);
 	ep->com.cm_id = cm_id;
+	ref_cm_id(&ep->com);
 	ep->com.qp = qp;
 	ref_qp(ep);

@@ -2951,23 +3139,27 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	err = c4iw_modify_qp(ep->com.qp->rhp,
 			     ep->com.qp, mask, &attrs, 1);
 	if (err)
-		goto err1;
+		goto err_deref_cm_id;
+
+	set_bit(STOP_MPA_TIMER, &ep->com.flags);
 	err = send_mpa_reply(ep, conn_param->private_data,
 			     conn_param->private_data_len);
 	if (err)
-		goto err1;
+		goto err_deref_cm_id;

 	__state_set(&ep->com, FPDU_MODE);
 	established_upcall(ep);
 	mutex_unlock(&ep->com.mutex);
 	c4iw_put_ep(&ep->com);
 	return 0;
-err1:
-	ep->com.cm_id = NULL;
-	abort_connection(ep, NULL, GFP_KERNEL);
-	cm_id->rem_ref(cm_id);
-err:
+err_deref_cm_id:
+	deref_cm_id(&ep->com);
+err_abort:
+	abort = 1;
+err_out:
 	mutex_unlock(&ep->com.mutex);
+	if (abort)
+		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
 	c4iw_put_ep(&ep->com);
 	return err;
 }

@@ -3067,9 +3259,9 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	if (peer2peer && ep->ord == 0)
 		ep->ord = 1;

-	cm_id->add_ref(cm_id);
-	ep->com.dev = dev;
 	ep->com.cm_id = cm_id;
+	ref_cm_id(&ep->com);
+	ep->com.dev = dev;
 	ep->com.qp = get_qhp(dev, conn_param->qpn);
 	if (!ep->com.qp) {
 		PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
@@ -3108,7 +3300,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	/*
 	 * Handle loopback requests to INADDR_ANY.
 	 */
-	if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) {
+	if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
 		err = pick_local_ipaddrs(dev, cm_id);
 		if (err)
 			goto fail1;
@@ -3176,7 +3368,7 @@ fail2:
 	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
 	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
fail1:
-	cm_id->rem_ref(cm_id);
+	deref_cm_id(&ep->com);
 	c4iw_put_ep(&ep->com);
out:
 	return err;
@@ -3270,8 +3462,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 		goto fail1;
 	}
 	PDBG("%s ep %p\n", __func__, ep);
-	cm_id->add_ref(cm_id);
 	ep->com.cm_id = cm_id;
+	ref_cm_id(&ep->com);
 	ep->com.dev = dev;
 	ep->backlog = backlog;
 	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
@@ -3311,7 +3503,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 		cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
 				ep->com.local_addr.ss_family);
fail2:
-	cm_id->rem_ref(cm_id);
+	deref_cm_id(&ep->com);
 	c4iw_put_ep(&ep->com);
fail1:
out:
@@ -3350,7 +3542,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
 			ep->com.local_addr.ss_family);
done:
-	cm_id->rem_ref(cm_id);
+	deref_cm_id(&ep->com);
 	c4iw_put_ep(&ep->com);
 	return err;
 }

@@ -3367,6 +3559,12 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
 	     states[ep->com.state], abrupt);

+	/*
+	 * Ref the ep here in case we have fatal errors causing the
+	 * ep to be released and freed.
+	 */
+	c4iw_get_ep(&ep->com);
+
 	rdev = &ep->com.dev->rdev;
 	if (c4iw_fatal_error(rdev)) {
 		fatal = 1;
@@ -3418,10 +3616,30 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 			set_bit(EP_DISC_CLOSE, &ep->com.history);
 			ret = send_halfclose(ep, gfp);
 		}
-		if (ret)
+		if (ret) {
+			set_bit(EP_DISC_FAIL, &ep->com.history);
+			if (!abrupt) {
+				stop_ep_timer(ep);
+				close_complete_upcall(ep, -EIO);
+			}
+			if (ep->com.qp) {
+				struct c4iw_qp_attributes attrs;
+
+				attrs.next_state = C4IW_QP_STATE_ERROR;
+				ret = c4iw_modify_qp(ep->com.qp->rhp,
+						     ep->com.qp,
+						     C4IW_QP_ATTR_NEXT_STATE,
+						     &attrs, 1);
+				if (ret)
+					pr_err(MOD
+					       "%s - qp <- error failed!\n",
+					       __func__);
+			}
 			fatal = 1;
+		}
 	}
 	mutex_unlock(&ep->com.mutex);
+	c4iw_put_ep(&ep->com);
 	if (fatal)
 		release_ep_resources(ep);
 	return ret;
@@ -3676,7 +3894,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct cpl_pass_accept_req *req = (void *)(rss + 1);
 	struct l2t_entry *e;
 	struct dst_entry *dst;
-	struct c4iw_ep *lep;
+	struct c4iw_ep *lep = NULL;
 	u16 window;
 	struct port_info *pi;
 	struct net_device *pdev;
@@ -3701,7 +3919,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
 	 */
 	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);

-	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
+	lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
 	if (!lep) {
 		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
 		goto reject;
@@ -3802,6 +4020,8 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
free_dst:
 	dst_release(dst);
reject:
+	if (lep)
+		c4iw_put_ep(&lep->com);
 	return 0;
 }

@@ -3809,7 +4029,7 @@ reject:
  * These are the real handlers that are called from a
  * work queue.
  */
-static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
+static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
 	[CPL_ACT_ESTABLISH] = act_establish,
 	[CPL_ACT_OPEN_RPL] = act_open_rpl,
 	[CPL_RX_DATA] = rx_data,
@@ -3825,7 +4045,9 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
 	[CPL_RDMA_TERMINATE] = terminate,
 	[CPL_FW4_ACK] = fw4_ack,
 	[CPL_FW6_MSG] = deferred_fw6_msg,
-	[CPL_RX_PKT] = rx_pkt
+	[CPL_RX_PKT] = rx_pkt,
+	[FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
+	[FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
 };

 static void process_timeout(struct c4iw_ep *ep)
@@ -3839,11 +4061,12 @@ static void process_timeout(struct c4iw_ep *ep)
 	set_bit(TIMEDOUT, &ep->com.history);
 	switch (ep->com.state) {
 	case MPA_REQ_SENT:
-		__state_set(&ep->com, ABORTING);
 		connect_reply_upcall(ep, -ETIMEDOUT);
 		break;
 	case MPA_REQ_WAIT:
-		__state_set(&ep->com, ABORTING);
+	case MPA_REQ_RCVD:
+	case MPA_REP_SENT:
+	case FPDU_MODE:
 		break;
 	case CLOSING:
 	case MORIBUND:
@@ -3853,7 +4076,6 @@ static void process_timeout(struct c4iw_ep *ep)
 				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
 				     &attrs, 1);
 		}
-		__state_set(&ep->com, ABORTING);
 		close_complete_upcall(ep, -ETIMEDOUT);
 		break;
 	case ABORTING:
@@ -3871,9 +4093,9 @@ static void process_timeout(struct c4iw_ep *ep)
 			__func__, ep, ep->hwtid, ep->com.state);
 		abort = 0;
 	}
-	if (abort)
-		abort_connection(ep, NULL, GFP_KERNEL);
 	mutex_unlock(&ep->com.mutex);
+	if (abort)
+		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
 	c4iw_put_ep(&ep->com);
 }

@@ -4006,10 +4228,10 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct cpl_abort_req_rss *req = cplhdr(skb);
 	struct c4iw_ep *ep;
-	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(req);

-	ep = lookup_tid(t, tid);
+	ep = get_ep_from_tid(dev, tid);
+	/* This EP will be dereferenced in peer_abort() */
 	if (!ep) {
 		printk(KERN_WARNING MOD
 		       "Abort on non-existent endpoint, tid %d\n", tid);
@@ -4020,24 +4242,13 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
 		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
 		     __func__, ep->hwtid, req->status,
 		     neg_adv_str(req->status));
-		ep->stats.abort_neg_adv++;
-		dev->rdev.stats.neg_adv++;
-		kfree_skb(skb);
-		return 0;
+		goto out;
 	}
 	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
 	     ep->com.state);

-	/*
-	 * Wake up any threads in rdma_init() or rdma_fini().
-	 * However, if we are on MPAv2 and want to retry with MPAv1
-	 * then, don't wake up yet.
-	 */
-	if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
-		if (ep->com.state != MPA_REQ_SENT)
-			c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
-	} else
-		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+out:
 	sched(dev, skb);
 	return 0;
 }
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index df43f871a..f6f34a75a 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -755,6 +755,7 @@ enum c4iw_ep_flags {
 	CLOSE_SENT		= 3,
 	TIMEOUT			= 4,
 	QP_REFERENCED		= 5,
+	STOP_MPA_TIMER		= 7,
 };

 enum c4iw_ep_history {
@@ -779,7 +780,13 @@ enum c4iw_ep_history {
 	EP_DISC_ABORT		= 18,
 	CONN_RPL_UPCALL		= 19,
 	ACT_RETRY_NOMEM		= 20,
-	ACT_RETRY_INUSE		= 21
+	ACT_RETRY_INUSE		= 21,
+	CLOSE_CON_RPL		= 22,
+	EP_DISC_FAIL		= 24,
+	QP_REFED		= 25,
+	QP_DEREFED		= 26,
+	CM_ID_REFED		= 27,
+	CM_ID_DEREFED		= 28,
 };

 struct c4iw_ep_common {
@@ -917,9 +924,8 @@ void c4iw_qp_rem_ref(struct ib_qp *qp);
 struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
 			    enum ib_mr_type mr_type,
 			    u32 max_num_sg);
-int c4iw_map_mr_sg(struct ib_mr *ibmr,
-		   struct scatterlist *sg,
-		   int sg_nents);
+int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+		   unsigned int *sg_offset);
 int c4iw_dealloc_mw(struct ib_mw *mw);
 struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 			    struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 008be07d5..55d0651ee 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -86,8 +86,9 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
 			(wait ? FW_WR_COMPL_F : 0));
 	req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
 	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
-	req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
-	req->cmd |= cpu_to_be32(T5_ULP_MEMIO_ORDER_V(1));
+	req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+			       T5_ULP_MEMIO_ORDER_V(1) |
+			       T5_ULP_MEMIO_FID_V(rdev->lldi.rxq_ids[0]));
 	req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
 	req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
 	req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
@@ -690,15 +691,14 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
 	return 0;
 }

-int c4iw_map_mr_sg(struct ib_mr *ibmr,
-		   struct scatterlist *sg,
-		   int sg_nents)
+int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+		   unsigned int *sg_offset)
 {
 	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

 	mhp->mpl_len = 0;

-	return ib_sg_to_pages(ibmr, sg, sg_nents, c4iw_set_page);
+	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
 }

 int c4iw_dereg_mr(struct ib_mr *ib_mr)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 7574f394f..dd8a86b72 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -446,20 +446,59 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
 		       c4iw_dev->rdev.lldi.pdev->device);
 }

+enum counters {
+	IP4INSEGS,
+	IP4OUTSEGS,
+	IP4RETRANSSEGS,
+	IP4OUTRSTS,
+	IP6INSEGS,
+	IP6OUTSEGS,
+	IP6RETRANSSEGS,
+	IP6OUTRSTS,
+	NR_COUNTERS
+};
+
+static const char * const names[] = {
+	[IP4INSEGS] = "ip4InSegs",
+	[IP4OUTSEGS] = "ip4OutSegs",
+	[IP4RETRANSSEGS] = "ip4RetransSegs",
+	[IP4OUTRSTS] = "ip4OutRsts",
+	[IP6INSEGS] = "ip6InSegs",
+	[IP6OUTSEGS] = "ip6OutSegs",
+	[IP6RETRANSSEGS] = "ip6RetransSegs",
+	[IP6OUTRSTS] = "ip6OutRsts"
+};
+
+static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev,
+					      u8 port_num)
+{
+	BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
+
+	if (port_num != 0)
+		return NULL;
+
+	return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
+					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
 static int c4iw_get_mib(struct ib_device *ibdev,
-			union rdma_protocol_stats *stats)
+			struct rdma_hw_stats *stats,
+			u8 port, int index)
 {
 	struct tp_tcp_stats v4, v6;
 	struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);

 	cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
-	memset(stats, 0, sizeof *stats);
-	stats->iw.tcpInSegs = v4.tcp_in_segs + v6.tcp_in_segs;
-	stats->iw.tcpOutSegs = v4.tcp_out_segs + v6.tcp_out_segs;
-	stats->iw.tcpRetransSegs = v4.tcp_retrans_segs + v6.tcp_retrans_segs;
-	stats->iw.tcpOutRsts = v4.tcp_out_rsts + v6.tcp_out_rsts;
-
-	return 0;
+	stats->value[IP4INSEGS] = v4.tcp_in_segs;
+	stats->value[IP4OUTSEGS] = v4.tcp_out_segs;
+	stats->value[IP4RETRANSSEGS] = v4.tcp_retrans_segs;
+	stats->value[IP4OUTRSTS] = v4.tcp_out_rsts;
+	stats->value[IP6INSEGS] = v6.tcp_in_segs;
+	stats->value[IP6OUTSEGS] = v6.tcp_out_segs;
+	stats->value[IP6RETRANSSEGS] = v6.tcp_retrans_segs;
+	stats->value[IP6OUTRSTS] = v6.tcp_out_rsts;
+
+	return stats->num_counters;
 }

 static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
@@ -562,7 +601,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	dev->ibdev.req_notify_cq = c4iw_arm_cq;
 	dev->ibdev.post_send = c4iw_post_send;
 	dev->ibdev.post_recv = c4iw_post_receive;
-	dev->ibdev.get_protocol_stats = c4iw_get_mib;
+	dev->ibdev.alloc_hw_stats = c4iw_alloc_stats;
+	dev->ibdev.get_hw_stats = c4iw_get_mib;
 	dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
 	dev->ibdev.get_port_immutable = c4iw_port_immutable;
 	dev->ibdev.drain_sq = c4iw_drain_sq;
--
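Note on the tid lookups in the cm.c changes above: get_ep_from_tid() and
get_ep_from_stid() hold the device lock across both the idr lookup and the
kref bump, and every CPL handler now drops that reference on exit. Below is a
minimal userspace sketch of the same lookup-then-reference discipline, with a
pthread mutex standing in for the device spinlock and a plain counter for the
kref; the names and table are illustrative, not the driver's API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ep {
	int refcnt;		/* stands in for the kref in c4iw_ep_common */
	unsigned int tid;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ep *table[16];	/* stands in for the hwtid idr */

/* Lookup and reference bump under one lock, as get_ep_from_tid() does. */
static struct ep *get_ep_from_tid(unsigned int tid)
{
	struct ep *ep;

	pthread_mutex_lock(&table_lock);
	ep = table[tid % 16];
	if (ep)
		ep->refcnt++;	/* c4iw_get_ep() in the driver */
	pthread_mutex_unlock(&table_lock);
	return ep;
}

/* Drop a reference; the final put unlinks and frees the object. */
static void put_ep(struct ep *ep)
{
	pthread_mutex_lock(&table_lock);
	if (--ep->refcnt == 0) {
		table[ep->tid % 16] = NULL;	/* cf. remove_ep_tid() */
		pthread_mutex_unlock(&table_lock);
		free(ep);
		return;
	}
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	struct ep *ep = calloc(1, sizeof(*ep));

	ep->refcnt = 1;			/* the table's own reference */
	ep->tid = 3;
	table[3] = ep;

	struct ep *found = get_ep_from_tid(3);
	if (found) {
		printf("tid %u refcnt %d\n", found->tid, found->refcnt);
		put_ep(found);		/* drop the lookup reference */
	}
	put_ep(ep);			/* drop the table reference; frees ep */
	return 0;
}

Because the unlink and the final free both happen under the same lock that
the lookup takes, no thread can find a pointer to an object that another
thread is freeing, which is exactly the race the patch's comment in
release_ep_resources() describes.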
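The fake-CPL trick used by queue_arp_failure_cpl() deserves a closer look:
ARP error handlers run in atomic context where _c4iw_free_ep() cannot block,
so the handler retags the skb with an opcode just past the real CPL range and
requeues it through sched(); process_work() later dispatches it to
_put_ep_safe() in a context that may sleep. The standalone sketch below shows
only the dispatch-table shape of that trick; the opcode values, handler names
and single-threaded dispatch are illustrative assumptions, not the kernel's
work-queue machinery. The range initializer is GNU C, mirroring kernel style.

#include <stdio.h>

enum { NUM_CPL_CMDS = 4 };		/* pretend hardware opcode count */
enum {
	FAKE_CPL_PUT_EP_SAFE      = NUM_CPL_CMDS + 0,
	FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
	NUM_FAKE_CPLS             = 2,
};

struct msg { int opcode; };

typedef int (*handler_fn)(struct msg *m);

static int real_handler(struct msg *m)
{
	printf("real CPL %d handled\n", m->opcode);
	return 0;
}

static int put_ep_safe(struct msg *m)
{
	/* Safe context: may block, so resources can actually be freed. */
	printf("fake CPL %d: releasing ep resources\n", m->opcode);
	return 0;
}

static handler_fn handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
	[0 ... NUM_CPL_CMDS - 1]    = real_handler,
	[FAKE_CPL_PUT_EP_SAFE]      = put_ep_safe,
	[FAKE_CPL_PASS_PUT_EP_SAFE] = put_ep_safe,
};

/* What the atomic-context error handler does: retag and requeue only. */
static void arp_failure(struct msg *m)
{
	m->opcode = FAKE_CPL_PUT_EP_SAFE;	/* cf. queue_arp_failure_cpl() */
}

int main(void)
{
	struct msg m = { .opcode = 1 };

	arp_failure(&m);		/* atomic context: no blocking work */
	handlers[m.opcode](&m);		/* cf. process_work(): safe context */
	return 0;
}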
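The new doc comments on process_mpa_reply() and process_mpa_request() define
a tri-state contract that rx_data() maps onto c4iw_ep_disconnect()'s abrupt
flag: 0 means success or an incomplete-but-valid message, 1 asks the caller
for a graceful close, 2 for an abort, hence the call
c4iw_ep_disconnect(ep, disconnect == 2, ...). A compilable sketch of just
that contract, with assumed helper names rather than the driver's:

#include <stdio.h>

enum mpa_result { MPA_OK = 0, MPA_CLOSE = 1, MPA_ABORT = 2 };

static void ep_disconnect(int abrupt)
{
	/* abrupt != 0 models an ABORT_REQ; 0 models a half-close FIN. */
	printf(abrupt ? "sending abort\n" : "sending half-close\n");
}

/* Models the tail of rx_data() after process_mpa_*() returns. */
static void rx_data(enum mpa_result disconnect)
{
	if (disconnect)
		ep_disconnect(disconnect == MPA_ABORT);
}

int main(void)
{
	rx_data(MPA_OK);	/* nothing to do */
	rx_data(MPA_CLOSE);	/* graceful close */
	rx_data(MPA_ABORT);	/* abortive close */
	return 0;
}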
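Finally, the provider.c hunk replaces the old union rdma_protocol_stats
callback, which summed the v4 and v6 TCP MIB counters, with the
alloc_hw_stats/get_hw_stats pair: the driver publishes a counter name table
once and fills value[] on each query, returning how many counters it wrote.
The userspace mock below approximates only the shape of that interface; the
struct layout and fake MIB reads are assumptions, not the real rdma_hw_stats
API.

#include <stdio.h>

#define NR_COUNTERS 8

static const char * const names[NR_COUNTERS] = {
	"ip4InSegs", "ip4OutSegs", "ip4RetransSegs", "ip4OutRsts",
	"ip6InSegs", "ip6OutSegs", "ip6RetransSegs", "ip6OutRsts",
};

struct hw_stats {
	const char * const *names;	/* set once, cf. c4iw_alloc_stats() */
	unsigned long long value[NR_COUNTERS];
	int num_counters;
};

/* Stands in for c4iw_get_mib(): v4 and v6 are now reported separately. */
static int get_mib(struct hw_stats *stats)
{
	unsigned long long v4_in = 100, v6_in = 40;	/* fake MIB reads */

	stats->value[0] = v4_in;
	stats->value[4] = v6_in;
	return stats->num_counters;
}

int main(void)
{
	struct hw_stats stats = { .names = names, .num_counters = NR_COUNTERS };

	get_mib(&stats);
	for (int i = 0; i < stats.num_counters; i++)
		printf("%-16s %llu\n", stats.names[i], stats.value[i]);
	return 0;
}

Splitting the counters per protocol instead of summing them lets tools read
v4 and v6 behavior independently, which the old aggregate interface could
not express.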