From 57f0f512b273f60d52568b8c6b77e17f5636edc0 Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Wed, 5 Aug 2015 17:04:01 -0300
Subject: Initial import

---
 drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile |    2 +
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c    | 3118 +++++++++++++++++
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h    | 1030 ++++++
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | 3519 ++++++++++++++++++++
 .../lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c  |  230 ++
 5 files changed, 7899 insertions(+)
 create mode 100644 drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile
 create mode 100644 drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
 create mode 100644 drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
 create mode 100644 drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
 create mode 100644 drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c

diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile b/drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile
new file mode 100644
index 000000000..e0a7aa72b
--- /dev/null
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_LNET_XPRT_IB) += ko2iblnd.o
+ko2iblnd-y := o2iblnd.o o2iblnd_cb.o o2iblnd_modparams.o
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
new file mode 100644
index 000000000..3bad441de
--- /dev/null
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -0,0 +1,3118 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lnet/klnds/o2iblnd/o2iblnd.c
+ *
+ * Author: Eric Barton
+ */
+
+#include "o2iblnd.h"
+#include <asm/div64.h>
+
+static lnd_t the_o2iblnd = {
+	.lnd_type     = O2IBLND,
+	.lnd_startup  = kiblnd_startup,
+	.lnd_shutdown = kiblnd_shutdown,
+	.lnd_ctl      = kiblnd_ctl,
+	.lnd_query    = kiblnd_query,
+	.lnd_send     = kiblnd_send,
+	.lnd_recv     = kiblnd_recv,
+};
+
+kib_data_t kiblnd_data;
+
+static __u32 kiblnd_cksum(void *ptr, int nob)
+{
+	char *c = ptr;
+	__u32 sum = 0;
+
+	while (nob-- > 0)
+		sum = ((sum << 1) | (sum >> 31)) + *c++;
+
+	/* ensure I don't return 0 (== no checksum) */
+	return (sum == 0) ?
1 : sum; +} + +static char *kiblnd_msgtype2str(int type) +{ + switch (type) { + case IBLND_MSG_CONNREQ: + return "CONNREQ"; + + case IBLND_MSG_CONNACK: + return "CONNACK"; + + case IBLND_MSG_NOOP: + return "NOOP"; + + case IBLND_MSG_IMMEDIATE: + return "IMMEDIATE"; + + case IBLND_MSG_PUT_REQ: + return "PUT_REQ"; + + case IBLND_MSG_PUT_NAK: + return "PUT_NAK"; + + case IBLND_MSG_PUT_ACK: + return "PUT_ACK"; + + case IBLND_MSG_PUT_DONE: + return "PUT_DONE"; + + case IBLND_MSG_GET_REQ: + return "GET_REQ"; + + case IBLND_MSG_GET_DONE: + return "GET_DONE"; + + default: + return "???"; + } +} + +static int kiblnd_msgtype2size(int type) +{ + const int hdr_size = offsetof(kib_msg_t, ibm_u); + + switch (type) { + case IBLND_MSG_CONNREQ: + case IBLND_MSG_CONNACK: + return hdr_size + sizeof(kib_connparams_t); + + case IBLND_MSG_NOOP: + return hdr_size; + + case IBLND_MSG_IMMEDIATE: + return offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]); + + case IBLND_MSG_PUT_REQ: + return hdr_size + sizeof(kib_putreq_msg_t); + + case IBLND_MSG_PUT_ACK: + return hdr_size + sizeof(kib_putack_msg_t); + + case IBLND_MSG_GET_REQ: + return hdr_size + sizeof(kib_get_msg_t); + + case IBLND_MSG_PUT_NAK: + case IBLND_MSG_PUT_DONE: + case IBLND_MSG_GET_DONE: + return hdr_size + sizeof(kib_completion_msg_t); + default: + return -1; + } +} + +static int kiblnd_unpack_rd(kib_msg_t *msg, int flip) +{ + kib_rdma_desc_t *rd; + int nob; + int n; + int i; + + LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ || + msg->ibm_type == IBLND_MSG_PUT_ACK); + + rd = msg->ibm_type == IBLND_MSG_GET_REQ ? + &msg->ibm_u.get.ibgm_rd : + &msg->ibm_u.putack.ibpam_rd; + + if (flip) { + __swab32s(&rd->rd_key); + __swab32s(&rd->rd_nfrags); + } + + n = rd->rd_nfrags; + + if (n <= 0 || n > IBLND_MAX_RDMA_FRAGS) { + CERROR("Bad nfrags: %d, should be 0 < n <= %d\n", + n, IBLND_MAX_RDMA_FRAGS); + return 1; + } + + nob = offsetof(kib_msg_t, ibm_u) + + kiblnd_rd_msg_size(rd, msg->ibm_type, n); + + if (msg->ibm_nob < nob) { + CERROR("Short %s: %d(%d)\n", + kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob); + return 1; + } + + if (!flip) + return 0; + + for (i = 0; i < n; i++) { + __swab32s(&rd->rd_frags[i].rf_nob); + __swab64s(&rd->rd_frags[i].rf_addr); + } + + return 0; +} + +void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version, + int credits, lnet_nid_t dstnid, __u64 dststamp) +{ + kib_net_t *net = ni->ni_data; + + /* CAVEAT EMPTOR! all message fields not set here should have been + * initialised previously. */ + msg->ibm_magic = IBLND_MSG_MAGIC; + msg->ibm_version = version; + /* ibm_type */ + msg->ibm_credits = credits; + /* ibm_nob */ + msg->ibm_cksum = 0; + msg->ibm_srcnid = ni->ni_nid; + msg->ibm_srcstamp = net->ibn_incarnation; + msg->ibm_dstnid = dstnid; + msg->ibm_dststamp = dststamp; + + if (*kiblnd_tunables.kib_cksum) { + /* NB ibm_cksum zero while computing cksum */ + msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob); + } +} + +int kiblnd_unpack_msg(kib_msg_t *msg, int nob) +{ + const int hdr_size = offsetof(kib_msg_t, ibm_u); + __u32 msg_cksum; + __u16 version; + int msg_nob; + int flip; + + /* 6 bytes are enough to have received magic + version */ + if (nob < 6) { + CERROR("Short message: %d\n", nob); + return -EPROTO; + } + + if (msg->ibm_magic == IBLND_MSG_MAGIC) { + flip = 0; + } else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) { + flip = 1; + } else { + CERROR("Bad magic: %08x\n", msg->ibm_magic); + return -EPROTO; + } + + version = flip ? 
__swab16(msg->ibm_version) : msg->ibm_version; + if (version != IBLND_MSG_VERSION && + version != IBLND_MSG_VERSION_1) { + CERROR("Bad version: %x\n", version); + return -EPROTO; + } + + if (nob < hdr_size) { + CERROR("Short message: %d\n", nob); + return -EPROTO; + } + + msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob; + if (msg_nob > nob) { + CERROR("Short message: got %d, wanted %d\n", nob, msg_nob); + return -EPROTO; + } + + /* checksum must be computed with ibm_cksum zero and BEFORE anything + * gets flipped */ + msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum; + msg->ibm_cksum = 0; + if (msg_cksum != 0 && + msg_cksum != kiblnd_cksum(msg, msg_nob)) { + CERROR("Bad checksum\n"); + return -EPROTO; + } + + msg->ibm_cksum = msg_cksum; + + if (flip) { + /* leave magic unflipped as a clue to peer endianness */ + msg->ibm_version = version; + CLASSERT(sizeof(msg->ibm_type) == 1); + CLASSERT(sizeof(msg->ibm_credits) == 1); + msg->ibm_nob = msg_nob; + __swab64s(&msg->ibm_srcnid); + __swab64s(&msg->ibm_srcstamp); + __swab64s(&msg->ibm_dstnid); + __swab64s(&msg->ibm_dststamp); + } + + if (msg->ibm_srcnid == LNET_NID_ANY) { + CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid)); + return -EPROTO; + } + + if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) { + CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type), + msg_nob, kiblnd_msgtype2size(msg->ibm_type)); + return -EPROTO; + } + + switch (msg->ibm_type) { + default: + CERROR("Unknown message type %x\n", msg->ibm_type); + return -EPROTO; + + case IBLND_MSG_NOOP: + case IBLND_MSG_IMMEDIATE: + case IBLND_MSG_PUT_REQ: + break; + + case IBLND_MSG_PUT_ACK: + case IBLND_MSG_GET_REQ: + if (kiblnd_unpack_rd(msg, flip)) + return -EPROTO; + break; + + case IBLND_MSG_PUT_NAK: + case IBLND_MSG_PUT_DONE: + case IBLND_MSG_GET_DONE: + if (flip) + __swab32s(&msg->ibm_u.completion.ibcm_status); + break; + + case IBLND_MSG_CONNREQ: + case IBLND_MSG_CONNACK: + if (flip) { + __swab16s(&msg->ibm_u.connparams.ibcp_queue_depth); + __swab16s(&msg->ibm_u.connparams.ibcp_max_frags); + __swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size); + } + break; + } + return 0; +} + +int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) +{ + kib_peer_t *peer; + kib_net_t *net = ni->ni_data; + int cpt = lnet_cpt_of_nid(nid); + unsigned long flags; + + LASSERT(net != NULL); + LASSERT(nid != LNET_NID_ANY); + + LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer)); + if (peer == NULL) { + CERROR("Cannot allocate peer\n"); + return -ENOMEM; + } + + memset(peer, 0, sizeof(*peer)); /* zero flags etc */ + + peer->ibp_ni = ni; + peer->ibp_nid = nid; + peer->ibp_error = 0; + peer->ibp_last_alive = 0; + atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */ + + INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */ + INIT_LIST_HEAD(&peer->ibp_conns); + INIT_LIST_HEAD(&peer->ibp_tx_queue); + + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + + /* always called with a ref on ni, which prevents ni being shutdown */ + LASSERT(net->ibn_shutdown == 0); + + /* npeers only grows with the global lock held */ + atomic_inc(&net->ibn_npeers); + + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + + *peerp = peer; + return 0; +} + +void kiblnd_destroy_peer(kib_peer_t *peer) +{ + kib_net_t *net = peer->ibp_ni->ni_data; + + LASSERT(net != NULL); + LASSERT(atomic_read(&peer->ibp_refcount) == 0); + LASSERT(!kiblnd_peer_active(peer)); + LASSERT(peer->ibp_connecting == 0); + LASSERT(peer->ibp_accepting == 0); + 
LASSERT(list_empty(&peer->ibp_conns)); + LASSERT(list_empty(&peer->ibp_tx_queue)); + + LIBCFS_FREE(peer, sizeof(*peer)); + + /* NB a peer's connections keep a reference on their peer until + * they are destroyed, so we can be assured that _all_ state to do + * with this peer has been cleaned up when its refcount drops to + * zero. */ + atomic_dec(&net->ibn_npeers); +} + +kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid) +{ + /* the caller is responsible for accounting the additional reference + * that this creates */ + struct list_head *peer_list = kiblnd_nid2peerlist(nid); + struct list_head *tmp; + kib_peer_t *peer; + + list_for_each(tmp, peer_list) { + + peer = list_entry(tmp, kib_peer_t, ibp_list); + + LASSERT(peer->ibp_connecting > 0 || /* creating conns */ + peer->ibp_accepting > 0 || + !list_empty(&peer->ibp_conns)); /* active conn */ + + if (peer->ibp_nid != nid) + continue; + + CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n", + peer, libcfs_nid2str(nid), + atomic_read(&peer->ibp_refcount), + peer->ibp_version); + return peer; + } + return NULL; +} + +void kiblnd_unlink_peer_locked(kib_peer_t *peer) +{ + LASSERT(list_empty(&peer->ibp_conns)); + + LASSERT(kiblnd_peer_active(peer)); + list_del_init(&peer->ibp_list); + /* lose peerlist's ref */ + kiblnd_peer_decref(peer); +} + +static int kiblnd_get_peer_info(lnet_ni_t *ni, int index, + lnet_nid_t *nidp, int *count) +{ + kib_peer_t *peer; + struct list_head *ptmp; + int i; + unsigned long flags; + + read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + + for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { + + list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { + + peer = list_entry(ptmp, kib_peer_t, ibp_list); + LASSERT(peer->ibp_connecting > 0 || + peer->ibp_accepting > 0 || + !list_empty(&peer->ibp_conns)); + + if (peer->ibp_ni != ni) + continue; + + if (index-- > 0) + continue; + + *nidp = peer->ibp_nid; + *count = atomic_read(&peer->ibp_refcount); + + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, + flags); + return 0; + } + } + + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + return -ENOENT; +} + +static void kiblnd_del_peer_locked(kib_peer_t *peer) +{ + struct list_head *ctmp; + struct list_head *cnxt; + kib_conn_t *conn; + + if (list_empty(&peer->ibp_conns)) { + kiblnd_unlink_peer_locked(peer); + } else { + list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { + conn = list_entry(ctmp, kib_conn_t, ibc_list); + + kiblnd_close_conn_locked(conn, 0); + } + /* NB closing peer's last conn unlinked it. */ + } + /* NB peer now unlinked; might even be freed if the peer table had the + * last ref on it. 
*/ +} + +static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) +{ + LIST_HEAD(zombies); + struct list_head *ptmp; + struct list_head *pnxt; + kib_peer_t *peer; + int lo; + int hi; + int i; + unsigned long flags; + int rc = -ENOENT; + + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + + if (nid != LNET_NID_ANY) { + lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; + } else { + lo = 0; + hi = kiblnd_data.kib_peer_hash_size - 1; + } + + for (i = lo; i <= hi; i++) { + list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { + peer = list_entry(ptmp, kib_peer_t, ibp_list); + LASSERT(peer->ibp_connecting > 0 || + peer->ibp_accepting > 0 || + !list_empty(&peer->ibp_conns)); + + if (peer->ibp_ni != ni) + continue; + + if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid)) + continue; + + if (!list_empty(&peer->ibp_tx_queue)) { + LASSERT(list_empty(&peer->ibp_conns)); + + list_splice_init(&peer->ibp_tx_queue, + &zombies); + } + + kiblnd_del_peer_locked(peer); + rc = 0; /* matched something */ + } + } + + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + + kiblnd_txlist_done(ni, &zombies, -EIO); + + return rc; +} + +static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) +{ + kib_peer_t *peer; + struct list_head *ptmp; + kib_conn_t *conn; + struct list_head *ctmp; + int i; + unsigned long flags; + + read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + + for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { + list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { + + peer = list_entry(ptmp, kib_peer_t, ibp_list); + LASSERT(peer->ibp_connecting > 0 || + peer->ibp_accepting > 0 || + !list_empty(&peer->ibp_conns)); + + if (peer->ibp_ni != ni) + continue; + + list_for_each(ctmp, &peer->ibp_conns) { + if (index-- > 0) + continue; + + conn = list_entry(ctmp, kib_conn_t, + ibc_list); + kiblnd_conn_addref(conn); + read_unlock_irqrestore( + &kiblnd_data.kib_global_lock, + flags); + return conn; + } + } + } + + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + return NULL; +} + +int kiblnd_translate_mtu(int value) +{ + switch (value) { + default: + return -1; + case 0: + return 0; + case 256: + return IB_MTU_256; + case 512: + return IB_MTU_512; + case 1024: + return IB_MTU_1024; + case 2048: + return IB_MTU_2048; + case 4096: + return IB_MTU_4096; + } +} + +static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid) +{ + int mtu; + + /* XXX There is no path record for iWARP, set by netdev->change_mtu? */ + if (cmid->route.path_rec == NULL) + return; + + mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu); + LASSERT(mtu >= 0); + if (mtu != 0) + cmid->route.path_rec->mtu = mtu; +} + +static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) +{ + cpumask_t *mask; + int vectors; + int off; + int i; + lnet_nid_t nid = conn->ibc_peer->ibp_nid; + + vectors = conn->ibc_cmid->device->num_comp_vectors; + if (vectors <= 1) + return 0; + + mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt); + if (mask == NULL) + return 0; + + /* hash NID to CPU id in this partition... */ + off = do_div(nid, cpumask_weight(mask)); + for_each_cpu(i, mask) { + if (off-- == 0) + return i % vectors; + } + + LBUG(); + return 1; +} + +kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, + int state, int version) +{ + /* CAVEAT EMPTOR: + * If the new conn is created successfully it takes over the caller's + * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself + * is destroyed. 
On failure, the caller's ref on 'peer' remains and + * she must dispose of 'cmid'. (Actually I'd block forever if I tried + * to destroy 'cmid' here since I'm called from the CM which still has + * its ref on 'cmid'). */ + rwlock_t *glock = &kiblnd_data.kib_global_lock; + kib_net_t *net = peer->ibp_ni->ni_data; + kib_dev_t *dev; + struct ib_qp_init_attr *init_qp_attr; + struct kib_sched_info *sched; + kib_conn_t *conn; + struct ib_cq *cq; + unsigned long flags; + int cpt; + int rc; + int i; + + LASSERT(net != NULL); + LASSERT(!in_interrupt()); + + dev = net->ibn_dev; + + cpt = lnet_cpt_of_nid(peer->ibp_nid); + sched = kiblnd_data.kib_scheds[cpt]; + + LASSERT(sched->ibs_nthreads > 0); + + LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt, + sizeof(*init_qp_attr)); + if (init_qp_attr == NULL) { + CERROR("Can't allocate qp_attr for %s\n", + libcfs_nid2str(peer->ibp_nid)); + goto failed_0; + } + + LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn)); + if (conn == NULL) { + CERROR("Can't allocate connection for %s\n", + libcfs_nid2str(peer->ibp_nid)); + goto failed_1; + } + + conn->ibc_state = IBLND_CONN_INIT; + conn->ibc_version = version; + conn->ibc_peer = peer; /* I take the caller's ref */ + cmid->context = conn; /* for future CM callbacks */ + conn->ibc_cmid = cmid; + + INIT_LIST_HEAD(&conn->ibc_early_rxs); + INIT_LIST_HEAD(&conn->ibc_tx_noops); + INIT_LIST_HEAD(&conn->ibc_tx_queue); + INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd); + INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred); + INIT_LIST_HEAD(&conn->ibc_active_txs); + spin_lock_init(&conn->ibc_lock); + + LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt, + sizeof(*conn->ibc_connvars)); + if (conn->ibc_connvars == NULL) { + CERROR("Can't allocate in-progress connection state\n"); + goto failed_2; + } + + write_lock_irqsave(glock, flags); + if (dev->ibd_failover) { + write_unlock_irqrestore(glock, flags); + CERROR("%s: failover in progress\n", dev->ibd_ifname); + goto failed_2; + } + + if (dev->ibd_hdev->ibh_ibdev != cmid->device) { + /* wakeup failover thread and teardown connection */ + if (kiblnd_dev_can_failover(dev)) { + list_add_tail(&dev->ibd_fail_list, + &kiblnd_data.kib_failed_devs); + wake_up(&kiblnd_data.kib_failover_waitq); + } + + write_unlock_irqrestore(glock, flags); + CERROR("cmid HCA(%s), kib_dev(%s) need failover\n", + cmid->device->name, dev->ibd_ifname); + goto failed_2; + } + + kiblnd_hdev_addref_locked(dev->ibd_hdev); + conn->ibc_hdev = dev->ibd_hdev; + + kiblnd_setup_mtu_locked(cmid); + + write_unlock_irqrestore(glock, flags); + + LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt, + IBLND_RX_MSGS(version) * sizeof(kib_rx_t)); + if (conn->ibc_rxs == NULL) { + CERROR("Cannot allocate RX buffers\n"); + goto failed_2; + } + + rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt, + IBLND_RX_MSG_PAGES(version)); + if (rc != 0) + goto failed_2; + + kiblnd_map_rx_descs(conn); + + cq = ib_create_cq(cmid->device, + kiblnd_cq_completion, kiblnd_cq_event, conn, + IBLND_CQ_ENTRIES(version), + kiblnd_get_completion_vector(conn, cpt)); + if (IS_ERR(cq)) { + CERROR("Can't create CQ: %ld, cqe: %d\n", + PTR_ERR(cq), IBLND_CQ_ENTRIES(version)); + goto failed_2; + } + + conn->ibc_cq = cq; + + rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); + if (rc != 0) { + CERROR("Can't request completion notificiation: %d\n", rc); + goto failed_2; + } + + init_qp_attr->event_handler = kiblnd_qp_event; + init_qp_attr->qp_context = conn; + init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(version); + init_qp_attr->cap.max_recv_wr = 
IBLND_RECV_WRS(version); + init_qp_attr->cap.max_send_sge = 1; + init_qp_attr->cap.max_recv_sge = 1; + init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR; + init_qp_attr->qp_type = IB_QPT_RC; + init_qp_attr->send_cq = cq; + init_qp_attr->recv_cq = cq; + + conn->ibc_sched = sched; + + rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr); + if (rc != 0) { + CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n", + rc, init_qp_attr->cap.max_send_wr, + init_qp_attr->cap.max_recv_wr); + goto failed_2; + } + + LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr)); + + /* 1 ref for caller and each rxmsg */ + atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version)); + conn->ibc_nrx = IBLND_RX_MSGS(version); + + /* post receives */ + for (i = 0; i < IBLND_RX_MSGS(version); i++) { + rc = kiblnd_post_rx(&conn->ibc_rxs[i], + IBLND_POSTRX_NO_CREDIT); + if (rc != 0) { + CERROR("Can't post rxmsg: %d\n", rc); + + /* Make posted receives complete */ + kiblnd_abort_receives(conn); + + /* correct # of posted buffers + * NB locking needed now I'm racing with completion */ + spin_lock_irqsave(&sched->ibs_lock, flags); + conn->ibc_nrx -= IBLND_RX_MSGS(version) - i; + spin_unlock_irqrestore(&sched->ibs_lock, flags); + + /* cmid will be destroyed by CM(ofed) after cm_callback + * returned, so we can't refer it anymore + * (by kiblnd_connd()->kiblnd_destroy_conn) */ + rdma_destroy_qp(conn->ibc_cmid); + conn->ibc_cmid = NULL; + + /* Drop my own and unused rxbuffer refcounts */ + while (i++ <= IBLND_RX_MSGS(version)) + kiblnd_conn_decref(conn); + + return NULL; + } + } + + /* Init successful! */ + LASSERT(state == IBLND_CONN_ACTIVE_CONNECT || + state == IBLND_CONN_PASSIVE_WAIT); + conn->ibc_state = state; + + /* 1 more conn */ + atomic_inc(&net->ibn_nconns); + return conn; + + failed_2: + kiblnd_destroy_conn(conn); + failed_1: + LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr)); + failed_0: + return NULL; +} + +void kiblnd_destroy_conn(kib_conn_t *conn) +{ + struct rdma_cm_id *cmid = conn->ibc_cmid; + kib_peer_t *peer = conn->ibc_peer; + int rc; + + LASSERT(!in_interrupt()); + LASSERT(atomic_read(&conn->ibc_refcount) == 0); + LASSERT(list_empty(&conn->ibc_early_rxs)); + LASSERT(list_empty(&conn->ibc_tx_noops)); + LASSERT(list_empty(&conn->ibc_tx_queue)); + LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd)); + LASSERT(list_empty(&conn->ibc_tx_queue_nocred)); + LASSERT(list_empty(&conn->ibc_active_txs)); + LASSERT(conn->ibc_noops_posted == 0); + LASSERT(conn->ibc_nsends_posted == 0); + + switch (conn->ibc_state) { + default: + /* conn must be completely disengaged from the network */ + LBUG(); + + case IBLND_CONN_DISCONNECTED: + /* connvars should have been freed already */ + LASSERT(conn->ibc_connvars == NULL); + break; + + case IBLND_CONN_INIT: + break; + } + + /* conn->ibc_cmid might be destroyed by CM already */ + if (cmid != NULL && cmid->qp != NULL) + rdma_destroy_qp(cmid); + + if (conn->ibc_cq != NULL) { + rc = ib_destroy_cq(conn->ibc_cq); + if (rc != 0) + CWARN("Error destroying CQ: %d\n", rc); + } + + if (conn->ibc_rx_pages != NULL) + kiblnd_unmap_rx_descs(conn); + + if (conn->ibc_rxs != NULL) { + LIBCFS_FREE(conn->ibc_rxs, + IBLND_RX_MSGS(conn->ibc_version) + * sizeof(kib_rx_t)); + } + + if (conn->ibc_connvars != NULL) + LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars)); + + if (conn->ibc_hdev != NULL) + kiblnd_hdev_decref(conn->ibc_hdev); + + /* See CAVEAT EMPTOR above in kiblnd_create_conn */ + if (conn->ibc_state != IBLND_CONN_INIT) { + kib_net_t *net = peer->ibp_ni->ni_data; + + 
kiblnd_peer_decref(peer); + rdma_destroy_id(cmid); + atomic_dec(&net->ibn_nconns); + } + + LIBCFS_FREE(conn, sizeof(*conn)); +} + +int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why) +{ + kib_conn_t *conn; + struct list_head *ctmp; + struct list_head *cnxt; + int count = 0; + + list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { + conn = list_entry(ctmp, kib_conn_t, ibc_list); + + CDEBUG(D_NET, "Closing conn -> %s, version: %x, reason: %d\n", + libcfs_nid2str(peer->ibp_nid), + conn->ibc_version, why); + + kiblnd_close_conn_locked(conn, why); + count++; + } + + return count; +} + +int kiblnd_close_stale_conns_locked(kib_peer_t *peer, + int version, __u64 incarnation) +{ + kib_conn_t *conn; + struct list_head *ctmp; + struct list_head *cnxt; + int count = 0; + + list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { + conn = list_entry(ctmp, kib_conn_t, ibc_list); + + if (conn->ibc_version == version && + conn->ibc_incarnation == incarnation) + continue; + + CDEBUG(D_NET, + "Closing stale conn -> %s version: %x, incarnation:%#llx(%x, %#llx)\n", + libcfs_nid2str(peer->ibp_nid), + conn->ibc_version, conn->ibc_incarnation, + version, incarnation); + + kiblnd_close_conn_locked(conn, -ESTALE); + count++; + } + + return count; +} + +static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) +{ + kib_peer_t *peer; + struct list_head *ptmp; + struct list_head *pnxt; + int lo; + int hi; + int i; + unsigned long flags; + int count = 0; + + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + + if (nid != LNET_NID_ANY) + lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; + else { + lo = 0; + hi = kiblnd_data.kib_peer_hash_size - 1; + } + + for (i = lo; i <= hi; i++) { + list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { + + peer = list_entry(ptmp, kib_peer_t, ibp_list); + LASSERT(peer->ibp_connecting > 0 || + peer->ibp_accepting > 0 || + !list_empty(&peer->ibp_conns)); + + if (peer->ibp_ni != ni) + continue; + + if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid)) + continue; + + count += kiblnd_close_peer_conns_locked(peer, 0); + } + } + + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + + /* wildcards always succeed */ + if (nid == LNET_NID_ANY) + return 0; + + return (count == 0) ? 
-ENOENT : 0; +} + +int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) +{ + struct libcfs_ioctl_data *data = arg; + int rc = -EINVAL; + + switch (cmd) { + case IOC_LIBCFS_GET_PEER: { + lnet_nid_t nid = 0; + int count = 0; + + rc = kiblnd_get_peer_info(ni, data->ioc_count, + &nid, &count); + data->ioc_nid = nid; + data->ioc_count = count; + break; + } + + case IOC_LIBCFS_DEL_PEER: { + rc = kiblnd_del_peer(ni, data->ioc_nid); + break; + } + case IOC_LIBCFS_GET_CONN: { + kib_conn_t *conn; + + rc = 0; + conn = kiblnd_get_conn_by_idx(ni, data->ioc_count); + if (conn == NULL) { + rc = -ENOENT; + break; + } + + LASSERT(conn->ibc_cmid != NULL); + data->ioc_nid = conn->ibc_peer->ibp_nid; + if (conn->ibc_cmid->route.path_rec == NULL) + data->ioc_u32[0] = 0; /* iWarp has no path MTU */ + else + data->ioc_u32[0] = + ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu); + kiblnd_conn_decref(conn); + break; + } + case IOC_LIBCFS_CLOSE_CONNECTION: { + rc = kiblnd_close_matching_conns(ni, data->ioc_nid); + break; + } + + default: + break; + } + + return rc; +} + +void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) +{ + unsigned long last_alive = 0; + unsigned long now = cfs_time_current(); + rwlock_t *glock = &kiblnd_data.kib_global_lock; + kib_peer_t *peer; + unsigned long flags; + + read_lock_irqsave(glock, flags); + + peer = kiblnd_find_peer_locked(nid); + if (peer != NULL) { + LASSERT(peer->ibp_connecting > 0 || /* creating conns */ + peer->ibp_accepting > 0 || + !list_empty(&peer->ibp_conns)); /* active conn */ + last_alive = peer->ibp_last_alive; + } + + read_unlock_irqrestore(glock, flags); + + if (last_alive != 0) + *when = last_alive; + + /* peer is not persistent in hash, trigger peer creation + * and connection establishment with a NULL tx */ + if (peer == NULL) + kiblnd_launch_tx(ni, NULL, nid); + + CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n", + libcfs_nid2str(nid), peer, + last_alive ? 
cfs_duration_sec(now - last_alive) : -1); +} + +void kiblnd_free_pages(kib_pages_t *p) +{ + int npages = p->ibp_npages; + int i; + + for (i = 0; i < npages; i++) { + if (p->ibp_pages[i] != NULL) + __free_page(p->ibp_pages[i]); + } + + LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages])); +} + +int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) +{ + kib_pages_t *p; + int i; + + LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt, + offsetof(kib_pages_t, ibp_pages[npages])); + if (p == NULL) { + CERROR("Can't allocate descriptor for %d pages\n", npages); + return -ENOMEM; + } + + memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages])); + p->ibp_npages = npages; + + for (i = 0; i < npages; i++) { + p->ibp_pages[i] = alloc_pages_node( + cfs_cpt_spread_node(lnet_cpt_table(), cpt), + GFP_NOFS, 0); + if (p->ibp_pages[i] == NULL) { + CERROR("Can't allocate page %d of %d\n", i, npages); + kiblnd_free_pages(p); + return -ENOMEM; + } + } + + *pp = p; + return 0; +} + +void kiblnd_unmap_rx_descs(kib_conn_t *conn) +{ + kib_rx_t *rx; + int i; + + LASSERT(conn->ibc_rxs != NULL); + LASSERT(conn->ibc_hdev != NULL); + + for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) { + rx = &conn->ibc_rxs[i]; + + LASSERT(rx->rx_nob >= 0); /* not posted */ + + kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev, + KIBLND_UNMAP_ADDR(rx, rx_msgunmap, + rx->rx_msgaddr), + IBLND_MSG_SIZE, DMA_FROM_DEVICE); + } + + kiblnd_free_pages(conn->ibc_rx_pages); + + conn->ibc_rx_pages = NULL; +} + +void kiblnd_map_rx_descs(kib_conn_t *conn) +{ + kib_rx_t *rx; + struct page *pg; + int pg_off; + int ipg; + int i; + + for (pg_off = ipg = i = 0; + i < IBLND_RX_MSGS(conn->ibc_version); i++) { + pg = conn->ibc_rx_pages->ibp_pages[ipg]; + rx = &conn->ibc_rxs[i]; + + rx->rx_conn = conn; + rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off); + + rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev, + rx->rx_msg, + IBLND_MSG_SIZE, + DMA_FROM_DEVICE); + LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev, + rx->rx_msgaddr)); + KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr); + + CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n", + i, rx->rx_msg, rx->rx_msgaddr, + lnet_page2phys(pg) + pg_off); + + pg_off += IBLND_MSG_SIZE; + LASSERT(pg_off <= PAGE_SIZE); + + if (pg_off == PAGE_SIZE) { + pg_off = 0; + ipg++; + LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version)); + } + } +} + +static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo) +{ + kib_hca_dev_t *hdev = tpo->tpo_hdev; + kib_tx_t *tx; + int i; + + LASSERT(tpo->tpo_pool.po_allocated == 0); + + if (hdev == NULL) + return; + + for (i = 0; i < tpo->tpo_pool.po_size; i++) { + tx = &tpo->tpo_tx_descs[i]; + kiblnd_dma_unmap_single(hdev->ibh_ibdev, + KIBLND_UNMAP_ADDR(tx, tx_msgunmap, + tx->tx_msgaddr), + IBLND_MSG_SIZE, DMA_TO_DEVICE); + } + + kiblnd_hdev_decref(hdev); + tpo->tpo_hdev = NULL; +} + +static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev) +{ + kib_hca_dev_t *hdev; + unsigned long flags; + int i = 0; + + read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + while (dev->ibd_failover) { + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + if (i++ % 50 == 0) + CDEBUG(D_NET, "%s: Wait for failover\n", + dev->ibd_ifname); + schedule_timeout(cfs_time_seconds(1) / 100); + + read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + } + + kiblnd_hdev_addref_locked(dev->ibd_hdev); + hdev = dev->ibd_hdev; + + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + + return hdev; +} + +static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) +{ 
+ kib_pages_t *txpgs = tpo->tpo_tx_pages; + kib_pool_t *pool = &tpo->tpo_pool; + kib_net_t *net = pool->po_owner->ps_net; + kib_dev_t *dev; + struct page *page; + kib_tx_t *tx; + int page_offset; + int ipage; + int i; + + LASSERT(net != NULL); + + dev = net->ibn_dev; + + /* pre-mapped messages are not bigger than 1 page */ + CLASSERT(IBLND_MSG_SIZE <= PAGE_SIZE); + + /* No fancy arithmetic when we do the buffer calculations */ + CLASSERT(PAGE_SIZE % IBLND_MSG_SIZE == 0); + + tpo->tpo_hdev = kiblnd_current_hdev(dev); + + for (ipage = page_offset = i = 0; i < pool->po_size; i++) { + page = txpgs->ibp_pages[ipage]; + tx = &tpo->tpo_tx_descs[i]; + + tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) + + page_offset); + + tx->tx_msgaddr = kiblnd_dma_map_single( + tpo->tpo_hdev->ibh_ibdev, tx->tx_msg, + IBLND_MSG_SIZE, DMA_TO_DEVICE); + LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev, + tx->tx_msgaddr)); + KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr); + + list_add(&tx->tx_list, &pool->po_free_list); + + page_offset += IBLND_MSG_SIZE; + LASSERT(page_offset <= PAGE_SIZE); + + if (page_offset == PAGE_SIZE) { + page_offset = 0; + ipage++; + LASSERT(ipage <= txpgs->ibp_npages); + } + } +} + +struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size) +{ + __u64 index; + + LASSERT(hdev->ibh_mrs[0] != NULL); + + if (hdev->ibh_nmrs == 1) + return hdev->ibh_mrs[0]; + + index = addr >> hdev->ibh_mr_shift; + + if (index < hdev->ibh_nmrs && + index == ((addr + size - 1) >> hdev->ibh_mr_shift)) + return hdev->ibh_mrs[index]; + + return NULL; +} + +struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd) +{ + struct ib_mr *prev_mr; + struct ib_mr *mr; + int i; + + LASSERT(hdev->ibh_mrs[0] != NULL); + + if (*kiblnd_tunables.kib_map_on_demand > 0 && + *kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags) + return NULL; + + if (hdev->ibh_nmrs == 1) + return hdev->ibh_mrs[0]; + + for (i = 0, mr = prev_mr = NULL; + i < rd->rd_nfrags; i++) { + mr = kiblnd_find_dma_mr(hdev, + rd->rd_frags[i].rf_addr, + rd->rd_frags[i].rf_nob); + if (prev_mr == NULL) + prev_mr = mr; + + if (mr == NULL || prev_mr != mr) { + /* Can't covered by one single MR */ + mr = NULL; + break; + } + } + + return mr; +} + +static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool) +{ + LASSERT(pool->fpo_map_count == 0); + + if (pool->fpo_fmr_pool != NULL) + ib_destroy_fmr_pool(pool->fpo_fmr_pool); + + if (pool->fpo_hdev != NULL) + kiblnd_hdev_decref(pool->fpo_hdev); + + LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t)); +} + +static void kiblnd_destroy_fmr_pool_list(struct list_head *head) +{ + kib_fmr_pool_t *pool; + + while (!list_empty(head)) { + pool = list_entry(head->next, kib_fmr_pool_t, fpo_list); + list_del(&pool->fpo_list); + kiblnd_destroy_fmr_pool(pool); + } +} + +static int kiblnd_fmr_pool_size(int ncpts) +{ + int size = *kiblnd_tunables.kib_fmr_pool_size / ncpts; + + return max(IBLND_FMR_POOL, size); +} + +static int kiblnd_fmr_flush_trigger(int ncpts) +{ + int size = *kiblnd_tunables.kib_fmr_flush_trigger / ncpts; + + return max(IBLND_FMR_POOL_FLUSH, size); +} + +static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, + kib_fmr_pool_t **pp_fpo) +{ + /* FMR pool for RDMA */ + kib_dev_t *dev = fps->fps_net->ibn_dev; + kib_fmr_pool_t *fpo; + struct ib_fmr_pool_param param = { + .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE, + .page_shift = PAGE_SHIFT, + .access = (IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE), + .pool_size = fps->fps_pool_size, + .dirty_watermark = 
fps->fps_flush_trigger,
+		.flush_function    = NULL,
+		.flush_arg         = NULL,
+		.cache             = !!*kiblnd_tunables.kib_fmr_cache};
+	int rc;
+
+	LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
+	if (fpo == NULL)
+		return -ENOMEM;
+
+	fpo->fpo_hdev = kiblnd_current_hdev(dev);
+
+	fpo->fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, &param);
+	if (IS_ERR(fpo->fpo_fmr_pool)) {
+		rc = PTR_ERR(fpo->fpo_fmr_pool);
+		CERROR("Failed to create FMR pool: %d\n", rc);
+
+		kiblnd_hdev_decref(fpo->fpo_hdev);
+		LIBCFS_FREE(fpo, sizeof(kib_fmr_pool_t));
+		return rc;
+	}
+
+	fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+	fpo->fpo_owner = fps;
+	*pp_fpo = fpo;
+
+	return 0;
+}
+
+static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
+				    struct list_head *zombies)
+{
+	if (fps->fps_net == NULL) /* initialized? */
+		return;
+
+	spin_lock(&fps->fps_lock);
+
+	while (!list_empty(&fps->fps_pool_list)) {
+		kib_fmr_pool_t *fpo = list_entry(fps->fps_pool_list.next,
+						 kib_fmr_pool_t, fpo_list);
+		fpo->fpo_failed = 1;
+		list_del(&fpo->fpo_list);
+		if (fpo->fpo_map_count == 0)
+			list_add(&fpo->fpo_list, zombies);
+		else
+			list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
+	}
+
+	spin_unlock(&fps->fps_lock);
+}
+
+static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
+{
+	if (fps->fps_net != NULL) { /* initialized? */
+		kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
+		kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list);
+	}
+}
+
+static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
+				   kib_net_t *net, int pool_size,
+				   int flush_trigger)
+{
+	kib_fmr_pool_t *fpo;
+	int rc;
+
+	memset(fps, 0, sizeof(kib_fmr_poolset_t));
+
+	fps->fps_net = net;
+	fps->fps_cpt = cpt;
+	fps->fps_pool_size = pool_size;
+	fps->fps_flush_trigger = flush_trigger;
+	spin_lock_init(&fps->fps_lock);
+	INIT_LIST_HEAD(&fps->fps_pool_list);
+	INIT_LIST_HEAD(&fps->fps_failed_pool_list);
+
+	rc = kiblnd_create_fmr_pool(fps, &fpo);
+	if (rc == 0)
+		list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
+
+	return rc;
+}
+
+static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
+{
+	if (fpo->fpo_map_count != 0) /* still in use */
+		return 0;
+	if (fpo->fpo_failed)
+		return 1;
+	return cfs_time_aftereq(now, fpo->fpo_deadline);
+}
+
+void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
+{
+	LIST_HEAD(zombies);
+	kib_fmr_pool_t *fpo = fmr->fmr_pool;
+	kib_fmr_poolset_t *fps = fpo->fpo_owner;
+	unsigned long now = cfs_time_current();
+	kib_fmr_pool_t *tmp;
+	int rc;
+
+	rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
+	LASSERT(rc == 0);
+
+	if (status != 0) {
+		rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
+		LASSERT(rc == 0);
+	}
+
+	fmr->fmr_pool = NULL;
+	fmr->fmr_pfmr = NULL;
+
+	spin_lock(&fps->fps_lock);
+	fpo->fpo_map_count--; /* decref the pool */
+
+	list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
+		/* the first pool is persistent */
+		if (fps->fps_pool_list.next == &fpo->fpo_list)
+			continue;
+
+		if (kiblnd_fmr_pool_is_idle(fpo, now)) {
+			list_move(&fpo->fpo_list, &zombies);
+			fps->fps_version++;
+		}
+	}
+	spin_unlock(&fps->fps_lock);
+
+	if (!list_empty(&zombies))
+		kiblnd_destroy_fmr_pool_list(&zombies);
+}
+
+int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
+			__u64 iov, kib_fmr_t *fmr)
+{
+	struct ib_pool_fmr *pfmr;
+	kib_fmr_pool_t *fpo;
+	__u64 version;
+	int rc;
+
+ again:
+	spin_lock(&fps->fps_lock);
+	version = fps->fps_version;
+	list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
+		fpo->fpo_deadline =
cfs_time_shift(IBLND_POOL_DEADLINE);
+		fpo->fpo_map_count++;
+		spin_unlock(&fps->fps_lock);
+
+		pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool,
+					    pages, npages, iov);
+		if (likely(!IS_ERR(pfmr))) {
+			fmr->fmr_pool = fpo;
+			fmr->fmr_pfmr = pfmr;
+			return 0;
+		}
+
+		spin_lock(&fps->fps_lock);
+		fpo->fpo_map_count--;
+		if (PTR_ERR(pfmr) != -EAGAIN) {
+			spin_unlock(&fps->fps_lock);
+			return PTR_ERR(pfmr);
+		}
+
+		/* EAGAIN and ... */
+		if (version != fps->fps_version) {
+			spin_unlock(&fps->fps_lock);
+			goto again;
+		}
+	}
+
+	if (fps->fps_increasing) {
+		spin_unlock(&fps->fps_lock);
+		CDEBUG(D_NET,
+		       "Another thread is allocating new FMR pool, waiting for her to complete\n");
+		schedule();
+		goto again;
+
+	}
+
+	if (time_before(cfs_time_current(), fps->fps_next_retry)) {
+		/* someone failed recently */
+		spin_unlock(&fps->fps_lock);
+		return -EAGAIN;
+	}
+
+	fps->fps_increasing = 1;
+	spin_unlock(&fps->fps_lock);
+
+	CDEBUG(D_NET, "Allocate new FMR pool\n");
+	rc = kiblnd_create_fmr_pool(fps, &fpo);
+	spin_lock(&fps->fps_lock);
+	fps->fps_increasing = 0;
+	if (rc == 0) {
+		fps->fps_version++;
+		list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
+	} else {
+		fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
+	}
+	spin_unlock(&fps->fps_lock);
+
+	goto again;
+}
+
+static void kiblnd_fini_pool(kib_pool_t *pool)
+{
+	LASSERT(list_empty(&pool->po_free_list));
+	LASSERT(pool->po_allocated == 0);
+
+	CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
+}
+
+static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
+{
+	CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
+
+	memset(pool, 0, sizeof(kib_pool_t));
+	INIT_LIST_HEAD(&pool->po_free_list);
+	pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+	pool->po_owner = ps;
+	pool->po_size = size;
+}
+
+static void kiblnd_destroy_pool_list(struct list_head *head)
+{
+	kib_pool_t *pool;
+
+	while (!list_empty(head)) {
+		pool = list_entry(head->next, kib_pool_t, po_list);
+		list_del(&pool->po_list);
+
+		LASSERT(pool->po_owner != NULL);
+		pool->po_owner->ps_pool_destroy(pool);
+	}
+}
+
+static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
+{
+	if (ps->ps_net == NULL) /* initialized? */
+		return;
+
+	spin_lock(&ps->ps_lock);
+	while (!list_empty(&ps->ps_pool_list)) {
+		kib_pool_t *po = list_entry(ps->ps_pool_list.next,
+					    kib_pool_t, po_list);
+		po->po_failed = 1;
+		list_del(&po->po_list);
+		if (po->po_allocated == 0)
+			list_add(&po->po_list, zombies);
+		else
+			list_add(&po->po_list, &ps->ps_failed_pool_list);
+	}
+	spin_unlock(&ps->ps_lock);
+}
+
+static void kiblnd_fini_poolset(kib_poolset_t *ps)
+{
+	if (ps->ps_net != NULL) { /* initialized?
*/ + kiblnd_destroy_pool_list(&ps->ps_failed_pool_list); + kiblnd_destroy_pool_list(&ps->ps_pool_list); + } +} + +static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt, + kib_net_t *net, char *name, int size, + kib_ps_pool_create_t po_create, + kib_ps_pool_destroy_t po_destroy, + kib_ps_node_init_t nd_init, + kib_ps_node_fini_t nd_fini) +{ + kib_pool_t *pool; + int rc; + + memset(ps, 0, sizeof(kib_poolset_t)); + + ps->ps_cpt = cpt; + ps->ps_net = net; + ps->ps_pool_create = po_create; + ps->ps_pool_destroy = po_destroy; + ps->ps_node_init = nd_init; + ps->ps_node_fini = nd_fini; + ps->ps_pool_size = size; + if (strlcpy(ps->ps_name, name, sizeof(ps->ps_name)) + >= sizeof(ps->ps_name)) + return -E2BIG; + spin_lock_init(&ps->ps_lock); + INIT_LIST_HEAD(&ps->ps_pool_list); + INIT_LIST_HEAD(&ps->ps_failed_pool_list); + + rc = ps->ps_pool_create(ps, size, &pool); + if (rc == 0) + list_add(&pool->po_list, &ps->ps_pool_list); + else + CERROR("Failed to create the first pool for %s\n", ps->ps_name); + + return rc; +} + +static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now) +{ + if (pool->po_allocated != 0) /* still in use */ + return 0; + if (pool->po_failed) + return 1; + return cfs_time_aftereq(now, pool->po_deadline); +} + +void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node) +{ + LIST_HEAD(zombies); + kib_poolset_t *ps = pool->po_owner; + kib_pool_t *tmp; + unsigned long now = cfs_time_current(); + + spin_lock(&ps->ps_lock); + + if (ps->ps_node_fini != NULL) + ps->ps_node_fini(pool, node); + + LASSERT(pool->po_allocated > 0); + list_add(node, &pool->po_free_list); + pool->po_allocated--; + + list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) { + /* the first pool is persistent */ + if (ps->ps_pool_list.next == &pool->po_list) + continue; + + if (kiblnd_pool_is_idle(pool, now)) + list_move(&pool->po_list, &zombies); + } + spin_unlock(&ps->ps_lock); + + if (!list_empty(&zombies)) + kiblnd_destroy_pool_list(&zombies); +} + +struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) +{ + struct list_head *node; + kib_pool_t *pool; + int rc; + + again: + spin_lock(&ps->ps_lock); + list_for_each_entry(pool, &ps->ps_pool_list, po_list) { + if (list_empty(&pool->po_free_list)) + continue; + + pool->po_allocated++; + pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE); + node = pool->po_free_list.next; + list_del(node); + + if (ps->ps_node_init != NULL) { + /* still hold the lock */ + ps->ps_node_init(pool, node); + } + spin_unlock(&ps->ps_lock); + return node; + } + + /* no available tx pool and ... 
*/ + if (ps->ps_increasing) { + /* another thread is allocating a new pool */ + spin_unlock(&ps->ps_lock); + CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting for her to complete\n", + ps->ps_name); + schedule(); + goto again; + } + + if (time_before(cfs_time_current(), ps->ps_next_retry)) { + /* someone failed recently */ + spin_unlock(&ps->ps_lock); + return NULL; + } + + ps->ps_increasing = 1; + spin_unlock(&ps->ps_lock); + + CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name); + + rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool); + + spin_lock(&ps->ps_lock); + ps->ps_increasing = 0; + if (rc == 0) { + list_add_tail(&pool->po_list, &ps->ps_pool_list); + } else { + ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY); + CERROR("Can't allocate new %s pool because out of memory\n", + ps->ps_name); + } + spin_unlock(&ps->ps_lock); + + goto again; +} + +void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr) +{ + kib_pmr_pool_t *ppo = pmr->pmr_pool; + struct ib_mr *mr = pmr->pmr_mr; + + pmr->pmr_mr = NULL; + kiblnd_pool_free_node(&ppo->ppo_pool, &pmr->pmr_list); + if (mr != NULL) + ib_dereg_mr(mr); +} + +int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev, + kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr) +{ + kib_phys_mr_t *pmr; + struct list_head *node; + int rc; + int i; + + node = kiblnd_pool_alloc_node(&pps->pps_poolset); + if (node == NULL) { + CERROR("Failed to allocate PMR descriptor\n"); + return -ENOMEM; + } + + pmr = container_of(node, kib_phys_mr_t, pmr_list); + if (pmr->pmr_pool->ppo_hdev != hdev) { + kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node); + return -EAGAIN; + } + + for (i = 0; i < rd->rd_nfrags; i++) { + pmr->pmr_ipb[i].addr = rd->rd_frags[i].rf_addr; + pmr->pmr_ipb[i].size = rd->rd_frags[i].rf_nob; + } + + pmr->pmr_mr = ib_reg_phys_mr(hdev->ibh_pd, + pmr->pmr_ipb, rd->rd_nfrags, + IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE, + iova); + if (!IS_ERR(pmr->pmr_mr)) { + pmr->pmr_iova = *iova; + *pp_pmr = pmr; + return 0; + } + + rc = PTR_ERR(pmr->pmr_mr); + CERROR("Failed ib_reg_phys_mr: %d\n", rc); + + pmr->pmr_mr = NULL; + kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node); + + return rc; +} + +static void kiblnd_destroy_pmr_pool(kib_pool_t *pool) +{ + kib_pmr_pool_t *ppo = container_of(pool, kib_pmr_pool_t, ppo_pool); + kib_phys_mr_t *pmr; + kib_phys_mr_t *tmp; + + LASSERT(pool->po_allocated == 0); + + list_for_each_entry_safe(pmr, tmp, &pool->po_free_list, pmr_list) { + LASSERT(pmr->pmr_mr == NULL); + list_del(&pmr->pmr_list); + + if (pmr->pmr_ipb != NULL) { + LIBCFS_FREE(pmr->pmr_ipb, + IBLND_MAX_RDMA_FRAGS * + sizeof(struct ib_phys_buf)); + } + + LIBCFS_FREE(pmr, sizeof(kib_phys_mr_t)); + } + + kiblnd_fini_pool(pool); + if (ppo->ppo_hdev != NULL) + kiblnd_hdev_decref(ppo->ppo_hdev); + + LIBCFS_FREE(ppo, sizeof(kib_pmr_pool_t)); +} + +static inline int kiblnd_pmr_pool_size(int ncpts) +{ + int size = *kiblnd_tunables.kib_pmr_pool_size / ncpts; + + return max(IBLND_PMR_POOL, size); +} + +static int kiblnd_create_pmr_pool(kib_poolset_t *ps, int size, + kib_pool_t **pp_po) +{ + struct kib_pmr_pool *ppo; + struct kib_pool *pool; + kib_phys_mr_t *pmr; + int i; + + LIBCFS_CPT_ALLOC(ppo, lnet_cpt_table(), + ps->ps_cpt, sizeof(kib_pmr_pool_t)); + if (ppo == NULL) { + CERROR("Failed to allocate PMR pool\n"); + return -ENOMEM; + } + + pool = &ppo->ppo_pool; + kiblnd_init_pool(ps, pool, size); + + for (i = 0; i < size; i++) { + LIBCFS_CPT_ALLOC(pmr, lnet_cpt_table(), + ps->ps_cpt, sizeof(kib_phys_mr_t)); + if (pmr 
== NULL) + break; + + pmr->pmr_pool = ppo; + LIBCFS_CPT_ALLOC(pmr->pmr_ipb, lnet_cpt_table(), ps->ps_cpt, + IBLND_MAX_RDMA_FRAGS * sizeof(*pmr->pmr_ipb)); + if (pmr->pmr_ipb == NULL) + break; + + list_add(&pmr->pmr_list, &pool->po_free_list); + } + + if (i < size) { + ps->ps_pool_destroy(pool); + return -ENOMEM; + } + + ppo->ppo_hdev = kiblnd_current_hdev(ps->ps_net->ibn_dev); + *pp_po = pool; + return 0; +} + +static void kiblnd_destroy_tx_pool(kib_pool_t *pool) +{ + kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool); + int i; + + LASSERT(pool->po_allocated == 0); + + if (tpo->tpo_tx_pages != NULL) { + kiblnd_unmap_tx_pool(tpo); + kiblnd_free_pages(tpo->tpo_tx_pages); + } + + if (tpo->tpo_tx_descs == NULL) + goto out; + + for (i = 0; i < pool->po_size; i++) { + kib_tx_t *tx = &tpo->tpo_tx_descs[i]; + + list_del(&tx->tx_list); + if (tx->tx_pages != NULL) + LIBCFS_FREE(tx->tx_pages, + LNET_MAX_IOV * + sizeof(*tx->tx_pages)); + if (tx->tx_frags != NULL) + LIBCFS_FREE(tx->tx_frags, + IBLND_MAX_RDMA_FRAGS * + sizeof(*tx->tx_frags)); + if (tx->tx_wrq != NULL) + LIBCFS_FREE(tx->tx_wrq, + (1 + IBLND_MAX_RDMA_FRAGS) * + sizeof(*tx->tx_wrq)); + if (tx->tx_sge != NULL) + LIBCFS_FREE(tx->tx_sge, + (1 + IBLND_MAX_RDMA_FRAGS) * + sizeof(*tx->tx_sge)); + if (tx->tx_rd != NULL) + LIBCFS_FREE(tx->tx_rd, + offsetof(kib_rdma_desc_t, + rd_frags[IBLND_MAX_RDMA_FRAGS])); + } + + LIBCFS_FREE(tpo->tpo_tx_descs, + pool->po_size * sizeof(kib_tx_t)); +out: + kiblnd_fini_pool(pool); + LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t)); +} + +static int kiblnd_tx_pool_size(int ncpts) +{ + int ntx = *kiblnd_tunables.kib_ntx / ncpts; + + return max(IBLND_TX_POOL, ntx); +} + +static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, + kib_pool_t **pp_po) +{ + int i; + int npg; + kib_pool_t *pool; + kib_tx_pool_t *tpo; + + LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo)); + if (tpo == NULL) { + CERROR("Failed to allocate TX pool\n"); + return -ENOMEM; + } + + pool = &tpo->tpo_pool; + kiblnd_init_pool(ps, pool, size); + tpo->tpo_tx_descs = NULL; + tpo->tpo_tx_pages = NULL; + + npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE; + if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) { + CERROR("Can't allocate tx pages: %d\n", npg); + LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t)); + return -ENOMEM; + } + + LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt, + size * sizeof(kib_tx_t)); + if (tpo->tpo_tx_descs == NULL) { + CERROR("Can't allocate %d tx descriptors\n", size); + ps->ps_pool_destroy(pool); + return -ENOMEM; + } + + memset(tpo->tpo_tx_descs, 0, size * sizeof(kib_tx_t)); + + for (i = 0; i < size; i++) { + kib_tx_t *tx = &tpo->tpo_tx_descs[i]; + + tx->tx_pool = tpo; + if (ps->ps_net->ibn_fmr_ps != NULL) { + LIBCFS_CPT_ALLOC(tx->tx_pages, + lnet_cpt_table(), ps->ps_cpt, + LNET_MAX_IOV * sizeof(*tx->tx_pages)); + if (tx->tx_pages == NULL) + break; + } + + LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt, + IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags)); + if (tx->tx_frags == NULL) + break; + + sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS); + + LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt, + (1 + IBLND_MAX_RDMA_FRAGS) * + sizeof(*tx->tx_wrq)); + if (tx->tx_wrq == NULL) + break; + + LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt, + (1 + IBLND_MAX_RDMA_FRAGS) * + sizeof(*tx->tx_sge)); + if (tx->tx_sge == NULL) + break; + + LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt, + offsetof(kib_rdma_desc_t, + 
rd_frags[IBLND_MAX_RDMA_FRAGS])); + if (tx->tx_rd == NULL) + break; + } + + if (i == size) { + kiblnd_map_tx_pool(tpo); + *pp_po = pool; + return 0; + } + + ps->ps_pool_destroy(pool); + return -ENOMEM; +} + +static void kiblnd_tx_init(kib_pool_t *pool, struct list_head *node) +{ + kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t, + tps_poolset); + kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list); + + tx->tx_cookie = tps->tps_next_tx_cookie++; +} + +static void kiblnd_net_fini_pools(kib_net_t *net) +{ + int i; + + cfs_cpt_for_each(i, lnet_cpt_table()) { + kib_tx_poolset_t *tps; + kib_fmr_poolset_t *fps; + kib_pmr_poolset_t *pps; + + if (net->ibn_tx_ps != NULL) { + tps = net->ibn_tx_ps[i]; + kiblnd_fini_poolset(&tps->tps_poolset); + } + + if (net->ibn_fmr_ps != NULL) { + fps = net->ibn_fmr_ps[i]; + kiblnd_fini_fmr_poolset(fps); + } + + if (net->ibn_pmr_ps != NULL) { + pps = net->ibn_pmr_ps[i]; + kiblnd_fini_poolset(&pps->pps_poolset); + } + } + + if (net->ibn_tx_ps != NULL) { + cfs_percpt_free(net->ibn_tx_ps); + net->ibn_tx_ps = NULL; + } + + if (net->ibn_fmr_ps != NULL) { + cfs_percpt_free(net->ibn_fmr_ps); + net->ibn_fmr_ps = NULL; + } + + if (net->ibn_pmr_ps != NULL) { + cfs_percpt_free(net->ibn_pmr_ps); + net->ibn_pmr_ps = NULL; + } +} + +static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) +{ + unsigned long flags; + int cpt; + int rc; + int i; + + read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + if (*kiblnd_tunables.kib_map_on_demand == 0 && + net->ibn_dev->ibd_hdev->ibh_nmrs == 1) { + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, + flags); + goto create_tx_pool; + } + + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + + if (*kiblnd_tunables.kib_fmr_pool_size < + *kiblnd_tunables.kib_ntx / 4) { + CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n", + *kiblnd_tunables.kib_fmr_pool_size, + *kiblnd_tunables.kib_ntx / 4); + rc = -EINVAL; + goto failed; + } + + /* TX pool must be created later than FMR/PMR, see LU-2268 + * for details */ + LASSERT(net->ibn_tx_ps == NULL); + + /* premapping can fail if ibd_nmr > 1, so we always create + * FMR/PMR pool and map-on-demand if premapping failed */ + + net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(), + sizeof(kib_fmr_poolset_t)); + if (net->ibn_fmr_ps == NULL) { + CERROR("Failed to allocate FMR pool array\n"); + rc = -ENOMEM; + goto failed; + } + + for (i = 0; i < ncpts; i++) { + cpt = (cpts == NULL) ? i : cpts[i]; + rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net, + kiblnd_fmr_pool_size(ncpts), + kiblnd_fmr_flush_trigger(ncpts)); + if (rc == -ENOSYS && i == 0) /* no FMR */ + break; /* create PMR pool */ + + if (rc != 0) { /* a real error */ + CERROR("Can't initialize FMR pool for CPT %d: %d\n", + cpt, rc); + goto failed; + } + } + + if (i > 0) { + LASSERT(i == ncpts); + goto create_tx_pool; + } + + cfs_percpt_free(net->ibn_fmr_ps); + net->ibn_fmr_ps = NULL; + + CWARN("Device does not support FMR, failing back to PMR\n"); + + if (*kiblnd_tunables.kib_pmr_pool_size < + *kiblnd_tunables.kib_ntx / 4) { + CERROR("Can't set pmr pool size (%d) < ntx / 4(%d)\n", + *kiblnd_tunables.kib_pmr_pool_size, + *kiblnd_tunables.kib_ntx / 4); + rc = -EINVAL; + goto failed; + } + + net->ibn_pmr_ps = cfs_percpt_alloc(lnet_cpt_table(), + sizeof(kib_pmr_poolset_t)); + if (net->ibn_pmr_ps == NULL) { + CERROR("Failed to allocate PMR pool array\n"); + rc = -ENOMEM; + goto failed; + } + + for (i = 0; i < ncpts; i++) { + cpt = (cpts == NULL) ? 
i : cpts[i]; + rc = kiblnd_init_poolset(&net->ibn_pmr_ps[cpt]->pps_poolset, + cpt, net, "PMR", + kiblnd_pmr_pool_size(ncpts), + kiblnd_create_pmr_pool, + kiblnd_destroy_pmr_pool, NULL, NULL); + if (rc != 0) { + CERROR("Can't initialize PMR pool for CPT %d: %d\n", + cpt, rc); + goto failed; + } + } + + create_tx_pool: + net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(), + sizeof(kib_tx_poolset_t)); + if (net->ibn_tx_ps == NULL) { + CERROR("Failed to allocate tx pool array\n"); + rc = -ENOMEM; + goto failed; + } + + for (i = 0; i < ncpts; i++) { + cpt = (cpts == NULL) ? i : cpts[i]; + rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset, + cpt, net, "TX", + kiblnd_tx_pool_size(ncpts), + kiblnd_create_tx_pool, + kiblnd_destroy_tx_pool, + kiblnd_tx_init, NULL); + if (rc != 0) { + CERROR("Can't initialize TX pool for CPT %d: %d\n", + cpt, rc); + goto failed; + } + } + + return 0; + failed: + kiblnd_net_fini_pools(net); + LASSERT(rc != 0); + return rc; +} + +static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev) +{ + struct ib_device_attr *attr; + int rc; + + /* It's safe to assume a HCA can handle a page size + * matching that of the native system */ + hdev->ibh_page_shift = PAGE_SHIFT; + hdev->ibh_page_size = 1 << PAGE_SHIFT; + hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1); + + LIBCFS_ALLOC(attr, sizeof(*attr)); + if (attr == NULL) { + CERROR("Out of memory\n"); + return -ENOMEM; + } + + rc = ib_query_device(hdev->ibh_ibdev, attr); + if (rc == 0) + hdev->ibh_mr_size = attr->max_mr_size; + + LIBCFS_FREE(attr, sizeof(*attr)); + + if (rc != 0) { + CERROR("Failed to query IB device: %d\n", rc); + return rc; + } + + if (hdev->ibh_mr_size == ~0ULL) { + hdev->ibh_mr_shift = 64; + return 0; + } + + for (hdev->ibh_mr_shift = 0; + hdev->ibh_mr_shift < 64; hdev->ibh_mr_shift++) { + if (hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) || + hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) - 1) + return 0; + } + + CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size); + return -EINVAL; +} + +static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev) +{ + int i; + + if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL) + return; + + for (i = 0; i < hdev->ibh_nmrs; i++) { + if (hdev->ibh_mrs[i] == NULL) + break; + + ib_dereg_mr(hdev->ibh_mrs[i]); + } + + LIBCFS_FREE(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs); + hdev->ibh_mrs = NULL; + hdev->ibh_nmrs = 0; +} + +void kiblnd_hdev_destroy(kib_hca_dev_t *hdev) +{ + kiblnd_hdev_cleanup_mrs(hdev); + + if (hdev->ibh_pd != NULL) + ib_dealloc_pd(hdev->ibh_pd); + + if (hdev->ibh_cmid != NULL) + rdma_destroy_id(hdev->ibh_cmid); + + LIBCFS_FREE(hdev, sizeof(*hdev)); +} + +static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev) +{ + struct ib_mr *mr; + int i; + int rc; + __u64 mm_size; + __u64 mr_size; + int acflags = IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE; + + rc = kiblnd_hdev_get_attr(hdev); + if (rc != 0) + return rc; + + if (hdev->ibh_mr_shift == 64) { + LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs)); + if (hdev->ibh_mrs == NULL) { + CERROR("Failed to allocate MRs table\n"); + return -ENOMEM; + } + + hdev->ibh_mrs[0] = NULL; + hdev->ibh_nmrs = 1; + + mr = ib_get_dma_mr(hdev->ibh_pd, acflags); + if (IS_ERR(mr)) { + CERROR("Failed ib_get_dma_mr : %ld\n", PTR_ERR(mr)); + kiblnd_hdev_cleanup_mrs(hdev); + return PTR_ERR(mr); + } + + hdev->ibh_mrs[0] = mr; + + goto out; + } + + mr_size = 1ULL << hdev->ibh_mr_shift; + mm_size = (unsigned long)high_memory - PAGE_OFFSET; + + hdev->ibh_nmrs = (int)((mm_size + mr_size - 1) >> 
hdev->ibh_mr_shift);
+
+	if (hdev->ibh_mr_shift < 32 || hdev->ibh_nmrs > 1024) {
+		/* it's 4T..., assume we will re-code at that time */
+		CERROR("Can't support memory size: %#llx with MR size: %#llx\n",
+		       mm_size, mr_size);
+		return -EINVAL;
+	}
+
+	/* create an array of MRs to cover all memory */
+	LIBCFS_ALLOC(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
+	if (hdev->ibh_mrs == NULL) {
+		CERROR("Failed to allocate MRs table\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < hdev->ibh_nmrs; i++) {
+		struct ib_phys_buf ipb;
+		__u64 iova;
+
+		ipb.size = hdev->ibh_mr_size;
+		ipb.addr = i * mr_size;
+		iova = ipb.addr;
+
+		mr = ib_reg_phys_mr(hdev->ibh_pd, &ipb, 1, acflags, &iova);
+		if (IS_ERR(mr)) {
+			CERROR("Failed ib_reg_phys_mr addr %#llx size %#llx : %ld\n",
+			       ipb.addr, ipb.size, PTR_ERR(mr));
+			kiblnd_hdev_cleanup_mrs(hdev);
+			return PTR_ERR(mr);
+		}
+
+		LASSERT(iova == ipb.addr);
+
+		hdev->ibh_mrs[i] = mr;
+	}
+
+out:
+	if (hdev->ibh_mr_size != ~0ULL || hdev->ibh_nmrs != 1)
+		LCONSOLE_INFO("Register global MR array, MR size: %#llx, array size: %d\n",
+			      hdev->ibh_mr_size, hdev->ibh_nmrs);
+	return 0;
+}
+
+/* DUMMY */
+static int kiblnd_dummy_callback(struct rdma_cm_id *cmid,
+				 struct rdma_cm_event *event)
+{
+	return 0;
+}
+
+static int kiblnd_dev_need_failover(kib_dev_t *dev)
+{
+	struct rdma_cm_id *cmid;
+	struct sockaddr_in srcaddr;
+	struct sockaddr_in dstaddr;
+	int rc;
+
+	if (dev->ibd_hdev == NULL || /* initializing */
+	    dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */
+	    *kiblnd_tunables.kib_dev_failover > 1) /* debugging */
+		return 1;
+
+	/* XXX: it's UGLY, but I don't have a better way to find
+	 * ib-bonding HCA failover because:
+	 *
+	 * a. no reliable CM event for HCA failover...
+	 * b. no OFED API to get ib_device for current net_device...
+	 *
+	 * We have only two choices at this point:
+	 *
+	 * a. rdma_bind_addr(), it will conflict with listener cmid
+	 * b. rdma_resolve_addr() to zero addr */
+	cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP,
+				     IB_QPT_RC);
+	if (IS_ERR(cmid)) {
+		rc = PTR_ERR(cmid);
+		CERROR("Failed to create cmid for failover: %d\n", rc);
+		return rc;
+	}
+
+	memset(&srcaddr, 0, sizeof(srcaddr));
+	srcaddr.sin_family = AF_INET;
+	srcaddr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
+
+	memset(&dstaddr, 0, sizeof(dstaddr));
+	dstaddr.sin_family = AF_INET;
+	rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
+			       (struct sockaddr *)&dstaddr, 1);
+	if (rc != 0 || cmid->device == NULL) {
+		CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
+		       dev->ibd_ifname, &dev->ibd_ifip,
+		       cmid->device, rc);
+		rdma_destroy_id(cmid);
+		return rc;
+	}
+
+	if (dev->ibd_hdev->ibh_ibdev == cmid->device) {
+		/* don't need device failover */
+		rdma_destroy_id(cmid);
+		return 0;
+	}
+
+	return 1;
+}
+
+int kiblnd_dev_failover(kib_dev_t *dev)
+{
+	LIST_HEAD(zombie_tpo);
+	LIST_HEAD(zombie_ppo);
+	LIST_HEAD(zombie_fpo);
+	struct rdma_cm_id *cmid = NULL;
+	kib_hca_dev_t *hdev = NULL;
+	kib_hca_dev_t *old;
+	struct ib_pd *pd;
+	kib_net_t *net;
+	struct sockaddr_in addr;
+	unsigned long flags;
+	int rc = 0;
+	int i;
+
+	LASSERT(*kiblnd_tunables.kib_dev_failover > 1 ||
+		dev->ibd_can_failover ||
+		dev->ibd_hdev == NULL);
+
+	rc = kiblnd_dev_need_failover(dev);
+	if (rc <= 0)
+		goto out;
+
+	if (dev->ibd_hdev != NULL &&
+	    dev->ibd_hdev->ibh_cmid != NULL) {
+		/* XXX it's not good to close the old listener here, because
+		 * we may fail to create a new one.
+ * But we have to close it now, otherwise rdma_bind_addr + * will return EADDRINUSE... How crap! */ + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + + cmid = dev->ibd_hdev->ibh_cmid; + /* make next schedule of kiblnd_dev_need_failover() + * return 1 for me */ + dev->ibd_hdev->ibh_cmid = NULL; + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + + rdma_destroy_id(cmid); + } + + cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP, + IB_QPT_RC); + if (IS_ERR(cmid)) { + rc = PTR_ERR(cmid); + CERROR("Failed to create cmid for failover: %d\n", rc); + goto out; + } + + memset(&addr, 0, sizeof(addr)); + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip); + addr.sin_port = htons(*kiblnd_tunables.kib_service); + + /* Bind to failover device or port */ + rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr); + if (rc != 0 || cmid->device == NULL) { + CERROR("Failed to bind %s:%pI4h to device(%p): %d\n", + dev->ibd_ifname, &dev->ibd_ifip, + cmid->device, rc); + rdma_destroy_id(cmid); + goto out; + } + + LIBCFS_ALLOC(hdev, sizeof(*hdev)); + if (hdev == NULL) { + CERROR("Failed to allocate kib_hca_dev\n"); + rdma_destroy_id(cmid); + rc = -ENOMEM; + goto out; + } + + atomic_set(&hdev->ibh_ref, 1); + hdev->ibh_dev = dev; + hdev->ibh_cmid = cmid; + hdev->ibh_ibdev = cmid->device; + + pd = ib_alloc_pd(cmid->device); + if (IS_ERR(pd)) { + rc = PTR_ERR(pd); + CERROR("Can't allocate PD: %d\n", rc); + goto out; + } + + hdev->ibh_pd = pd; + + rc = rdma_listen(cmid, 0); + if (rc != 0) { + CERROR("Can't start new listener: %d\n", rc); + goto out; + } + + rc = kiblnd_hdev_setup_mrs(hdev); + if (rc != 0) { + CERROR("Can't setup device: %d\n", rc); + goto out; + } + + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + + old = dev->ibd_hdev; + dev->ibd_hdev = hdev; /* take over the refcount */ + hdev = old; + + list_for_each_entry(net, &dev->ibd_nets, ibn_list) { + cfs_cpt_for_each(i, lnet_cpt_table()) { + kiblnd_fail_poolset(&net->ibn_tx_ps[i]->tps_poolset, + &zombie_tpo); + + if (net->ibn_fmr_ps != NULL) { + kiblnd_fail_fmr_poolset(net->ibn_fmr_ps[i], + &zombie_fpo); + + } else if (net->ibn_pmr_ps != NULL) { + kiblnd_fail_poolset(&net->ibn_pmr_ps[i]-> + pps_poolset, &zombie_ppo); + } + } + } + + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + out: + if (!list_empty(&zombie_tpo)) + kiblnd_destroy_pool_list(&zombie_tpo); + if (!list_empty(&zombie_ppo)) + kiblnd_destroy_pool_list(&zombie_ppo); + if (!list_empty(&zombie_fpo)) + kiblnd_destroy_fmr_pool_list(&zombie_fpo); + if (hdev != NULL) + kiblnd_hdev_decref(hdev); + + if (rc != 0) + dev->ibd_failed_failover++; + else + dev->ibd_failed_failover = 0; + + return rc; +} + +void kiblnd_destroy_dev(kib_dev_t *dev) +{ + LASSERT(dev->ibd_nnets == 0); + LASSERT(list_empty(&dev->ibd_nets)); + + list_del(&dev->ibd_fail_list); + list_del(&dev->ibd_list); + + if (dev->ibd_hdev != NULL) + kiblnd_hdev_decref(dev->ibd_hdev); + + LIBCFS_FREE(dev, sizeof(*dev)); +} + +static kib_dev_t *kiblnd_create_dev(char *ifname) +{ + struct net_device *netdev; + kib_dev_t *dev; + __u32 netmask; + __u32 ip; + int up; + int rc; + + rc = libcfs_ipif_query(ifname, &up, &ip, &netmask); + if (rc != 0) { + CERROR("Can't query IPoIB interface %s: %d\n", + ifname, rc); + return NULL; + } + + if (!up) { + CERROR("Can't query IPoIB interface %s: it's down\n", ifname); + return NULL; + } + + LIBCFS_ALLOC(dev, sizeof(*dev)); + if (dev == NULL) + return NULL; + + netdev = dev_get_by_name(&init_net, ifname); + if 
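+	/* no net_device of that name: we can't check IFF_MASTER, so assume the interface can't fail over */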
(netdev == NULL) { + dev->ibd_can_failover = 0; + } else { + dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER); + dev_put(netdev); + } + + INIT_LIST_HEAD(&dev->ibd_nets); + INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */ + INIT_LIST_HEAD(&dev->ibd_fail_list); + dev->ibd_ifip = ip; + strcpy(&dev->ibd_ifname[0], ifname); + + /* initialize the device */ + rc = kiblnd_dev_failover(dev); + if (rc != 0) { + CERROR("Can't initialize device: %d\n", rc); + LIBCFS_FREE(dev, sizeof(*dev)); + return NULL; + } + + list_add_tail(&dev->ibd_list, + &kiblnd_data.kib_devs); + return dev; +} + +static void kiblnd_base_shutdown(void) +{ + struct kib_sched_info *sched; + int i; + + LASSERT(list_empty(&kiblnd_data.kib_devs)); + + CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n", + atomic_read(&libcfs_kmemory)); + + switch (kiblnd_data.kib_init) { + default: + LBUG(); + + case IBLND_INIT_ALL: + case IBLND_INIT_DATA: + LASSERT(kiblnd_data.kib_peers != NULL); + for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) + LASSERT(list_empty(&kiblnd_data.kib_peers[i])); + LASSERT(list_empty(&kiblnd_data.kib_connd_zombies)); + LASSERT(list_empty(&kiblnd_data.kib_connd_conns)); + + /* flag threads to terminate; wake and wait for them to die */ + kiblnd_data.kib_shutdown = 1; + + /* NB: we really want to stop scheduler threads net by net + * instead of the whole module, this should be improved + * with dynamic configuration LNet */ + cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) + wake_up_all(&sched->ibs_waitq); + + wake_up_all(&kiblnd_data.kib_connd_waitq); + wake_up_all(&kiblnd_data.kib_failover_waitq); + + i = 2; + while (atomic_read(&kiblnd_data.kib_nthreads) != 0) { + i++; + /* power of 2 ? */ + CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, + "Waiting for %d threads to terminate\n", + atomic_read(&kiblnd_data.kib_nthreads)); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(cfs_time_seconds(1)); + } + + /* fall through */ + + case IBLND_INIT_NOTHING: + break; + } + + if (kiblnd_data.kib_peers != NULL) { + LIBCFS_FREE(kiblnd_data.kib_peers, + sizeof(struct list_head) * + kiblnd_data.kib_peer_hash_size); + } + + if (kiblnd_data.kib_scheds != NULL) + cfs_percpt_free(kiblnd_data.kib_scheds); + + CDEBUG(D_MALLOC, "after LND base cleanup: kmem %d\n", + atomic_read(&libcfs_kmemory)); + + kiblnd_data.kib_init = IBLND_INIT_NOTHING; + module_put(THIS_MODULE); +} + +void kiblnd_shutdown(lnet_ni_t *ni) +{ + kib_net_t *net = ni->ni_data; + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + int i; + unsigned long flags; + + LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL); + + if (net == NULL) + goto out; + + CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n", + atomic_read(&libcfs_kmemory)); + + write_lock_irqsave(g_lock, flags); + net->ibn_shutdown = 1; + write_unlock_irqrestore(g_lock, flags); + + switch (net->ibn_init) { + default: + LBUG(); + + case IBLND_INIT_ALL: + /* nuke all existing peers within this net */ + kiblnd_del_peer(ni, LNET_NID_ANY); + + /* Wait for all peer state to clean up */ + i = 2; + while (atomic_read(&net->ibn_npeers) != 0) { + i++; + CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? 
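+	   i.e. log at D_WARNING only when the wait count is a power of two, to throttle the console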
*/ + "%s: waiting for %d peers to disconnect\n", + libcfs_nid2str(ni->ni_nid), + atomic_read(&net->ibn_npeers)); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(cfs_time_seconds(1)); + } + + kiblnd_net_fini_pools(net); + + write_lock_irqsave(g_lock, flags); + LASSERT(net->ibn_dev->ibd_nnets > 0); + net->ibn_dev->ibd_nnets--; + list_del(&net->ibn_list); + write_unlock_irqrestore(g_lock, flags); + + /* fall through */ + + case IBLND_INIT_NOTHING: + LASSERT(atomic_read(&net->ibn_nconns) == 0); + + if (net->ibn_dev != NULL && + net->ibn_dev->ibd_nnets == 0) + kiblnd_destroy_dev(net->ibn_dev); + + break; + } + + CDEBUG(D_MALLOC, "after LND net cleanup: kmem %d\n", + atomic_read(&libcfs_kmemory)); + + net->ibn_init = IBLND_INIT_NOTHING; + ni->ni_data = NULL; + + LIBCFS_FREE(net, sizeof(*net)); + +out: + if (list_empty(&kiblnd_data.kib_devs)) + kiblnd_base_shutdown(); +} + +static int kiblnd_base_startup(void) +{ + struct kib_sched_info *sched; + int rc; + int i; + + LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING); + + try_module_get(THIS_MODULE); + /* zero pointers, flags etc */ + memset(&kiblnd_data, 0, sizeof(kiblnd_data)); + + rwlock_init(&kiblnd_data.kib_global_lock); + + INIT_LIST_HEAD(&kiblnd_data.kib_devs); + INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs); + + kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE; + LIBCFS_ALLOC(kiblnd_data.kib_peers, + sizeof(struct list_head) * + kiblnd_data.kib_peer_hash_size); + if (kiblnd_data.kib_peers == NULL) + goto failed; + for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) + INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]); + + spin_lock_init(&kiblnd_data.kib_connd_lock); + INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns); + INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies); + init_waitqueue_head(&kiblnd_data.kib_connd_waitq); + init_waitqueue_head(&kiblnd_data.kib_failover_waitq); + + kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(), + sizeof(*sched)); + if (kiblnd_data.kib_scheds == NULL) + goto failed; + + cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) { + int nthrs; + + spin_lock_init(&sched->ibs_lock); + INIT_LIST_HEAD(&sched->ibs_conns); + init_waitqueue_head(&sched->ibs_waitq); + + nthrs = cfs_cpt_weight(lnet_cpt_table(), i); + if (*kiblnd_tunables.kib_nscheds > 0) { + nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds); + } else { + /* max to half of CPUs, another half is reserved for + * upper layer modules */ + nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs); + } + + sched->ibs_nthreads_max = nthrs; + sched->ibs_cpt = i; + } + + kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR; + + /* lists/ptrs/locks initialised */ + kiblnd_data.kib_init = IBLND_INIT_DATA; + /*****************************************************/ + + rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd"); + if (rc != 0) { + CERROR("Can't spawn o2iblnd connd: %d\n", rc); + goto failed; + } + + if (*kiblnd_tunables.kib_dev_failover != 0) + rc = kiblnd_thread_start(kiblnd_failover_thread, NULL, + "kiblnd_failover"); + + if (rc != 0) { + CERROR("Can't spawn o2iblnd failover thread: %d\n", rc); + goto failed; + } + + /* flag everything initialised */ + kiblnd_data.kib_init = IBLND_INIT_ALL; + /*****************************************************/ + + return 0; + + failed: + kiblnd_base_shutdown(); + return -ENETDOWN; +} + +static int kiblnd_start_schedulers(struct kib_sched_info *sched) +{ + int rc = 0; + int nthrs; + int i; + + if (sched->ibs_nthreads == 0) { + if (*kiblnd_tunables.kib_nscheds > 0) { + nthrs = sched->ibs_nthreads_max; + } else 
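+	/* nscheds tunable unset: derive the thread budget from this CPT's weight below */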
{ + nthrs = cfs_cpt_weight(lnet_cpt_table(), + sched->ibs_cpt); + nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs); + nthrs = min(IBLND_N_SCHED_HIGH, nthrs); + } + } else { + LASSERT(sched->ibs_nthreads <= sched->ibs_nthreads_max); + /* increase one thread if there is new interface */ + nthrs = sched->ibs_nthreads < sched->ibs_nthreads_max; + } + + for (i = 0; i < nthrs; i++) { + long id; + char name[20]; + + id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i); + snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld", + KIB_THREAD_CPT(id), KIB_THREAD_TID(id)); + rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name); + if (rc == 0) + continue; + + CERROR("Can't spawn thread %d for scheduler[%d]: %d\n", + sched->ibs_cpt, sched->ibs_nthreads + i, rc); + break; + } + + sched->ibs_nthreads += i; + return rc; +} + +static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, + int ncpts) +{ + int cpt; + int rc; + int i; + + for (i = 0; i < ncpts; i++) { + struct kib_sched_info *sched; + + cpt = (cpts == NULL) ? i : cpts[i]; + sched = kiblnd_data.kib_scheds[cpt]; + + if (!newdev && sched->ibs_nthreads > 0) + continue; + + rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]); + if (rc != 0) { + CERROR("Failed to start scheduler threads for %s\n", + dev->ibd_ifname); + return rc; + } + } + return 0; +} + +static kib_dev_t *kiblnd_dev_search(char *ifname) +{ + kib_dev_t *alias = NULL; + kib_dev_t *dev; + char *colon; + char *colon2; + + colon = strchr(ifname, ':'); + list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) { + if (strcmp(&dev->ibd_ifname[0], ifname) == 0) + return dev; + + if (alias != NULL) + continue; + + colon2 = strchr(dev->ibd_ifname, ':'); + if (colon != NULL) + *colon = 0; + if (colon2 != NULL) + *colon2 = 0; + + if (strcmp(&dev->ibd_ifname[0], ifname) == 0) + alias = dev; + + if (colon != NULL) + *colon = ':'; + if (colon2 != NULL) + *colon2 = ':'; + } + return alias; +} + +int kiblnd_startup(lnet_ni_t *ni) +{ + char *ifname; + kib_dev_t *ibdev = NULL; + kib_net_t *net; + struct timeval tv; + unsigned long flags; + int rc; + int newdev; + + LASSERT(ni->ni_lnd == &the_o2iblnd); + + if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) { + rc = kiblnd_base_startup(); + if (rc != 0) + return rc; + } + + LIBCFS_ALLOC(net, sizeof(*net)); + ni->ni_data = net; + if (net == NULL) + goto net_failed; + + do_gettimeofday(&tv); + net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec; + + ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout; + ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits; + ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits; + ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits; + + if (ni->ni_interfaces[0] != NULL) { + /* Use the IPoIB interface specified in 'networks=' */ + + CLASSERT(LNET_MAX_INTERFACES > 1); + if (ni->ni_interfaces[1] != NULL) { + CERROR("Multiple interfaces not supported\n"); + goto failed; + } + + ifname = ni->ni_interfaces[0]; + } else { + ifname = *kiblnd_tunables.kib_default_ipif; + } + + if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) { + CERROR("IPoIB interface name too long: %s\n", ifname); + goto failed; + } + + ibdev = kiblnd_dev_search(ifname); + + newdev = ibdev == NULL; + /* hmm...create kib_dev even for alias */ + if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0) + ibdev = kiblnd_create_dev(ifname); + + if (ibdev == NULL) + goto failed; + + net->ibn_dev = ibdev; + ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip); + + rc = 
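+	     /* start (or top up) scheduler threads for each CPT this NI spans */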
kiblnd_dev_start_threads(ibdev, newdev, + ni->ni_cpts, ni->ni_ncpts); + if (rc != 0) + goto failed; + + rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts); + if (rc != 0) { + CERROR("Failed to initialize NI pools: %d\n", rc); + goto failed; + } + + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + ibdev->ibd_nnets++; + list_add_tail(&net->ibn_list, &ibdev->ibd_nets); + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + + net->ibn_init = IBLND_INIT_ALL; + + return 0; + +failed: + if (net->ibn_dev == NULL && ibdev != NULL) + kiblnd_destroy_dev(ibdev); + +net_failed: + kiblnd_shutdown(ni); + + CDEBUG(D_NET, "kiblnd_startup failed\n"); + return -ENETDOWN; +} + +static void __exit kiblnd_module_fini(void) +{ + lnet_unregister_lnd(&the_o2iblnd); +} + +static int __init kiblnd_module_init(void) +{ + int rc; + + CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE); + CLASSERT(offsetof(kib_msg_t, + ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) + <= IBLND_MSG_SIZE); + CLASSERT(offsetof(kib_msg_t, + ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) + <= IBLND_MSG_SIZE); + + rc = kiblnd_tunables_init(); + if (rc != 0) + return rc; + + lnet_register_lnd(&the_o2iblnd); + + return 0; +} + +MODULE_AUTHOR("Sun Microsystems, Inc. "); +MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v2.00"); +MODULE_LICENSE("GPL"); + +module_init(kiblnd_module_init); +module_exit(kiblnd_module_fini); diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h new file mode 100644 index 000000000..cd664d025 --- /dev/null +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h @@ -0,0 +1,1030 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2011, 2012, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. 
+ * + * lnet/klnds/o2iblnd/o2iblnd.h + * + * Author: Eric Barton + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#define DEBUG_SUBSYSTEM S_LND + +#include "../../../include/linux/libcfs/libcfs.h" +#include "../../../include/linux/lnet/lnet.h" +#include "../../../include/linux/lnet/lib-lnet.h" +#include "../../../include/linux/lnet/lnet-sysctl.h" + +#include +#include +#include +#include + +#define IBLND_PEER_HASH_SIZE 101 /* # peer lists */ +/* # scheduler loops before reschedule */ +#define IBLND_RESCHED 100 + +#define IBLND_N_SCHED 2 +#define IBLND_N_SCHED_HIGH 4 + +typedef struct { + int *kib_dev_failover; /* HCA failover */ + unsigned int *kib_service; /* IB service number */ + int *kib_min_reconnect_interval; /* first failed connection retry... */ + int *kib_max_reconnect_interval; /* ...exponentially increasing to this */ + int *kib_cksum; /* checksum kib_msg_t? */ + int *kib_timeout; /* comms timeout (seconds) */ + int *kib_keepalive; /* keepalive timeout (seconds) */ + int *kib_ntx; /* # tx descs */ + int *kib_credits; /* # concurrent sends */ + int *kib_peertxcredits; /* # concurrent sends to 1 peer */ + int *kib_peerrtrcredits; /* # per-peer router buffer credits */ + int *kib_peercredits_hiw; /* # when eagerly to return credits */ + int *kib_peertimeout; /* seconds to consider peer dead */ + char **kib_default_ipif; /* default IPoIB interface */ + int *kib_retry_count; + int *kib_rnr_retry_count; + int *kib_concurrent_sends; /* send work queue sizing */ + int *kib_ib_mtu; /* IB MTU */ + int *kib_map_on_demand; /* map-on-demand if RD has more fragments + * than this value, 0 disable map-on-demand */ + int *kib_pmr_pool_size; /* # physical MR in pool */ + int *kib_fmr_pool_size; /* # FMRs in pool */ + int *kib_fmr_flush_trigger; /* When to trigger FMR flush */ + int *kib_fmr_cache; /* enable FMR pool cache? */ + int *kib_require_priv_port;/* accept only privileged ports */ + int *kib_use_priv_port; /* use privileged port for active connect */ + /* # threads on each CPT */ + int *kib_nscheds; +} kib_tunables_t; + +extern kib_tunables_t kiblnd_tunables; + +#define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */ +#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */ + +#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */ +#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t*) 0)->ibm_credits)) - 1) /* Max # of peer credits */ + +#define IBLND_MSG_QUEUE_SIZE(v) ((v) == IBLND_MSG_VERSION_1 ? \ + IBLND_MSG_QUEUE_SIZE_V1 : \ + *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */ +#define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \ + IBLND_CREDIT_HIGHWATER_V1 : \ + *kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */ + +#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps, qpt) + +static inline int +kiblnd_concurrent_sends_v1(void) +{ + if (*kiblnd_tunables.kib_concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2) + return IBLND_MSG_QUEUE_SIZE_V1 * 2; + + if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2) + return IBLND_MSG_QUEUE_SIZE_V1 / 2; + + return *kiblnd_tunables.kib_concurrent_sends; +} + +#define IBLND_CONCURRENT_SENDS(v) ((v) == IBLND_MSG_VERSION_1 ? 
\ + kiblnd_concurrent_sends_v1() : \ + *kiblnd_tunables.kib_concurrent_sends) +/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */ +#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1) +#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0) + +#define IBLND_MSG_SIZE (4<<10) /* max size of queued messages (inc hdr) */ +#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */ +#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand != 0 ? \ + *kiblnd_tunables.kib_map_on_demand : \ + IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */ +#define IBLND_RDMA_FRAGS(v) ((v) == IBLND_MSG_VERSION_1 ? \ + IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS) + +/************************/ +/* derived constants... */ +/* Pools (shared by connections on each CPT) */ +/* These pools can grow at runtime, so don't need give a very large value */ +#define IBLND_TX_POOL 256 +#define IBLND_PMR_POOL 256 +#define IBLND_FMR_POOL 256 +#define IBLND_FMR_POOL_FLUSH 192 + +/* TX messages (shared by all connections) */ +#define IBLND_TX_MSGS() (*kiblnd_tunables.kib_ntx) + +/* RX messages (per connection) */ +#define IBLND_RX_MSGS(v) (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v)) +#define IBLND_RX_MSG_BYTES(v) (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE) +#define IBLND_RX_MSG_PAGES(v) ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / PAGE_SIZE) + +/* WRs and CQEs (per connection) */ +#define IBLND_RECV_WRS(v) IBLND_RX_MSGS(v) +#define IBLND_SEND_WRS(v) ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v)) +#define IBLND_CQ_ENTRIES(v) (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v)) + +struct kib_hca_dev; + +/* o2iblnd can run over aliased interface */ +#ifdef IFALIASZ +#define KIB_IFNAME_SIZE IFALIASZ +#else +#define KIB_IFNAME_SIZE 256 +#endif + +typedef struct { + struct list_head ibd_list; /* chain on kib_devs */ + struct list_head ibd_fail_list; /* chain on kib_failed_devs */ + __u32 ibd_ifip; /* IPoIB interface IP */ + /** IPoIB interface name */ + char ibd_ifname[KIB_IFNAME_SIZE]; + int ibd_nnets; /* # nets extant */ + + unsigned long ibd_next_failover; + int ibd_failed_failover; /* # failover failures */ + unsigned int ibd_failover; /* failover in progress */ + unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */ + struct list_head ibd_nets; + struct kib_hca_dev *ibd_hdev; +} kib_dev_t; + +typedef struct kib_hca_dev { + struct rdma_cm_id *ibh_cmid; /* listener cmid */ + struct ib_device *ibh_ibdev; /* IB device */ + int ibh_page_shift; /* page shift of current HCA */ + int ibh_page_size; /* page size of current HCA */ + __u64 ibh_page_mask; /* page mask of current HCA */ + int ibh_mr_shift; /* bits shift of max MR size */ + __u64 ibh_mr_size; /* size of MR */ + int ibh_nmrs; /* # of global MRs */ + struct ib_mr **ibh_mrs; /* global MR */ + struct ib_pd *ibh_pd; /* PD */ + kib_dev_t *ibh_dev; /* owner */ + atomic_t ibh_ref; /* refcount */ +} kib_hca_dev_t; + +/** # of seconds to keep pool alive */ +#define IBLND_POOL_DEADLINE 300 +/** # of seconds to retry if allocation failed */ +#define IBLND_POOL_RETRY 1 + +typedef struct { + int ibp_npages; /* # pages */ + struct page *ibp_pages[0]; /* page array */ +} kib_pages_t; + +struct kib_pmr_pool; + +typedef struct { + struct list_head pmr_list; /* chain node */ + struct ib_phys_buf *pmr_ipb; /* physical buffer */ + struct ib_mr *pmr_mr; /* IB MR */ + struct kib_pmr_pool *pmr_pool; /* owner of this MR */ + __u64 pmr_iova; /* Virtual I/O address */ + int pmr_refcount; /* reference count */ +} kib_phys_mr_t; + 
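+/*
+ * Pool-set usage sketch (illustrative only, not part of the driver proper):
+ * a pool-set is parameterised by the create/destroy/node-init callbacks
+ * declared below, and consumers take and return nodes the way
+ * kiblnd_get_idle_tx() and kiblnd_tx_done() do:
+ *
+ *	kib_tx_poolset_t *tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)];
+ *	struct list_head *node = kiblnd_pool_alloc_node(&tps->tps_poolset);
+ *	kib_tx_t *tx = container_of(node, kib_tx_t, tx_list);
+ *	... use the tx, then return it ...
+ *	kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
+ */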
+struct kib_pool; +struct kib_poolset; + +typedef int (*kib_ps_pool_create_t)(struct kib_poolset *ps, + int inc, struct kib_pool **pp_po); +typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po); +typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node); +typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node); + +struct kib_net; + +#define IBLND_POOL_NAME_LEN 32 + +typedef struct kib_poolset { + spinlock_t ps_lock; /* serialize */ + struct kib_net *ps_net; /* network it belongs to */ + char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */ + struct list_head ps_pool_list; /* list of pools */ + struct list_head ps_failed_pool_list; /* failed pool list */ + unsigned long ps_next_retry; /* time stamp for retry if failed to allocate */ + int ps_increasing; /* is allocating new pool */ + int ps_pool_size; /* new pool size */ + int ps_cpt; /* CPT id */ + + kib_ps_pool_create_t ps_pool_create; /* create a new pool */ + kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */ + kib_ps_node_init_t ps_node_init; /* initialize new allocated node */ + kib_ps_node_fini_t ps_node_fini; /* finalize node */ +} kib_poolset_t; + +typedef struct kib_pool { + struct list_head po_list; /* chain on pool list */ + struct list_head po_free_list; /* pre-allocated node */ + kib_poolset_t *po_owner; /* pool_set of this pool */ + unsigned long po_deadline; /* deadline of this pool */ + int po_allocated; /* # of elements in use */ + int po_failed; /* pool is created on failed HCA */ + int po_size; /* # of pre-allocated elements */ +} kib_pool_t; + +typedef struct { + kib_poolset_t tps_poolset; /* pool-set */ + __u64 tps_next_tx_cookie; /* cookie of TX */ +} kib_tx_poolset_t; + +typedef struct { + kib_pool_t tpo_pool; /* pool */ + struct kib_hca_dev *tpo_hdev; /* device for this pool */ + struct kib_tx *tpo_tx_descs; /* all the tx descriptors */ + kib_pages_t *tpo_tx_pages; /* premapped tx msg pages */ +} kib_tx_pool_t; + +typedef struct { + kib_poolset_t pps_poolset; /* pool-set */ +} kib_pmr_poolset_t; + +typedef struct kib_pmr_pool { + struct kib_hca_dev *ppo_hdev; /* device for this pool */ + kib_pool_t ppo_pool; /* pool */ +} kib_pmr_pool_t; + +typedef struct { + spinlock_t fps_lock; /* serialize */ + struct kib_net *fps_net; /* IB network */ + struct list_head fps_pool_list; /* FMR pool list */ + struct list_head fps_failed_pool_list; /* FMR pool list */ + __u64 fps_version; /* validity stamp */ + int fps_cpt; /* CPT id */ + int fps_pool_size; + int fps_flush_trigger; + /* is allocating new pool */ + int fps_increasing; + /* time stamp for retry if failed to allocate */ + unsigned long fps_next_retry; +} kib_fmr_poolset_t; + +typedef struct { + struct list_head fpo_list; /* chain on pool list */ + struct kib_hca_dev *fpo_hdev; /* device for this pool */ + kib_fmr_poolset_t *fpo_owner; /* owner of this pool */ + struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */ + unsigned long fpo_deadline; /* deadline of this pool */ + int fpo_failed; /* fmr pool is failed */ + int fpo_map_count; /* # of mapped FMR */ +} kib_fmr_pool_t; + +typedef struct { + struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */ + kib_fmr_pool_t *fmr_pool; /* pool of FMR */ +} kib_fmr_t; + +typedef struct kib_net { + struct list_head ibn_list; /* chain on kib_dev_t::ibd_nets */ + __u64 ibn_incarnation; /* my epoch */ + int ibn_init; /* initialisation state */ + int ibn_shutdown; /* shutting down? 
*/ + + atomic_t ibn_npeers; /* # peers extant */ + atomic_t ibn_nconns; /* # connections extant */ + + kib_tx_poolset_t **ibn_tx_ps; /* tx pool-set */ + kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */ + kib_pmr_poolset_t **ibn_pmr_ps; /* pmr pool-set */ + + kib_dev_t *ibn_dev; /* underlying IB device */ +} kib_net_t; + +#define KIB_THREAD_SHIFT 16 +#define KIB_THREAD_ID(cpt, tid) ((cpt) << KIB_THREAD_SHIFT | (tid)) +#define KIB_THREAD_CPT(id) ((id) >> KIB_THREAD_SHIFT) +#define KIB_THREAD_TID(id) ((id) & ((1UL << KIB_THREAD_SHIFT) - 1)) + +struct kib_sched_info { + /* serialise */ + spinlock_t ibs_lock; + /* schedulers sleep here */ + wait_queue_head_t ibs_waitq; + /* conns to check for rx completions */ + struct list_head ibs_conns; + /* number of scheduler threads */ + int ibs_nthreads; + /* max allowed scheduler threads */ + int ibs_nthreads_max; + int ibs_cpt; /* CPT id */ +}; + +typedef struct { + int kib_init; /* initialisation state */ + int kib_shutdown; /* shut down? */ + struct list_head kib_devs; /* IB devices extant */ + /* list head of failed devices */ + struct list_head kib_failed_devs; + /* schedulers sleep here */ + wait_queue_head_t kib_failover_waitq; + atomic_t kib_nthreads; /* # live threads */ + /* stabilize net/dev/peer/conn ops */ + rwlock_t kib_global_lock; + /* hash table of all my known peers */ + struct list_head *kib_peers; + /* size of kib_peers */ + int kib_peer_hash_size; + /* the connd task (serialisation assertions) */ + void *kib_connd; + /* connections to setup/teardown */ + struct list_head kib_connd_conns; + /* connections with zero refcount */ + struct list_head kib_connd_zombies; + /* connection daemon sleeps here */ + wait_queue_head_t kib_connd_waitq; + spinlock_t kib_connd_lock; /* serialise */ + struct ib_qp_attr kib_error_qpa; /* QP->ERROR */ + /* percpt data for schedulers */ + struct kib_sched_info **kib_scheds; +} kib_data_t; + +#define IBLND_INIT_NOTHING 0 +#define IBLND_INIT_DATA 1 +#define IBLND_INIT_ALL 2 + +/************************************************************************ + * IB Wire message format. + * These are sent in sender's byte order (i.e. receiver flips). + */ + +typedef struct kib_connparams { + __u16 ibcp_queue_depth; + __u16 ibcp_max_frags; + __u32 ibcp_max_msg_size; +} WIRE_ATTR kib_connparams_t; + +typedef struct { + lnet_hdr_t ibim_hdr; /* portals header */ + char ibim_payload[0]; /* piggy-backed payload */ +} WIRE_ATTR kib_immediate_msg_t; + +typedef struct { + __u32 rf_nob; /* # bytes this frag */ + __u64 rf_addr; /* CAVEAT EMPTOR: misaligned!! 
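+   (rf_addr follows the 4-byte rf_nob in a packed WIRE_ATTR struct, so don't assume natural 8-byte alignment)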
*/ +} WIRE_ATTR kib_rdma_frag_t; + +typedef struct { + __u32 rd_key; /* local/remote key */ + __u32 rd_nfrags; /* # fragments */ + kib_rdma_frag_t rd_frags[0]; /* buffer frags */ +} WIRE_ATTR kib_rdma_desc_t; + +typedef struct { + lnet_hdr_t ibprm_hdr; /* portals header */ + __u64 ibprm_cookie; /* opaque completion cookie */ +} WIRE_ATTR kib_putreq_msg_t; + +typedef struct { + __u64 ibpam_src_cookie; /* reflected completion cookie */ + __u64 ibpam_dst_cookie; /* opaque completion cookie */ + kib_rdma_desc_t ibpam_rd; /* sender's sink buffer */ +} WIRE_ATTR kib_putack_msg_t; + +typedef struct { + lnet_hdr_t ibgm_hdr; /* portals header */ + __u64 ibgm_cookie; /* opaque completion cookie */ + kib_rdma_desc_t ibgm_rd; /* rdma descriptor */ +} WIRE_ATTR kib_get_msg_t; + +typedef struct { + __u64 ibcm_cookie; /* opaque completion cookie */ + __s32 ibcm_status; /* < 0 failure: >= 0 length */ +} WIRE_ATTR kib_completion_msg_t; + +typedef struct { + /* First 2 fields fixed FOR ALL TIME */ + __u32 ibm_magic; /* I'm an ibnal message */ + __u16 ibm_version; /* this is my version number */ + + __u8 ibm_type; /* msg type */ + __u8 ibm_credits; /* returned credits */ + __u32 ibm_nob; /* # bytes in whole message */ + __u32 ibm_cksum; /* checksum (0 == no checksum) */ + __u64 ibm_srcnid; /* sender's NID */ + __u64 ibm_srcstamp; /* sender's incarnation */ + __u64 ibm_dstnid; /* destination's NID */ + __u64 ibm_dststamp; /* destination's incarnation */ + + union { + kib_connparams_t connparams; + kib_immediate_msg_t immediate; + kib_putreq_msg_t putreq; + kib_putack_msg_t putack; + kib_get_msg_t get; + kib_completion_msg_t completion; + } WIRE_ATTR ibm_u; +} WIRE_ATTR kib_msg_t; + +#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC /* unique magic */ + +#define IBLND_MSG_VERSION_1 0x11 +#define IBLND_MSG_VERSION_2 0x12 +#define IBLND_MSG_VERSION IBLND_MSG_VERSION_2 + +#define IBLND_MSG_CONNREQ 0xc0 /* connection request */ +#define IBLND_MSG_CONNACK 0xc1 /* connection acknowledge */ +#define IBLND_MSG_NOOP 0xd0 /* nothing (just credits) */ +#define IBLND_MSG_IMMEDIATE 0xd1 /* immediate */ +#define IBLND_MSG_PUT_REQ 0xd2 /* putreq (src->sink) */ +#define IBLND_MSG_PUT_NAK 0xd3 /* completion (sink->src) */ +#define IBLND_MSG_PUT_ACK 0xd4 /* putack (sink->src) */ +#define IBLND_MSG_PUT_DONE 0xd5 /* completion (src->sink) */ +#define IBLND_MSG_GET_REQ 0xd6 /* getreq (sink->src) */ +#define IBLND_MSG_GET_DONE 0xd7 /* completion (src->sink: all OK) */ + +typedef struct { + __u32 ibr_magic; /* sender's magic */ + __u16 ibr_version; /* sender's version */ + __u8 ibr_why; /* reject reason */ + __u8 ibr_padding; /* padding */ + __u64 ibr_incarnation; /* incarnation of peer */ + kib_connparams_t ibr_cp; /* connection parameters */ +} WIRE_ATTR kib_rej_t; + +/* connection rejection reasons */ +#define IBLND_REJECT_CONN_RACE 1 /* You lost connection race */ +#define IBLND_REJECT_NO_RESOURCES 2 /* Out of memory/conns etc */ +#define IBLND_REJECT_FATAL 3 /* Anything else */ + +#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */ +#define IBLND_REJECT_CONN_STALE 5 /* stale peer */ + +#define IBLND_REJECT_RDMA_FRAGS 6 /* Fatal: peer's rdma frags can't match mine */ +#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't match mine */ + +/***********************************************************************/ + +typedef struct kib_rx /* receive message */ +{ + struct list_head rx_list; /* queue for attention */ + struct kib_conn *rx_conn; /* owning conn */ + int rx_nob; /* # bytes received (-1 
while posted) */ + enum ib_wc_status rx_status; /* completion status */ + kib_msg_t *rx_msg; /* message buffer (host vaddr) */ + __u64 rx_msgaddr; /* message buffer (I/O addr) */ + DECLARE_PCI_UNMAP_ADDR (rx_msgunmap); /* for dma_unmap_single() */ + struct ib_recv_wr rx_wrq; /* receive work item... */ + struct ib_sge rx_sge; /* ...and its memory */ +} kib_rx_t; + +#define IBLND_POSTRX_DONT_POST 0 /* don't post */ +#define IBLND_POSTRX_NO_CREDIT 1 /* post: no credits */ +#define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer back 1 credit */ +#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give myself back 1 reserved credit */ + +typedef struct kib_tx /* transmit message */ +{ + struct list_head tx_list; /* queue on idle_txs ibc_tx_queue etc. */ + kib_tx_pool_t *tx_pool; /* pool I'm from */ + struct kib_conn *tx_conn; /* owning conn */ + short tx_sending; /* # tx callbacks outstanding */ + short tx_queued; /* queued for sending */ + short tx_waiting; /* waiting for peer */ + int tx_status; /* LNET completion status */ + unsigned long tx_deadline; /* completion deadline */ + __u64 tx_cookie; /* completion cookie */ + lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on completion */ + kib_msg_t *tx_msg; /* message buffer (host vaddr) */ + __u64 tx_msgaddr; /* message buffer (I/O addr) */ + DECLARE_PCI_UNMAP_ADDR (tx_msgunmap); /* for dma_unmap_single() */ + int tx_nwrq; /* # send work items */ + struct ib_send_wr *tx_wrq; /* send work items... */ + struct ib_sge *tx_sge; /* ...and their memory */ + kib_rdma_desc_t *tx_rd; /* rdma descriptor */ + int tx_nfrags; /* # entries in... */ + struct scatterlist *tx_frags; /* dma_map_sg descriptor */ + __u64 *tx_pages; /* rdma phys page addrs */ + union { + kib_phys_mr_t *pmr; /* MR for physical buffer */ + kib_fmr_t fmr; /* FMR */ + } tx_u; + int tx_dmadir; /* dma direction */ +} kib_tx_t; + +typedef struct kib_connvars { + /* connection-in-progress variables */ + kib_msg_t cv_msg; +} kib_connvars_t; + +typedef struct kib_conn { + struct kib_sched_info *ibc_sched; /* scheduler information */ + struct kib_peer *ibc_peer; /* owning peer */ + kib_hca_dev_t *ibc_hdev; /* HCA bound on */ + struct list_head ibc_list; /* stash on peer's conn list */ + struct list_head ibc_sched_list; /* schedule for attention */ + __u16 ibc_version; /* version of connection */ + __u64 ibc_incarnation; /* which instance of the peer */ + atomic_t ibc_refcount; /* # users */ + int ibc_state; /* what's happening */ + int ibc_nsends_posted; /* # uncompleted sends */ + int ibc_noops_posted; /* # uncompleted NOOPs */ + int ibc_credits; /* # credits I have */ + int ibc_outstanding_credits; /* # credits to return */ + int ibc_reserved_credits;/* # ACK/DONE msg credits */ + int ibc_comms_error; /* set on comms error */ + unsigned int ibc_nrx:16; /* receive buffers owned */ + unsigned int ibc_scheduled:1; /* scheduled for attention */ + unsigned int ibc_ready:1; /* CQ callback fired */ + /* time of last send */ + unsigned long ibc_last_send; + /** link chain for kiblnd_check_conns only */ + struct list_head ibc_connd_list; + /** rxs completed before ESTABLISHED */ + struct list_head ibc_early_rxs; + /** IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */ + struct list_head ibc_tx_noops; + struct list_head ibc_tx_queue; /* sends that need a credit */ + struct list_head ibc_tx_queue_nocred;/* sends that don't need a credit */ + struct list_head ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */ + struct list_head ibc_active_txs; /* active tx awaiting completion */ + spinlock_t 
ibc_lock; /* serialise */ + kib_rx_t *ibc_rxs; /* the rx descs */ + kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */ + + struct rdma_cm_id *ibc_cmid; /* CM id */ + struct ib_cq *ibc_cq; /* completion queue */ + + kib_connvars_t *ibc_connvars; /* in-progress connection state */ +} kib_conn_t; + +#define IBLND_CONN_INIT 0 /* being initialised */ +#define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */ +#define IBLND_CONN_PASSIVE_WAIT 2 /* passive waiting for rtu */ +#define IBLND_CONN_ESTABLISHED 3 /* connection established */ +#define IBLND_CONN_CLOSING 4 /* being closed */ +#define IBLND_CONN_DISCONNECTED 5 /* disconnected */ + +typedef struct kib_peer { + struct list_head ibp_list; /* stash on global peer list */ + lnet_nid_t ibp_nid; /* who's on the other end(s) */ + lnet_ni_t *ibp_ni; /* LNet interface */ + atomic_t ibp_refcount; /* # users */ + struct list_head ibp_conns; /* all active connections */ + struct list_head ibp_tx_queue; /* msgs waiting for a conn */ + __u16 ibp_version; /* version of peer */ + __u64 ibp_incarnation; /* incarnation of peer */ + int ibp_connecting; /* current active connection attempts */ + int ibp_accepting; /* current passive connection attempts */ + int ibp_error; /* errno on closing this peer */ + unsigned long ibp_last_alive; /* when (in jiffies) I was last alive */ +} kib_peer_t; + +extern kib_data_t kiblnd_data; + +extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev); + +static inline void +kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev) +{ + LASSERT (atomic_read(&hdev->ibh_ref) > 0); + atomic_inc(&hdev->ibh_ref); +} + +static inline void +kiblnd_hdev_decref(kib_hca_dev_t *hdev) +{ + LASSERT (atomic_read(&hdev->ibh_ref) > 0); + if (atomic_dec_and_test(&hdev->ibh_ref)) + kiblnd_hdev_destroy(hdev); +} + +static inline int +kiblnd_dev_can_failover(kib_dev_t *dev) +{ + if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */ + return 0; + + if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */ + return 0; + + if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */ + return 1; + + return dev->ibd_can_failover; +} + +#define kiblnd_conn_addref(conn) \ +do { \ + CDEBUG(D_NET, "conn[%p] (%d)++\n", \ + (conn), atomic_read(&(conn)->ibc_refcount)); \ + atomic_inc(&(conn)->ibc_refcount); \ +} while (0) + +#define kiblnd_conn_decref(conn) \ +do { \ + unsigned long flags; \ + \ + CDEBUG(D_NET, "conn[%p] (%d)--\n", \ + (conn), atomic_read(&(conn)->ibc_refcount)); \ + LASSERT_ATOMIC_POS(&(conn)->ibc_refcount); \ + if (atomic_dec_and_test(&(conn)->ibc_refcount)) { \ + spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \ + list_add_tail(&(conn)->ibc_list, \ + &kiblnd_data.kib_connd_zombies); \ + wake_up(&kiblnd_data.kib_connd_waitq); \ + spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\ + } \ +} while (0) + +#define kiblnd_peer_addref(peer) \ +do { \ + CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \ + (peer), libcfs_nid2str((peer)->ibp_nid), \ + atomic_read (&(peer)->ibp_refcount)); \ + atomic_inc(&(peer)->ibp_refcount); \ +} while (0) + +#define kiblnd_peer_decref(peer) \ +do { \ + CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \ + (peer), libcfs_nid2str((peer)->ibp_nid), \ + atomic_read (&(peer)->ibp_refcount)); \ + LASSERT_ATOMIC_POS(&(peer)->ibp_refcount); \ + if (atomic_dec_and_test(&(peer)->ibp_refcount)) \ + kiblnd_destroy_peer(peer); \ +} while (0) + +static inline struct list_head * +kiblnd_nid2peerlist (lnet_nid_t nid) +{ + unsigned int hash = + ((unsigned int)nid) % kiblnd_data.kib_peer_hash_size; + + return 
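+	/* i.e. bucket = nid % IBLND_PEER_HASH_SIZE (101) from the hash above */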
(&kiblnd_data.kib_peers [hash]); +} + +static inline int +kiblnd_peer_active (kib_peer_t *peer) +{ + /* Am I in the peer hash table? */ + return (!list_empty(&peer->ibp_list)); +} + +static inline kib_conn_t * +kiblnd_get_conn_locked (kib_peer_t *peer) +{ + LASSERT (!list_empty(&peer->ibp_conns)); + + /* just return the first connection */ + return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list); +} + +static inline int +kiblnd_send_keepalive(kib_conn_t *conn) +{ + return (*kiblnd_tunables.kib_keepalive > 0) && + cfs_time_after(jiffies, conn->ibc_last_send + + *kiblnd_tunables.kib_keepalive*HZ); +} + +static inline int +kiblnd_need_noop(kib_conn_t *conn) +{ + LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED); + + if (conn->ibc_outstanding_credits < + IBLND_CREDITS_HIGHWATER(conn->ibc_version) && + !kiblnd_send_keepalive(conn)) + return 0; /* No need to send NOOP */ + + if (IBLND_OOB_CAPABLE(conn->ibc_version)) { + if (!list_empty(&conn->ibc_tx_queue_nocred)) + return 0; /* NOOP can be piggybacked */ + + /* No tx to piggyback NOOP onto or no credit to send a tx */ + return (list_empty(&conn->ibc_tx_queue) || + conn->ibc_credits == 0); + } + + if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */ + !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */ + conn->ibc_credits == 0) /* no credit */ + return 0; + + if (conn->ibc_credits == 1 && /* last credit reserved for */ + conn->ibc_outstanding_credits == 0) /* giving back credits */ + return 0; + + /* No tx to piggyback NOOP onto or no credit to send a tx */ + return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1); +} + +static inline void +kiblnd_abort_receives(kib_conn_t *conn) +{ + ib_modify_qp(conn->ibc_cmid->qp, + &kiblnd_data.kib_error_qpa, IB_QP_STATE); +} + +static inline const char * +kiblnd_queue2str (kib_conn_t *conn, struct list_head *q) +{ + if (q == &conn->ibc_tx_queue) + return "tx_queue"; + + if (q == &conn->ibc_tx_queue_rsrvd) + return "tx_queue_rsrvd"; + + if (q == &conn->ibc_tx_queue_nocred) + return "tx_queue_nocred"; + + if (q == &conn->ibc_active_txs) + return "active_txs"; + + LBUG(); + return NULL; +} + +/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the + * lowest bits of the work request id to stash the work item type. 
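+ * For example, a receive is posted with wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX),
+ * and completion handling maps it back with kiblnd_wreqid2ptr()/kiblnd_wreqid2type().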
*/ + +#define IBLND_WID_TX 0 +#define IBLND_WID_RDMA 1 +#define IBLND_WID_RX 2 +#define IBLND_WID_MASK 3UL + +static inline __u64 +kiblnd_ptr2wreqid (void *ptr, int type) +{ + unsigned long lptr = (unsigned long)ptr; + + LASSERT ((lptr & IBLND_WID_MASK) == 0); + LASSERT ((type & ~IBLND_WID_MASK) == 0); + return (__u64)(lptr | type); +} + +static inline void * +kiblnd_wreqid2ptr (__u64 wreqid) +{ + return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK); +} + +static inline int +kiblnd_wreqid2type (__u64 wreqid) +{ + return (wreqid & IBLND_WID_MASK); +} + +static inline void +kiblnd_set_conn_state (kib_conn_t *conn, int state) +{ + conn->ibc_state = state; + mb(); +} + +static inline void +kiblnd_init_msg (kib_msg_t *msg, int type, int body_nob) +{ + msg->ibm_type = type; + msg->ibm_nob = offsetof(kib_msg_t, ibm_u) + body_nob; +} + +static inline int +kiblnd_rd_size (kib_rdma_desc_t *rd) +{ + int i; + int size; + + for (i = size = 0; i < rd->rd_nfrags; i++) + size += rd->rd_frags[i].rf_nob; + + return size; +} + +static inline __u64 +kiblnd_rd_frag_addr(kib_rdma_desc_t *rd, int index) +{ + return rd->rd_frags[index].rf_addr; +} + +static inline __u32 +kiblnd_rd_frag_size(kib_rdma_desc_t *rd, int index) +{ + return rd->rd_frags[index].rf_nob; +} + +static inline __u32 +kiblnd_rd_frag_key(kib_rdma_desc_t *rd, int index) +{ + return rd->rd_key; +} + +static inline int +kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob) +{ + if (nob < rd->rd_frags[index].rf_nob) { + rd->rd_frags[index].rf_addr += nob; + rd->rd_frags[index].rf_nob -= nob; + } else { + index ++; + } + + return index; +} + +static inline int +kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n) +{ + LASSERT (msgtype == IBLND_MSG_GET_REQ || + msgtype == IBLND_MSG_PUT_ACK); + + return msgtype == IBLND_MSG_GET_REQ ? 
+ offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) : + offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]); +} + + +static inline __u64 +kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr) +{ + return ib_dma_mapping_error(dev, dma_addr); +} + +static inline __u64 kiblnd_dma_map_single(struct ib_device *dev, + void *msg, size_t size, + enum dma_data_direction direction) +{ + return ib_dma_map_single(dev, msg, size, direction); +} + +static inline void kiblnd_dma_unmap_single(struct ib_device *dev, + __u64 addr, size_t size, + enum dma_data_direction direction) +{ + ib_dma_unmap_single(dev, addr, size, direction); +} + +#define KIBLND_UNMAP_ADDR_SET(p, m, a) do {} while (0) +#define KIBLND_UNMAP_ADDR(p, m, a) (a) + +static inline int kiblnd_dma_map_sg(struct ib_device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction direction) +{ + return ib_dma_map_sg(dev, sg, nents, direction); +} + +static inline void kiblnd_dma_unmap_sg(struct ib_device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction direction) +{ + ib_dma_unmap_sg(dev, sg, nents, direction); +} + +static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev, + struct scatterlist *sg) +{ + return ib_sg_dma_address(dev, sg); +} + +static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev, + struct scatterlist *sg) +{ + return ib_sg_dma_len(dev, sg); +} + +/* XXX We use KIBLND_CONN_PARAM(e) as writable buffer, it's not strictly + * right because OFED1.2 defines it as const, to use it we have to add + * (void *) cast to overcome "const" */ + +#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data) +#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len) + + +struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, + kib_rdma_desc_t *rd); +struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, + __u64 addr, __u64 size); +void kiblnd_map_rx_descs(kib_conn_t *conn); +void kiblnd_unmap_rx_descs(kib_conn_t *conn); +int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, + kib_rdma_desc_t *rd, int nfrags); +void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx); +void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node); +struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps); + +int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, + int npages, __u64 iov, kib_fmr_t *fmr); +void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status); + +int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev, + kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr); +void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr); + +int kiblnd_startup (lnet_ni_t *ni); +void kiblnd_shutdown (lnet_ni_t *ni); +int kiblnd_ctl (lnet_ni_t *ni, unsigned int cmd, void *arg); +void kiblnd_query (struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when); + +int kiblnd_tunables_init(void); +void kiblnd_tunables_fini(void); + +int kiblnd_connd (void *arg); +int kiblnd_scheduler(void *arg); +int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name); +int kiblnd_failover_thread (void *arg); + +int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages); +void kiblnd_free_pages (kib_pages_t *p); + +int kiblnd_cm_callback(struct rdma_cm_id *cmid, + struct rdma_cm_event *event); +int kiblnd_translate_mtu(int value); + +int kiblnd_dev_failover(kib_dev_t *dev); +int kiblnd_create_peer (lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid); +void kiblnd_destroy_peer (kib_peer_t *peer); +void kiblnd_destroy_dev (kib_dev_t *dev); +void kiblnd_unlink_peer_locked (kib_peer_t *peer); +void 
kiblnd_peer_alive (kib_peer_t *peer); +kib_peer_t *kiblnd_find_peer_locked (lnet_nid_t nid); +void kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error); +int kiblnd_close_stale_conns_locked (kib_peer_t *peer, + int version, __u64 incarnation); +int kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why); + +void kiblnd_connreq_done(kib_conn_t *conn, int status); +kib_conn_t *kiblnd_create_conn (kib_peer_t *peer, struct rdma_cm_id *cmid, + int state, int version); +void kiblnd_destroy_conn (kib_conn_t *conn); +void kiblnd_close_conn (kib_conn_t *conn, int error); +void kiblnd_close_conn_locked (kib_conn_t *conn, int error); + +int kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type, + int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie); + +void kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid); +void kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn); +void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn); +void kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob); +void kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist, + int status); +void kiblnd_check_sends (kib_conn_t *conn); + +void kiblnd_qp_event(struct ib_event *event, void *arg); +void kiblnd_cq_event(struct ib_event *event, void *arg); +void kiblnd_cq_completion(struct ib_cq *cq, void *arg); + +void kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version, + int credits, lnet_nid_t dstnid, __u64 dststamp); +int kiblnd_unpack_msg(kib_msg_t *msg, int nob); +int kiblnd_post_rx (kib_rx_t *rx, int credit); + +int kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg); +int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, + unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, + unsigned int offset, unsigned int mlen, unsigned int rlen); diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c new file mode 100644 index 000000000..dbf374983 --- /dev/null +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -0,0 +1,3519 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2012, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. 
+ * + * lnet/klnds/o2iblnd/o2iblnd_cb.c + * + * Author: Eric Barton + */ + +#include "o2iblnd.h" + +static void +kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx) +{ + lnet_msg_t *lntmsg[2]; + kib_net_t *net = ni->ni_data; + int rc; + int i; + + LASSERT(net != NULL); + LASSERT(!in_interrupt()); + LASSERT(!tx->tx_queued); /* mustn't be queued for sending */ + LASSERT(tx->tx_sending == 0); /* mustn't be awaiting sent callback */ + LASSERT(!tx->tx_waiting); /* mustn't be awaiting peer response */ + LASSERT(tx->tx_pool != NULL); + + kiblnd_unmap_tx(ni, tx); + + /* tx may have up to 2 lnet msgs to finalise */ + lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL; + lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL; + rc = tx->tx_status; + + if (tx->tx_conn != NULL) { + LASSERT(ni == tx->tx_conn->ibc_peer->ibp_ni); + + kiblnd_conn_decref(tx->tx_conn); + tx->tx_conn = NULL; + } + + tx->tx_nwrq = 0; + tx->tx_status = 0; + + kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list); + + /* delay finalize until my descs have been freed */ + for (i = 0; i < 2; i++) { + if (lntmsg[i] == NULL) + continue; + + lnet_finalize(ni, lntmsg[i], rc); + } +} + +void +kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status) +{ + kib_tx_t *tx; + + while (!list_empty(txlist)) { + tx = list_entry(txlist->next, kib_tx_t, tx_list); + + list_del(&tx->tx_list); + /* complete now */ + tx->tx_waiting = 0; + tx->tx_status = status; + kiblnd_tx_done(ni, tx); + } +} + +static kib_tx_t * +kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target) +{ + kib_net_t *net = (kib_net_t *)ni->ni_data; + struct list_head *node; + kib_tx_t *tx; + kib_tx_poolset_t *tps; + + tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)]; + node = kiblnd_pool_alloc_node(&tps->tps_poolset); + if (node == NULL) + return NULL; + tx = container_of(node, kib_tx_t, tx_list); + + LASSERT(tx->tx_nwrq == 0); + LASSERT(!tx->tx_queued); + LASSERT(tx->tx_sending == 0); + LASSERT(!tx->tx_waiting); + LASSERT(tx->tx_status == 0); + LASSERT(tx->tx_conn == NULL); + LASSERT(tx->tx_lntmsg[0] == NULL); + LASSERT(tx->tx_lntmsg[1] == NULL); + LASSERT(tx->tx_u.pmr == NULL); + LASSERT(tx->tx_nfrags == 0); + + return tx; +} + +static void +kiblnd_drop_rx(kib_rx_t *rx) +{ + kib_conn_t *conn = rx->rx_conn; + struct kib_sched_info *sched = conn->ibc_sched; + unsigned long flags; + + spin_lock_irqsave(&sched->ibs_lock, flags); + LASSERT(conn->ibc_nrx > 0); + conn->ibc_nrx--; + spin_unlock_irqrestore(&sched->ibs_lock, flags); + + kiblnd_conn_decref(conn); +} + +int +kiblnd_post_rx(kib_rx_t *rx, int credit) +{ + kib_conn_t *conn = rx->rx_conn; + kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data; + struct ib_recv_wr *bad_wrq = NULL; + struct ib_mr *mr; + int rc; + + LASSERT(net != NULL); + LASSERT(!in_interrupt()); + LASSERT(credit == IBLND_POSTRX_NO_CREDIT || + credit == IBLND_POSTRX_PEER_CREDIT || + credit == IBLND_POSTRX_RSRVD_CREDIT); + + mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE); + LASSERT(mr != NULL); + + rx->rx_sge.lkey = mr->lkey; + rx->rx_sge.addr = rx->rx_msgaddr; + rx->rx_sge.length = IBLND_MSG_SIZE; + + rx->rx_wrq.next = NULL; + rx->rx_wrq.sg_list = &rx->rx_sge; + rx->rx_wrq.num_sge = 1; + rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX); + + LASSERT(conn->ibc_state >= IBLND_CONN_INIT); + LASSERT(rx->rx_nob >= 0); /* not posted */ + + if (conn->ibc_state > IBLND_CONN_ESTABLISHED) { + kiblnd_drop_rx(rx); /* No more posts for this rx */ + return 0; + } + + rx->rx_nob = -1; /* flag posted */ + + rc = 
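+	     /* hand the receive WR to the QP; on failure rx_nob is reset to 0 just below */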
ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq); + if (rc != 0) { + CERROR("Can't post rx for %s: %d, bad_wrq: %p\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq); + rx->rx_nob = 0; + } + + if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */ + return rc; + + if (rc != 0) { + kiblnd_close_conn(conn, rc); + kiblnd_drop_rx(rx); /* No more posts for this rx */ + return rc; + } + + if (credit == IBLND_POSTRX_NO_CREDIT) + return 0; + + spin_lock(&conn->ibc_lock); + if (credit == IBLND_POSTRX_PEER_CREDIT) + conn->ibc_outstanding_credits++; + else + conn->ibc_reserved_credits++; + spin_unlock(&conn->ibc_lock); + + kiblnd_check_sends(conn); + return 0; +} + +static kib_tx_t * +kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie) +{ + struct list_head *tmp; + + list_for_each(tmp, &conn->ibc_active_txs) { + kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list); + + LASSERT(!tx->tx_queued); + LASSERT(tx->tx_sending != 0 || tx->tx_waiting); + + if (tx->tx_cookie != cookie) + continue; + + if (tx->tx_waiting && + tx->tx_msg->ibm_type == txtype) + return tx; + + CWARN("Bad completion: %swaiting, type %x (wanted %x)\n", + tx->tx_waiting ? "" : "NOT ", + tx->tx_msg->ibm_type, txtype); + } + return NULL; +} + +static void +kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) +{ + kib_tx_t *tx; + lnet_ni_t *ni = conn->ibc_peer->ibp_ni; + int idle; + + spin_lock(&conn->ibc_lock); + + tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie); + if (tx == NULL) { + spin_unlock(&conn->ibc_lock); + + CWARN("Unmatched completion type %x cookie %#llx from %s\n", + txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid)); + kiblnd_close_conn(conn, -EPROTO); + return; + } + + if (tx->tx_status == 0) { /* success so far */ + if (status < 0) { /* failed? */ + tx->tx_status = status; + } else if (txtype == IBLND_MSG_GET_REQ) { + lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status); + } + } + + tx->tx_waiting = 0; + + idle = !tx->tx_queued && (tx->tx_sending == 0); + if (idle) + list_del(&tx->tx_list); + + spin_unlock(&conn->ibc_lock); + + if (idle) + kiblnd_tx_done(ni, tx); +} + +static void +kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie) +{ + lnet_ni_t *ni = conn->ibc_peer->ibp_ni; + kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); + + if (tx == NULL) { + CERROR("Can't get tx for completion %x for %s\n", + type, libcfs_nid2str(conn->ibc_peer->ibp_nid)); + return; + } + + tx->tx_msg->ibm_u.completion.ibcm_status = status; + tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie; + kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t)); + + kiblnd_queue_tx(tx, conn); +} + +static void +kiblnd_handle_rx(kib_rx_t *rx) +{ + kib_msg_t *msg = rx->rx_msg; + kib_conn_t *conn = rx->rx_conn; + lnet_ni_t *ni = conn->ibc_peer->ibp_ni; + int credits = msg->ibm_credits; + kib_tx_t *tx; + int rc = 0; + int rc2; + int post_credit; + + LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); + + CDEBUG(D_NET, "Received %x[%d] from %s\n", + msg->ibm_type, credits, + libcfs_nid2str(conn->ibc_peer->ibp_nid)); + + if (credits != 0) { + /* Have I received credits that will let me send? 
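+ * Returned send-credits ride on every message: they are added
+ * to ibc_credits below, and pushing past the negotiated
+ * IBLND_MSG_QUEUE_SIZE(version) is treated as a protocol error.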
*/ + spin_lock(&conn->ibc_lock); + + if (conn->ibc_credits + credits > + IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) { + rc2 = conn->ibc_credits; + spin_unlock(&conn->ibc_lock); + + CERROR("Bad credits from %s: %d + %d > %d\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), + rc2, credits, + IBLND_MSG_QUEUE_SIZE(conn->ibc_version)); + + kiblnd_close_conn(conn, -EPROTO); + kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT); + return; + } + + conn->ibc_credits += credits; + + /* This ensures the credit taken by NOOP can be returned */ + if (msg->ibm_type == IBLND_MSG_NOOP && + !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */ + conn->ibc_outstanding_credits++; + + spin_unlock(&conn->ibc_lock); + kiblnd_check_sends(conn); + } + + switch (msg->ibm_type) { + default: + CERROR("Bad IBLND message type %x from %s\n", + msg->ibm_type, libcfs_nid2str(conn->ibc_peer->ibp_nid)); + post_credit = IBLND_POSTRX_NO_CREDIT; + rc = -EPROTO; + break; + + case IBLND_MSG_NOOP: + if (IBLND_OOB_CAPABLE(conn->ibc_version)) { + post_credit = IBLND_POSTRX_NO_CREDIT; + break; + } + + if (credits != 0) /* credit already posted */ + post_credit = IBLND_POSTRX_NO_CREDIT; + else /* a keepalive NOOP */ + post_credit = IBLND_POSTRX_PEER_CREDIT; + break; + + case IBLND_MSG_IMMEDIATE: + post_credit = IBLND_POSTRX_DONT_POST; + rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr, + msg->ibm_srcnid, rx, 0); + if (rc < 0) /* repost on error */ + post_credit = IBLND_POSTRX_PEER_CREDIT; + break; + + case IBLND_MSG_PUT_REQ: + post_credit = IBLND_POSTRX_DONT_POST; + rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr, + msg->ibm_srcnid, rx, 1); + if (rc < 0) /* repost on error */ + post_credit = IBLND_POSTRX_PEER_CREDIT; + break; + + case IBLND_MSG_PUT_NAK: + CWARN("PUT_NACK from %s\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid)); + post_credit = IBLND_POSTRX_RSRVD_CREDIT; + kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ, + msg->ibm_u.completion.ibcm_status, + msg->ibm_u.completion.ibcm_cookie); + break; + + case IBLND_MSG_PUT_ACK: + post_credit = IBLND_POSTRX_RSRVD_CREDIT; + + spin_lock(&conn->ibc_lock); + tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ, + msg->ibm_u.putack.ibpam_src_cookie); + if (tx != NULL) + list_del(&tx->tx_list); + spin_unlock(&conn->ibc_lock); + + if (tx == NULL) { + CERROR("Unmatched PUT_ACK from %s\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid)); + rc = -EPROTO; + break; + } + + LASSERT(tx->tx_waiting); + /* CAVEAT EMPTOR: I could be racing with tx_complete, but... + * (a) I can overwrite tx_msg since my peer has received it! + * (b) tx_waiting set tells tx_complete() it's not done. 
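+ * The PUT handshake is PUT_REQ -> PUT_ACK -> PUT_DONE; this tx
+ * carried my PUT_REQ and is reused below for the RDMA write
+ * and the closing PUT_DONE.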
*/ + + tx->tx_nwrq = 0; /* overwrite PUT_REQ */ + + rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE, + kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd), + &msg->ibm_u.putack.ibpam_rd, + msg->ibm_u.putack.ibpam_dst_cookie); + if (rc2 < 0) + CERROR("Can't setup rdma for PUT to %s: %d\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2); + + spin_lock(&conn->ibc_lock); + tx->tx_waiting = 0; /* clear waiting and queue atomically */ + kiblnd_queue_tx_locked(tx, conn); + spin_unlock(&conn->ibc_lock); + break; + + case IBLND_MSG_PUT_DONE: + post_credit = IBLND_POSTRX_PEER_CREDIT; + kiblnd_handle_completion(conn, IBLND_MSG_PUT_ACK, + msg->ibm_u.completion.ibcm_status, + msg->ibm_u.completion.ibcm_cookie); + break; + + case IBLND_MSG_GET_REQ: + post_credit = IBLND_POSTRX_DONT_POST; + rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr, + msg->ibm_srcnid, rx, 1); + if (rc < 0) /* repost on error */ + post_credit = IBLND_POSTRX_PEER_CREDIT; + break; + + case IBLND_MSG_GET_DONE: + post_credit = IBLND_POSTRX_RSRVD_CREDIT; + kiblnd_handle_completion(conn, IBLND_MSG_GET_REQ, + msg->ibm_u.completion.ibcm_status, + msg->ibm_u.completion.ibcm_cookie); + break; + } + + if (rc < 0) /* protocol error */ + kiblnd_close_conn(conn, rc); + + if (post_credit != IBLND_POSTRX_DONT_POST) + kiblnd_post_rx(rx, post_credit); +} + +static void +kiblnd_rx_complete(kib_rx_t *rx, int status, int nob) +{ + kib_msg_t *msg = rx->rx_msg; + kib_conn_t *conn = rx->rx_conn; + lnet_ni_t *ni = conn->ibc_peer->ibp_ni; + kib_net_t *net = ni->ni_data; + int rc; + int err = -EIO; + + LASSERT(net != NULL); + LASSERT(rx->rx_nob < 0); /* was posted */ + rx->rx_nob = 0; /* isn't now */ + + if (conn->ibc_state > IBLND_CONN_ESTABLISHED) + goto ignore; + + if (status != IB_WC_SUCCESS) { + CNETERR("Rx from %s failed: %d\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), status); + goto failed; + } + + LASSERT(nob >= 0); + rx->rx_nob = nob; + + rc = kiblnd_unpack_msg(msg, rx->rx_nob); + if (rc != 0) { + CERROR("Error %d unpacking rx from %s\n", + rc, libcfs_nid2str(conn->ibc_peer->ibp_nid)); + goto failed; + } + + if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid || + msg->ibm_dstnid != ni->ni_nid || + msg->ibm_srcstamp != conn->ibc_incarnation || + msg->ibm_dststamp != net->ibn_incarnation) { + CERROR("Stale rx from %s\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid)); + err = -ESTALE; + goto failed; + } + + /* set time last known alive */ + kiblnd_peer_alive(conn->ibc_peer); + + /* racing with connection establishment/teardown! */ + + if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + unsigned long flags; + + write_lock_irqsave(g_lock, flags); + /* must check holding global lock to eliminate race */ + if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { + list_add_tail(&rx->rx_list, &conn->ibc_early_rxs); + write_unlock_irqrestore(g_lock, flags); + return; + } + write_unlock_irqrestore(g_lock, flags); + } + kiblnd_handle_rx(rx); + return; + + failed: + CDEBUG(D_NET, "rx %p conn %p\n", rx, conn); + kiblnd_close_conn(conn, err); + ignore: + kiblnd_drop_rx(rx); /* Don't re-post rx. 
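+ * The conn is already closing, so this rx gives up its conn
+ * ref instead of being reposted.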
*/ +} + +static struct page * +kiblnd_kvaddr_to_page(unsigned long vaddr) +{ + struct page *page; + + if (is_vmalloc_addr((void *)vaddr)) { + page = vmalloc_to_page((void *)vaddr); + LASSERT(page != NULL); + return page; + } +#ifdef CONFIG_HIGHMEM + if (vaddr >= PKMAP_BASE && + vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) { + /* No highmem pages only used for bulk (kiov) I/O */ + CERROR("find page for address in highmem\n"); + LBUG(); + } +#endif + page = virt_to_page(vaddr); + LASSERT(page != NULL); + return page; +} + +static int +kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) +{ + kib_hca_dev_t *hdev; + __u64 *pages = tx->tx_pages; + kib_fmr_poolset_t *fps; + int npages; + int size; + int cpt; + int rc; + int i; + + LASSERT(tx->tx_pool != NULL); + LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL); + + hdev = tx->tx_pool->tpo_hdev; + + for (i = 0, npages = 0; i < rd->rd_nfrags; i++) { + for (size = 0; size < rd->rd_frags[i].rf_nob; + size += hdev->ibh_page_size) { + pages[npages++] = (rd->rd_frags[i].rf_addr & + hdev->ibh_page_mask) + size; + } + } + + cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt; + + fps = net->ibn_fmr_ps[cpt]; + rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->tx_u.fmr); + if (rc != 0) { + CERROR("Can't map %d pages: %d\n", npages, rc); + return rc; + } + + /* If rd is not tx_rd, it's going to get sent to a peer, who will need + * the rkey */ + rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey : + tx->tx_u.fmr.fmr_pfmr->fmr->lkey; + rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask; + rd->rd_frags[0].rf_nob = nob; + rd->rd_nfrags = 1; + + return 0; +} + +static int +kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) +{ + kib_hca_dev_t *hdev; + kib_pmr_poolset_t *pps; + __u64 iova; + int cpt; + int rc; + + LASSERT(tx->tx_pool != NULL); + LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL); + + hdev = tx->tx_pool->tpo_hdev; + + iova = rd->rd_frags[0].rf_addr & ~hdev->ibh_page_mask; + + cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt; + + pps = net->ibn_pmr_ps[cpt]; + rc = kiblnd_pmr_pool_map(pps, hdev, rd, &iova, &tx->tx_u.pmr); + if (rc != 0) { + CERROR("Failed to create MR by phybuf: %d\n", rc); + return rc; + } + + /* If rd is not tx_rd, it's going to get sent to a peer, who will need + * the rkey */ + rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.pmr->pmr_mr->rkey : + tx->tx_u.pmr->pmr_mr->lkey; + rd->rd_nfrags = 1; + rd->rd_frags[0].rf_addr = iova; + rd->rd_frags[0].rf_nob = nob; + + return 0; +} + +void +kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx) +{ + kib_net_t *net = ni->ni_data; + + LASSERT(net != NULL); + + if (net->ibn_fmr_ps != NULL && tx->tx_u.fmr.fmr_pfmr != NULL) { + kiblnd_fmr_pool_unmap(&tx->tx_u.fmr, tx->tx_status); + tx->tx_u.fmr.fmr_pfmr = NULL; + + } else if (net->ibn_pmr_ps != NULL && tx->tx_u.pmr != NULL) { + kiblnd_pmr_pool_unmap(tx->tx_u.pmr); + tx->tx_u.pmr = NULL; + } + + if (tx->tx_nfrags != 0) { + kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev, + tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir); + tx->tx_nfrags = 0; + } +} + +int +kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, + kib_rdma_desc_t *rd, int nfrags) +{ + kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; + kib_net_t *net = ni->ni_data; + struct ib_mr *mr = NULL; + __u32 nob; + int i; + + /* If rd is not tx_rd, it's going to get sent to a peer and I'm the + * RDMA sink */ + tx->tx_dmadir = (rd != tx->tx_rd) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; + tx->tx_nfrags = nfrags; + + rd->rd_nfrags = + kiblnd_dma_map_sg(hdev->ibh_ibdev, + tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir); + + for (i = 0, nob = 0; i < rd->rd_nfrags; i++) { + rd->rd_frags[i].rf_nob = kiblnd_sg_dma_len( + hdev->ibh_ibdev, &tx->tx_frags[i]); + rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address( + hdev->ibh_ibdev, &tx->tx_frags[i]); + nob += rd->rd_frags[i].rf_nob; + } + + /* looking for pre-mapping MR */ + mr = kiblnd_find_rd_dma_mr(hdev, rd); + if (mr != NULL) { + /* found pre-mapping MR */ + rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey; + return 0; + } + + if (net->ibn_fmr_ps != NULL) + return kiblnd_fmr_map_tx(net, tx, rd, nob); + else if (net->ibn_pmr_ps != NULL) + return kiblnd_pmr_map_tx(net, tx, rd, nob); + + return -EINVAL; +} + + +static int +kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, + unsigned int niov, struct kvec *iov, int offset, int nob) +{ + kib_net_t *net = ni->ni_data; + struct page *page; + struct scatterlist *sg; + unsigned long vaddr; + int fragnob; + int page_offset; + + LASSERT(nob > 0); + LASSERT(niov > 0); + LASSERT(net != NULL); + + while (offset >= iov->iov_len) { + offset -= iov->iov_len; + niov--; + iov++; + LASSERT(niov > 0); + } + + sg = tx->tx_frags; + do { + LASSERT(niov > 0); + + vaddr = ((unsigned long)iov->iov_base) + offset; + page_offset = vaddr & (PAGE_SIZE - 1); + page = kiblnd_kvaddr_to_page(vaddr); + if (page == NULL) { + CERROR("Can't find page\n"); + return -EFAULT; + } + + fragnob = min((int)(iov->iov_len - offset), nob); + fragnob = min(fragnob, (int)PAGE_SIZE - page_offset); + + sg_set_page(sg, page, fragnob, page_offset); + sg++; + + if (offset + fragnob < iov->iov_len) { + offset += fragnob; + } else { + offset = 0; + iov++; + niov--; + } + nob -= fragnob; + } while (nob > 0); + + return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags); +} + +static int +kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, + int nkiov, lnet_kiov_t *kiov, int offset, int nob) +{ + kib_net_t *net = ni->ni_data; + struct scatterlist *sg; + int fragnob; + + CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob); + + LASSERT(nob > 0); + LASSERT(nkiov > 0); + LASSERT(net != NULL); + + while (offset >= kiov->kiov_len) { + offset -= kiov->kiov_len; + nkiov--; + kiov++; + LASSERT(nkiov > 0); + } + + sg = tx->tx_frags; + do { + LASSERT(nkiov > 0); + + fragnob = min((int)(kiov->kiov_len - offset), nob); + + sg_set_page(sg, kiov->kiov_page, fragnob, + kiov->kiov_offset + offset); + sg++; + + offset = 0; + kiov++; + nkiov--; + nob -= fragnob; + } while (nob > 0); + + return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags); +} + +static int +kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) + __releases(conn->ibc_lock) + __acquires(conn->ibc_lock) +{ + kib_msg_t *msg = tx->tx_msg; + kib_peer_t *peer = conn->ibc_peer; + int ver = conn->ibc_version; + int rc; + int done; + struct ib_send_wr *bad_wrq; + + LASSERT(tx->tx_queued); + /* We rely on this for QP sizing */ + LASSERT(tx->tx_nwrq > 0); + LASSERT(tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver)); + + LASSERT(credit == 0 || credit == 1); + LASSERT(conn->ibc_outstanding_credits >= 0); + LASSERT(conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver)); + LASSERT(conn->ibc_credits >= 0); + LASSERT(conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver)); + + if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) { + /* tx completions outstanding... 
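+ * the work queue is full; leave the tx queued and let a send
+ * completion retry it via kiblnd_check_sends().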
*/ + CDEBUG(D_NET, "%s: posted enough\n", + libcfs_nid2str(peer->ibp_nid)); + return -EAGAIN; + } + + if (credit != 0 && conn->ibc_credits == 0) { /* no credits */ + CDEBUG(D_NET, "%s: no credits\n", + libcfs_nid2str(peer->ibp_nid)); + return -EAGAIN; + } + + if (credit != 0 && !IBLND_OOB_CAPABLE(ver) && + conn->ibc_credits == 1 && /* last credit reserved */ + msg->ibm_type != IBLND_MSG_NOOP) { /* for NOOP */ + CDEBUG(D_NET, "%s: not using last credit\n", + libcfs_nid2str(peer->ibp_nid)); + return -EAGAIN; + } + + /* NB don't drop ibc_lock before bumping tx_sending */ + list_del(&tx->tx_list); + tx->tx_queued = 0; + + if (msg->ibm_type == IBLND_MSG_NOOP && + (!kiblnd_need_noop(conn) || /* redundant NOOP */ + (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */ + conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) { + /* OK to drop when posted enough NOOPs, since + * kiblnd_check_sends will queue NOOP again when + * posted NOOPs complete */ + spin_unlock(&conn->ibc_lock); + kiblnd_tx_done(peer->ibp_ni, tx); + spin_lock(&conn->ibc_lock); + CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n", + libcfs_nid2str(peer->ibp_nid), + conn->ibc_noops_posted); + return 0; + } + + kiblnd_pack_msg(peer->ibp_ni, msg, ver, conn->ibc_outstanding_credits, + peer->ibp_nid, conn->ibc_incarnation); + + conn->ibc_credits -= credit; + conn->ibc_outstanding_credits = 0; + conn->ibc_nsends_posted++; + if (msg->ibm_type == IBLND_MSG_NOOP) + conn->ibc_noops_posted++; + + /* CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA + * PUT. If so, it was first queued here as a PUT_REQ, sent and + * stashed on ibc_active_txs, matched by an incoming PUT_ACK, + * and then re-queued here. It's (just) possible that + * tx_sending is non-zero if we've not done the tx_complete() + * from the first send; hence the ++ rather than = below. */ + tx->tx_sending++; + list_add(&tx->tx_list, &conn->ibc_active_txs); + + /* I'm still holding ibc_lock! 
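+ * ibc_lock is held across ib_post_send() so the completion
+ * path can't observe tx_sending or ibc_active_txs half-updated.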
*/ + if (conn->ibc_state != IBLND_CONN_ESTABLISHED) { + rc = -ECONNABORTED; + } else if (tx->tx_pool->tpo_pool.po_failed || + conn->ibc_hdev != tx->tx_pool->tpo_hdev) { + /* close_conn will launch failover */ + rc = -ENETDOWN; + } else { + rc = ib_post_send(conn->ibc_cmid->qp, + tx->tx_wrq, &bad_wrq); + } + + conn->ibc_last_send = jiffies; + + if (rc == 0) + return 0; + + /* NB credits are transferred in the actual + * message, which can only be the last work item */ + conn->ibc_credits += credit; + conn->ibc_outstanding_credits += msg->ibm_credits; + conn->ibc_nsends_posted--; + if (msg->ibm_type == IBLND_MSG_NOOP) + conn->ibc_noops_posted--; + + tx->tx_status = rc; + tx->tx_waiting = 0; + tx->tx_sending--; + + done = (tx->tx_sending == 0); + if (done) + list_del(&tx->tx_list); + + spin_unlock(&conn->ibc_lock); + + if (conn->ibc_state == IBLND_CONN_ESTABLISHED) + CERROR("Error %d posting transmit to %s\n", + rc, libcfs_nid2str(peer->ibp_nid)); + else + CDEBUG(D_NET, "Error %d posting transmit to %s\n", + rc, libcfs_nid2str(peer->ibp_nid)); + + kiblnd_close_conn(conn, rc); + + if (done) + kiblnd_tx_done(peer->ibp_ni, tx); + + spin_lock(&conn->ibc_lock); + + return -EIO; +} + +void +kiblnd_check_sends(kib_conn_t *conn) +{ + int ver = conn->ibc_version; + lnet_ni_t *ni = conn->ibc_peer->ibp_ni; + kib_tx_t *tx; + + /* Don't send anything until after the connection is established */ + if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { + CDEBUG(D_NET, "%s too soon\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid)); + return; + } + + spin_lock(&conn->ibc_lock); + + LASSERT(conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver)); + LASSERT(!IBLND_OOB_CAPABLE(ver) || + conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver)); + LASSERT(conn->ibc_reserved_credits >= 0); + + while (conn->ibc_reserved_credits > 0 && + !list_empty(&conn->ibc_tx_queue_rsrvd)) { + tx = list_entry(conn->ibc_tx_queue_rsrvd.next, + kib_tx_t, tx_list); + list_del(&tx->tx_list); + list_add_tail(&tx->tx_list, &conn->ibc_tx_queue); + conn->ibc_reserved_credits--; + } + + if (kiblnd_need_noop(conn)) { + spin_unlock(&conn->ibc_lock); + + tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); + if (tx != NULL) + kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0); + + spin_lock(&conn->ibc_lock); + if (tx != NULL) + kiblnd_queue_tx_locked(tx, conn); + } + + kiblnd_conn_addref(conn); /* 1 ref for me.... 
(see b21911) */ + + for (;;) { + int credit; + + if (!list_empty(&conn->ibc_tx_queue_nocred)) { + credit = 0; + tx = list_entry(conn->ibc_tx_queue_nocred.next, + kib_tx_t, tx_list); + } else if (!list_empty(&conn->ibc_tx_noops)) { + LASSERT(!IBLND_OOB_CAPABLE(ver)); + credit = 1; + tx = list_entry(conn->ibc_tx_noops.next, + kib_tx_t, tx_list); + } else if (!list_empty(&conn->ibc_tx_queue)) { + credit = 1; + tx = list_entry(conn->ibc_tx_queue.next, + kib_tx_t, tx_list); + } else + break; + + if (kiblnd_post_tx_locked(conn, tx, credit) != 0) + break; + } + + spin_unlock(&conn->ibc_lock); + + kiblnd_conn_decref(conn); /* ...until here */ +} + +static void +kiblnd_tx_complete(kib_tx_t *tx, int status) +{ + int failed = (status != IB_WC_SUCCESS); + kib_conn_t *conn = tx->tx_conn; + int idle; + + LASSERT(tx->tx_sending > 0); + + if (failed) { + if (conn->ibc_state == IBLND_CONN_ESTABLISHED) + CNETERR("Tx -> %s cookie %#llx sending %d waiting %d: failed %d\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), + tx->tx_cookie, tx->tx_sending, tx->tx_waiting, + status); + + kiblnd_close_conn(conn, -EIO); + } else { + kiblnd_peer_alive(conn->ibc_peer); + } + + spin_lock(&conn->ibc_lock); + + /* I could be racing with rdma completion. Whoever makes 'tx' idle + * gets to free it, which also drops its ref on 'conn'. */ + + tx->tx_sending--; + conn->ibc_nsends_posted--; + if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP) + conn->ibc_noops_posted--; + + if (failed) { + tx->tx_waiting = 0; /* don't wait for peer */ + tx->tx_status = -EIO; + } + + idle = (tx->tx_sending == 0) && /* This is the final callback */ + !tx->tx_waiting && /* Not waiting for peer */ + !tx->tx_queued; /* Not re-queued (PUT_DONE) */ + if (idle) + list_del(&tx->tx_list); + + kiblnd_conn_addref(conn); /* 1 ref for me.... 
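+ * keeping conn alive while tx_done and check_sends run below,
+ * outside ibc_lock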
*/ + + spin_unlock(&conn->ibc_lock); + + if (idle) + kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx); + + kiblnd_check_sends(conn); + + kiblnd_conn_decref(conn); /* ...until here */ +} + +void +kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob) +{ + kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; + struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq]; + struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq]; + int nob = offsetof(kib_msg_t, ibm_u) + body_nob; + struct ib_mr *mr; + + LASSERT(tx->tx_nwrq >= 0); + LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1); + LASSERT(nob <= IBLND_MSG_SIZE); + + kiblnd_init_msg(tx->tx_msg, type, body_nob); + + mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob); + LASSERT(mr != NULL); + + sge->lkey = mr->lkey; + sge->addr = tx->tx_msgaddr; + sge->length = nob; + + memset(wrq, 0, sizeof(*wrq)); + + wrq->next = NULL; + wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX); + wrq->sg_list = sge; + wrq->num_sge = 1; + wrq->opcode = IB_WR_SEND; + wrq->send_flags = IB_SEND_SIGNALED; + + tx->tx_nwrq++; +} + +int +kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, + int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie) +{ + kib_msg_t *ibmsg = tx->tx_msg; + kib_rdma_desc_t *srcrd = tx->tx_rd; + struct ib_sge *sge = &tx->tx_sge[0]; + struct ib_send_wr *wrq = &tx->tx_wrq[0]; + int rc = resid; + int srcidx; + int dstidx; + int wrknob; + + LASSERT(!in_interrupt()); + LASSERT(tx->tx_nwrq == 0); + LASSERT(type == IBLND_MSG_GET_DONE || + type == IBLND_MSG_PUT_DONE); + + srcidx = dstidx = 0; + + while (resid > 0) { + if (srcidx >= srcrd->rd_nfrags) { + CERROR("Src buffer exhausted: %d frags\n", srcidx); + rc = -EPROTO; + break; + } + + if (dstidx == dstrd->rd_nfrags) { + CERROR("Dst buffer exhausted: %d frags\n", dstidx); + rc = -EPROTO; + break; + } + + if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) { + CERROR("RDMA too fragmented for %s (%d): %d/%d src %d/%d dst frags\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), + IBLND_RDMA_FRAGS(conn->ibc_version), + srcidx, srcrd->rd_nfrags, + dstidx, dstrd->rd_nfrags); + rc = -EMSGSIZE; + break; + } + + wrknob = min(min(kiblnd_rd_frag_size(srcrd, srcidx), + kiblnd_rd_frag_size(dstrd, dstidx)), + (__u32) resid); + + sge = &tx->tx_sge[tx->tx_nwrq]; + sge->addr = kiblnd_rd_frag_addr(srcrd, srcidx); + sge->lkey = kiblnd_rd_frag_key(srcrd, srcidx); + sge->length = wrknob; + + wrq = &tx->tx_wrq[tx->tx_nwrq]; + + wrq->next = wrq + 1; + wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA); + wrq->sg_list = sge; + wrq->num_sge = 1; + wrq->opcode = IB_WR_RDMA_WRITE; + wrq->send_flags = 0; + + wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx); + wrq->wr.rdma.rkey = kiblnd_rd_frag_key(dstrd, dstidx); + + srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob); + dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob); + + resid -= wrknob; + + tx->tx_nwrq++; + wrq++; + sge++; + } + + if (rc < 0) /* no RDMA if completing with failure */ + tx->tx_nwrq = 0; + + ibmsg->ibm_u.completion.ibcm_status = rc; + ibmsg->ibm_u.completion.ibcm_cookie = dstcookie; + kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx, + type, sizeof(kib_completion_msg_t)); + + return rc; +} + +void +kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn) +{ + struct list_head *q; + + LASSERT(tx->tx_nwrq > 0); /* work items set up */ + LASSERT(!tx->tx_queued); /* not queued for sending already */ + LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); + + tx->tx_queued = 1; + tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ); + + if (tx->tx_conn == NULL) 
{ + kiblnd_conn_addref(conn); + tx->tx_conn = conn; + LASSERT(tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE); + } else { + /* PUT_DONE first attached to conn as a PUT_REQ */ + LASSERT(tx->tx_conn == conn); + LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE); + } + + switch (tx->tx_msg->ibm_type) { + default: + LBUG(); + + case IBLND_MSG_PUT_REQ: + case IBLND_MSG_GET_REQ: + q = &conn->ibc_tx_queue_rsrvd; + break; + + case IBLND_MSG_PUT_NAK: + case IBLND_MSG_PUT_ACK: + case IBLND_MSG_PUT_DONE: + case IBLND_MSG_GET_DONE: + q = &conn->ibc_tx_queue_nocred; + break; + + case IBLND_MSG_NOOP: + if (IBLND_OOB_CAPABLE(conn->ibc_version)) + q = &conn->ibc_tx_queue_nocred; + else + q = &conn->ibc_tx_noops; + break; + + case IBLND_MSG_IMMEDIATE: + q = &conn->ibc_tx_queue; + break; + } + + list_add_tail(&tx->tx_list, q); +} + +void +kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn) +{ + spin_lock(&conn->ibc_lock); + kiblnd_queue_tx_locked(tx, conn); + spin_unlock(&conn->ibc_lock); + + kiblnd_check_sends(conn); +} + +static int kiblnd_resolve_addr(struct rdma_cm_id *cmid, + struct sockaddr_in *srcaddr, + struct sockaddr_in *dstaddr, + int timeout_ms) +{ + unsigned short port; + int rc; + + /* allow the port to be reused */ + rc = rdma_set_reuseaddr(cmid, 1); + if (rc != 0) { + CERROR("Unable to set reuse on cmid: %d\n", rc); + return rc; + } + + /* look for a free privileged port */ + for (port = PROT_SOCK-1; port > 0; port--) { + srcaddr->sin_port = htons(port); + rc = rdma_resolve_addr(cmid, + (struct sockaddr *)srcaddr, + (struct sockaddr *)dstaddr, + timeout_ms); + if (rc == 0) { + CDEBUG(D_NET, "bound to port %hu\n", port); + return 0; + } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) { + CDEBUG(D_NET, "bind to port %hu failed: %d\n", + port, rc); + } else { + return rc; + } + } + + CERROR("Failed to bind to a free privileged port\n"); + return rc; +} + +static void +kiblnd_connect_peer(kib_peer_t *peer) +{ + struct rdma_cm_id *cmid; + kib_dev_t *dev; + kib_net_t *net = peer->ibp_ni->ni_data; + struct sockaddr_in srcaddr; + struct sockaddr_in dstaddr; + int rc; + + LASSERT(net != NULL); + LASSERT(peer->ibp_connecting > 0); + + cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP, + IB_QPT_RC); + + if (IS_ERR(cmid)) { + CERROR("Can't create CMID for %s: %ld\n", + libcfs_nid2str(peer->ibp_nid), PTR_ERR(cmid)); + rc = PTR_ERR(cmid); + goto failed; + } + + dev = net->ibn_dev; + memset(&srcaddr, 0, sizeof(srcaddr)); + srcaddr.sin_family = AF_INET; + srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip); + + memset(&dstaddr, 0, sizeof(dstaddr)); + dstaddr.sin_family = AF_INET; + dstaddr.sin_port = htons(*kiblnd_tunables.kib_service); + dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer->ibp_nid)); + + kiblnd_peer_addref(peer); /* cmid's ref */ + + if (*kiblnd_tunables.kib_use_priv_port) { + rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr, + *kiblnd_tunables.kib_timeout * 1000); + } else { + rc = rdma_resolve_addr(cmid, + (struct sockaddr *)&srcaddr, + (struct sockaddr *)&dstaddr, + *kiblnd_tunables.kib_timeout * 1000); + } + if (rc != 0) { + /* Can't initiate address resolution: */ + CERROR("Can't resolve addr for %s: %d\n", + libcfs_nid2str(peer->ibp_nid), rc); + goto failed2; + } + + LASSERT(cmid->device != NULL); + CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n", + libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname, + &dev->ibd_ifip, cmid->device->name); + + return; + + failed2: + kiblnd_peer_decref(peer); /* cmid's ref */ + rdma_destroy_id(cmid); + failed: + 
kiblnd_peer_connect_failed(peer, 1, rc); +} + +void +kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) +{ + kib_peer_t *peer; + kib_peer_t *peer2; + kib_conn_t *conn; + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + unsigned long flags; + int rc; + + /* If I get here, I've committed to send, so I complete the tx with + * failure on any problems */ + + LASSERT(tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */ + LASSERT(tx == NULL || tx->tx_nwrq > 0); /* work items have been set up */ + + /* First time, just use a read lock since I expect to find my peer + * connected */ + read_lock_irqsave(g_lock, flags); + + peer = kiblnd_find_peer_locked(nid); + if (peer != NULL && !list_empty(&peer->ibp_conns)) { + /* Found a peer with an established connection */ + conn = kiblnd_get_conn_locked(peer); + kiblnd_conn_addref(conn); /* 1 ref for me... */ + + read_unlock_irqrestore(g_lock, flags); + + if (tx != NULL) + kiblnd_queue_tx(tx, conn); + kiblnd_conn_decref(conn); /* ...to here */ + return; + } + + read_unlock(g_lock); + /* Re-try with a write lock */ + write_lock(g_lock); + + peer = kiblnd_find_peer_locked(nid); + if (peer != NULL) { + if (list_empty(&peer->ibp_conns)) { + /* found a peer, but it's still connecting... */ + LASSERT(peer->ibp_connecting != 0 || + peer->ibp_accepting != 0); + if (tx != NULL) + list_add_tail(&tx->tx_list, + &peer->ibp_tx_queue); + write_unlock_irqrestore(g_lock, flags); + } else { + conn = kiblnd_get_conn_locked(peer); + kiblnd_conn_addref(conn); /* 1 ref for me... */ + + write_unlock_irqrestore(g_lock, flags); + + if (tx != NULL) + kiblnd_queue_tx(tx, conn); + kiblnd_conn_decref(conn); /* ...to here */ + } + return; + } + + write_unlock_irqrestore(g_lock, flags); + + /* Allocate a peer ready to add to the peer table and retry */ + rc = kiblnd_create_peer(ni, &peer, nid); + if (rc != 0) { + CERROR("Can't create peer %s\n", libcfs_nid2str(nid)); + if (tx != NULL) { + tx->tx_status = -EHOSTUNREACH; + tx->tx_waiting = 0; + kiblnd_tx_done(ni, tx); + } + return; + } + + write_lock_irqsave(g_lock, flags); + + peer2 = kiblnd_find_peer_locked(nid); + if (peer2 != NULL) { + if (list_empty(&peer2->ibp_conns)) { + /* found a peer, but it's still connecting... */ + LASSERT(peer2->ibp_connecting != 0 || + peer2->ibp_accepting != 0); + if (tx != NULL) + list_add_tail(&tx->tx_list, + &peer2->ibp_tx_queue); + write_unlock_irqrestore(g_lock, flags); + } else { + conn = kiblnd_get_conn_locked(peer2); + kiblnd_conn_addref(conn); /* 1 ref for me... 
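+ * so the conn can't disappear between dropping g_lock and
+ * queueing the tx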
*/ + + write_unlock_irqrestore(g_lock, flags); + + if (tx != NULL) + kiblnd_queue_tx(tx, conn); + kiblnd_conn_decref(conn); /* ...to here */ + } + + kiblnd_peer_decref(peer); + return; + } + + /* Brand new peer */ + LASSERT(peer->ibp_connecting == 0); + peer->ibp_connecting = 1; + + /* always called with a ref on ni, which prevents ni being shutdown */ + LASSERT(((kib_net_t *)ni->ni_data)->ibn_shutdown == 0); + + if (tx != NULL) + list_add_tail(&tx->tx_list, &peer->ibp_tx_queue); + + kiblnd_peer_addref(peer); + list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid)); + + write_unlock_irqrestore(g_lock, flags); + + kiblnd_connect_peer(peer); + kiblnd_peer_decref(peer); +} + +int +kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) +{ + lnet_hdr_t *hdr = &lntmsg->msg_hdr; + int type = lntmsg->msg_type; + lnet_process_id_t target = lntmsg->msg_target; + int target_is_router = lntmsg->msg_target_is_router; + int routing = lntmsg->msg_routing; + unsigned int payload_niov = lntmsg->msg_niov; + struct kvec *payload_iov = lntmsg->msg_iov; + lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; + unsigned int payload_offset = lntmsg->msg_offset; + unsigned int payload_nob = lntmsg->msg_len; + kib_msg_t *ibmsg; + kib_tx_t *tx; + int nob; + int rc; + + /* NB 'private' is different depending on what we're sending.... */ + + CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n", + payload_nob, payload_niov, libcfs_id2str(target)); + + LASSERT(payload_nob == 0 || payload_niov > 0); + LASSERT(payload_niov <= LNET_MAX_IOV); + + /* Thread context */ + LASSERT(!in_interrupt()); + /* payload is either all vaddrs or all pages */ + LASSERT(!(payload_kiov != NULL && payload_iov != NULL)); + + switch (type) { + default: + LBUG(); + return -EIO; + + case LNET_MSG_ACK: + LASSERT(payload_nob == 0); + break; + + case LNET_MSG_GET: + if (routing || target_is_router) + break; /* send IMMEDIATE */ + + /* is the REPLY message too small for RDMA? */ + nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]); + if (nob <= IBLND_MSG_SIZE) + break; /* send IMMEDIATE */ + + tx = kiblnd_get_idle_tx(ni, target.nid); + if (tx == NULL) { + CERROR("Can't allocate txd for GET to %s\n", + libcfs_nid2str(target.nid)); + return -ENOMEM; + } + + ibmsg = tx->tx_msg; + + if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0) + rc = kiblnd_setup_rd_iov(ni, tx, + &ibmsg->ibm_u.get.ibgm_rd, + lntmsg->msg_md->md_niov, + lntmsg->msg_md->md_iov.iov, + 0, lntmsg->msg_md->md_length); + else + rc = kiblnd_setup_rd_kiov(ni, tx, + &ibmsg->ibm_u.get.ibgm_rd, + lntmsg->msg_md->md_niov, + lntmsg->msg_md->md_iov.kiov, + 0, lntmsg->msg_md->md_length); + if (rc != 0) { + CERROR("Can't setup GET sink for %s: %d\n", + libcfs_nid2str(target.nid), rc); + kiblnd_tx_done(ni, tx); + return -EIO; + } + + nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[tx->tx_nfrags]); + ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie; + ibmsg->ibm_u.get.ibgm_hdr = *hdr; + + kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob); + + tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg); + if (tx->tx_lntmsg[1] == NULL) { + CERROR("Can't create reply for GET -> %s\n", + libcfs_nid2str(target.nid)); + kiblnd_tx_done(ni, tx); + return -EIO; + } + + tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg[0,1] on completion */ + tx->tx_waiting = 1; /* waiting for GET_DONE */ + kiblnd_launch_tx(ni, tx, target.nid); + return 0; + + case LNET_MSG_REPLY: + case LNET_MSG_PUT: + /* Is the payload small enough not to need RDMA? 
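+ * Anything that fits in a single pre-posted message buffer,
+ * header included (nob <= IBLND_MSG_SIZE, 4K in the current
+ * version), is copied and sent IMMEDIATE; larger payloads use
+ * the PUT_REQ/PUT_ACK/PUT_DONE RDMA handshake set up below.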
*/ + nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]); + if (nob <= IBLND_MSG_SIZE) + break; /* send IMMEDIATE */ + + tx = kiblnd_get_idle_tx(ni, target.nid); + if (tx == NULL) { + CERROR("Can't allocate %s txd for %s\n", + type == LNET_MSG_PUT ? "PUT" : "REPLY", + libcfs_nid2str(target.nid)); + return -ENOMEM; + } + + if (payload_kiov == NULL) + rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd, + payload_niov, payload_iov, + payload_offset, payload_nob); + else + rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd, + payload_niov, payload_kiov, + payload_offset, payload_nob); + if (rc != 0) { + CERROR("Can't setup PUT src for %s: %d\n", + libcfs_nid2str(target.nid), rc); + kiblnd_tx_done(ni, tx); + return -EIO; + } + + ibmsg = tx->tx_msg; + ibmsg->ibm_u.putreq.ibprm_hdr = *hdr; + ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie; + kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t)); + + tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */ + tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */ + kiblnd_launch_tx(ni, tx, target.nid); + return 0; + } + + /* send IMMEDIATE */ + + LASSERT(offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]) + <= IBLND_MSG_SIZE); + + tx = kiblnd_get_idle_tx(ni, target.nid); + if (tx == NULL) { + CERROR("Can't send %d to %s: tx descs exhausted\n", + type, libcfs_nid2str(target.nid)); + return -ENOMEM; + } + + ibmsg = tx->tx_msg; + ibmsg->ibm_u.immediate.ibim_hdr = *hdr; + + if (payload_kiov != NULL) + lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg, + offsetof(kib_msg_t, ibm_u.immediate.ibim_payload), + payload_niov, payload_kiov, + payload_offset, payload_nob); + else + lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg, + offsetof(kib_msg_t, ibm_u.immediate.ibim_payload), + payload_niov, payload_iov, + payload_offset, payload_nob); + + nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]); + kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob); + + tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */ + kiblnd_launch_tx(ni, tx, target.nid); + return 0; +} + +static void +kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg) +{ + lnet_process_id_t target = lntmsg->msg_target; + unsigned int niov = lntmsg->msg_niov; + struct kvec *iov = lntmsg->msg_iov; + lnet_kiov_t *kiov = lntmsg->msg_kiov; + unsigned int offset = lntmsg->msg_offset; + unsigned int nob = lntmsg->msg_len; + kib_tx_t *tx; + int rc; + + tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid); + if (tx == NULL) { + CERROR("Can't get tx for REPLY to %s\n", + libcfs_nid2str(target.nid)); + goto failed_0; + } + + if (nob == 0) + rc = 0; + else if (kiov == NULL) + rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd, + niov, iov, offset, nob); + else + rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd, + niov, kiov, offset, nob); + + if (rc != 0) { + CERROR("Can't setup GET src for %s: %d\n", + libcfs_nid2str(target.nid), rc); + goto failed_1; + } + + rc = kiblnd_init_rdma(rx->rx_conn, tx, + IBLND_MSG_GET_DONE, nob, + &rx->rx_msg->ibm_u.get.ibgm_rd, + rx->rx_msg->ibm_u.get.ibgm_cookie); + if (rc < 0) { + CERROR("Can't setup rdma for GET from %s: %d\n", + libcfs_nid2str(target.nid), rc); + goto failed_1; + } + + if (nob == 0) { + /* No RDMA: local completion may happen now! 
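+ * A zero-length GET reply carries no bulk data, only the
+ * GET_DONE completion queued below, so lntmsg is finalised here.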
*/ + lnet_finalize(ni, lntmsg, 0); + } else { + /* RDMA: lnet_finalize(lntmsg) when it + * completes */ + tx->tx_lntmsg[0] = lntmsg; + } + + kiblnd_queue_tx(tx, rx->rx_conn); + return; + + failed_1: + kiblnd_tx_done(ni, tx); + failed_0: + lnet_finalize(ni, lntmsg, -EIO); +} + +int +kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, + unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, + unsigned int offset, unsigned int mlen, unsigned int rlen) +{ + kib_rx_t *rx = private; + kib_msg_t *rxmsg = rx->rx_msg; + kib_conn_t *conn = rx->rx_conn; + kib_tx_t *tx; + kib_msg_t *txmsg; + int nob; + int post_credit = IBLND_POSTRX_PEER_CREDIT; + int rc = 0; + + LASSERT(mlen <= rlen); + LASSERT(!in_interrupt()); + /* Either all pages or all vaddrs */ + LASSERT(!(kiov != NULL && iov != NULL)); + + switch (rxmsg->ibm_type) { + default: + LBUG(); + + case IBLND_MSG_IMMEDIATE: + nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]); + if (nob > rx->rx_nob) { + CERROR("Immediate message from %s too big: %d(%d)\n", + libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid), + nob, rx->rx_nob); + rc = -EPROTO; + break; + } + + if (kiov != NULL) + lnet_copy_flat2kiov(niov, kiov, offset, + IBLND_MSG_SIZE, rxmsg, + offsetof(kib_msg_t, ibm_u.immediate.ibim_payload), + mlen); + else + lnet_copy_flat2iov(niov, iov, offset, + IBLND_MSG_SIZE, rxmsg, + offsetof(kib_msg_t, ibm_u.immediate.ibim_payload), + mlen); + lnet_finalize(ni, lntmsg, 0); + break; + + case IBLND_MSG_PUT_REQ: + if (mlen == 0) { + lnet_finalize(ni, lntmsg, 0); + kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0, + rxmsg->ibm_u.putreq.ibprm_cookie); + break; + } + + tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); + if (tx == NULL) { + CERROR("Can't allocate tx for %s\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid)); + /* Not replying will break the connection */ + rc = -ENOMEM; + break; + } + + txmsg = tx->tx_msg; + if (kiov == NULL) + rc = kiblnd_setup_rd_iov(ni, tx, + &txmsg->ibm_u.putack.ibpam_rd, + niov, iov, offset, mlen); + else + rc = kiblnd_setup_rd_kiov(ni, tx, + &txmsg->ibm_u.putack.ibpam_rd, + niov, kiov, offset, mlen); + if (rc != 0) { + CERROR("Can't setup PUT sink for %s: %d\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), rc); + kiblnd_tx_done(ni, tx); + /* tell peer it's over */ + kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc, + rxmsg->ibm_u.putreq.ibprm_cookie); + break; + } + + nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[tx->tx_nfrags]); + txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie; + txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie; + + kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob); + + tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */ + tx->tx_waiting = 1; /* waiting for PUT_DONE */ + kiblnd_queue_tx(tx, conn); + + /* reposted buffer reserved for PUT_DONE */ + post_credit = IBLND_POSTRX_NO_CREDIT; + break; + + case IBLND_MSG_GET_REQ: + if (lntmsg != NULL) { + /* Optimized GET; RDMA lntmsg's payload */ + kiblnd_reply(ni, rx, lntmsg); + } else { + /* GET didn't match anything */ + kiblnd_send_completion(rx->rx_conn, IBLND_MSG_GET_DONE, + -ENODATA, + rxmsg->ibm_u.get.ibgm_cookie); + } + break; + } + + kiblnd_post_rx(rx, post_credit); + return rc; +} + +int +kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name) +{ + struct task_struct *task = kthread_run(fn, arg, "%s", name); + + if (IS_ERR(task)) + return PTR_ERR(task); + + atomic_inc(&kiblnd_data.kib_nthreads); + return 0; +} + +static void 
+kiblnd_thread_fini(void) +{ + atomic_dec(&kiblnd_data.kib_nthreads); +} + +void +kiblnd_peer_alive(kib_peer_t *peer) +{ + /* This is racy, but everyone's only writing cfs_time_current() */ + peer->ibp_last_alive = cfs_time_current(); + mb(); +} + +static void +kiblnd_peer_notify(kib_peer_t *peer) +{ + int error = 0; + unsigned long last_alive = 0; + unsigned long flags; + + read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + + if (list_empty(&peer->ibp_conns) && + peer->ibp_accepting == 0 && + peer->ibp_connecting == 0 && + peer->ibp_error != 0) { + error = peer->ibp_error; + peer->ibp_error = 0; + + last_alive = peer->ibp_last_alive; + } + + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + + if (error != 0) + lnet_notify(peer->ibp_ni, + peer->ibp_nid, 0, last_alive); +} + +void +kiblnd_close_conn_locked(kib_conn_t *conn, int error) +{ + /* This just does the immediate housekeeping. 'error' is zero for a + * normal shutdown which can happen only after the connection has been + * established. If the connection is established, schedule the + * connection to be finished off by the connd. Otherwise the connd is + * already dealing with it (either to set it up or tear it down). + * Caller holds kib_global_lock exclusively in irq context */ + kib_peer_t *peer = conn->ibc_peer; + kib_dev_t *dev; + unsigned long flags; + + LASSERT(error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED); + + if (error != 0 && conn->ibc_comms_error == 0) + conn->ibc_comms_error = error; + + if (conn->ibc_state != IBLND_CONN_ESTABLISHED) + return; /* already being handled */ + + if (error == 0 && + list_empty(&conn->ibc_tx_noops) && + list_empty(&conn->ibc_tx_queue) && + list_empty(&conn->ibc_tx_queue_rsrvd) && + list_empty(&conn->ibc_tx_queue_nocred) && + list_empty(&conn->ibc_active_txs)) { + CDEBUG(D_NET, "closing conn to %s\n", + libcfs_nid2str(peer->ibp_nid)); + } else { + CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n", + libcfs_nid2str(peer->ibp_nid), error, + list_empty(&conn->ibc_tx_queue) ? "" : "(sending)", + list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)", + list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)", + list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)", + list_empty(&conn->ibc_active_txs) ? 
"" : "(waiting)"); + } + + dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev; + list_del(&conn->ibc_list); + /* connd (see below) takes over ibc_list's ref */ + + if (list_empty(&peer->ibp_conns) && /* no more conns */ + kiblnd_peer_active(peer)) { /* still in peer table */ + kiblnd_unlink_peer_locked(peer); + + /* set/clear error on last conn */ + peer->ibp_error = conn->ibc_comms_error; + } + + kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING); + + if (error != 0 && + kiblnd_dev_can_failover(dev)) { + list_add_tail(&dev->ibd_fail_list, + &kiblnd_data.kib_failed_devs); + wake_up(&kiblnd_data.kib_failover_waitq); + } + + spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + + list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns); + wake_up(&kiblnd_data.kib_connd_waitq); + + spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); +} + +void +kiblnd_close_conn(kib_conn_t *conn, int error) +{ + unsigned long flags; + + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + + kiblnd_close_conn_locked(conn, error); + + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); +} + +static void +kiblnd_handle_early_rxs(kib_conn_t *conn) +{ + unsigned long flags; + kib_rx_t *rx; + kib_rx_t *tmp; + + LASSERT(!in_interrupt()); + LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); + + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + list_for_each_entry_safe(rx, tmp, &conn->ibc_early_rxs, rx_list) { + list_del(&rx->rx_list); + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + + kiblnd_handle_rx(rx); + + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + } + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); +} + +static void +kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs) +{ + LIST_HEAD(zombies); + struct list_head *tmp; + struct list_head *nxt; + kib_tx_t *tx; + + spin_lock(&conn->ibc_lock); + + list_for_each_safe(tmp, nxt, txs) { + tx = list_entry(tmp, kib_tx_t, tx_list); + + if (txs == &conn->ibc_active_txs) { + LASSERT(!tx->tx_queued); + LASSERT(tx->tx_waiting || + tx->tx_sending != 0); + } else { + LASSERT(tx->tx_queued); + } + + tx->tx_status = -ECONNABORTED; + tx->tx_waiting = 0; + + if (tx->tx_sending == 0) { + tx->tx_queued = 0; + list_del(&tx->tx_list); + list_add(&tx->tx_list, &zombies); + } + } + + spin_unlock(&conn->ibc_lock); + + kiblnd_txlist_done(conn->ibc_peer->ibp_ni, &zombies, -ECONNABORTED); +} + +static void +kiblnd_finalise_conn(kib_conn_t *conn) +{ + LASSERT(!in_interrupt()); + LASSERT(conn->ibc_state > IBLND_CONN_INIT); + + kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED); + + /* abort_receives moves QP state to IB_QPS_ERR. This is only required + * for connections that didn't get as far as being connected, because + * rdma_disconnect() does this for free. */ + kiblnd_abort_receives(conn); + + /* Complete all tx descs not waiting for sends to complete. 
+ * NB we should be safe from RDMA now that the QP has changed state */ + + kiblnd_abort_txs(conn, &conn->ibc_tx_noops); + kiblnd_abort_txs(conn, &conn->ibc_tx_queue); + kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd); + kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred); + kiblnd_abort_txs(conn, &conn->ibc_active_txs); + + kiblnd_handle_early_rxs(conn); +} + +void +kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error) +{ + LIST_HEAD(zombies); + unsigned long flags; + + LASSERT(error != 0); + LASSERT(!in_interrupt()); + + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + + if (active) { + LASSERT(peer->ibp_connecting > 0); + peer->ibp_connecting--; + } else { + LASSERT(peer->ibp_accepting > 0); + peer->ibp_accepting--; + } + + if (peer->ibp_connecting != 0 || + peer->ibp_accepting != 0) { + /* another connection attempt under way... */ + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, + flags); + return; + } + + if (list_empty(&peer->ibp_conns)) { + /* Take peer's blocked transmits to complete with error */ + list_add(&zombies, &peer->ibp_tx_queue); + list_del_init(&peer->ibp_tx_queue); + + if (kiblnd_peer_active(peer)) + kiblnd_unlink_peer_locked(peer); + + peer->ibp_error = error; + } else { + /* Can't have blocked transmits if there are connections */ + LASSERT(list_empty(&peer->ibp_tx_queue)); + } + + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + + kiblnd_peer_notify(peer); + + if (list_empty(&zombies)) + return; + + CNETERR("Deleting messages for %s: connection failed\n", + libcfs_nid2str(peer->ibp_nid)); + + kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH); +} + +void +kiblnd_connreq_done(kib_conn_t *conn, int status) +{ + kib_peer_t *peer = conn->ibc_peer; + kib_tx_t *tx; + kib_tx_t *tmp; + struct list_head txs; + unsigned long flags; + int active; + + active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); + + CDEBUG(D_NET, "%s: active(%d), version(%x), status(%d)\n", + libcfs_nid2str(peer->ibp_nid), active, + conn->ibc_version, status); + + LASSERT(!in_interrupt()); + LASSERT((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT && + peer->ibp_connecting > 0) || + (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT && + peer->ibp_accepting > 0)); + + LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars)); + conn->ibc_connvars = NULL; + + if (status != 0) { + /* failed to establish connection */ + kiblnd_peer_connect_failed(peer, active, status); + kiblnd_finalise_conn(conn); + return; + } + + /* connection established */ + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + + conn->ibc_last_send = jiffies; + kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED); + kiblnd_peer_alive(peer); + + /* Add conn to peer's list and nuke any dangling conns from a different + * peer instance... 
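+ * (a rebooted peer presents a new incarnation, so any conn
+ * still carrying the old one is closed as stale below)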
*/ + kiblnd_conn_addref(conn); /* +1 ref for ibc_list */ + list_add(&conn->ibc_list, &peer->ibp_conns); + if (active) + peer->ibp_connecting--; + else + peer->ibp_accepting--; + + if (peer->ibp_version == 0) { + peer->ibp_version = conn->ibc_version; + peer->ibp_incarnation = conn->ibc_incarnation; + } + + if (peer->ibp_version != conn->ibc_version || + peer->ibp_incarnation != conn->ibc_incarnation) { + kiblnd_close_stale_conns_locked(peer, conn->ibc_version, + conn->ibc_incarnation); + peer->ibp_version = conn->ibc_version; + peer->ibp_incarnation = conn->ibc_incarnation; + } + + /* grab pending txs while I have the lock */ + list_add(&txs, &peer->ibp_tx_queue); + list_del_init(&peer->ibp_tx_queue); + + if (!kiblnd_peer_active(peer) || /* peer has been deleted */ + conn->ibc_comms_error != 0) { /* error has happened already */ + lnet_ni_t *ni = peer->ibp_ni; + + /* start to shut down connection */ + kiblnd_close_conn_locked(conn, -ECONNABORTED); + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + + kiblnd_txlist_done(ni, &txs, -ECONNABORTED); + + return; + } + + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + + /* Schedule blocked txs */ + spin_lock(&conn->ibc_lock); + list_for_each_entry_safe(tx, tmp, &txs, tx_list) { + list_del(&tx->tx_list); + + kiblnd_queue_tx_locked(tx, conn); + } + spin_unlock(&conn->ibc_lock); + + kiblnd_check_sends(conn); + + /* schedule blocked rxs */ + kiblnd_handle_early_rxs(conn); +} + +static void +kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej) +{ + int rc; + + rc = rdma_reject(cmid, rej, sizeof(*rej)); + + if (rc != 0) + CWARN("Error %d sending reject\n", rc); +} + +static int +kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) +{ + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + kib_msg_t *reqmsg = priv; + kib_msg_t *ackmsg; + kib_dev_t *ibdev; + kib_peer_t *peer; + kib_peer_t *peer2; + kib_conn_t *conn; + lnet_ni_t *ni = NULL; + kib_net_t *net = NULL; + lnet_nid_t nid; + struct rdma_conn_param cp; + kib_rej_t rej; + int version = IBLND_MSG_VERSION; + unsigned long flags; + int rc; + struct sockaddr_in *peer_addr; + LASSERT(!in_interrupt()); + + /* cmid inherits 'context' from the corresponding listener id */ + ibdev = (kib_dev_t *)cmid->context; + LASSERT(ibdev != NULL); + + memset(&rej, 0, sizeof(rej)); + rej.ibr_magic = IBLND_MSG_MAGIC; + rej.ibr_why = IBLND_REJECT_FATAL; + rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE; + + peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr); + if (*kiblnd_tunables.kib_require_priv_port && + ntohs(peer_addr->sin_port) >= PROT_SOCK) { + __u32 ip = ntohl(peer_addr->sin_addr.s_addr); + CERROR("Peer's port (%pI4h:%hu) is not privileged\n", + &ip, ntohs(peer_addr->sin_port)); + goto failed; + } + + if (priv_nob < offsetof(kib_msg_t, ibm_type)) { + CERROR("Short connection request\n"); + goto failed; + } + + /* Future protocol version compatibility support! If the + * o2iblnd-specific protocol changes, or when LNET unifies + * protocols over all LNDs, the initial connection will + * negotiate a protocol version. I trap this here to avoid + * console errors; the reject tells the peer which protocol I + * speak. 
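+ * (rej.ibr_version and rej.ibr_why are filled in on the
+ * failed: path below before kiblnd_reject() sends them)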
*/ + if (reqmsg->ibm_magic == LNET_PROTO_MAGIC || + reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC)) + goto failed; + if (reqmsg->ibm_magic == IBLND_MSG_MAGIC && + reqmsg->ibm_version != IBLND_MSG_VERSION && + reqmsg->ibm_version != IBLND_MSG_VERSION_1) + goto failed; + if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) && + reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) && + reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1)) + goto failed; + + rc = kiblnd_unpack_msg(reqmsg, priv_nob); + if (rc != 0) { + CERROR("Can't parse connection request: %d\n", rc); + goto failed; + } + + nid = reqmsg->ibm_srcnid; + ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid)); + + if (ni != NULL) { + net = (kib_net_t *)ni->ni_data; + rej.ibr_incarnation = net->ibn_incarnation; + } + + if (ni == NULL || /* no matching net */ + ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */ + net->ibn_dev != ibdev) { /* wrong device */ + CERROR("Can't accept %s on %s (%s:%d:%pI4h): bad dst nid %s\n", + libcfs_nid2str(nid), + ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid), + ibdev->ibd_ifname, ibdev->ibd_nnets, + &ibdev->ibd_ifip, + libcfs_nid2str(reqmsg->ibm_dstnid)); + + goto failed; + } + + /* check time stamp as soon as possible */ + if (reqmsg->ibm_dststamp != 0 && + reqmsg->ibm_dststamp != net->ibn_incarnation) { + CWARN("Stale connection request\n"); + rej.ibr_why = IBLND_REJECT_CONN_STALE; + goto failed; + } + + /* I can accept peer's version */ + version = reqmsg->ibm_version; + + if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) { + CERROR("Unexpected connreq msg type: %x from %s\n", + reqmsg->ibm_type, libcfs_nid2str(nid)); + goto failed; + } + + if (reqmsg->ibm_u.connparams.ibcp_queue_depth != + IBLND_MSG_QUEUE_SIZE(version)) { + CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n", + libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth, + IBLND_MSG_QUEUE_SIZE(version)); + + if (version == IBLND_MSG_VERSION) + rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE; + + goto failed; + } + + if (reqmsg->ibm_u.connparams.ibcp_max_frags != + IBLND_RDMA_FRAGS(version)) { + CERROR("Can't accept %s(version %x): incompatible max_frags %d (%d wanted)\n", + libcfs_nid2str(nid), version, + reqmsg->ibm_u.connparams.ibcp_max_frags, + IBLND_RDMA_FRAGS(version)); + + if (version == IBLND_MSG_VERSION) + rej.ibr_why = IBLND_REJECT_RDMA_FRAGS; + + goto failed; + + } + + if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) { + CERROR("Can't accept %s: message size %d too big (%d max)\n", + libcfs_nid2str(nid), + reqmsg->ibm_u.connparams.ibcp_max_msg_size, + IBLND_MSG_SIZE); + goto failed; + } + + /* assume 'nid' is a new peer; create */ + rc = kiblnd_create_peer(ni, &peer, nid); + if (rc != 0) { + CERROR("Can't create peer for %s\n", libcfs_nid2str(nid)); + rej.ibr_why = IBLND_REJECT_NO_RESOURCES; + goto failed; + } + + write_lock_irqsave(g_lock, flags); + + peer2 = kiblnd_find_peer_locked(nid); + if (peer2 != NULL) { + if (peer2->ibp_version == 0) { + peer2->ibp_version = version; + peer2->ibp_incarnation = reqmsg->ibm_srcstamp; + } + + /* not the guy I've talked with */ + if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp || + peer2->ibp_version != version) { + kiblnd_close_peer_conns_locked(peer2, -ESTALE); + write_unlock_irqrestore(g_lock, flags); + + CWARN("Conn stale %s [old ver: %x, new ver: %x]\n", + libcfs_nid2str(nid), peer2->ibp_version, version); + + kiblnd_peer_decref(peer); + rej.ibr_why = IBLND_REJECT_CONN_STALE; + goto failed; + } + + /* tie-break connection race in favour of the 
higher NID */ + if (peer2->ibp_connecting != 0 && + nid < ni->ni_nid) { + write_unlock_irqrestore(g_lock, flags); + + CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid)); + + kiblnd_peer_decref(peer); + rej.ibr_why = IBLND_REJECT_CONN_RACE; + goto failed; + } + + peer2->ibp_accepting++; + kiblnd_peer_addref(peer2); + + write_unlock_irqrestore(g_lock, flags); + kiblnd_peer_decref(peer); + peer = peer2; + } else { + /* Brand new peer */ + LASSERT(peer->ibp_accepting == 0); + LASSERT(peer->ibp_version == 0 && + peer->ibp_incarnation == 0); + + peer->ibp_accepting = 1; + peer->ibp_version = version; + peer->ibp_incarnation = reqmsg->ibm_srcstamp; + + /* I have a ref on ni that prevents it being shutdown */ + LASSERT(net->ibn_shutdown == 0); + + kiblnd_peer_addref(peer); + list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid)); + + write_unlock_irqrestore(g_lock, flags); + } + + conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version); + if (conn == NULL) { + kiblnd_peer_connect_failed(peer, 0, -ENOMEM); + kiblnd_peer_decref(peer); + rej.ibr_why = IBLND_REJECT_NO_RESOURCES; + goto failed; + } + + /* conn now "owns" cmid, so I return success from here on to ensure the + * CM callback doesn't destroy cmid. */ + + conn->ibc_incarnation = reqmsg->ibm_srcstamp; + conn->ibc_credits = IBLND_MSG_QUEUE_SIZE(version); + conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version); + LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version) + <= IBLND_RX_MSGS(version)); + + ackmsg = &conn->ibc_connvars->cv_msg; + memset(ackmsg, 0, sizeof(*ackmsg)); + + kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK, + sizeof(ackmsg->ibm_u.connparams)); + ackmsg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version); + ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE; + ackmsg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version); + + kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp); + + memset(&cp, 0, sizeof(cp)); + cp.private_data = ackmsg; + cp.private_data_len = ackmsg->ibm_nob; + cp.responder_resources = 0; /* No atomic ops or RDMA reads */ + cp.initiator_depth = 0; + cp.flow_control = 1; + cp.retry_count = *kiblnd_tunables.kib_retry_count; + cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count; + + CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid)); + + rc = rdma_accept(cmid, &cp); + if (rc != 0) { + CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc); + rej.ibr_version = version; + rej.ibr_why = IBLND_REJECT_FATAL; + + kiblnd_reject(cmid, &rej); + kiblnd_connreq_done(conn, rc); + kiblnd_conn_decref(conn); + } + + lnet_ni_decref(ni); + return 0; + + failed: + if (ni != NULL) + lnet_ni_decref(ni); + + rej.ibr_version = version; + rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version); + rej.ibr_cp.ibcp_max_frags = IBLND_RDMA_FRAGS(version); + kiblnd_reject(cmid, &rej); + + return -ECONNREFUSED; +} + +static void +kiblnd_reconnect(kib_conn_t *conn, int version, + __u64 incarnation, int why, kib_connparams_t *cp) +{ + kib_peer_t *peer = conn->ibc_peer; + char *reason; + int retry = 0; + unsigned long flags; + + LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); + LASSERT(peer->ibp_connecting > 0); /* 'conn' at least */ + + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + + /* retry connection if it's still needed and no other connection + * attempts (active or passive) are in progress + * NB: reconnect is still needed even when ibp_tx_queue is + * empty if ibp_version != version because reconnect may be + * 
initiated by kiblnd_query() */
+	if ((!list_empty(&peer->ibp_tx_queue) ||
+	     peer->ibp_version != version) &&
+	    peer->ibp_connecting == 1 &&
+	    peer->ibp_accepting == 0) {
+		retry = 1;
+		peer->ibp_connecting++;
+
+		peer->ibp_version = version;
+		peer->ibp_incarnation = incarnation;
+	}
+
+	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+	if (!retry)
+		return;
+
+	switch (why) {
+	default:
+		reason = "Unknown";
+		break;
+
+	case IBLND_REJECT_CONN_STALE:
+		reason = "stale";
+		break;
+
+	case IBLND_REJECT_CONN_RACE:
+		reason = "conn race";
+		break;
+
+	case IBLND_REJECT_CONN_UNCOMPAT:
+		reason = "version negotiation";
+		break;
+	}
+
+	CNETERR("%s: retrying (%s), %x, %x, queue_dep: %d, max_frag: %d, msg_size: %d\n",
+		libcfs_nid2str(peer->ibp_nid),
+		reason, IBLND_MSG_VERSION, version,
+		cp != NULL ? cp->ibcp_queue_depth : IBLND_MSG_QUEUE_SIZE(version),
+		cp != NULL ? cp->ibcp_max_frags : IBLND_RDMA_FRAGS(version),
+		cp != NULL ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE);
+
+	kiblnd_connect_peer(peer);
+}
+
+static void
+kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
+{
+	kib_peer_t *peer = conn->ibc_peer;
+
+	LASSERT(!in_interrupt());
+	LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
+
+	switch (reason) {
+	case IB_CM_REJ_STALE_CONN:
+		kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
+				 IBLND_REJECT_CONN_STALE, NULL);
+		break;
+
+	case IB_CM_REJ_INVALID_SERVICE_ID:
+		CNETERR("%s rejected: no listener at %d\n",
+			libcfs_nid2str(peer->ibp_nid),
+			*kiblnd_tunables.kib_service);
+		break;
+
+	case IB_CM_REJ_CONSUMER_DEFINED:
+		if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
+			kib_rej_t *rej = priv;
+			kib_connparams_t *cp = NULL;
+			int flip = 0;
+			__u64 incarnation = -1;
+
+			/* NB. default incarnation is -1 because:
+			 * a) V1 will ignore dst incarnation in connreq.
+			 * b) V2 will provide incarnation while rejecting me,
+			 *    so -1 will be overwritten.
+			 *
+			 * If I try to connect to a V1 peer with the V2
+			 * protocol, it rejects me and then upgrades to V2;
+			 * knowing nothing about the upgrade, I retry with V1,
+			 * and the upgraded V2 peer can tell I'm talking the
+			 * old protocol and rejects me (incarnation is -1).
+			 */
+
+			if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
+			    rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
+				__swab32s(&rej->ibr_magic);
+				__swab16s(&rej->ibr_version);
+				flip = 1;
+			}
+
+			if (priv_nob >= sizeof(kib_rej_t) &&
+			    rej->ibr_version > IBLND_MSG_VERSION_1) {
+				/* priv_nob is always 148 in current version
+				 * of OFED, so we still need to check version.
+ * (define of IB_CM_REJ_PRIVATE_DATA_SIZE) */ + cp = &rej->ibr_cp; + + if (flip) { + __swab64s(&rej->ibr_incarnation); + __swab16s(&cp->ibcp_queue_depth); + __swab16s(&cp->ibcp_max_frags); + __swab32s(&cp->ibcp_max_msg_size); + } + + incarnation = rej->ibr_incarnation; + } + + if (rej->ibr_magic != IBLND_MSG_MAGIC && + rej->ibr_magic != LNET_PROTO_MAGIC) { + CERROR("%s rejected: consumer defined fatal error\n", + libcfs_nid2str(peer->ibp_nid)); + break; + } + + if (rej->ibr_version != IBLND_MSG_VERSION && + rej->ibr_version != IBLND_MSG_VERSION_1) { + CERROR("%s rejected: o2iblnd version %x error\n", + libcfs_nid2str(peer->ibp_nid), + rej->ibr_version); + break; + } + + if (rej->ibr_why == IBLND_REJECT_FATAL && + rej->ibr_version == IBLND_MSG_VERSION_1) { + CDEBUG(D_NET, "rejected by old version peer %s: %x\n", + libcfs_nid2str(peer->ibp_nid), rej->ibr_version); + + if (conn->ibc_version != IBLND_MSG_VERSION_1) + rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT; + } + + switch (rej->ibr_why) { + case IBLND_REJECT_CONN_RACE: + case IBLND_REJECT_CONN_STALE: + case IBLND_REJECT_CONN_UNCOMPAT: + kiblnd_reconnect(conn, rej->ibr_version, + incarnation, rej->ibr_why, cp); + break; + + case IBLND_REJECT_MSG_QUEUE_SIZE: + CERROR("%s rejected: incompatible message queue depth %d, %d\n", + libcfs_nid2str(peer->ibp_nid), + cp != NULL ? cp->ibcp_queue_depth : + IBLND_MSG_QUEUE_SIZE(rej->ibr_version), + IBLND_MSG_QUEUE_SIZE(conn->ibc_version)); + break; + + case IBLND_REJECT_RDMA_FRAGS: + CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n", + libcfs_nid2str(peer->ibp_nid), + cp != NULL ? cp->ibcp_max_frags : + IBLND_RDMA_FRAGS(rej->ibr_version), + IBLND_RDMA_FRAGS(conn->ibc_version)); + break; + + case IBLND_REJECT_NO_RESOURCES: + CERROR("%s rejected: o2iblnd no resources\n", + libcfs_nid2str(peer->ibp_nid)); + break; + + case IBLND_REJECT_FATAL: + CERROR("%s rejected: o2iblnd fatal error\n", + libcfs_nid2str(peer->ibp_nid)); + break; + + default: + CERROR("%s rejected: o2iblnd reason %d\n", + libcfs_nid2str(peer->ibp_nid), + rej->ibr_why); + break; + } + break; + } + /* fall through */ + default: + CNETERR("%s rejected: reason %d, size %d\n", + libcfs_nid2str(peer->ibp_nid), reason, priv_nob); + break; + } + + kiblnd_connreq_done(conn, -ECONNREFUSED); +} + +static void +kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) +{ + kib_peer_t *peer = conn->ibc_peer; + lnet_ni_t *ni = peer->ibp_ni; + kib_net_t *net = ni->ni_data; + kib_msg_t *msg = priv; + int ver = conn->ibc_version; + int rc = kiblnd_unpack_msg(msg, priv_nob); + unsigned long flags; + + LASSERT(net != NULL); + + if (rc != 0) { + CERROR("Can't unpack connack from %s: %d\n", + libcfs_nid2str(peer->ibp_nid), rc); + goto failed; + } + + if (msg->ibm_type != IBLND_MSG_CONNACK) { + CERROR("Unexpected message %d from %s\n", + msg->ibm_type, libcfs_nid2str(peer->ibp_nid)); + rc = -EPROTO; + goto failed; + } + + if (ver != msg->ibm_version) { + CERROR("%s replied version %x is different with requested version %x\n", + libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver); + rc = -EPROTO; + goto failed; + } + + if (msg->ibm_u.connparams.ibcp_queue_depth != + IBLND_MSG_QUEUE_SIZE(ver)) { + CERROR("%s has incompatible queue depth %d(%d wanted)\n", + libcfs_nid2str(peer->ibp_nid), + msg->ibm_u.connparams.ibcp_queue_depth, + IBLND_MSG_QUEUE_SIZE(ver)); + rc = -EPROTO; + goto failed; + } + + if (msg->ibm_u.connparams.ibcp_max_frags != + IBLND_RDMA_FRAGS(ver)) { + CERROR("%s has incompatible max_frags %d (%d wanted)\n", + 
libcfs_nid2str(peer->ibp_nid), + msg->ibm_u.connparams.ibcp_max_frags, + IBLND_RDMA_FRAGS(ver)); + rc = -EPROTO; + goto failed; + } + + if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) { + CERROR("%s max message size %d too big (%d max)\n", + libcfs_nid2str(peer->ibp_nid), + msg->ibm_u.connparams.ibcp_max_msg_size, + IBLND_MSG_SIZE); + rc = -EPROTO; + goto failed; + } + + read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + if (msg->ibm_dstnid == ni->ni_nid && + msg->ibm_dststamp == net->ibn_incarnation) + rc = 0; + else + rc = -ESTALE; + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + + if (rc != 0) { + CERROR("Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n", + libcfs_nid2str(peer->ibp_nid), rc, + msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags); + goto failed; + } + + conn->ibc_incarnation = msg->ibm_srcstamp; + conn->ibc_credits = + conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver); + LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver) + <= IBLND_RX_MSGS(ver)); + + kiblnd_connreq_done(conn, 0); + return; + + failed: + /* NB My QP has already established itself, so I handle anything going + * wrong here by setting ibc_comms_error. + * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then + * immediately tears it down. */ + + LASSERT(rc != 0); + conn->ibc_comms_error = rc; + kiblnd_connreq_done(conn, 0); +} + +static int +kiblnd_active_connect(struct rdma_cm_id *cmid) +{ + kib_peer_t *peer = (kib_peer_t *)cmid->context; + kib_conn_t *conn; + kib_msg_t *msg; + struct rdma_conn_param cp; + int version; + __u64 incarnation; + unsigned long flags; + int rc; + + read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + + incarnation = peer->ibp_incarnation; + version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : + peer->ibp_version; + + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + + conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version); + if (conn == NULL) { + kiblnd_peer_connect_failed(peer, 1, -ENOMEM); + kiblnd_peer_decref(peer); /* lose cmid's ref */ + return -ENOMEM; + } + + /* conn "owns" cmid now, so I return success from here on to ensure the + * CM callback doesn't destroy cmid. 
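+	 * (NB: returning non-zero from a CM callback makes the RDMA CM
+	 * destroy cmid, which conn still references; errors from here on
+	 * are reported via kiblnd_connreq_done() instead.)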
conn also takes over cmid's ref + * on peer */ + + msg = &conn->ibc_connvars->cv_msg; + + memset(msg, 0, sizeof(*msg)); + kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams)); + msg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version); + msg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version); + msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE; + + kiblnd_pack_msg(peer->ibp_ni, msg, version, + 0, peer->ibp_nid, incarnation); + + memset(&cp, 0, sizeof(cp)); + cp.private_data = msg; + cp.private_data_len = msg->ibm_nob; + cp.responder_resources = 0; /* No atomic ops or RDMA reads */ + cp.initiator_depth = 0; + cp.flow_control = 1; + cp.retry_count = *kiblnd_tunables.kib_retry_count; + cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count; + + LASSERT(cmid->context == (void *)conn); + LASSERT(conn->ibc_cmid == cmid); + + rc = rdma_connect(cmid, &cp); + if (rc != 0) { + CERROR("Can't connect to %s: %d\n", + libcfs_nid2str(peer->ibp_nid), rc); + kiblnd_connreq_done(conn, rc); + kiblnd_conn_decref(conn); + } + + return 0; +} + +int +kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) +{ + kib_peer_t *peer; + kib_conn_t *conn; + int rc; + + switch (event->event) { + default: + CERROR("Unexpected event: %d, status: %d\n", + event->event, event->status); + LBUG(); + + case RDMA_CM_EVENT_CONNECT_REQUEST: + /* destroy cmid on failure */ + rc = kiblnd_passive_connect(cmid, + (void *)KIBLND_CONN_PARAM(event), + KIBLND_CONN_PARAM_LEN(event)); + CDEBUG(D_NET, "connreq: %d\n", rc); + return rc; + + case RDMA_CM_EVENT_ADDR_ERROR: + peer = (kib_peer_t *)cmid->context; + CNETERR("%s: ADDR ERROR %d\n", + libcfs_nid2str(peer->ibp_nid), event->status); + kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH); + kiblnd_peer_decref(peer); + return -EHOSTUNREACH; /* rc != 0 destroys cmid */ + + case RDMA_CM_EVENT_ADDR_RESOLVED: + peer = (kib_peer_t *)cmid->context; + + CDEBUG(D_NET, "%s Addr resolved: %d\n", + libcfs_nid2str(peer->ibp_nid), event->status); + + if (event->status != 0) { + CNETERR("Can't resolve address for %s: %d\n", + libcfs_nid2str(peer->ibp_nid), event->status); + rc = event->status; + } else { + rc = rdma_resolve_route( + cmid, *kiblnd_tunables.kib_timeout * 1000); + if (rc == 0) + return 0; + /* Can't initiate route resolution */ + CERROR("Can't resolve route for %s: %d\n", + libcfs_nid2str(peer->ibp_nid), rc); + } + kiblnd_peer_connect_failed(peer, 1, rc); + kiblnd_peer_decref(peer); + return rc; /* rc != 0 destroys cmid */ + + case RDMA_CM_EVENT_ROUTE_ERROR: + peer = (kib_peer_t *)cmid->context; + CNETERR("%s: ROUTE ERROR %d\n", + libcfs_nid2str(peer->ibp_nid), event->status); + kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH); + kiblnd_peer_decref(peer); + return -EHOSTUNREACH; /* rc != 0 destroys cmid */ + + case RDMA_CM_EVENT_ROUTE_RESOLVED: + peer = (kib_peer_t *)cmid->context; + CDEBUG(D_NET, "%s Route resolved: %d\n", + libcfs_nid2str(peer->ibp_nid), event->status); + + if (event->status == 0) + return kiblnd_active_connect(cmid); + + CNETERR("Can't resolve route for %s: %d\n", + libcfs_nid2str(peer->ibp_nid), event->status); + kiblnd_peer_connect_failed(peer, 1, event->status); + kiblnd_peer_decref(peer); + return event->status; /* rc != 0 destroys cmid */ + + case RDMA_CM_EVENT_UNREACHABLE: + conn = (kib_conn_t *)cmid->context; + LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT || + conn->ibc_state == IBLND_CONN_PASSIVE_WAIT); + CNETERR("%s: UNREACHABLE %d\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), 
event->status); + kiblnd_connreq_done(conn, -ENETDOWN); + kiblnd_conn_decref(conn); + return 0; + + case RDMA_CM_EVENT_CONNECT_ERROR: + conn = (kib_conn_t *)cmid->context; + LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT || + conn->ibc_state == IBLND_CONN_PASSIVE_WAIT); + CNETERR("%s: CONNECT ERROR %d\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status); + kiblnd_connreq_done(conn, -ENOTCONN); + kiblnd_conn_decref(conn); + return 0; + + case RDMA_CM_EVENT_REJECTED: + conn = (kib_conn_t *)cmid->context; + switch (conn->ibc_state) { + default: + LBUG(); + + case IBLND_CONN_PASSIVE_WAIT: + CERROR("%s: REJECTED %d\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), + event->status); + kiblnd_connreq_done(conn, -ECONNRESET); + break; + + case IBLND_CONN_ACTIVE_CONNECT: + kiblnd_rejected(conn, event->status, + (void *)KIBLND_CONN_PARAM(event), + KIBLND_CONN_PARAM_LEN(event)); + break; + } + kiblnd_conn_decref(conn); + return 0; + + case RDMA_CM_EVENT_ESTABLISHED: + conn = (kib_conn_t *)cmid->context; + switch (conn->ibc_state) { + default: + LBUG(); + + case IBLND_CONN_PASSIVE_WAIT: + CDEBUG(D_NET, "ESTABLISHED (passive): %s\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid)); + kiblnd_connreq_done(conn, 0); + break; + + case IBLND_CONN_ACTIVE_CONNECT: + CDEBUG(D_NET, "ESTABLISHED(active): %s\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid)); + kiblnd_check_connreply(conn, + (void *)KIBLND_CONN_PARAM(event), + KIBLND_CONN_PARAM_LEN(event)); + break; + } + /* net keeps its ref on conn! */ + return 0; + + case RDMA_CM_EVENT_TIMEWAIT_EXIT: + CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n"); + return 0; + case RDMA_CM_EVENT_DISCONNECTED: + conn = (kib_conn_t *)cmid->context; + if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { + CERROR("%s DISCONNECTED\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid)); + kiblnd_connreq_done(conn, -ECONNRESET); + } else { + kiblnd_close_conn(conn, 0); + } + kiblnd_conn_decref(conn); + cmid->context = NULL; + return 0; + + case RDMA_CM_EVENT_DEVICE_REMOVAL: + LCONSOLE_ERROR_MSG(0x131, + "Received notification of device removal\n" + "Please shutdown LNET to allow this to proceed\n"); + /* Can't remove network from underneath LNET for now, so I have + * to ignore this */ + return 0; + + case RDMA_CM_EVENT_ADDR_CHANGE: + LCONSOLE_INFO("Physical link changed (eg hca/port)\n"); + return 0; + } +} + +static int +kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs) +{ + kib_tx_t *tx; + struct list_head *ttmp; + + list_for_each(ttmp, txs) { + tx = list_entry(ttmp, kib_tx_t, tx_list); + + if (txs != &conn->ibc_active_txs) { + LASSERT(tx->tx_queued); + } else { + LASSERT(!tx->tx_queued); + LASSERT(tx->tx_waiting || tx->tx_sending != 0); + } + + if (cfs_time_aftereq(jiffies, tx->tx_deadline)) { + CERROR("Timed out tx: %s, %lu seconds\n", + kiblnd_queue2str(conn, txs), + cfs_duration_sec(jiffies - tx->tx_deadline)); + return 1; + } + } + + return 0; +} + +static int +kiblnd_conn_timed_out_locked(kib_conn_t *conn) +{ + return kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) || + kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) || + kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) || + kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) || + kiblnd_check_txs_locked(conn, &conn->ibc_active_txs); +} + +static void +kiblnd_check_conns(int idx) +{ + LIST_HEAD(closes); + LIST_HEAD(checksends); + struct list_head *peers = &kiblnd_data.kib_peers[idx]; + struct list_head *ptmp; + kib_peer_t *peer; + kib_conn_t *conn; + kib_conn_t *tmp; + struct list_head *ctmp; + 
unsigned long flags; + + /* NB. We expect to have a look at all the peers and not find any + * RDMAs to time out, so we just use a shared lock while we + * take a look... */ + read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + + list_for_each(ptmp, peers) { + peer = list_entry(ptmp, kib_peer_t, ibp_list); + + list_for_each(ctmp, &peer->ibp_conns) { + int timedout; + int sendnoop; + + conn = list_entry(ctmp, kib_conn_t, ibc_list); + + LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED); + + spin_lock(&conn->ibc_lock); + + sendnoop = kiblnd_need_noop(conn); + timedout = kiblnd_conn_timed_out_locked(conn); + if (!sendnoop && !timedout) { + spin_unlock(&conn->ibc_lock); + continue; + } + + if (timedout) { + CERROR("Timed out RDMA with %s (%lu): c: %u, oc: %u, rc: %u\n", + libcfs_nid2str(peer->ibp_nid), + cfs_duration_sec(cfs_time_current() - + peer->ibp_last_alive), + conn->ibc_credits, + conn->ibc_outstanding_credits, + conn->ibc_reserved_credits); + list_add(&conn->ibc_connd_list, &closes); + } else { + list_add(&conn->ibc_connd_list, + &checksends); + } + /* +ref for 'closes' or 'checksends' */ + kiblnd_conn_addref(conn); + + spin_unlock(&conn->ibc_lock); + } + } + + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + + /* Handle timeout by closing the whole + * connection. We can only be sure RDMA activity + * has ceased once the QP has been modified. */ + list_for_each_entry_safe(conn, tmp, &closes, ibc_connd_list) { + list_del(&conn->ibc_connd_list); + kiblnd_close_conn(conn, -ETIMEDOUT); + kiblnd_conn_decref(conn); + } + + /* In case we have enough credits to return via a + * NOOP, but there were no non-blocking tx descs + * free to do it last time... */ + while (!list_empty(&checksends)) { + conn = list_entry(checksends.next, + kib_conn_t, ibc_connd_list); + list_del(&conn->ibc_connd_list); + kiblnd_check_sends(conn); + kiblnd_conn_decref(conn); + } +} + +static void +kiblnd_disconnect_conn(kib_conn_t *conn) +{ + LASSERT(!in_interrupt()); + LASSERT(current == kiblnd_data.kib_connd); + LASSERT(conn->ibc_state == IBLND_CONN_CLOSING); + + rdma_disconnect(conn->ibc_cmid); + kiblnd_finalise_conn(conn); + + kiblnd_peer_notify(conn->ibc_peer); +} + +int +kiblnd_connd(void *arg) +{ + wait_queue_t wait; + unsigned long flags; + kib_conn_t *conn; + int timeout; + int i; + int dropped_lock; + int peer_index = 0; + unsigned long deadline = jiffies; + + cfs_block_allsigs(); + + init_waitqueue_entry(&wait, current); + kiblnd_data.kib_connd = current; + + spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + + while (!kiblnd_data.kib_shutdown) { + + dropped_lock = 0; + + if (!list_empty(&kiblnd_data.kib_connd_zombies)) { + conn = list_entry(kiblnd_data. \ + kib_connd_zombies.next, + kib_conn_t, ibc_list); + list_del(&conn->ibc_list); + + spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, + flags); + dropped_lock = 1; + + kiblnd_destroy_conn(conn); + + spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + } + + if (!list_empty(&kiblnd_data.kib_connd_conns)) { + conn = list_entry(kiblnd_data.kib_connd_conns.next, + kib_conn_t, ibc_list); + list_del(&conn->ibc_list); + + spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, + flags); + dropped_lock = 1; + + kiblnd_disconnect_conn(conn); + kiblnd_conn_decref(conn); + + spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + } + + /* careful with the jiffy wrap... 
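+		 * (NB: deadline - jiffies is evaluated as a signed int
+		 * below, so the comparison stays correct even after
+		 * jiffies wraps.)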
*/ + timeout = (int)(deadline - jiffies); + if (timeout <= 0) { + const int n = 4; + const int p = 1; + int chunk = kiblnd_data.kib_peer_hash_size; + + spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); + dropped_lock = 1; + + /* Time to check for RDMA timeouts on a few more + * peers: I do checks every 'p' seconds on a + * proportion of the peer table and I need to check + * every connection 'n' times within a timeout + * interval, to ensure I detect a timeout on any + * connection within (n+1)/n times the timeout + * interval. */ + + if (*kiblnd_tunables.kib_timeout > n * p) + chunk = (chunk * n * p) / + *kiblnd_tunables.kib_timeout; + if (chunk == 0) + chunk = 1; + + for (i = 0; i < chunk; i++) { + kiblnd_check_conns(peer_index); + peer_index = (peer_index + 1) % + kiblnd_data.kib_peer_hash_size; + } + + deadline += p * HZ; + spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + } + + if (dropped_lock) + continue; + + /* Nothing to do for 'timeout' */ + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait); + spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); + + schedule_timeout(timeout); + + remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait); + spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + } + + spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); + + kiblnd_thread_fini(); + return 0; +} + +void +kiblnd_qp_event(struct ib_event *event, void *arg) +{ + kib_conn_t *conn = arg; + + switch (event->event) { + case IB_EVENT_COMM_EST: + CDEBUG(D_NET, "%s established\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid)); + return; + + default: + CERROR("%s: Async QP event type %d\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event); + return; + } +} + +static void +kiblnd_complete(struct ib_wc *wc) +{ + switch (kiblnd_wreqid2type(wc->wr_id)) { + default: + LBUG(); + + case IBLND_WID_RDMA: + /* We only get RDMA completion notification if it fails. All + * subsequent work items, including the final SEND will fail + * too. However we can't print out any more info about the + * failing RDMA because 'tx' might be back on the idle list or + * even reused already if we didn't manage to post all our work + * items */ + CNETERR("RDMA (tx: %p) failed: %d\n", + kiblnd_wreqid2ptr(wc->wr_id), wc->status); + return; + + case IBLND_WID_TX: + kiblnd_tx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status); + return; + + case IBLND_WID_RX: + kiblnd_rx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status, + wc->byte_len); + return; + } +} + +void +kiblnd_cq_completion(struct ib_cq *cq, void *arg) +{ + /* NB I'm not allowed to schedule this conn once its refcount has + * reached 0. Since fundamentally I'm racing with scheduler threads + * consuming my CQ I could be called after all completions have + * occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0 + * and this CQ is about to be destroyed so I NOOP. 
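+	 * (NB: ibc_ready and ibc_scheduled are only ever updated under
+	 * sched->ibs_lock, which is what makes this handshake with
+	 * kiblnd_scheduler() safe.)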
 */
+	kib_conn_t *conn = (kib_conn_t *)arg;
+	struct kib_sched_info *sched = conn->ibc_sched;
+	unsigned long flags;
+
+	LASSERT(cq == conn->ibc_cq);
+
+	spin_lock_irqsave(&sched->ibs_lock, flags);
+
+	conn->ibc_ready = 1;
+
+	if (!conn->ibc_scheduled &&
+	    (conn->ibc_nrx > 0 ||
+	     conn->ibc_nsends_posted > 0)) {
+		kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
+		conn->ibc_scheduled = 1;
+		list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
+
+		if (waitqueue_active(&sched->ibs_waitq))
+			wake_up(&sched->ibs_waitq);
+	}
+
+	spin_unlock_irqrestore(&sched->ibs_lock, flags);
+}
+
+void
+kiblnd_cq_event(struct ib_event *event, void *arg)
+{
+	kib_conn_t *conn = arg;
+
+	CERROR("%s: async CQ event type %d\n",
+	       libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
+}
+
+int
+kiblnd_scheduler(void *arg)
+{
+	long id = (long)arg;
+	struct kib_sched_info *sched;
+	kib_conn_t *conn;
+	wait_queue_t wait;
+	unsigned long flags;
+	struct ib_wc wc;
+	int did_something;
+	int busy_loops = 0;
+	int rc;
+
+	cfs_block_allsigs();
+
+	init_waitqueue_entry(&wait, current);
+
+	sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
+
+	rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
+	if (rc != 0) {
+		CWARN("Failed to bind on CPT %d; please verify that all CPUs are healthy and reload the module if necessary, otherwise your system may run at reduced performance\n",
+		      sched->ibs_cpt);
+	}
+
+	spin_lock_irqsave(&sched->ibs_lock, flags);
+
+	while (!kiblnd_data.kib_shutdown) {
+		if (busy_loops++ >= IBLND_RESCHED) {
+			spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
+			cond_resched();
+			busy_loops = 0;
+
+			spin_lock_irqsave(&sched->ibs_lock, flags);
+		}
+
+		did_something = 0;
+
+		if (!list_empty(&sched->ibs_conns)) {
+			conn = list_entry(sched->ibs_conns.next,
+					  kib_conn_t, ibc_sched_list);
+			/* take over kib_sched_conns' ref on conn... */
+			LASSERT(conn->ibc_scheduled);
+			list_del(&conn->ibc_sched_list);
+			conn->ibc_ready = 0;
+
+			spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
+			rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
+			if (rc == 0) {
+				rc = ib_req_notify_cq(conn->ibc_cq,
+						      IB_CQ_NEXT_COMP);
+				if (rc < 0) {
+					CWARN("%s: ib_req_notify_cq failed: %d, closing connection\n",
+					      libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
+					kiblnd_close_conn(conn, -EIO);
+					kiblnd_conn_decref(conn);
+					spin_lock_irqsave(&sched->ibs_lock,
+							  flags);
+					continue;
+				}
+
+				rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
+			}
+
+			if (rc < 0) {
+				CWARN("%s: ib_poll_cq failed: %d, closing connection\n",
+				      libcfs_nid2str(conn->ibc_peer->ibp_nid),
+				      rc);
+				kiblnd_close_conn(conn, -EIO);
+				kiblnd_conn_decref(conn);
+				spin_lock_irqsave(&sched->ibs_lock, flags);
+				continue;
+			}
+
+			spin_lock_irqsave(&sched->ibs_lock, flags);
+
+			if (rc != 0 || conn->ibc_ready) {
+				/* There may be another completion waiting; get
+				 * another scheduler to check while I handle
+				 * this one...
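+				 * (NB: the ref taken below keeps conn alive
+				 * while it is back on ibs_conns, so another
+				 * scheduler can poll it safely.)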
 */
+				/* +1 ref for sched_conns */
+				kiblnd_conn_addref(conn);
+				list_add_tail(&conn->ibc_sched_list,
+					      &sched->ibs_conns);
+				if (waitqueue_active(&sched->ibs_waitq))
+					wake_up(&sched->ibs_waitq);
+			} else {
+				conn->ibc_scheduled = 0;
+			}
+
+			if (rc != 0) {
+				spin_unlock_irqrestore(&sched->ibs_lock, flags);
+				kiblnd_complete(&wc);
+
+				spin_lock_irqsave(&sched->ibs_lock, flags);
+			}
+
+			kiblnd_conn_decref(conn); /* ...drop my ref from above */
+			did_something = 1;
+		}
+
+		if (did_something)
+			continue;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
+		spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
+		schedule();
+		busy_loops = 0;
+
+		remove_wait_queue(&sched->ibs_waitq, &wait);
+		spin_lock_irqsave(&sched->ibs_lock, flags);
+	}
+
+	spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
+	kiblnd_thread_fini();
+	return 0;
+}
+
+int
+kiblnd_failover_thread(void *arg)
+{
+	rwlock_t *glock = &kiblnd_data.kib_global_lock;
+	kib_dev_t *dev;
+	wait_queue_t wait;
+	unsigned long flags;
+	int rc;
+
+	LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
+
+	cfs_block_allsigs();
+
+	init_waitqueue_entry(&wait, current);
+	write_lock_irqsave(glock, flags);
+
+	while (!kiblnd_data.kib_shutdown) {
+		int do_failover = 0;
+		int long_sleep;
+
+		list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
+				    ibd_fail_list) {
+			if (time_before(cfs_time_current(),
+					dev->ibd_next_failover))
+				continue;
+			do_failover = 1;
+			break;
+		}
+
+		if (do_failover) {
+			list_del_init(&dev->ibd_fail_list);
+			dev->ibd_failover = 1;
+			write_unlock_irqrestore(glock, flags);
+
+			rc = kiblnd_dev_failover(dev);
+
+			write_lock_irqsave(glock, flags);
+
+			LASSERT(dev->ibd_failover);
+			dev->ibd_failover = 0;
+			if (rc >= 0) { /* Device is OK or failover succeeded */
+				dev->ibd_next_failover = cfs_time_shift(3);
+				continue;
+			}
+
+			/* failed to failover, retry later */
+			dev->ibd_next_failover =
+				cfs_time_shift(min(dev->ibd_failed_failover, 10));
+			if (kiblnd_dev_can_failover(dev)) {
+				list_add_tail(&dev->ibd_fail_list,
+					      &kiblnd_data.kib_failed_devs);
+			}
+
+			continue;
+		}
+
+		/* long sleep if no more failovers are pending */
+		long_sleep = list_empty(&kiblnd_data.kib_failed_devs);
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
+		write_unlock_irqrestore(glock, flags);
+
+		rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
+						   cfs_time_seconds(1));
+		remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
+		write_lock_irqsave(glock, flags);
+
+		if (!long_sleep || rc != 0)
+			continue;
+
+		/* after a long sleep, routinely check all active devices;
+		 * we need this because, with no active connection on a
+		 * device and no SEND from the local end, we may listen on
+		 * the wrong HCA forever after a bonding failover */
+		list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
+			if (kiblnd_dev_can_failover(dev)) {
+				list_add_tail(&dev->ibd_fail_list,
+					      &kiblnd_data.kib_failed_devs);
+			}
+		}
+	}
+
+	write_unlock_irqrestore(glock, flags);
+
+	kiblnd_thread_fini();
+	return 0;
+}
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
new file mode 100644
index 000000000..eedf01afd
--- /dev/null
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -0,0 +1,230 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2012, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. + * + * lnet/klnds/o2iblnd/o2iblnd_modparams.c + * + * Author: Eric Barton + */ + +#include "o2iblnd.h" + +static int service = 987; +module_param(service, int, 0444); +MODULE_PARM_DESC(service, "service number (within RDMA_PS_TCP)"); + +static int cksum; +module_param(cksum, int, 0644); +MODULE_PARM_DESC(cksum, "set non-zero to enable message (not RDMA) checksums"); + +static int timeout = 50; +module_param(timeout, int, 0644); +MODULE_PARM_DESC(timeout, "timeout (seconds)"); + +/* Number of threads in each scheduler pool which is percpt, + * we will estimate reasonable value based on CPUs if it's set to zero. 
 */
+static int nscheds;
+module_param(nscheds, int, 0444);
+MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");
+
+/* NB: this value is shared by all CPTs, it can grow at runtime */
+static int ntx = 512;
+module_param(ntx, int, 0444);
+MODULE_PARM_DESC(ntx, "# of message descriptors allocated for each pool");
+
+/* NB: this value is shared by all CPTs */
+static int credits = 256;
+module_param(credits, int, 0444);
+MODULE_PARM_DESC(credits, "# concurrent sends");
+
+static int peer_credits = 8;
+module_param(peer_credits, int, 0444);
+MODULE_PARM_DESC(peer_credits, "# concurrent sends to 1 peer");
+
+static int peer_credits_hiw;
+module_param(peer_credits_hiw, int, 0444);
+MODULE_PARM_DESC(peer_credits_hiw, "how eagerly to return credits");
+
+static int peer_buffer_credits;
+module_param(peer_buffer_credits, int, 0444);
+MODULE_PARM_DESC(peer_buffer_credits, "# per-peer router buffer credits");
+
+static int peer_timeout = 180;
+module_param(peer_timeout, int, 0444);
+MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");
+
+static char *ipif_name = "ib0";
+module_param(ipif_name, charp, 0444);
+MODULE_PARM_DESC(ipif_name, "IPoIB interface name");
+
+static int retry_count = 5;
+module_param(retry_count, int, 0644);
+MODULE_PARM_DESC(retry_count, "Retransmissions when no ACK received");
+
+static int rnr_retry_count = 6;
+module_param(rnr_retry_count, int, 0644);
+MODULE_PARM_DESC(rnr_retry_count, "RNR retransmissions");
+
+static int keepalive = 100;
+module_param(keepalive, int, 0644);
+MODULE_PARM_DESC(keepalive, "Idle time in seconds before sending a keepalive");
+
+static int ib_mtu;
+module_param(ib_mtu, int, 0444);
+MODULE_PARM_DESC(ib_mtu, "IB MTU 256/512/1024/2048/4096");
+
+static int concurrent_sends;
+module_param(concurrent_sends, int, 0444);
+MODULE_PARM_DESC(concurrent_sends, "send work-queue sizing");
+
+static int map_on_demand;
+module_param(map_on_demand, int, 0444);
+MODULE_PARM_DESC(map_on_demand, "map on demand");
+
+/* NB: this value is shared by all CPTs, it can grow at runtime */
+static int fmr_pool_size = 512;
+module_param(fmr_pool_size, int, 0444);
+MODULE_PARM_DESC(fmr_pool_size, "size of fmr pool on each CPT (>= ntx / 4)");
+
+/* NB: this value is shared by all CPTs, it can grow at runtime */
+static int fmr_flush_trigger = 384;
+module_param(fmr_flush_trigger, int, 0444);
+MODULE_PARM_DESC(fmr_flush_trigger, "# dirty FMRs that triggers pool flush");
+
+static int fmr_cache = 1;
+module_param(fmr_cache, int, 0444);
+MODULE_PARM_DESC(fmr_cache, "non-zero to enable FMR caching");
+
+/* NB: this value is shared by all CPTs, it can grow at runtime */
+static int pmr_pool_size = 512;
+module_param(pmr_pool_size, int, 0444);
+MODULE_PARM_DESC(pmr_pool_size, "size of MR cache pmr pool on each CPT");
+
+/*
+ * 0: disable failover
+ * 1: enable failover if necessary
+ * 2: force to failover (for debug)
+ */
+static int dev_failover;
+module_param(dev_failover, int, 0444);
+MODULE_PARM_DESC(dev_failover, "HCA failover for bonding (0 off, 1 on, other values reserved)");
+
+static int require_privileged_port;
+module_param(require_privileged_port, int, 0644);
+MODULE_PARM_DESC(require_privileged_port, "require privileged port when accepting connection");
+
+static int use_privileged_port = 1;
+module_param(use_privileged_port, int, 0644);
+MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection");
+
+kib_tunables_t kiblnd_tunables = {
+	.kib_dev_failover = &dev_failover,
+	.kib_service = &service,
+	.kib_cksum = &cksum,
+	.kib_timeout = &timeout,
+	.kib_keepalive = &keepalive,
+	.kib_ntx = &ntx,
+	.kib_credits = &credits,
+	.kib_peertxcredits = &peer_credits,
+	.kib_peercredits_hiw = &peer_credits_hiw,
+	.kib_peerrtrcredits = &peer_buffer_credits,
+	.kib_peertimeout = &peer_timeout,
+	.kib_default_ipif = &ipif_name,
+	.kib_retry_count = &retry_count,
+	.kib_rnr_retry_count = &rnr_retry_count,
+	.kib_concurrent_sends = &concurrent_sends,
+	.kib_ib_mtu = &ib_mtu,
+	.kib_map_on_demand = &map_on_demand,
+	.kib_fmr_pool_size = &fmr_pool_size,
+	.kib_fmr_flush_trigger = &fmr_flush_trigger,
+	.kib_fmr_cache = &fmr_cache,
+	.kib_pmr_pool_size = &pmr_pool_size,
+	.kib_require_priv_port = &require_privileged_port,
+	.kib_use_priv_port = &use_privileged_port,
+	.kib_nscheds = &nscheds
+};
+
+int
+kiblnd_tunables_init(void)
+{
+	if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
+		CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
+		       *kiblnd_tunables.kib_ib_mtu);
+		return -EINVAL;
+	}
+
+	if (*kiblnd_tunables.kib_peertxcredits < IBLND_CREDITS_DEFAULT)
+		*kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_DEFAULT;
+
+	if (*kiblnd_tunables.kib_peertxcredits > IBLND_CREDITS_MAX)
+		*kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_MAX;
+
+	if (*kiblnd_tunables.kib_peertxcredits > *kiblnd_tunables.kib_credits)
+		*kiblnd_tunables.kib_peertxcredits = *kiblnd_tunables.kib_credits;
+
+	if (*kiblnd_tunables.kib_peercredits_hiw < *kiblnd_tunables.kib_peertxcredits / 2)
+		*kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits / 2;
+
+	if (*kiblnd_tunables.kib_peercredits_hiw >= *kiblnd_tunables.kib_peertxcredits)
+		*kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits - 1;
+
+	if (*kiblnd_tunables.kib_map_on_demand < 0 ||
+	    *kiblnd_tunables.kib_map_on_demand > IBLND_MAX_RDMA_FRAGS)
+		*kiblnd_tunables.kib_map_on_demand = 0; /* disable map-on-demand */
+
+	if (*kiblnd_tunables.kib_map_on_demand == 1)
+		*kiblnd_tunables.kib_map_on_demand = 2; /* it makes no sense to create a map for a single fragment */
+
+	if (*kiblnd_tunables.kib_concurrent_sends == 0) {
+		if (*kiblnd_tunables.kib_map_on_demand > 0 &&
+		    *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
+			*kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2;
+		else
+			*kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits);
+	}
+
+	if (*kiblnd_tunables.kib_concurrent_sends > *kiblnd_tunables.kib_peertxcredits * 2)
+		*kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits * 2;
+
+	if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits / 2)
+		*kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits / 2;
+
+	if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits) {
+		CWARN("Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n",
+		      *kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits);
+	}
+
+	return 0;
+}
-- cgit v1.2.3-54-g00ecf