From d635711daa98be86d4c7fd01499c34f566b54ccb Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Fri, 10 Jun 2016 05:30:17 -0300
Subject: Linux-libre 4.6.2-gnu

---
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c | 705 ++++++++++-----------
 1 file changed, 329 insertions(+), 376 deletions(-)

diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index cb74ae731..0d32e6541 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -42,15 +42,7 @@ #include <asm/page.h> #include "o2iblnd.h" -static lnd_t the_o2iblnd = { - .lnd_type = O2IBLND, - .lnd_startup = kiblnd_startup, - .lnd_shutdown = kiblnd_shutdown, - .lnd_ctl = kiblnd_ctl, - .lnd_query = kiblnd_query, - .lnd_send = kiblnd_send, - .lnd_recv = kiblnd_recv, -}; +static lnd_t the_o2iblnd; kib_data_t kiblnd_data; @@ -63,7 +55,7 @@ static __u32 kiblnd_cksum(void *ptr, int nob) sum = ((sum << 1) | (sum >> 31)) + *c++; /* ensure I don't return 0 (== no checksum) */ - return (sum == 0) ? 1 : sum; + return !sum ? 1 : sum; } static char *kiblnd_msgtype2str(int type) @@ -145,7 +137,7 @@ static int kiblnd_unpack_rd(kib_msg_t *msg, int flip) int i; LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ || - msg->ibm_type == IBLND_MSG_PUT_ACK); + msg->ibm_type == IBLND_MSG_PUT_ACK); rd = msg->ibm_type == IBLND_MSG_GET_REQ ? &msg->ibm_u.get.ibgm_rd : @@ -189,8 +181,10 @@ void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version, { kib_net_t *net = ni->ni_data; - /* CAVEAT EMPTOR! all message fields not set here should have been - * initialised previously. */ + /* + * CAVEAT EMPTOR! all message fields not set here should have been + * initialised previously. + */ msg->ibm_magic = IBLND_MSG_MAGIC; msg->ibm_version = version; /* ibm_type */ @@ -249,11 +243,13 @@ int kiblnd_unpack_msg(kib_msg_t *msg, int nob) return -EPROTO; } - /* checksum must be computed with ibm_cksum zero and BEFORE anything - * gets flipped */ + /* + * checksum must be computed with ibm_cksum zero and BEFORE anything + * gets flipped + */ msg_cksum = flip ?
__swab32(msg->ibm_cksum) : msg->ibm_cksum; msg->ibm_cksum = 0; - if (msg_cksum != 0 && + if (msg_cksum && msg_cksum != kiblnd_cksum(msg, msg_nob)) { CERROR("Bad checksum\n"); return -EPROTO; @@ -326,21 +322,21 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) int cpt = lnet_cpt_of_nid(nid); unsigned long flags; - LASSERT(net != NULL); + LASSERT(net); LASSERT(nid != LNET_NID_ANY); LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer)); - if (peer == NULL) { + if (!peer) { CERROR("Cannot allocate peer\n"); return -ENOMEM; } - memset(peer, 0, sizeof(*peer)); /* zero flags etc */ - peer->ibp_ni = ni; peer->ibp_nid = nid; peer->ibp_error = 0; peer->ibp_last_alive = 0; + peer->ibp_max_frags = IBLND_CFG_RDMA_FRAGS; + peer->ibp_queue_depth = *kiblnd_tunables.kib_peertxcredits; atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */ INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */ @@ -350,7 +346,7 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); /* always called with a ref on ni, which prevents ni being shutdown */ - LASSERT(net->ibn_shutdown == 0); + LASSERT(!net->ibn_shutdown); /* npeers only grows with the global lock held */ atomic_inc(&net->ibn_npeers); @@ -365,38 +361,36 @@ void kiblnd_destroy_peer(kib_peer_t *peer) { kib_net_t *net = peer->ibp_ni->ni_data; - LASSERT(net != NULL); - LASSERT(atomic_read(&peer->ibp_refcount) == 0); + LASSERT(net); + LASSERT(!atomic_read(&peer->ibp_refcount)); LASSERT(!kiblnd_peer_active(peer)); - LASSERT(peer->ibp_connecting == 0); - LASSERT(peer->ibp_accepting == 0); - LASSERT(list_empty(&peer->ibp_conns)); + LASSERT(kiblnd_peer_idle(peer)); LASSERT(list_empty(&peer->ibp_tx_queue)); LIBCFS_FREE(peer, sizeof(*peer)); - /* NB a peer's connections keep a reference on their peer until + /* + * NB a peer's connections keep a reference on their peer until * they are destroyed, so we can be assured that _all_ state to do * with this peer has been cleaned up when its refcount drops to - * zero. */ + * zero. + */ atomic_dec(&net->ibn_npeers); } kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid) { - /* the caller is responsible for accounting the additional reference - * that this creates */ + /* + * the caller is responsible for accounting the additional reference + * that this creates + */ struct list_head *peer_list = kiblnd_nid2peerlist(nid); struct list_head *tmp; kib_peer_t *peer; list_for_each(tmp, peer_list) { - peer = list_entry(tmp, kib_peer_t, ibp_list); - - LASSERT(peer->ibp_connecting > 0 || /* creating conns */ - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); /* active conn */ + LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_nid != nid) continue; @@ -431,13 +425,9 @@ static int kiblnd_get_peer_info(lnet_ni_t *ni, int index, read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { - list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(peer->ibp_connecting > 0 || - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); + LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_ni != ni) continue; @@ -474,8 +464,10 @@ static void kiblnd_del_peer_locked(kib_peer_t *peer) } /* NB closing peer's last conn unlinked it. */ } - /* NB peer now unlinked; might even be freed if the peer table had the - * last ref on it. 
*/ + /* + * NB peer now unlinked; might even be freed if the peer table had the + * last ref on it. + */ } static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) @@ -493,7 +485,8 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); if (nid != LNET_NID_ANY) { - lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; + lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; + hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; } else { lo = 0; hi = kiblnd_data.kib_peer_hash_size - 1; @@ -502,9 +495,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) for (i = lo; i <= hi; i++) { list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(peer->ibp_connecting > 0 || - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); + LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_ni != ni) continue; @@ -516,7 +507,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) LASSERT(list_empty(&peer->ibp_conns)); list_splice_init(&peer->ibp_tx_queue, - &zombies); + &zombies); } kiblnd_del_peer_locked(peer); @@ -544,11 +535,8 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) { list_for_each(ptmp, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(peer->ibp_connecting > 0 || - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); + LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_ni != ni) continue; @@ -558,7 +546,7 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) continue; conn = list_entry(ctmp, kib_conn_t, - ibc_list); + ibc_list); kiblnd_conn_addref(conn); read_unlock_irqrestore( &kiblnd_data.kib_global_lock, @@ -597,12 +585,12 @@ static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid) int mtu; /* XXX There is no path record for iWARP, set by netdev->change_mtu? */ - if (cmid->route.path_rec == NULL) + if (!cmid->route.path_rec) return; mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu); LASSERT(mtu >= 0); - if (mtu != 0) + if (mtu) cmid->route.path_rec->mtu = mtu; } @@ -619,13 +607,13 @@ static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) return 0; mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt); - if (mask == NULL) + if (!mask) return 0; /* hash NID to CPU id in this partition... */ off = do_div(nid, cpumask_weight(mask)); for_each_cpu(i, mask) { - if (off-- == 0) + if (!off--) return i % vectors; } @@ -634,15 +622,17 @@ static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) } kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, - int state, int version) + int state, int version) { - /* CAVEAT EMPTOR: + /* + * CAVEAT EMPTOR: * If the new conn is created successfully it takes over the caller's * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself * is destroyed. On failure, the caller's ref on 'peer' remains and * she must dispose of 'cmid'. (Actually I'd block forever if I tried * to destroy 'cmid' here since I'm called from the CM which still has - * its ref on 'cmid'). */ + * its ref on 'cmid'). 
+ */ rwlock_t *glock = &kiblnd_data.kib_global_lock; kib_net_t *net = peer->ibp_ni->ni_data; kib_dev_t *dev; @@ -656,7 +646,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, int rc; int i; - LASSERT(net != NULL); + LASSERT(net); LASSERT(!in_interrupt()); dev = net->ibn_dev; @@ -668,14 +658,14 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt, sizeof(*init_qp_attr)); - if (init_qp_attr == NULL) { + if (!init_qp_attr) { CERROR("Can't allocate qp_attr for %s\n", libcfs_nid2str(peer->ibp_nid)); goto failed_0; } LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn)); - if (conn == NULL) { + if (!conn) { CERROR("Can't allocate connection for %s\n", libcfs_nid2str(peer->ibp_nid)); goto failed_1; @@ -686,6 +676,8 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, conn->ibc_peer = peer; /* I take the caller's ref */ cmid->context = conn; /* for future CM callbacks */ conn->ibc_cmid = cmid; + conn->ibc_max_frags = peer->ibp_max_frags; + conn->ibc_queue_depth = peer->ibp_queue_depth; INIT_LIST_HEAD(&conn->ibc_early_rxs); INIT_LIST_HEAD(&conn->ibc_tx_noops); @@ -697,7 +689,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt, sizeof(*conn->ibc_connvars)); - if (conn->ibc_connvars == NULL) { + if (!conn->ibc_connvars) { CERROR("Can't allocate in-progress connection state\n"); goto failed_2; } @@ -731,42 +723,42 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, write_unlock_irqrestore(glock, flags); LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt, - IBLND_RX_MSGS(version) * sizeof(kib_rx_t)); - if (conn->ibc_rxs == NULL) { + IBLND_RX_MSGS(conn) * sizeof(kib_rx_t)); + if (!conn->ibc_rxs) { CERROR("Cannot allocate RX buffers\n"); goto failed_2; } rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt, - IBLND_RX_MSG_PAGES(version)); - if (rc != 0) + IBLND_RX_MSG_PAGES(conn)); + if (rc) goto failed_2; kiblnd_map_rx_descs(conn); - cq_attr.cqe = IBLND_CQ_ENTRIES(version); + cq_attr.cqe = IBLND_CQ_ENTRIES(conn); cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt); cq = ib_create_cq(cmid->device, kiblnd_cq_completion, kiblnd_cq_event, conn, &cq_attr); if (IS_ERR(cq)) { - CERROR("Can't create CQ: %ld, cqe: %d\n", - PTR_ERR(cq), IBLND_CQ_ENTRIES(version)); + CERROR("Failed to create CQ with %d CQEs: %ld\n", + IBLND_CQ_ENTRIES(conn), PTR_ERR(cq)); goto failed_2; } conn->ibc_cq = cq; rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); - if (rc != 0) { - CERROR("Can't request completion notificiation: %d\n", rc); + if (rc) { + CERROR("Can't request completion notification: %d\n", rc); goto failed_2; } init_qp_attr->event_handler = kiblnd_qp_event; init_qp_attr->qp_context = conn; - init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(version); - init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(version); + init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(conn); + init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn); init_qp_attr->cap.max_send_sge = 1; init_qp_attr->cap.max_recv_sge = 1; init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR; @@ -777,7 +769,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, conn->ibc_sched = sched; rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr); - if (rc != 0) { + if (rc) { CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n", rc, init_qp_attr->cap.max_send_wr, init_qp_attr->cap.max_recv_wr); @@ -787,33 
+779,37 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr)); /* 1 ref for caller and each rxmsg */ - atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version)); - conn->ibc_nrx = IBLND_RX_MSGS(version); + atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(conn)); + conn->ibc_nrx = IBLND_RX_MSGS(conn); /* post receives */ - for (i = 0; i < IBLND_RX_MSGS(version); i++) { + for (i = 0; i < IBLND_RX_MSGS(conn); i++) { rc = kiblnd_post_rx(&conn->ibc_rxs[i], IBLND_POSTRX_NO_CREDIT); - if (rc != 0) { + if (rc) { CERROR("Can't post rxmsg: %d\n", rc); /* Make posted receives complete */ kiblnd_abort_receives(conn); - /* correct # of posted buffers - * NB locking needed now I'm racing with completion */ + /* + * correct # of posted buffers + * NB locking needed now I'm racing with completion + */ spin_lock_irqsave(&sched->ibs_lock, flags); - conn->ibc_nrx -= IBLND_RX_MSGS(version) - i; + conn->ibc_nrx -= IBLND_RX_MSGS(conn) - i; spin_unlock_irqrestore(&sched->ibs_lock, flags); - /* cmid will be destroyed by CM(ofed) after cm_callback + /* + * cmid will be destroyed by CM(ofed) after cm_callback * returned, so we can't refer it anymore - * (by kiblnd_connd()->kiblnd_destroy_conn) */ + * (by kiblnd_connd()->kiblnd_destroy_conn) + */ rdma_destroy_qp(conn->ibc_cmid); conn->ibc_cmid = NULL; /* Drop my own and unused rxbuffer refcounts */ - while (i++ <= IBLND_RX_MSGS(version)) + while (i++ <= IBLND_RX_MSGS(conn)) kiblnd_conn_decref(conn); return NULL; @@ -822,7 +818,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, /* Init successful! */ LASSERT(state == IBLND_CONN_ACTIVE_CONNECT || - state == IBLND_CONN_PASSIVE_WAIT); + state == IBLND_CONN_PASSIVE_WAIT); conn->ibc_state = state; /* 1 more conn */ @@ -830,29 +826,29 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, return conn; failed_2: - kiblnd_destroy_conn(conn); + kiblnd_destroy_conn(conn, true); failed_1: LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr)); failed_0: return NULL; } -void kiblnd_destroy_conn(kib_conn_t *conn) +void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn) { struct rdma_cm_id *cmid = conn->ibc_cmid; kib_peer_t *peer = conn->ibc_peer; int rc; LASSERT(!in_interrupt()); - LASSERT(atomic_read(&conn->ibc_refcount) == 0); + LASSERT(!atomic_read(&conn->ibc_refcount)); LASSERT(list_empty(&conn->ibc_early_rxs)); LASSERT(list_empty(&conn->ibc_tx_noops)); LASSERT(list_empty(&conn->ibc_tx_queue)); LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd)); LASSERT(list_empty(&conn->ibc_tx_queue_nocred)); LASSERT(list_empty(&conn->ibc_active_txs)); - LASSERT(conn->ibc_noops_posted == 0); - LASSERT(conn->ibc_nsends_posted == 0); + LASSERT(!conn->ibc_noops_posted); + LASSERT(!conn->ibc_nsends_posted); switch (conn->ibc_state) { default: @@ -861,7 +857,7 @@ void kiblnd_destroy_conn(kib_conn_t *conn) case IBLND_CONN_DISCONNECTED: /* connvars should have been freed already */ - LASSERT(conn->ibc_connvars == NULL); + LASSERT(!conn->ibc_connvars); break; case IBLND_CONN_INIT: @@ -869,28 +865,27 @@ void kiblnd_destroy_conn(kib_conn_t *conn) } /* conn->ibc_cmid might be destroyed by CM already */ - if (cmid != NULL && cmid->qp != NULL) + if (cmid && cmid->qp) rdma_destroy_qp(cmid); - if (conn->ibc_cq != NULL) { + if (conn->ibc_cq) { rc = ib_destroy_cq(conn->ibc_cq); - if (rc != 0) + if (rc) CWARN("Error destroying CQ: %d\n", rc); } - if (conn->ibc_rx_pages != NULL) + if (conn->ibc_rx_pages) kiblnd_unmap_rx_descs(conn); - if 
(conn->ibc_rxs != NULL) { + if (conn->ibc_rxs) { LIBCFS_FREE(conn->ibc_rxs, - IBLND_RX_MSGS(conn->ibc_version) - * sizeof(kib_rx_t)); + IBLND_RX_MSGS(conn) * sizeof(kib_rx_t)); } - if (conn->ibc_connvars != NULL) + if (conn->ibc_connvars) LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars)); - if (conn->ibc_hdev != NULL) + if (conn->ibc_hdev) kiblnd_hdev_decref(conn->ibc_hdev); /* See CAVEAT EMPTOR above in kiblnd_create_conn */ @@ -927,7 +922,7 @@ int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why) } int kiblnd_close_stale_conns_locked(kib_peer_t *peer, - int version, __u64 incarnation) + int version, __u64 incarnation) { kib_conn_t *conn; struct list_head *ctmp; @@ -967,20 +962,18 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - if (nid != LNET_NID_ANY) - lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; - else { + if (nid != LNET_NID_ANY) { + lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; + hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers; + } else { lo = 0; hi = kiblnd_data.kib_peer_hash_size - 1; } for (i = lo; i <= hi; i++) { list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) { - peer = list_entry(ptmp, kib_peer_t, ibp_list); - LASSERT(peer->ibp_connecting > 0 || - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); + LASSERT(!kiblnd_peer_idle(peer)); if (peer->ibp_ni != ni) continue; @@ -998,10 +991,10 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) if (nid == LNET_NID_ANY) return 0; - return (count == 0) ? -ENOENT : 0; + return !count ? -ENOENT : 0; } -int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) +static int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) { struct libcfs_ioctl_data *data = arg; int rc = -EINVAL; @@ -1027,14 +1020,14 @@ int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) rc = 0; conn = kiblnd_get_conn_by_idx(ni, data->ioc_count); - if (conn == NULL) { + if (!conn) { rc = -ENOENT; break; } - LASSERT(conn->ibc_cmid != NULL); + LASSERT(conn->ibc_cmid); data->ioc_nid = conn->ibc_peer->ibp_nid; - if (conn->ibc_cmid->route.path_rec == NULL) + if (!conn->ibc_cmid->route.path_rec) data->ioc_u32[0] = 0; /* iWarp has no path MTU */ else data->ioc_u32[0] = @@ -1054,7 +1047,7 @@ int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) return rc; } -void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) +static void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) { unsigned long last_alive = 0; unsigned long now = cfs_time_current(); @@ -1065,21 +1058,19 @@ void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) read_lock_irqsave(glock, flags); peer = kiblnd_find_peer_locked(nid); - if (peer != NULL) { - LASSERT(peer->ibp_connecting > 0 || /* creating conns */ - peer->ibp_accepting > 0 || - !list_empty(&peer->ibp_conns)); /* active conn */ + if (peer) last_alive = peer->ibp_last_alive; - } read_unlock_irqrestore(glock, flags); - if (last_alive != 0) + if (last_alive) *when = last_alive; - /* peer is not persistent in hash, trigger peer creation - * and connection establishment with a NULL tx */ - if (peer == NULL) + /* + * peer is not persistent in hash, trigger peer creation + * and connection establishment with a NULL tx + */ + if (!peer) kiblnd_launch_tx(ni, NULL, nid); CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n", @@ -1087,13 +1078,13 @@ void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) last_alive ? 
cfs_duration_sec(now - last_alive) : -1); } -void kiblnd_free_pages(kib_pages_t *p) +static void kiblnd_free_pages(kib_pages_t *p) { int npages = p->ibp_npages; int i; for (i = 0; i < npages; i++) { - if (p->ibp_pages[i] != NULL) + if (p->ibp_pages[i]) __free_page(p->ibp_pages[i]); } @@ -1107,7 +1098,7 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt, offsetof(kib_pages_t, ibp_pages[npages])); - if (p == NULL) { + if (!p) { CERROR("Can't allocate descriptor for %d pages\n", npages); return -ENOMEM; } @@ -1119,7 +1110,7 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) p->ibp_pages[i] = alloc_pages_node( cfs_cpt_spread_node(lnet_cpt_table(), cpt), GFP_NOFS, 0); - if (p->ibp_pages[i] == NULL) { + if (!p->ibp_pages[i]) { CERROR("Can't allocate page %d of %d\n", i, npages); kiblnd_free_pages(p); return -ENOMEM; @@ -1135,10 +1126,10 @@ void kiblnd_unmap_rx_descs(kib_conn_t *conn) kib_rx_t *rx; int i; - LASSERT(conn->ibc_rxs != NULL); - LASSERT(conn->ibc_hdev != NULL); + LASSERT(conn->ibc_rxs); + LASSERT(conn->ibc_hdev); - for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) { + for (i = 0; i < IBLND_RX_MSGS(conn); i++) { rx = &conn->ibc_rxs[i]; LASSERT(rx->rx_nob >= 0); /* not posted */ @@ -1162,7 +1153,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn) int ipg; int i; - for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) { + for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn); i++) { pg = conn->ibc_rx_pages->ibp_pages[ipg]; rx = &conn->ibc_rxs[i]; @@ -1174,7 +1165,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn) IBLND_MSG_SIZE, DMA_FROM_DEVICE); LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev, - rx->rx_msgaddr)); + rx->rx_msgaddr)); KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr); CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n", @@ -1187,7 +1178,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn) if (pg_off == PAGE_SIZE) { pg_off = 0; ipg++; - LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version)); + LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn)); } } } @@ -1198,9 +1189,9 @@ static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo) kib_tx_t *tx; int i; - LASSERT(tpo->tpo_pool.po_allocated == 0); + LASSERT(!tpo->tpo_pool.po_allocated); - if (hdev == NULL) + if (!hdev) return; for (i = 0; i < tpo->tpo_pool.po_size; i++) { @@ -1224,9 +1215,10 @@ static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev) read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); while (dev->ibd_failover) { read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - if (i++ % 50 == 0) + if (!(i++ % 50)) CDEBUG(D_NET, "%s: Wait for failover\n", dev->ibd_ifname); + set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(cfs_time_seconds(1) / 100); read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -1252,7 +1244,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) int ipage; int i; - LASSERT(net != NULL); + LASSERT(net); dev = net->ibn_dev; @@ -1260,7 +1252,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) CLASSERT(IBLND_MSG_SIZE <= PAGE_SIZE); /* No fancy arithmetic when we do the buffer calculations */ - CLASSERT(PAGE_SIZE % IBLND_MSG_SIZE == 0); + CLASSERT(!(PAGE_SIZE % IBLND_MSG_SIZE)); tpo->tpo_hdev = kiblnd_current_hdev(dev); @@ -1275,7 +1267,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) tpo->tpo_hdev->ibh_ibdev, tx->tx_msg, IBLND_MSG_SIZE, DMA_TO_DEVICE); LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev, - tx->tx_msgaddr)); + tx->tx_msgaddr)); KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, 
tx->tx_msgaddr); list_add(&tx->tx_list, &pool->po_free_list); @@ -1291,68 +1283,32 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) } } -struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size) +struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd, + int negotiated_nfrags) { - __u64 index; - - LASSERT(hdev->ibh_mrs[0] != NULL); - - if (hdev->ibh_nmrs == 1) - return hdev->ibh_mrs[0]; - - index = addr >> hdev->ibh_mr_shift; + __u16 nfrags = (negotiated_nfrags != -1) ? + negotiated_nfrags : *kiblnd_tunables.kib_map_on_demand; - if (index < hdev->ibh_nmrs && - index == ((addr + size - 1) >> hdev->ibh_mr_shift)) - return hdev->ibh_mrs[index]; - - return NULL; -} - -struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd) -{ - struct ib_mr *prev_mr; - struct ib_mr *mr; - int i; - - LASSERT(hdev->ibh_mrs[0] != NULL); + LASSERT(hdev->ibh_mrs); if (*kiblnd_tunables.kib_map_on_demand > 0 && - *kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags) + nfrags <= rd->rd_nfrags) return NULL; - if (hdev->ibh_nmrs == 1) - return hdev->ibh_mrs[0]; - - for (i = 0, mr = prev_mr = NULL; - i < rd->rd_nfrags; i++) { - mr = kiblnd_find_dma_mr(hdev, - rd->rd_frags[i].rf_addr, - rd->rd_frags[i].rf_nob); - if (prev_mr == NULL) - prev_mr = mr; - - if (mr == NULL || prev_mr != mr) { - /* Can't covered by one single MR */ - mr = NULL; - break; - } - } - - return mr; + return hdev->ibh_mrs; } static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool) { - LASSERT(pool->fpo_map_count == 0); + LASSERT(!pool->fpo_map_count); - if (pool->fpo_fmr_pool != NULL) + if (pool->fpo_fmr_pool) ib_destroy_fmr_pool(pool->fpo_fmr_pool); - if (pool->fpo_hdev != NULL) + if (pool->fpo_hdev) kiblnd_hdev_decref(pool->fpo_hdev); - LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t)); + LIBCFS_FREE(pool, sizeof(*pool)); } static void kiblnd_destroy_fmr_pool_list(struct list_head *head) @@ -1387,7 +1343,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_dev_t *dev = fps->fps_net->ibn_dev; kib_fmr_pool_t *fpo; struct ib_fmr_pool_param param = { - .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE, + .max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE, .page_shift = PAGE_SHIFT, .access = (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE), @@ -1399,7 +1355,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, int rc; LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo)); - if (fpo == NULL) + if (!fpo) return -ENOMEM; fpo->fpo_hdev = kiblnd_current_hdev(dev); @@ -1410,7 +1366,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, CERROR("Failed to create FMR pool: %d\n", rc); kiblnd_hdev_decref(fpo->fpo_hdev); - LIBCFS_FREE(fpo, sizeof(kib_fmr_pool_t)); + LIBCFS_FREE(fpo, sizeof(*fpo)); return rc; } @@ -1424,7 +1380,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, struct list_head *zombies) { - if (fps->fps_net == NULL) /* intialized? */ + if (!fps->fps_net) /* intialized? 
*/ return; spin_lock(&fps->fps_lock); @@ -1434,7 +1390,7 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, kib_fmr_pool_t, fpo_list); fpo->fpo_failed = 1; list_del(&fpo->fpo_list); - if (fpo->fpo_map_count == 0) + if (!fpo->fpo_map_count) list_add(&fpo->fpo_list, zombies); else list_add(&fpo->fpo_list, &fps->fps_failed_pool_list); @@ -1445,7 +1401,7 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps) { - if (fps->fps_net != NULL) { /* initialized? */ + if (fps->fps_net) { /* initialized? */ kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list); kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list); } @@ -1458,7 +1414,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, kib_fmr_pool_t *fpo; int rc; - memset(fps, 0, sizeof(kib_fmr_poolset_t)); + memset(fps, 0, sizeof(*fps)); fps->fps_net = net; fps->fps_cpt = cpt; @@ -1469,7 +1425,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, INIT_LIST_HEAD(&fps->fps_failed_pool_list); rc = kiblnd_create_fmr_pool(fps, &fpo); - if (rc == 0) + if (!rc) list_add_tail(&fpo->fpo_list, &fps->fps_pool_list); return rc; @@ -1477,7 +1433,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now) { - if (fpo->fpo_map_count != 0) /* still in use */ + if (fpo->fpo_map_count) /* still in use */ return 0; if (fpo->fpo_failed) return 1; @@ -1494,11 +1450,11 @@ void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status) int rc; rc = ib_fmr_pool_unmap(fmr->fmr_pfmr); - LASSERT(rc == 0); + LASSERT(!rc); - if (status != 0) { + if (status) { rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool); - LASSERT(rc == 0); + LASSERT(!rc); } fmr->fmr_pool = NULL; @@ -1563,11 +1519,9 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, if (fps->fps_increasing) { spin_unlock(&fps->fps_lock); - CDEBUG(D_NET, - "Another thread is allocating new FMR pool, waiting for her to complete\n"); + CDEBUG(D_NET, "Another thread is allocating new FMR pool, waiting for her to complete\n"); schedule(); goto again; - } if (time_before(cfs_time_current(), fps->fps_next_retry)) { @@ -1583,7 +1537,7 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, rc = kiblnd_create_fmr_pool(fps, &fpo); spin_lock(&fps->fps_lock); fps->fps_increasing = 0; - if (rc == 0) { + if (!rc) { fps->fps_version++; list_add_tail(&fpo->fpo_list, &fps->fps_pool_list); } else { @@ -1597,7 +1551,7 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, static void kiblnd_fini_pool(kib_pool_t *pool) { LASSERT(list_empty(&pool->po_free_list)); - LASSERT(pool->po_allocated == 0); + LASSERT(!pool->po_allocated); CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name); } @@ -1606,7 +1560,7 @@ static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size) { CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name); - memset(pool, 0, sizeof(kib_pool_t)); + memset(pool, 0, sizeof(*pool)); INIT_LIST_HEAD(&pool->po_free_list); pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE); pool->po_owner = ps; @@ -1621,14 +1575,14 @@ static void kiblnd_destroy_pool_list(struct list_head *head) pool = list_entry(head->next, kib_pool_t, po_list); list_del(&pool->po_list); - LASSERT(pool->po_owner != NULL); + LASSERT(pool->po_owner); pool->po_owner->ps_pool_destroy(pool); } } static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies) { - if (ps->ps_net == 
NULL) /* intialized? */ + if (!ps->ps_net) /* intialized? */ return; spin_lock(&ps->ps_lock); @@ -1637,7 +1591,7 @@ static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies) kib_pool_t, po_list); po->po_failed = 1; list_del(&po->po_list); - if (po->po_allocated == 0) + if (!po->po_allocated) list_add(&po->po_list, zombies); else list_add(&po->po_list, &ps->ps_failed_pool_list); @@ -1647,7 +1601,7 @@ static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies) static void kiblnd_fini_poolset(kib_poolset_t *ps) { - if (ps->ps_net != NULL) { /* initialized? */ + if (ps->ps_net) { /* initialized? */ kiblnd_destroy_pool_list(&ps->ps_failed_pool_list); kiblnd_destroy_pool_list(&ps->ps_pool_list); } @@ -1663,7 +1617,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt, kib_pool_t *pool; int rc; - memset(ps, 0, sizeof(kib_poolset_t)); + memset(ps, 0, sizeof(*ps)); ps->ps_cpt = cpt; ps->ps_net = net; @@ -1680,7 +1634,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt, INIT_LIST_HEAD(&ps->ps_failed_pool_list); rc = ps->ps_pool_create(ps, size, &pool); - if (rc == 0) + if (!rc) list_add(&pool->po_list, &ps->ps_pool_list); else CERROR("Failed to create the first pool for %s\n", ps->ps_name); @@ -1690,7 +1644,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt, static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now) { - if (pool->po_allocated != 0) /* still in use */ + if (pool->po_allocated) /* still in use */ return 0; if (pool->po_failed) return 1; @@ -1706,7 +1660,7 @@ void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node) spin_lock(&ps->ps_lock); - if (ps->ps_node_fini != NULL) + if (ps->ps_node_fini) ps->ps_node_fini(pool, node); LASSERT(pool->po_allocated > 0); @@ -1731,6 +1685,9 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) { struct list_head *node; kib_pool_t *pool; + unsigned int interval = 1; + unsigned long time_before; + unsigned int trips = 0; int rc; again: @@ -1744,7 +1701,7 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) node = pool->po_free_list.next; list_del(node); - if (ps->ps_node_init != NULL) { + if (ps->ps_node_init) { /* still hold the lock */ ps->ps_node_init(pool, node); } @@ -1756,9 +1713,15 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) if (ps->ps_increasing) { /* another thread is allocating a new pool */ spin_unlock(&ps->ps_lock); - CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting for her to complete\n", - ps->ps_name); - schedule(); + trips++; + CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting %d HZs for her to complete. 
trips = %d\n", + ps->ps_name, interval, trips); + + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(interval); + if (interval < cfs_time_seconds(1)) + interval *= 2; + goto again; } @@ -1772,12 +1735,14 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) spin_unlock(&ps->ps_lock); CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name); - + time_before = cfs_time_current(); rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool); + CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete", + cfs_time_current() - time_before); spin_lock(&ps->ps_lock); ps->ps_increasing = 0; - if (rc == 0) { + if (!rc) { list_add_tail(&pool->po_list, &ps->ps_pool_list); } else { ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY); @@ -1794,37 +1759,37 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool) kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool); int i; - LASSERT(pool->po_allocated == 0); + LASSERT(!pool->po_allocated); - if (tpo->tpo_tx_pages != NULL) { + if (tpo->tpo_tx_pages) { kiblnd_unmap_tx_pool(tpo); kiblnd_free_pages(tpo->tpo_tx_pages); } - if (tpo->tpo_tx_descs == NULL) + if (!tpo->tpo_tx_descs) goto out; for (i = 0; i < pool->po_size; i++) { kib_tx_t *tx = &tpo->tpo_tx_descs[i]; list_del(&tx->tx_list); - if (tx->tx_pages != NULL) + if (tx->tx_pages) LIBCFS_FREE(tx->tx_pages, LNET_MAX_IOV * sizeof(*tx->tx_pages)); - if (tx->tx_frags != NULL) + if (tx->tx_frags) LIBCFS_FREE(tx->tx_frags, IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags)); - if (tx->tx_wrq != NULL) + if (tx->tx_wrq) LIBCFS_FREE(tx->tx_wrq, (1 + IBLND_MAX_RDMA_FRAGS) * sizeof(*tx->tx_wrq)); - if (tx->tx_sge != NULL) + if (tx->tx_sge) LIBCFS_FREE(tx->tx_sge, (1 + IBLND_MAX_RDMA_FRAGS) * sizeof(*tx->tx_sge)); - if (tx->tx_rd != NULL) + if (tx->tx_rd) LIBCFS_FREE(tx->tx_rd, offsetof(kib_rdma_desc_t, rd_frags[IBLND_MAX_RDMA_FRAGS])); @@ -1834,7 +1799,7 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool) pool->po_size * sizeof(kib_tx_t)); out: kiblnd_fini_pool(pool); - LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t)); + LIBCFS_FREE(tpo, sizeof(*tpo)); } static int kiblnd_tx_pool_size(int ncpts) @@ -1853,7 +1818,7 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_tx_pool_t *tpo; LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo)); - if (tpo == NULL) { + if (!tpo) { CERROR("Failed to allocate TX pool\n"); return -ENOMEM; } @@ -1864,15 +1829,15 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, tpo->tpo_tx_pages = NULL; npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE; - if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) { + if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg)) { CERROR("Can't allocate tx pages: %d\n", npg); - LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t)); + LIBCFS_FREE(tpo, sizeof(*tpo)); return -ENOMEM; } LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt, size * sizeof(kib_tx_t)); - if (tpo->tpo_tx_descs == NULL) { + if (!tpo->tpo_tx_descs) { CERROR("Can't allocate %d tx descriptors\n", size); ps->ps_pool_destroy(pool); return -ENOMEM; @@ -1884,17 +1849,17 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_tx_t *tx = &tpo->tpo_tx_descs[i]; tx->tx_pool = tpo; - if (ps->ps_net->ibn_fmr_ps != NULL) { + if (ps->ps_net->ibn_fmr_ps) { LIBCFS_CPT_ALLOC(tx->tx_pages, lnet_cpt_table(), ps->ps_cpt, LNET_MAX_IOV * sizeof(*tx->tx_pages)); - if (tx->tx_pages == NULL) + if (!tx->tx_pages) break; } LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt, IBLND_MAX_RDMA_FRAGS * 
sizeof(*tx->tx_frags)); - if (tx->tx_frags == NULL) + if (!tx->tx_frags) break; sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS); @@ -1902,19 +1867,19 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt, (1 + IBLND_MAX_RDMA_FRAGS) * sizeof(*tx->tx_wrq)); - if (tx->tx_wrq == NULL) + if (!tx->tx_wrq) break; LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt, (1 + IBLND_MAX_RDMA_FRAGS) * sizeof(*tx->tx_sge)); - if (tx->tx_sge == NULL) + if (!tx->tx_sge) break; LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt, offsetof(kib_rdma_desc_t, rd_frags[IBLND_MAX_RDMA_FRAGS])); - if (tx->tx_rd == NULL) + if (!tx->tx_rd) break; } @@ -1945,23 +1910,23 @@ static void kiblnd_net_fini_pools(kib_net_t *net) kib_tx_poolset_t *tps; kib_fmr_poolset_t *fps; - if (net->ibn_tx_ps != NULL) { + if (net->ibn_tx_ps) { tps = net->ibn_tx_ps[i]; kiblnd_fini_poolset(&tps->tps_poolset); } - if (net->ibn_fmr_ps != NULL) { + if (net->ibn_fmr_ps) { fps = net->ibn_fmr_ps[i]; kiblnd_fini_fmr_poolset(fps); } } - if (net->ibn_tx_ps != NULL) { + if (net->ibn_tx_ps) { cfs_percpt_free(net->ibn_tx_ps); net->ibn_tx_ps = NULL; } - if (net->ibn_fmr_ps != NULL) { + if (net->ibn_fmr_ps) { cfs_percpt_free(net->ibn_fmr_ps); net->ibn_fmr_ps = NULL; } @@ -1975,8 +1940,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) int i; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - if (*kiblnd_tunables.kib_map_on_demand == 0 && - net->ibn_dev->ibd_hdev->ibh_nmrs == 1) { + if (!*kiblnd_tunables.kib_map_on_demand) { read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); goto create_tx_pool; } @@ -1996,7 +1960,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) * TX pool must be created later than FMR, see LU-2268 * for details */ - LASSERT(net->ibn_tx_ps == NULL); + LASSERT(!net->ibn_tx_ps); /* * premapping can fail if ibd_nmr > 1, so we always create @@ -2005,56 +1969,45 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(), sizeof(kib_fmr_poolset_t)); - if (net->ibn_fmr_ps == NULL) { + if (!net->ibn_fmr_ps) { CERROR("Failed to allocate FMR pool array\n"); rc = -ENOMEM; goto failed; } for (i = 0; i < ncpts; i++) { - cpt = (cpts == NULL) ? i : cpts[i]; + cpt = !cpts ? i : cpts[i]; rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net, kiblnd_fmr_pool_size(ncpts), kiblnd_fmr_flush_trigger(ncpts)); - if (rc == -ENOSYS && i == 0) /* no FMR */ - break; - - if (rc != 0) { /* a real error */ + if (rc) { CERROR("Can't initialize FMR pool for CPT %d: %d\n", cpt, rc); goto failed; } } - if (i > 0) { + if (i > 0) LASSERT(i == ncpts); - goto create_tx_pool; - } - - cfs_percpt_free(net->ibn_fmr_ps); - net->ibn_fmr_ps = NULL; - - CWARN("Device does not support FMR\n"); - goto failed; create_tx_pool: net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(), sizeof(kib_tx_poolset_t)); - if (net->ibn_tx_ps == NULL) { + if (!net->ibn_tx_ps) { CERROR("Failed to allocate tx pool array\n"); rc = -ENOMEM; goto failed; } for (i = 0; i < ncpts; i++) { - cpt = (cpts == NULL) ? i : cpts[i]; + cpt = !cpts ? 
i : cpts[i]; rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset, cpt, net, "TX", kiblnd_tx_pool_size(ncpts), kiblnd_create_tx_pool, kiblnd_destroy_tx_pool, kiblnd_tx_init, NULL); - if (rc != 0) { + if (rc) { CERROR("Can't initialize TX pool for CPT %d: %d\n", cpt, rc); goto failed; @@ -2064,14 +2017,16 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) return 0; failed: kiblnd_net_fini_pools(net); - LASSERT(rc != 0); + LASSERT(rc); return rc; } static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev) { - /* It's safe to assume a HCA can handle a page size - * matching that of the native system */ + /* + * It's safe to assume a HCA can handle a page size + * matching that of the native system + */ hdev->ibh_page_shift = PAGE_SHIFT; hdev->ibh_page_size = 1 << PAGE_SHIFT; hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1); @@ -2082,44 +2037,28 @@ static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev) return 0; } - for (hdev->ibh_mr_shift = 0; - hdev->ibh_mr_shift < 64; hdev->ibh_mr_shift++) { - if (hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) || - hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) - 1) - return 0; - } - CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size); return -EINVAL; } static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev) { - int i; - - if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL) + if (!hdev->ibh_mrs) return; - for (i = 0; i < hdev->ibh_nmrs; i++) { - if (hdev->ibh_mrs[i] == NULL) - break; + ib_dereg_mr(hdev->ibh_mrs); - ib_dereg_mr(hdev->ibh_mrs[i]); - } - - LIBCFS_FREE(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs); - hdev->ibh_mrs = NULL; - hdev->ibh_nmrs = 0; + hdev->ibh_mrs = NULL; } void kiblnd_hdev_destroy(kib_hca_dev_t *hdev) { kiblnd_hdev_cleanup_mrs(hdev); - if (hdev->ibh_pd != NULL) + if (hdev->ibh_pd) ib_dealloc_pd(hdev->ibh_pd); - if (hdev->ibh_cmid != NULL) + if (hdev->ibh_cmid) rdma_destroy_id(hdev->ibh_cmid); LIBCFS_FREE(hdev, sizeof(*hdev)); @@ -2132,18 +2071,9 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev) int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE; rc = kiblnd_hdev_get_attr(hdev); - if (rc != 0) + if (rc) return rc; - LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs)); - if (hdev->ibh_mrs == NULL) { - CERROR("Failed to allocate MRs table\n"); - return -ENOMEM; - } - - hdev->ibh_mrs[0] = NULL; - hdev->ibh_nmrs = 1; - mr = ib_get_dma_mr(hdev->ibh_pd, acflags); if (IS_ERR(mr)) { CERROR("Failed ib_get_dma_mr : %ld\n", PTR_ERR(mr)); @@ -2151,7 +2081,7 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev) return PTR_ERR(mr); } - hdev->ibh_mrs[0] = mr; + hdev->ibh_mrs = mr; return 0; } @@ -2170,12 +2100,13 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev) struct sockaddr_in dstaddr; int rc; - if (dev->ibd_hdev == NULL || /* initializing */ - dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */ + if (!dev->ibd_hdev || /* initializing */ + !dev->ibd_hdev->ibh_cmid || /* listener is dead */ *kiblnd_tunables.kib_dev_failover > 1) /* debugging */ return 1; - /* XXX: it's UGLY, but I don't have better way to find + /* + * XXX: it's UGLY, but I don't have better way to find * ib-bonding HCA failover because: * * a. no reliable CM event for HCA failover... @@ -2184,7 +2115,8 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev) * We have only two choices at this point: * * a. rdma_bind_addr(), it will conflict with listener cmid - * b. rdma_resolve_addr() to zero addr */ + * b. 
rdma_resolve_addr() to zero addr + */ cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cmid)) { @@ -2201,7 +2133,7 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev) dstaddr.sin_family = AF_INET; rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr, (struct sockaddr *)&dstaddr, 1); - if (rc != 0 || cmid->device == NULL) { + if (rc || !cmid->device) { CERROR("Failed to bind %s:%pI4h to device(%p): %d\n", dev->ibd_ifname, &dev->ibd_ifip, cmid->device, rc); @@ -2230,24 +2162,27 @@ int kiblnd_dev_failover(kib_dev_t *dev) int i; LASSERT(*kiblnd_tunables.kib_dev_failover > 1 || - dev->ibd_can_failover || - dev->ibd_hdev == NULL); + dev->ibd_can_failover || !dev->ibd_hdev); rc = kiblnd_dev_need_failover(dev); if (rc <= 0) goto out; - if (dev->ibd_hdev != NULL && - dev->ibd_hdev->ibh_cmid != NULL) { - /* XXX it's not good to close old listener at here, + if (dev->ibd_hdev && + dev->ibd_hdev->ibh_cmid) { + /* + * XXX it's not good to close old listener at here, * because we can fail to create new listener. * But we have to close it now, otherwise rdma_bind_addr - * will return EADDRINUSE... How crap! */ + * will return EADDRINUSE... How crap! + */ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); cmid = dev->ibd_hdev->ibh_cmid; - /* make next schedule of kiblnd_dev_need_failover() - * return 1 for me */ + /* + * make next schedule of kiblnd_dev_need_failover() + * return 1 for me + */ dev->ibd_hdev->ibh_cmid = NULL; write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); @@ -2269,7 +2204,7 @@ int kiblnd_dev_failover(kib_dev_t *dev) /* Bind to failover device or port */ rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr); - if (rc != 0 || cmid->device == NULL) { + if (rc || !cmid->device) { CERROR("Failed to bind %s:%pI4h to device(%p): %d\n", dev->ibd_ifname, &dev->ibd_ifip, cmid->device, rc); @@ -2278,7 +2213,7 @@ int kiblnd_dev_failover(kib_dev_t *dev) } LIBCFS_ALLOC(hdev, sizeof(*hdev)); - if (hdev == NULL) { + if (!hdev) { CERROR("Failed to allocate kib_hca_dev\n"); rdma_destroy_id(cmid); rc = -ENOMEM; @@ -2300,13 +2235,13 @@ int kiblnd_dev_failover(kib_dev_t *dev) hdev->ibh_pd = pd; rc = rdma_listen(cmid, 0); - if (rc != 0) { + if (rc) { CERROR("Can't start new listener: %d\n", rc); goto out; } rc = kiblnd_hdev_setup_mrs(hdev); - if (rc != 0) { + if (rc) { CERROR("Can't setup device: %d\n", rc); goto out; } @@ -2334,10 +2269,10 @@ int kiblnd_dev_failover(kib_dev_t *dev) kiblnd_destroy_pool_list(&zombie_ppo); if (!list_empty(&zombie_fpo)) kiblnd_destroy_fmr_pool_list(&zombie_fpo); - if (hdev != NULL) + if (hdev) kiblnd_hdev_decref(hdev); - if (rc != 0) + if (rc) dev->ibd_failed_failover++; else dev->ibd_failed_failover = 0; @@ -2347,13 +2282,13 @@ int kiblnd_dev_failover(kib_dev_t *dev) void kiblnd_destroy_dev(kib_dev_t *dev) { - LASSERT(dev->ibd_nnets == 0); + LASSERT(!dev->ibd_nnets); LASSERT(list_empty(&dev->ibd_nets)); list_del(&dev->ibd_fail_list); list_del(&dev->ibd_list); - if (dev->ibd_hdev != NULL) + if (dev->ibd_hdev) kiblnd_hdev_decref(dev->ibd_hdev); LIBCFS_FREE(dev, sizeof(*dev)); @@ -2369,7 +2304,7 @@ static kib_dev_t *kiblnd_create_dev(char *ifname) int rc; rc = lnet_ipif_query(ifname, &up, &ip, &netmask); - if (rc != 0) { + if (rc) { CERROR("Can't query IPoIB interface %s: %d\n", ifname, rc); return NULL; @@ -2381,11 +2316,11 @@ static kib_dev_t *kiblnd_create_dev(char *ifname) } LIBCFS_ALLOC(dev, sizeof(*dev)); - if (dev == NULL) + if (!dev) return NULL; netdev = dev_get_by_name(&init_net, ifname); - if (netdev == 
NULL) { + if (!netdev) { dev->ibd_can_failover = 0; } else { dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER); @@ -2400,14 +2335,13 @@ static kib_dev_t *kiblnd_create_dev(char *ifname) /* initialize the device */ rc = kiblnd_dev_failover(dev); - if (rc != 0) { + if (rc) { CERROR("Can't initialize device: %d\n", rc); LIBCFS_FREE(dev, sizeof(*dev)); return NULL; } - list_add_tail(&dev->ibd_list, - &kiblnd_data.kib_devs); + list_add_tail(&dev->ibd_list, &kiblnd_data.kib_devs); return dev; } @@ -2424,18 +2358,22 @@ static void kiblnd_base_shutdown(void) case IBLND_INIT_ALL: case IBLND_INIT_DATA: - LASSERT(kiblnd_data.kib_peers != NULL); + LASSERT(kiblnd_data.kib_peers); for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) LASSERT(list_empty(&kiblnd_data.kib_peers[i])); LASSERT(list_empty(&kiblnd_data.kib_connd_zombies)); LASSERT(list_empty(&kiblnd_data.kib_connd_conns)); + LASSERT(list_empty(&kiblnd_data.kib_reconn_list)); + LASSERT(list_empty(&kiblnd_data.kib_reconn_wait)); /* flag threads to terminate; wake and wait for them to die */ kiblnd_data.kib_shutdown = 1; - /* NB: we really want to stop scheduler threads net by net + /* + * NB: we really want to stop scheduler threads net by net * instead of the whole module, this should be improved - * with dynamic configuration LNet */ + * with dynamic configuration LNet + */ cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) wake_up_all(&sched->ibs_waitq); @@ -2443,7 +2381,7 @@ static void kiblnd_base_shutdown(void) wake_up_all(&kiblnd_data.kib_failover_waitq); i = 2; - while (atomic_read(&kiblnd_data.kib_nthreads) != 0) { + while (atomic_read(&kiblnd_data.kib_nthreads)) { i++; /* power of 2 ? */ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, @@ -2459,20 +2397,20 @@ static void kiblnd_base_shutdown(void) break; } - if (kiblnd_data.kib_peers != NULL) { + if (kiblnd_data.kib_peers) { LIBCFS_FREE(kiblnd_data.kib_peers, sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size); } - if (kiblnd_data.kib_scheds != NULL) + if (kiblnd_data.kib_scheds) cfs_percpt_free(kiblnd_data.kib_scheds); kiblnd_data.kib_init = IBLND_INIT_NOTHING; module_put(THIS_MODULE); } -void kiblnd_shutdown(lnet_ni_t *ni) +static void kiblnd_shutdown(lnet_ni_t *ni) { kib_net_t *net = ni->ni_data; rwlock_t *g_lock = &kiblnd_data.kib_global_lock; @@ -2481,7 +2419,7 @@ void kiblnd_shutdown(lnet_ni_t *ni) LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL); - if (net == NULL) + if (!net) goto out; write_lock_irqsave(g_lock, flags); @@ -2498,7 +2436,7 @@ void kiblnd_shutdown(lnet_ni_t *ni) /* Wait for all peer state to clean up */ i = 2; - while (atomic_read(&net->ibn_npeers) != 0) { + while (atomic_read(&net->ibn_npeers)) { i++; CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? 
*/ "%s: waiting for %d peers to disconnect\n", @@ -2519,10 +2457,9 @@ void kiblnd_shutdown(lnet_ni_t *ni) /* fall through */ case IBLND_INIT_NOTHING: - LASSERT(atomic_read(&net->ibn_nconns) == 0); + LASSERT(!atomic_read(&net->ibn_nconns)); - if (net->ibn_dev != NULL && - net->ibn_dev->ibd_nnets == 0) + if (net->ibn_dev && !net->ibn_dev->ibd_nnets) kiblnd_destroy_dev(net->ibn_dev); break; @@ -2558,7 +2495,7 @@ static int kiblnd_base_startup(void) kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE; LIBCFS_ALLOC(kiblnd_data.kib_peers, sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size); - if (kiblnd_data.kib_peers == NULL) + if (!kiblnd_data.kib_peers) goto failed; for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]); @@ -2566,12 +2503,15 @@ static int kiblnd_base_startup(void) spin_lock_init(&kiblnd_data.kib_connd_lock); INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns); INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies); + INIT_LIST_HEAD(&kiblnd_data.kib_reconn_list); + INIT_LIST_HEAD(&kiblnd_data.kib_reconn_wait); + init_waitqueue_head(&kiblnd_data.kib_connd_waitq); init_waitqueue_head(&kiblnd_data.kib_failover_waitq); kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*sched)); - if (kiblnd_data.kib_scheds == NULL) + if (!kiblnd_data.kib_scheds) goto failed; cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) { @@ -2585,8 +2525,10 @@ static int kiblnd_base_startup(void) if (*kiblnd_tunables.kib_nscheds > 0) { nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds); } else { - /* max to half of CPUs, another half is reserved for - * upper layer modules */ + /* + * max to half of CPUs, another half is reserved for + * upper layer modules + */ nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs); } @@ -2601,16 +2543,16 @@ static int kiblnd_base_startup(void) /*****************************************************/ rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd"); - if (rc != 0) { + if (rc) { CERROR("Can't spawn o2iblnd connd: %d\n", rc); goto failed; } - if (*kiblnd_tunables.kib_dev_failover != 0) + if (*kiblnd_tunables.kib_dev_failover) rc = kiblnd_thread_start(kiblnd_failover_thread, NULL, "kiblnd_failover"); - if (rc != 0) { + if (rc) { CERROR("Can't spawn o2iblnd failover thread: %d\n", rc); goto failed; } @@ -2632,7 +2574,7 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched) int nthrs; int i; - if (sched->ibs_nthreads == 0) { + if (!sched->ibs_nthreads) { if (*kiblnd_tunables.kib_nscheds > 0) { nthrs = sched->ibs_nthreads_max; } else { @@ -2655,7 +2597,7 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched) snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld", KIB_THREAD_CPT(id), KIB_THREAD_TID(id)); rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name); - if (rc == 0) + if (!rc) continue; CERROR("Can't spawn thread %d for scheduler[%d]: %d\n", @@ -2677,14 +2619,14 @@ static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, for (i = 0; i < ncpts; i++) { struct kib_sched_info *sched; - cpt = (cpts == NULL) ? i : cpts[i]; + cpt = !cpts ? 
i : cpts[i]; sched = kiblnd_data.kib_scheds[cpt]; if (!newdev && sched->ibs_nthreads > 0) continue; rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]); - if (rc != 0) { + if (rc) { CERROR("Failed to start scheduler threads for %s\n", dev->ibd_ifname); return rc; @@ -2702,30 +2644,30 @@ static kib_dev_t *kiblnd_dev_search(char *ifname) colon = strchr(ifname, ':'); list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) { - if (strcmp(&dev->ibd_ifname[0], ifname) == 0) + if (!strcmp(&dev->ibd_ifname[0], ifname)) return dev; - if (alias != NULL) + if (alias) continue; colon2 = strchr(dev->ibd_ifname, ':'); - if (colon != NULL) + if (colon) *colon = 0; - if (colon2 != NULL) + if (colon2) *colon2 = 0; - if (strcmp(&dev->ibd_ifname[0], ifname) == 0) + if (!strcmp(&dev->ibd_ifname[0], ifname)) alias = dev; - if (colon != NULL) + if (colon) *colon = ':'; - if (colon2 != NULL) + if (colon2) *colon2 = ':'; } return alias; } -int kiblnd_startup(lnet_ni_t *ni) +static int kiblnd_startup(lnet_ni_t *ni) { char *ifname; kib_dev_t *ibdev = NULL; @@ -2739,13 +2681,13 @@ int kiblnd_startup(lnet_ni_t *ni) if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) { rc = kiblnd_base_startup(); - if (rc != 0) + if (rc) return rc; } LIBCFS_ALLOC(net, sizeof(*net)); ni->ni_data = net; - if (net == NULL) + if (!net) goto net_failed; ktime_get_real_ts64(&tv); @@ -2757,11 +2699,11 @@ int kiblnd_startup(lnet_ni_t *ni) ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits; ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits; - if (ni->ni_interfaces[0] != NULL) { + if (ni->ni_interfaces[0]) { /* Use the IPoIB interface specified in 'networks=' */ CLASSERT(LNET_MAX_INTERFACES > 1); - if (ni->ni_interfaces[1] != NULL) { + if (ni->ni_interfaces[1]) { CERROR("Multiple interfaces not supported\n"); goto failed; } @@ -2778,12 +2720,12 @@ int kiblnd_startup(lnet_ni_t *ni) ibdev = kiblnd_dev_search(ifname); - newdev = ibdev == NULL; + newdev = !ibdev; /* hmm...create kib_dev even for alias */ - if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0) + if (!ibdev || strcmp(&ibdev->ibd_ifname[0], ifname)) ibdev = kiblnd_create_dev(ifname); - if (ibdev == NULL) + if (!ibdev) goto failed; net->ibn_dev = ibdev; @@ -2791,11 +2733,11 @@ int kiblnd_startup(lnet_ni_t *ni) rc = kiblnd_dev_start_threads(ibdev, newdev, ni->ni_cpts, ni->ni_ncpts); - if (rc != 0) + if (rc) goto failed; rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts); - if (rc != 0) { + if (rc) { CERROR("Failed to initialize NI pools: %d\n", rc); goto failed; } @@ -2810,7 +2752,7 @@ int kiblnd_startup(lnet_ni_t *ni) return 0; failed: - if (net->ibn_dev == NULL && ibdev != NULL) + if (!net->ibn_dev && ibdev) kiblnd_destroy_dev(ibdev); net_failed: @@ -2820,25 +2762,35 @@ net_failed: return -ENETDOWN; } -static void __exit kiblnd_module_fini(void) +static lnd_t the_o2iblnd = { + .lnd_type = O2IBLND, + .lnd_startup = kiblnd_startup, + .lnd_shutdown = kiblnd_shutdown, + .lnd_ctl = kiblnd_ctl, + .lnd_query = kiblnd_query, + .lnd_send = kiblnd_send, + .lnd_recv = kiblnd_recv, +}; + +static void __exit ko2iblnd_exit(void) { lnet_unregister_lnd(&the_o2iblnd); } -static int __init kiblnd_module_init(void) +static int __init ko2iblnd_init(void) { int rc; CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE); CLASSERT(offsetof(kib_msg_t, - ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) - <= IBLND_MSG_SIZE); + ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) + <= IBLND_MSG_SIZE); CLASSERT(offsetof(kib_msg_t, - 
ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) - <= IBLND_MSG_SIZE); + ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) + <= IBLND_MSG_SIZE); rc = kiblnd_tunables_init(); - if (rc != 0) + if (rc) return rc; lnet_register_lnd(&the_o2iblnd); @@ -2847,8 +2799,9 @@ static int __init kiblnd_module_init(void) } MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>"); -MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v2.00"); +MODULE_DESCRIPTION("OpenIB gen2 LNet Network Driver"); +MODULE_VERSION("2.7.0"); MODULE_LICENSE("GPL"); -module_init(kiblnd_module_init); -module_exit(kiblnd_module_fini); +module_init(ko2iblnd_init); +module_exit(ko2iblnd_exit);
--
cgit v1.2.3-54-g00ecf