Diffstat (limited to 'drivers/staging/lustre/lnet/klnds')
-rw-r--r-- drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c           |  705
-rw-r--r-- drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h           |  159
-rw-r--r-- drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c        | 1048
-rw-r--r-- drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c |    8
-rw-r--r-- drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c           |  631
-rw-r--r-- drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h           |   31
-rw-r--r-- drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c        |  789
-rw-r--r-- drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c       |  197
-rw-r--r-- drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c |   11
-rw-r--r-- drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c     |  175
10 files changed, 2053 insertions(+), 1701 deletions(-)
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index cb74ae731..0d32e6541 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -42,15 +42,7 @@
#include <asm/page.h>
#include "o2iblnd.h"
-static lnd_t the_o2iblnd = {
- .lnd_type = O2IBLND,
- .lnd_startup = kiblnd_startup,
- .lnd_shutdown = kiblnd_shutdown,
- .lnd_ctl = kiblnd_ctl,
- .lnd_query = kiblnd_query,
- .lnd_send = kiblnd_send,
- .lnd_recv = kiblnd_recv,
-};
+static lnd_t the_o2iblnd;
kib_data_t kiblnd_data;
@@ -63,7 +55,7 @@ static __u32 kiblnd_cksum(void *ptr, int nob)
sum = ((sum << 1) | (sum >> 31)) + *c++;
/* ensure I don't return 0 (== no checksum) */
- return (sum == 0) ? 1 : sum;
+ return !sum ? 1 : sum;
}
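
Note: for context, the checksum being restyled here is a simple rotate-and-add over the message bytes. Reconstructed from the surrounding context lines, the whole function reads roughly as follows (a sketch, not a verbatim copy of the file):

	static __u32 kiblnd_cksum(void *ptr, int nob)
	{
		char *c = ptr;
		__u32 sum = 0;

		/* rotate left by one bit, then add the next byte */
		while (nob-- > 0)
			sum = ((sum << 1) | (sum >> 31)) + *c++;

		/* ensure I don't return 0 (== no checksum) */
		return !sum ? 1 : sum;
	}
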
static char *kiblnd_msgtype2str(int type)
@@ -145,7 +137,7 @@ static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
int i;
LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ ||
- msg->ibm_type == IBLND_MSG_PUT_ACK);
+ msg->ibm_type == IBLND_MSG_PUT_ACK);
rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
&msg->ibm_u.get.ibgm_rd :
@@ -189,8 +181,10 @@ void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
{
kib_net_t *net = ni->ni_data;
- /* CAVEAT EMPTOR! all message fields not set here should have been
- * initialised previously. */
+ /*
+ * CAVEAT EMPTOR! all message fields not set here should have been
+ * initialised previously.
+ */
msg->ibm_magic = IBLND_MSG_MAGIC;
msg->ibm_version = version;
/* ibm_type */
@@ -249,11 +243,13 @@ int kiblnd_unpack_msg(kib_msg_t *msg, int nob)
return -EPROTO;
}
- /* checksum must be computed with ibm_cksum zero and BEFORE anything
- * gets flipped */
+ /*
+ * checksum must be computed with ibm_cksum zero and BEFORE anything
+ * gets flipped
+ */
msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
msg->ibm_cksum = 0;
- if (msg_cksum != 0 &&
+ if (msg_cksum &&
msg_cksum != kiblnd_cksum(msg, msg_nob)) {
CERROR("Bad checksum\n");
return -EPROTO;
@@ -326,21 +322,21 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
int cpt = lnet_cpt_of_nid(nid);
unsigned long flags;
- LASSERT(net != NULL);
+ LASSERT(net);
LASSERT(nid != LNET_NID_ANY);
LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
- if (peer == NULL) {
+ if (!peer) {
CERROR("Cannot allocate peer\n");
return -ENOMEM;
}
- memset(peer, 0, sizeof(*peer)); /* zero flags etc */
-
peer->ibp_ni = ni;
peer->ibp_nid = nid;
peer->ibp_error = 0;
peer->ibp_last_alive = 0;
+ peer->ibp_max_frags = IBLND_CFG_RDMA_FRAGS;
+ peer->ibp_queue_depth = *kiblnd_tunables.kib_peertxcredits;
atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
@@ -350,7 +346,7 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
/* always called with a ref on ni, which prevents ni being shutdown */
- LASSERT(net->ibn_shutdown == 0);
+ LASSERT(!net->ibn_shutdown);
/* npeers only grows with the global lock held */
atomic_inc(&net->ibn_npeers);
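
Note: besides the !ptr style conversion, this hunk drops the explicit memset() (the LIBCFS_CPT_ALLOC() family returns zeroed memory, which is why the patch can remove it) and seeds two new negotiation fields with their configured maxima. A later hunk copies them into each new connection, where they size RX buffers and work requests through the reworked sizing macros:

	/* defaults at peer creation (this hunk)... */
	peer->ibp_max_frags   = IBLND_CFG_RDMA_FRAGS;
	peer->ibp_queue_depth = *kiblnd_tunables.kib_peertxcredits;

	/* ...copied per-connection in kiblnd_create_conn() (later hunk) */
	conn->ibc_max_frags   = peer->ibp_max_frags;
	conn->ibc_queue_depth = peer->ibp_queue_depth;
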
@@ -365,38 +361,36 @@ void kiblnd_destroy_peer(kib_peer_t *peer)
{
kib_net_t *net = peer->ibp_ni->ni_data;
- LASSERT(net != NULL);
- LASSERT(atomic_read(&peer->ibp_refcount) == 0);
+ LASSERT(net);
+ LASSERT(!atomic_read(&peer->ibp_refcount));
LASSERT(!kiblnd_peer_active(peer));
- LASSERT(peer->ibp_connecting == 0);
- LASSERT(peer->ibp_accepting == 0);
- LASSERT(list_empty(&peer->ibp_conns));
+ LASSERT(kiblnd_peer_idle(peer));
LASSERT(list_empty(&peer->ibp_tx_queue));
LIBCFS_FREE(peer, sizeof(*peer));
- /* NB a peer's connections keep a reference on their peer until
+ /*
+ * NB a peer's connections keep a reference on their peer until
* they are destroyed, so we can be assured that _all_ state to do
* with this peer has been cleaned up when its refcount drops to
- * zero. */
+ * zero.
+ */
atomic_dec(&net->ibn_npeers);
}
kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
{
- /* the caller is responsible for accounting the additional reference
- * that this creates */
+ /*
+ * the caller is responsible for accounting the additional reference
+ * that this creates
+ */
struct list_head *peer_list = kiblnd_nid2peerlist(nid);
struct list_head *tmp;
kib_peer_t *peer;
list_for_each(tmp, peer_list) {
-
peer = list_entry(tmp, kib_peer_t, ibp_list);
-
- LASSERT(peer->ibp_connecting > 0 || /* creating conns */
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns)); /* active conn */
+ LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_nid != nid)
continue;
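
Note: kiblnd_peer_idle() is the helper this commit introduces to replace the open-coded triple test that used to sit in each of these LASSERTs. Its definition lands in o2iblnd.h, outside the lines shown here, but from the removed assertions it must be equivalent to:

	/* sketch; the real inline helper lives in o2iblnd.h */
	static inline bool kiblnd_peer_idle(kib_peer_t *peer)
	{
		return !peer->ibp_connecting && !peer->ibp_accepting &&
		       list_empty(&peer->ibp_conns);
	}
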
@@ -431,13 +425,9 @@ static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
-
list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
-
peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns));
+ LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_ni != ni)
continue;
@@ -474,8 +464,10 @@ static void kiblnd_del_peer_locked(kib_peer_t *peer)
}
/* NB closing peer's last conn unlinked it. */
}
- /* NB peer now unlinked; might even be freed if the peer table had the
- * last ref on it. */
+ /*
+ * NB peer now unlinked; might even be freed if the peer table had the
+ * last ref on it.
+ */
}
static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
@@ -493,7 +485,8 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
if (nid != LNET_NID_ANY) {
- lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+ lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+ hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
} else {
lo = 0;
hi = kiblnd_data.kib_peer_hash_size - 1;
@@ -502,9 +495,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns));
+ LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_ni != ni)
continue;
@@ -516,7 +507,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
LASSERT(list_empty(&peer->ibp_conns));
list_splice_init(&peer->ibp_tx_queue,
- &zombies);
+ &zombies);
}
kiblnd_del_peer_locked(peer);
@@ -544,11 +535,8 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
-
peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns));
+ LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_ni != ni)
continue;
@@ -558,7 +546,7 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
continue;
conn = list_entry(ctmp, kib_conn_t,
- ibc_list);
+ ibc_list);
kiblnd_conn_addref(conn);
read_unlock_irqrestore(
&kiblnd_data.kib_global_lock,
@@ -597,12 +585,12 @@ static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
int mtu;
/* XXX There is no path record for iWARP, set by netdev->change_mtu? */
- if (cmid->route.path_rec == NULL)
+ if (!cmid->route.path_rec)
return;
mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
LASSERT(mtu >= 0);
- if (mtu != 0)
+ if (mtu)
cmid->route.path_rec->mtu = mtu;
}
@@ -619,13 +607,13 @@ static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
return 0;
mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
- if (mask == NULL)
+ if (!mask)
return 0;
/* hash NID to CPU id in this partition... */
off = do_div(nid, cpumask_weight(mask));
for_each_cpu(i, mask) {
- if (off-- == 0)
+ if (!off--)
return i % vectors;
}
@@ -634,15 +622,17 @@ static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
}
kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
- int state, int version)
+ int state, int version)
{
- /* CAVEAT EMPTOR:
+ /*
+ * CAVEAT EMPTOR:
* If the new conn is created successfully it takes over the caller's
* ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself
* is destroyed. On failure, the caller's ref on 'peer' remains and
* she must dispose of 'cmid'. (Actually I'd block forever if I tried
* to destroy 'cmid' here since I'm called from the CM which still has
- * its ref on 'cmid'). */
+ * its ref on 'cmid').
+ */
rwlock_t *glock = &kiblnd_data.kib_global_lock;
kib_net_t *net = peer->ibp_ni->ni_data;
kib_dev_t *dev;
@@ -656,7 +646,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
int rc;
int i;
- LASSERT(net != NULL);
+ LASSERT(net);
LASSERT(!in_interrupt());
dev = net->ibn_dev;
@@ -668,14 +658,14 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
sizeof(*init_qp_attr));
- if (init_qp_attr == NULL) {
+ if (!init_qp_attr) {
CERROR("Can't allocate qp_attr for %s\n",
libcfs_nid2str(peer->ibp_nid));
goto failed_0;
}
LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
- if (conn == NULL) {
+ if (!conn) {
CERROR("Can't allocate connection for %s\n",
libcfs_nid2str(peer->ibp_nid));
goto failed_1;
@@ -686,6 +676,8 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
conn->ibc_peer = peer; /* I take the caller's ref */
cmid->context = conn; /* for future CM callbacks */
conn->ibc_cmid = cmid;
+ conn->ibc_max_frags = peer->ibp_max_frags;
+ conn->ibc_queue_depth = peer->ibp_queue_depth;
INIT_LIST_HEAD(&conn->ibc_early_rxs);
INIT_LIST_HEAD(&conn->ibc_tx_noops);
@@ -697,7 +689,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
sizeof(*conn->ibc_connvars));
- if (conn->ibc_connvars == NULL) {
+ if (!conn->ibc_connvars) {
CERROR("Can't allocate in-progress connection state\n");
goto failed_2;
}
@@ -731,42 +723,42 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
write_unlock_irqrestore(glock, flags);
LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
- IBLND_RX_MSGS(version) * sizeof(kib_rx_t));
- if (conn->ibc_rxs == NULL) {
+ IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
+ if (!conn->ibc_rxs) {
CERROR("Cannot allocate RX buffers\n");
goto failed_2;
}
rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
- IBLND_RX_MSG_PAGES(version));
- if (rc != 0)
+ IBLND_RX_MSG_PAGES(conn));
+ if (rc)
goto failed_2;
kiblnd_map_rx_descs(conn);
- cq_attr.cqe = IBLND_CQ_ENTRIES(version);
+ cq_attr.cqe = IBLND_CQ_ENTRIES(conn);
cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt);
cq = ib_create_cq(cmid->device,
kiblnd_cq_completion, kiblnd_cq_event, conn,
&cq_attr);
if (IS_ERR(cq)) {
- CERROR("Can't create CQ: %ld, cqe: %d\n",
- PTR_ERR(cq), IBLND_CQ_ENTRIES(version));
+ CERROR("Failed to create CQ with %d CQEs: %ld\n",
+ IBLND_CQ_ENTRIES(conn), PTR_ERR(cq));
goto failed_2;
}
conn->ibc_cq = cq;
rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
- if (rc != 0) {
- CERROR("Can't request completion notificiation: %d\n", rc);
+ if (rc) {
+ CERROR("Can't request completion notification: %d\n", rc);
goto failed_2;
}
init_qp_attr->event_handler = kiblnd_qp_event;
init_qp_attr->qp_context = conn;
- init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(version);
- init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(version);
+ init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(conn);
+ init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn);
init_qp_attr->cap.max_send_sge = 1;
init_qp_attr->cap.max_recv_sge = 1;
init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
@@ -777,7 +769,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
conn->ibc_sched = sched;
rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
rc, init_qp_attr->cap.max_send_wr,
init_qp_attr->cap.max_recv_wr);
@@ -787,33 +779,37 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
/* 1 ref for caller and each rxmsg */
- atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
- conn->ibc_nrx = IBLND_RX_MSGS(version);
+ atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(conn));
+ conn->ibc_nrx = IBLND_RX_MSGS(conn);
/* post receives */
- for (i = 0; i < IBLND_RX_MSGS(version); i++) {
+ for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
rc = kiblnd_post_rx(&conn->ibc_rxs[i],
IBLND_POSTRX_NO_CREDIT);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't post rxmsg: %d\n", rc);
/* Make posted receives complete */
kiblnd_abort_receives(conn);
- /* correct # of posted buffers
- * NB locking needed now I'm racing with completion */
+ /*
+ * correct # of posted buffers
+ * NB locking needed now I'm racing with completion
+ */
spin_lock_irqsave(&sched->ibs_lock, flags);
- conn->ibc_nrx -= IBLND_RX_MSGS(version) - i;
+ conn->ibc_nrx -= IBLND_RX_MSGS(conn) - i;
spin_unlock_irqrestore(&sched->ibs_lock, flags);
- /* cmid will be destroyed by CM(ofed) after cm_callback
+ /*
+ * cmid will be destroyed by CM(ofed) after cm_callback
* returned, so we can't refer it anymore
- * (by kiblnd_connd()->kiblnd_destroy_conn) */
+ * (by kiblnd_connd()->kiblnd_destroy_conn)
+ */
rdma_destroy_qp(conn->ibc_cmid);
conn->ibc_cmid = NULL;
/* Drop my own and unused rxbuffer refcounts */
- while (i++ <= IBLND_RX_MSGS(version))
+ while (i++ <= IBLND_RX_MSGS(conn))
kiblnd_conn_decref(conn);
return NULL;
@@ -822,7 +818,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
/* Init successful! */
LASSERT(state == IBLND_CONN_ACTIVE_CONNECT ||
- state == IBLND_CONN_PASSIVE_WAIT);
+ state == IBLND_CONN_PASSIVE_WAIT);
conn->ibc_state = state;
/* 1 more conn */
@@ -830,29 +826,29 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
return conn;
failed_2:
- kiblnd_destroy_conn(conn);
+ kiblnd_destroy_conn(conn, true);
failed_1:
LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
failed_0:
return NULL;
}
-void kiblnd_destroy_conn(kib_conn_t *conn)
+void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
{
struct rdma_cm_id *cmid = conn->ibc_cmid;
kib_peer_t *peer = conn->ibc_peer;
int rc;
LASSERT(!in_interrupt());
- LASSERT(atomic_read(&conn->ibc_refcount) == 0);
+ LASSERT(!atomic_read(&conn->ibc_refcount));
LASSERT(list_empty(&conn->ibc_early_rxs));
LASSERT(list_empty(&conn->ibc_tx_noops));
LASSERT(list_empty(&conn->ibc_tx_queue));
LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
LASSERT(list_empty(&conn->ibc_active_txs));
- LASSERT(conn->ibc_noops_posted == 0);
- LASSERT(conn->ibc_nsends_posted == 0);
+ LASSERT(!conn->ibc_noops_posted);
+ LASSERT(!conn->ibc_nsends_posted);
switch (conn->ibc_state) {
default:
@@ -861,7 +857,7 @@ void kiblnd_destroy_conn(kib_conn_t *conn)
case IBLND_CONN_DISCONNECTED:
/* connvars should have been freed already */
- LASSERT(conn->ibc_connvars == NULL);
+ LASSERT(!conn->ibc_connvars);
break;
case IBLND_CONN_INIT:
@@ -869,28 +865,27 @@ void kiblnd_destroy_conn(kib_conn_t *conn)
}
/* conn->ibc_cmid might be destroyed by CM already */
- if (cmid != NULL && cmid->qp != NULL)
+ if (cmid && cmid->qp)
rdma_destroy_qp(cmid);
- if (conn->ibc_cq != NULL) {
+ if (conn->ibc_cq) {
rc = ib_destroy_cq(conn->ibc_cq);
- if (rc != 0)
+ if (rc)
CWARN("Error destroying CQ: %d\n", rc);
}
- if (conn->ibc_rx_pages != NULL)
+ if (conn->ibc_rx_pages)
kiblnd_unmap_rx_descs(conn);
- if (conn->ibc_rxs != NULL) {
+ if (conn->ibc_rxs) {
LIBCFS_FREE(conn->ibc_rxs,
- IBLND_RX_MSGS(conn->ibc_version)
- * sizeof(kib_rx_t));
+ IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
}
- if (conn->ibc_connvars != NULL)
+ if (conn->ibc_connvars)
LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
- if (conn->ibc_hdev != NULL)
+ if (conn->ibc_hdev)
kiblnd_hdev_decref(conn->ibc_hdev);
/* See CAVEAT EMPTOR above in kiblnd_create_conn */
@@ -927,7 +922,7 @@ int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
}
int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
- int version, __u64 incarnation)
+ int version, __u64 incarnation)
{
kib_conn_t *conn;
struct list_head *ctmp;
@@ -967,20 +962,18 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (nid != LNET_NID_ANY)
- lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
- else {
+ if (nid != LNET_NID_ANY) {
+ lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+ hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+ } else {
lo = 0;
hi = kiblnd_data.kib_peer_hash_size - 1;
}
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
-
peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns));
+ LASSERT(!kiblnd_peer_idle(peer));
if (peer->ibp_ni != ni)
continue;
@@ -998,10 +991,10 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
if (nid == LNET_NID_ANY)
return 0;
- return (count == 0) ? -ENOENT : 0;
+ return !count ? -ENOENT : 0;
}
-int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
+static int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
{
struct libcfs_ioctl_data *data = arg;
int rc = -EINVAL;
@@ -1027,14 +1020,14 @@ int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
rc = 0;
conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
- if (conn == NULL) {
+ if (!conn) {
rc = -ENOENT;
break;
}
- LASSERT(conn->ibc_cmid != NULL);
+ LASSERT(conn->ibc_cmid);
data->ioc_nid = conn->ibc_peer->ibp_nid;
- if (conn->ibc_cmid->route.path_rec == NULL)
+ if (!conn->ibc_cmid->route.path_rec)
data->ioc_u32[0] = 0; /* iWarp has no path MTU */
else
data->ioc_u32[0] =
@@ -1054,7 +1047,7 @@ int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
return rc;
}
-void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
+static void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
{
unsigned long last_alive = 0;
unsigned long now = cfs_time_current();
@@ -1065,21 +1058,19 @@ void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
read_lock_irqsave(glock, flags);
peer = kiblnd_find_peer_locked(nid);
- if (peer != NULL) {
- LASSERT(peer->ibp_connecting > 0 || /* creating conns */
- peer->ibp_accepting > 0 ||
- !list_empty(&peer->ibp_conns)); /* active conn */
+ if (peer)
last_alive = peer->ibp_last_alive;
- }
read_unlock_irqrestore(glock, flags);
- if (last_alive != 0)
+ if (last_alive)
*when = last_alive;
- /* peer is not persistent in hash, trigger peer creation
- * and connection establishment with a NULL tx */
- if (peer == NULL)
+ /*
+ * peer is not persistent in hash, trigger peer creation
+ * and connection establishment with a NULL tx
+ */
+ if (!peer)
kiblnd_launch_tx(ni, NULL, nid);
CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
@@ -1087,13 +1078,13 @@ void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
last_alive ? cfs_duration_sec(now - last_alive) : -1);
}
-void kiblnd_free_pages(kib_pages_t *p)
+static void kiblnd_free_pages(kib_pages_t *p)
{
int npages = p->ibp_npages;
int i;
for (i = 0; i < npages; i++) {
- if (p->ibp_pages[i] != NULL)
+ if (p->ibp_pages[i])
__free_page(p->ibp_pages[i]);
}
@@ -1107,7 +1098,7 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
offsetof(kib_pages_t, ibp_pages[npages]));
- if (p == NULL) {
+ if (!p) {
CERROR("Can't allocate descriptor for %d pages\n", npages);
return -ENOMEM;
}
@@ -1119,7 +1110,7 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
p->ibp_pages[i] = alloc_pages_node(
cfs_cpt_spread_node(lnet_cpt_table(), cpt),
GFP_NOFS, 0);
- if (p->ibp_pages[i] == NULL) {
+ if (!p->ibp_pages[i]) {
CERROR("Can't allocate page %d of %d\n", i, npages);
kiblnd_free_pages(p);
return -ENOMEM;
@@ -1135,10 +1126,10 @@ void kiblnd_unmap_rx_descs(kib_conn_t *conn)
kib_rx_t *rx;
int i;
- LASSERT(conn->ibc_rxs != NULL);
- LASSERT(conn->ibc_hdev != NULL);
+ LASSERT(conn->ibc_rxs);
+ LASSERT(conn->ibc_hdev);
- for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
+ for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
rx = &conn->ibc_rxs[i];
LASSERT(rx->rx_nob >= 0); /* not posted */
@@ -1162,7 +1153,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn)
int ipg;
int i;
- for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
+ for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn); i++) {
pg = conn->ibc_rx_pages->ibp_pages[ipg];
rx = &conn->ibc_rxs[i];
@@ -1174,7 +1165,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn)
IBLND_MSG_SIZE,
DMA_FROM_DEVICE);
LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
- rx->rx_msgaddr));
+ rx->rx_msgaddr));
KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n",
@@ -1187,7 +1178,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn)
if (pg_off == PAGE_SIZE) {
pg_off = 0;
ipg++;
- LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version));
+ LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn));
}
}
}
@@ -1198,9 +1189,9 @@ static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
kib_tx_t *tx;
int i;
- LASSERT(tpo->tpo_pool.po_allocated == 0);
+ LASSERT(!tpo->tpo_pool.po_allocated);
- if (hdev == NULL)
+ if (!hdev)
return;
for (i = 0; i < tpo->tpo_pool.po_size; i++) {
@@ -1224,9 +1215,10 @@ static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev)
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
while (dev->ibd_failover) {
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- if (i++ % 50 == 0)
+ if (!(i++ % 50))
CDEBUG(D_NET, "%s: Wait for failover\n",
dev->ibd_ifname);
+ set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(1) / 100);
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
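
Note: the added set_current_state() is a behavioural fix, not churn: schedule_timeout() only sleeps when the task state is something other than TASK_RUNNING, so the old failover-wait loop returned immediately and busy-spun. The corrected idiom:

	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(cfs_time_seconds(1) / 100);	/* now really sleeps ~10ms */
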
@@ -1252,7 +1244,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
int ipage;
int i;
- LASSERT(net != NULL);
+ LASSERT(net);
dev = net->ibn_dev;
@@ -1260,7 +1252,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
CLASSERT(IBLND_MSG_SIZE <= PAGE_SIZE);
/* No fancy arithmetic when we do the buffer calculations */
- CLASSERT(PAGE_SIZE % IBLND_MSG_SIZE == 0);
+ CLASSERT(!(PAGE_SIZE % IBLND_MSG_SIZE));
tpo->tpo_hdev = kiblnd_current_hdev(dev);
@@ -1275,7 +1267,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
tpo->tpo_hdev->ibh_ibdev, tx->tx_msg,
IBLND_MSG_SIZE, DMA_TO_DEVICE);
LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
- tx->tx_msgaddr));
+ tx->tx_msgaddr));
KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
list_add(&tx->tx_list, &pool->po_free_list);
@@ -1291,68 +1283,32 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
}
}
-struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size)
+struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd,
+ int negotiated_nfrags)
{
- __u64 index;
-
- LASSERT(hdev->ibh_mrs[0] != NULL);
-
- if (hdev->ibh_nmrs == 1)
- return hdev->ibh_mrs[0];
-
- index = addr >> hdev->ibh_mr_shift;
+ __u16 nfrags = (negotiated_nfrags != -1) ?
+ negotiated_nfrags : *kiblnd_tunables.kib_map_on_demand;
- if (index < hdev->ibh_nmrs &&
- index == ((addr + size - 1) >> hdev->ibh_mr_shift))
- return hdev->ibh_mrs[index];
-
- return NULL;
-}
-
-struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd)
-{
- struct ib_mr *prev_mr;
- struct ib_mr *mr;
- int i;
-
- LASSERT(hdev->ibh_mrs[0] != NULL);
+ LASSERT(hdev->ibh_mrs);
if (*kiblnd_tunables.kib_map_on_demand > 0 &&
- *kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags)
+ nfrags <= rd->rd_nfrags)
return NULL;
- if (hdev->ibh_nmrs == 1)
- return hdev->ibh_mrs[0];
-
- for (i = 0, mr = prev_mr = NULL;
- i < rd->rd_nfrags; i++) {
- mr = kiblnd_find_dma_mr(hdev,
- rd->rd_frags[i].rf_addr,
- rd->rd_frags[i].rf_nob);
- if (prev_mr == NULL)
- prev_mr = mr;
-
- if (mr == NULL || prev_mr != mr) {
- /* Can't covered by one single MR */
- mr = NULL;
- break;
- }
- }
-
- return mr;
+ return hdev->ibh_mrs;
}
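
Note: with a single whole-of-memory DMA MR (see the kiblnd_hdev_setup_mrs() hunk further down), the lookup no longer has to prove that every fragment lies inside one region, so kiblnd_find_dma_mr() is deleted outright and the survivor reduces to a policy check. The new negotiated_nfrags parameter lets a connection that agreed on a smaller fragment count than the local map-on-demand tunable still take the right path. Roughly:

	/* sketch of the reduced decision, following the hunk above */
	if (*kiblnd_tunables.kib_map_on_demand > 0 && nfrags <= rd->rd_nfrags)
		return NULL;		/* caller maps through the FMR pool */
	return hdev->ibh_mrs;		/* one global MR covers everything */
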
static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
{
- LASSERT(pool->fpo_map_count == 0);
+ LASSERT(!pool->fpo_map_count);
- if (pool->fpo_fmr_pool != NULL)
+ if (pool->fpo_fmr_pool)
ib_destroy_fmr_pool(pool->fpo_fmr_pool);
- if (pool->fpo_hdev != NULL)
+ if (pool->fpo_hdev)
kiblnd_hdev_decref(pool->fpo_hdev);
- LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t));
+ LIBCFS_FREE(pool, sizeof(*pool));
}
static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
@@ -1387,7 +1343,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
kib_dev_t *dev = fps->fps_net->ibn_dev;
kib_fmr_pool_t *fpo;
struct ib_fmr_pool_param param = {
- .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
+ .max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE,
.page_shift = PAGE_SHIFT,
.access = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE),
@@ -1399,7 +1355,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
int rc;
LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
- if (fpo == NULL)
+ if (!fpo)
return -ENOMEM;
fpo->fpo_hdev = kiblnd_current_hdev(dev);
@@ -1410,7 +1366,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
CERROR("Failed to create FMR pool: %d\n", rc);
kiblnd_hdev_decref(fpo->fpo_hdev);
- LIBCFS_FREE(fpo, sizeof(kib_fmr_pool_t));
+ LIBCFS_FREE(fpo, sizeof(*fpo));
return rc;
}
@@ -1424,7 +1380,7 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
struct list_head *zombies)
{
- if (fps->fps_net == NULL) /* initialized? */
+ if (!fps->fps_net) /* initialized? */
return;
spin_lock(&fps->fps_lock);
@@ -1434,7 +1390,7 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
kib_fmr_pool_t, fpo_list);
fpo->fpo_failed = 1;
list_del(&fpo->fpo_list);
- if (fpo->fpo_map_count == 0)
+ if (!fpo->fpo_map_count)
list_add(&fpo->fpo_list, zombies);
else
list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
@@ -1445,7 +1401,7 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
{
- if (fps->fps_net != NULL) { /* initialized? */
+ if (fps->fps_net) { /* initialized? */
kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list);
}
@@ -1458,7 +1414,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
kib_fmr_pool_t *fpo;
int rc;
- memset(fps, 0, sizeof(kib_fmr_poolset_t));
+ memset(fps, 0, sizeof(*fps));
fps->fps_net = net;
fps->fps_cpt = cpt;
@@ -1469,7 +1425,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
INIT_LIST_HEAD(&fps->fps_failed_pool_list);
rc = kiblnd_create_fmr_pool(fps, &fpo);
- if (rc == 0)
+ if (!rc)
list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
return rc;
@@ -1477,7 +1433,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
{
- if (fpo->fpo_map_count != 0) /* still in use */
+ if (fpo->fpo_map_count) /* still in use */
return 0;
if (fpo->fpo_failed)
return 1;
@@ -1494,11 +1450,11 @@ void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
int rc;
rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
- LASSERT(rc == 0);
+ LASSERT(!rc);
- if (status != 0) {
+ if (status) {
rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
- LASSERT(rc == 0);
+ LASSERT(!rc);
}
fmr->fmr_pool = NULL;
@@ -1563,11 +1519,9 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
if (fps->fps_increasing) {
spin_unlock(&fps->fps_lock);
- CDEBUG(D_NET,
- "Another thread is allocating new FMR pool, waiting for her to complete\n");
+ CDEBUG(D_NET, "Another thread is allocating new FMR pool, waiting for her to complete\n");
schedule();
goto again;
-
}
if (time_before(cfs_time_current(), fps->fps_next_retry)) {
@@ -1583,7 +1537,7 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
rc = kiblnd_create_fmr_pool(fps, &fpo);
spin_lock(&fps->fps_lock);
fps->fps_increasing = 0;
- if (rc == 0) {
+ if (!rc) {
fps->fps_version++;
list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
} else {
@@ -1597,7 +1551,7 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
static void kiblnd_fini_pool(kib_pool_t *pool)
{
LASSERT(list_empty(&pool->po_free_list));
- LASSERT(pool->po_allocated == 0);
+ LASSERT(!pool->po_allocated);
CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
}
@@ -1606,7 +1560,7 @@ static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
{
CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
- memset(pool, 0, sizeof(kib_pool_t));
+ memset(pool, 0, sizeof(*pool));
INIT_LIST_HEAD(&pool->po_free_list);
pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
pool->po_owner = ps;
@@ -1621,14 +1575,14 @@ static void kiblnd_destroy_pool_list(struct list_head *head)
pool = list_entry(head->next, kib_pool_t, po_list);
list_del(&pool->po_list);
- LASSERT(pool->po_owner != NULL);
+ LASSERT(pool->po_owner);
pool->po_owner->ps_pool_destroy(pool);
}
}
static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
{
- if (ps->ps_net == NULL) /* initialized? */
+ if (!ps->ps_net) /* initialized? */
return;
spin_lock(&ps->ps_lock);
@@ -1637,7 +1591,7 @@ static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
kib_pool_t, po_list);
po->po_failed = 1;
list_del(&po->po_list);
- if (po->po_allocated == 0)
+ if (!po->po_allocated)
list_add(&po->po_list, zombies);
else
list_add(&po->po_list, &ps->ps_failed_pool_list);
@@ -1647,7 +1601,7 @@ static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
static void kiblnd_fini_poolset(kib_poolset_t *ps)
{
- if (ps->ps_net != NULL) { /* initialized? */
+ if (ps->ps_net) { /* initialized? */
kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
kiblnd_destroy_pool_list(&ps->ps_pool_list);
}
@@ -1663,7 +1617,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
kib_pool_t *pool;
int rc;
- memset(ps, 0, sizeof(kib_poolset_t));
+ memset(ps, 0, sizeof(*ps));
ps->ps_cpt = cpt;
ps->ps_net = net;
@@ -1680,7 +1634,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
INIT_LIST_HEAD(&ps->ps_failed_pool_list);
rc = ps->ps_pool_create(ps, size, &pool);
- if (rc == 0)
+ if (!rc)
list_add(&pool->po_list, &ps->ps_pool_list);
else
CERROR("Failed to create the first pool for %s\n", ps->ps_name);
@@ -1690,7 +1644,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
{
- if (pool->po_allocated != 0) /* still in use */
+ if (pool->po_allocated) /* still in use */
return 0;
if (pool->po_failed)
return 1;
@@ -1706,7 +1660,7 @@ void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
spin_lock(&ps->ps_lock);
- if (ps->ps_node_fini != NULL)
+ if (ps->ps_node_fini)
ps->ps_node_fini(pool, node);
LASSERT(pool->po_allocated > 0);
@@ -1731,6 +1685,9 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
{
struct list_head *node;
kib_pool_t *pool;
+ unsigned int interval = 1;
+ unsigned long time_before;
+ unsigned int trips = 0;
int rc;
again:
@@ -1744,7 +1701,7 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
node = pool->po_free_list.next;
list_del(node);
- if (ps->ps_node_init != NULL) {
+ if (ps->ps_node_init) {
/* still hold the lock */
ps->ps_node_init(pool, node);
}
@@ -1756,9 +1713,15 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
if (ps->ps_increasing) {
/* another thread is allocating a new pool */
spin_unlock(&ps->ps_lock);
- CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting for her to complete\n",
- ps->ps_name);
- schedule();
+ trips++;
+ CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting %d HZs for her to complete. trips = %d\n",
+ ps->ps_name, interval, trips);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(interval);
+ if (interval < cfs_time_seconds(1))
+ interval *= 2;
+
goto again;
}
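
Note: this swaps a bare schedule(), which can degenerate into a busy loop while another thread grows the pool, for a bounded exponential backoff. The same pattern in isolation, with a hypothetical pool_ready() standing in for the retry condition:

	unsigned int interval = 1;	/* jiffies */

	while (!pool_ready()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(interval);
		if (interval < cfs_time_seconds(1))
			interval *= 2;	/* 1, 2, 4, ... capped near one second */
	}
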
@@ -1772,12 +1735,14 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
spin_unlock(&ps->ps_lock);
CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
-
+ time_before = cfs_time_current();
rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
+ CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete",
+ cfs_time_current() - time_before);
spin_lock(&ps->ps_lock);
ps->ps_increasing = 0;
- if (rc == 0) {
+ if (!rc) {
list_add_tail(&pool->po_list, &ps->ps_pool_list);
} else {
ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
@@ -1794,37 +1759,37 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
int i;
- LASSERT(pool->po_allocated == 0);
+ LASSERT(!pool->po_allocated);
- if (tpo->tpo_tx_pages != NULL) {
+ if (tpo->tpo_tx_pages) {
kiblnd_unmap_tx_pool(tpo);
kiblnd_free_pages(tpo->tpo_tx_pages);
}
- if (tpo->tpo_tx_descs == NULL)
+ if (!tpo->tpo_tx_descs)
goto out;
for (i = 0; i < pool->po_size; i++) {
kib_tx_t *tx = &tpo->tpo_tx_descs[i];
list_del(&tx->tx_list);
- if (tx->tx_pages != NULL)
+ if (tx->tx_pages)
LIBCFS_FREE(tx->tx_pages,
LNET_MAX_IOV *
sizeof(*tx->tx_pages));
- if (tx->tx_frags != NULL)
+ if (tx->tx_frags)
LIBCFS_FREE(tx->tx_frags,
IBLND_MAX_RDMA_FRAGS *
sizeof(*tx->tx_frags));
- if (tx->tx_wrq != NULL)
+ if (tx->tx_wrq)
LIBCFS_FREE(tx->tx_wrq,
(1 + IBLND_MAX_RDMA_FRAGS) *
sizeof(*tx->tx_wrq));
- if (tx->tx_sge != NULL)
+ if (tx->tx_sge)
LIBCFS_FREE(tx->tx_sge,
(1 + IBLND_MAX_RDMA_FRAGS) *
sizeof(*tx->tx_sge));
- if (tx->tx_rd != NULL)
+ if (tx->tx_rd)
LIBCFS_FREE(tx->tx_rd,
offsetof(kib_rdma_desc_t,
rd_frags[IBLND_MAX_RDMA_FRAGS]));
@@ -1834,7 +1799,7 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
pool->po_size * sizeof(kib_tx_t));
out:
kiblnd_fini_pool(pool);
- LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
+ LIBCFS_FREE(tpo, sizeof(*tpo));
}
static int kiblnd_tx_pool_size(int ncpts)
@@ -1853,7 +1818,7 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
kib_tx_pool_t *tpo;
LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
- if (tpo == NULL) {
+ if (!tpo) {
CERROR("Failed to allocate TX pool\n");
return -ENOMEM;
}
@@ -1864,15 +1829,15 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
tpo->tpo_tx_pages = NULL;
npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
- if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) {
+ if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg)) {
CERROR("Can't allocate tx pages: %d\n", npg);
- LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
+ LIBCFS_FREE(tpo, sizeof(*tpo));
return -ENOMEM;
}
LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
size * sizeof(kib_tx_t));
- if (tpo->tpo_tx_descs == NULL) {
+ if (!tpo->tpo_tx_descs) {
CERROR("Can't allocate %d tx descriptors\n", size);
ps->ps_pool_destroy(pool);
return -ENOMEM;
@@ -1884,17 +1849,17 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
kib_tx_t *tx = &tpo->tpo_tx_descs[i];
tx->tx_pool = tpo;
- if (ps->ps_net->ibn_fmr_ps != NULL) {
+ if (ps->ps_net->ibn_fmr_ps) {
LIBCFS_CPT_ALLOC(tx->tx_pages,
lnet_cpt_table(), ps->ps_cpt,
LNET_MAX_IOV * sizeof(*tx->tx_pages));
- if (tx->tx_pages == NULL)
+ if (!tx->tx_pages)
break;
}
LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags));
- if (tx->tx_frags == NULL)
+ if (!tx->tx_frags)
break;
sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
@@ -1902,19 +1867,19 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
(1 + IBLND_MAX_RDMA_FRAGS) *
sizeof(*tx->tx_wrq));
- if (tx->tx_wrq == NULL)
+ if (!tx->tx_wrq)
break;
LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
(1 + IBLND_MAX_RDMA_FRAGS) *
sizeof(*tx->tx_sge));
- if (tx->tx_sge == NULL)
+ if (!tx->tx_sge)
break;
LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
offsetof(kib_rdma_desc_t,
rd_frags[IBLND_MAX_RDMA_FRAGS]));
- if (tx->tx_rd == NULL)
+ if (!tx->tx_rd)
break;
}
@@ -1945,23 +1910,23 @@ static void kiblnd_net_fini_pools(kib_net_t *net)
kib_tx_poolset_t *tps;
kib_fmr_poolset_t *fps;
- if (net->ibn_tx_ps != NULL) {
+ if (net->ibn_tx_ps) {
tps = net->ibn_tx_ps[i];
kiblnd_fini_poolset(&tps->tps_poolset);
}
- if (net->ibn_fmr_ps != NULL) {
+ if (net->ibn_fmr_ps) {
fps = net->ibn_fmr_ps[i];
kiblnd_fini_fmr_poolset(fps);
}
}
- if (net->ibn_tx_ps != NULL) {
+ if (net->ibn_tx_ps) {
cfs_percpt_free(net->ibn_tx_ps);
net->ibn_tx_ps = NULL;
}
- if (net->ibn_fmr_ps != NULL) {
+ if (net->ibn_fmr_ps) {
cfs_percpt_free(net->ibn_fmr_ps);
net->ibn_fmr_ps = NULL;
}
@@ -1975,8 +1940,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
int i;
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (*kiblnd_tunables.kib_map_on_demand == 0 &&
- net->ibn_dev->ibd_hdev->ibh_nmrs == 1) {
+ if (!*kiblnd_tunables.kib_map_on_demand) {
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
goto create_tx_pool;
}
@@ -1996,7 +1960,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
* TX pool must be created later than FMR, see LU-2268
* for details
*/
- LASSERT(net->ibn_tx_ps == NULL);
+ LASSERT(!net->ibn_tx_ps);
/*
* premapping can fail if ibd_nmr > 1, so we always create
@@ -2005,56 +1969,45 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(kib_fmr_poolset_t));
- if (net->ibn_fmr_ps == NULL) {
+ if (!net->ibn_fmr_ps) {
CERROR("Failed to allocate FMR pool array\n");
rc = -ENOMEM;
goto failed;
}
for (i = 0; i < ncpts; i++) {
- cpt = (cpts == NULL) ? i : cpts[i];
+ cpt = !cpts ? i : cpts[i];
rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net,
kiblnd_fmr_pool_size(ncpts),
kiblnd_fmr_flush_trigger(ncpts));
- if (rc == -ENOSYS && i == 0) /* no FMR */
- break;
-
- if (rc != 0) { /* a real error */
+ if (rc) {
CERROR("Can't initialize FMR pool for CPT %d: %d\n",
cpt, rc);
goto failed;
}
}
- if (i > 0) {
+ if (i > 0)
LASSERT(i == ncpts);
- goto create_tx_pool;
- }
-
- cfs_percpt_free(net->ibn_fmr_ps);
- net->ibn_fmr_ps = NULL;
-
- CWARN("Device does not support FMR\n");
- goto failed;
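
Note: the -ENOSYS "no FMR support" soft fallback is removed. FMR pools are now only attempted when map_on_demand is enabled (the function short-circuits past them otherwise, since the single global MR suffices), so a failure to create them becomes a hard error. The entry check from the earlier hunk:

	if (!*kiblnd_tunables.kib_map_on_demand) {
		read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
		goto create_tx_pool;	/* global DMA MR is enough */
	}
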
create_tx_pool:
net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(kib_tx_poolset_t));
- if (net->ibn_tx_ps == NULL) {
+ if (!net->ibn_tx_ps) {
CERROR("Failed to allocate tx pool array\n");
rc = -ENOMEM;
goto failed;
}
for (i = 0; i < ncpts; i++) {
- cpt = (cpts == NULL) ? i : cpts[i];
+ cpt = !cpts ? i : cpts[i];
rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset,
cpt, net, "TX",
kiblnd_tx_pool_size(ncpts),
kiblnd_create_tx_pool,
kiblnd_destroy_tx_pool,
kiblnd_tx_init, NULL);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't initialize TX pool for CPT %d: %d\n",
cpt, rc);
goto failed;
@@ -2064,14 +2017,16 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
return 0;
failed:
kiblnd_net_fini_pools(net);
- LASSERT(rc != 0);
+ LASSERT(rc);
return rc;
}
static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
{
- /* It's safe to assume a HCA can handle a page size
- * matching that of the native system */
+ /*
+ * It's safe to assume a HCA can handle a page size
+ * matching that of the native system
+ */
hdev->ibh_page_shift = PAGE_SHIFT;
hdev->ibh_page_size = 1 << PAGE_SHIFT;
hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1);
@@ -2082,44 +2037,28 @@ static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
return 0;
}
- for (hdev->ibh_mr_shift = 0;
- hdev->ibh_mr_shift < 64; hdev->ibh_mr_shift++) {
- if (hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) ||
- hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) - 1)
- return 0;
- }
-
CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size);
return -EINVAL;
}
static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
{
- int i;
-
- if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL)
+ if (!hdev->ibh_mrs)
return;
- for (i = 0; i < hdev->ibh_nmrs; i++) {
- if (hdev->ibh_mrs[i] == NULL)
- break;
+ ib_dereg_mr(hdev->ibh_mrs);
- ib_dereg_mr(hdev->ibh_mrs[i]);
- }
-
- LIBCFS_FREE(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
- hdev->ibh_mrs = NULL;
- hdev->ibh_nmrs = 0;
+ hdev->ibh_mrs = NULL;
}
void kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
{
kiblnd_hdev_cleanup_mrs(hdev);
- if (hdev->ibh_pd != NULL)
+ if (hdev->ibh_pd)
ib_dealloc_pd(hdev->ibh_pd);
- if (hdev->ibh_cmid != NULL)
+ if (hdev->ibh_cmid)
rdma_destroy_id(hdev->ibh_cmid);
LIBCFS_FREE(hdev, sizeof(*hdev));
@@ -2132,18 +2071,9 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
rc = kiblnd_hdev_get_attr(hdev);
- if (rc != 0)
+ if (rc)
return rc;
- LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs));
- if (hdev->ibh_mrs == NULL) {
- CERROR("Failed to allocate MRs table\n");
- return -ENOMEM;
- }
-
- hdev->ibh_mrs[0] = NULL;
- hdev->ibh_nmrs = 1;
-
mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
if (IS_ERR(mr)) {
CERROR("Failed ib_get_dma_mr : %ld\n", PTR_ERR(mr));
@@ -2151,7 +2081,7 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
return PTR_ERR(mr);
}
- hdev->ibh_mrs[0] = mr;
+ hdev->ibh_mrs = mr;
return 0;
}
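
Note: MR bookkeeping collapses from an array (ibh_mrs[]/ibh_nmrs, with ibh_mr_shift routing an address to its power-of-two region) to one ib_get_dma_mr() registration that covers all of memory:

	mr = ib_get_dma_mr(hdev->ibh_pd,
			   IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(mr))
		return PTR_ERR(mr);
	hdev->ibh_mrs = mr;	/* was hdev->ibh_mrs[0] in an allocated table */
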
@@ -2170,12 +2100,13 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev)
struct sockaddr_in dstaddr;
int rc;
- if (dev->ibd_hdev == NULL || /* initializing */
- dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */
+ if (!dev->ibd_hdev || /* initializing */
+ !dev->ibd_hdev->ibh_cmid || /* listener is dead */
*kiblnd_tunables.kib_dev_failover > 1) /* debugging */
return 1;
- /* XXX: it's UGLY, but I don't have better way to find
+ /*
+ * XXX: it's UGLY, but I don't have better way to find
* ib-bonding HCA failover because:
*
* a. no reliable CM event for HCA failover...
@@ -2184,7 +2115,8 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev)
* We have only two choices at this point:
*
* a. rdma_bind_addr(), it will conflict with listener cmid
- * b. rdma_resolve_addr() to zero addr */
+ * b. rdma_resolve_addr() to zero addr
+ */
cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP,
IB_QPT_RC);
if (IS_ERR(cmid)) {
@@ -2201,7 +2133,7 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev)
dstaddr.sin_family = AF_INET;
rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
(struct sockaddr *)&dstaddr, 1);
- if (rc != 0 || cmid->device == NULL) {
+ if (rc || !cmid->device) {
CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
dev->ibd_ifname, &dev->ibd_ifip,
cmid->device, rc);
@@ -2230,24 +2162,27 @@ int kiblnd_dev_failover(kib_dev_t *dev)
int i;
LASSERT(*kiblnd_tunables.kib_dev_failover > 1 ||
- dev->ibd_can_failover ||
- dev->ibd_hdev == NULL);
+ dev->ibd_can_failover || !dev->ibd_hdev);
rc = kiblnd_dev_need_failover(dev);
if (rc <= 0)
goto out;
- if (dev->ibd_hdev != NULL &&
- dev->ibd_hdev->ibh_cmid != NULL) {
- /* XXX it's not good to close old listener at here,
+ if (dev->ibd_hdev &&
+ dev->ibd_hdev->ibh_cmid) {
+ /*
+ * XXX it's not good to close old listener at here,
* because we can fail to create new listener.
* But we have to close it now, otherwise rdma_bind_addr
- * will return EADDRINUSE... How crap! */
+ * will return EADDRINUSE... How crap!
+ */
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
cmid = dev->ibd_hdev->ibh_cmid;
- /* make next schedule of kiblnd_dev_need_failover()
- * return 1 for me */
+ /*
+ * make next schedule of kiblnd_dev_need_failover()
+ * return 1 for me
+ */
dev->ibd_hdev->ibh_cmid = NULL;
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
@@ -2269,7 +2204,7 @@ int kiblnd_dev_failover(kib_dev_t *dev)
/* Bind to failover device or port */
rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
- if (rc != 0 || cmid->device == NULL) {
+ if (rc || !cmid->device) {
CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
dev->ibd_ifname, &dev->ibd_ifip,
cmid->device, rc);
@@ -2278,7 +2213,7 @@ int kiblnd_dev_failover(kib_dev_t *dev)
}
LIBCFS_ALLOC(hdev, sizeof(*hdev));
- if (hdev == NULL) {
+ if (!hdev) {
CERROR("Failed to allocate kib_hca_dev\n");
rdma_destroy_id(cmid);
rc = -ENOMEM;
@@ -2300,13 +2235,13 @@ int kiblnd_dev_failover(kib_dev_t *dev)
hdev->ibh_pd = pd;
rc = rdma_listen(cmid, 0);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't start new listener: %d\n", rc);
goto out;
}
rc = kiblnd_hdev_setup_mrs(hdev);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't setup device: %d\n", rc);
goto out;
}
@@ -2334,10 +2269,10 @@ int kiblnd_dev_failover(kib_dev_t *dev)
kiblnd_destroy_pool_list(&zombie_ppo);
if (!list_empty(&zombie_fpo))
kiblnd_destroy_fmr_pool_list(&zombie_fpo);
- if (hdev != NULL)
+ if (hdev)
kiblnd_hdev_decref(hdev);
- if (rc != 0)
+ if (rc)
dev->ibd_failed_failover++;
else
dev->ibd_failed_failover = 0;
@@ -2347,13 +2282,13 @@ int kiblnd_dev_failover(kib_dev_t *dev)
void kiblnd_destroy_dev(kib_dev_t *dev)
{
- LASSERT(dev->ibd_nnets == 0);
+ LASSERT(!dev->ibd_nnets);
LASSERT(list_empty(&dev->ibd_nets));
list_del(&dev->ibd_fail_list);
list_del(&dev->ibd_list);
- if (dev->ibd_hdev != NULL)
+ if (dev->ibd_hdev)
kiblnd_hdev_decref(dev->ibd_hdev);
LIBCFS_FREE(dev, sizeof(*dev));
@@ -2369,7 +2304,7 @@ static kib_dev_t *kiblnd_create_dev(char *ifname)
int rc;
rc = lnet_ipif_query(ifname, &up, &ip, &netmask);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't query IPoIB interface %s: %d\n",
ifname, rc);
return NULL;
@@ -2381,11 +2316,11 @@ static kib_dev_t *kiblnd_create_dev(char *ifname)
}
LIBCFS_ALLOC(dev, sizeof(*dev));
- if (dev == NULL)
+ if (!dev)
return NULL;
netdev = dev_get_by_name(&init_net, ifname);
- if (netdev == NULL) {
+ if (!netdev) {
dev->ibd_can_failover = 0;
} else {
dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER);
@@ -2400,14 +2335,13 @@ static kib_dev_t *kiblnd_create_dev(char *ifname)
/* initialize the device */
rc = kiblnd_dev_failover(dev);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't initialize device: %d\n", rc);
LIBCFS_FREE(dev, sizeof(*dev));
return NULL;
}
- list_add_tail(&dev->ibd_list,
- &kiblnd_data.kib_devs);
+ list_add_tail(&dev->ibd_list, &kiblnd_data.kib_devs);
return dev;
}
@@ -2424,18 +2358,22 @@ static void kiblnd_base_shutdown(void)
case IBLND_INIT_ALL:
case IBLND_INIT_DATA:
- LASSERT(kiblnd_data.kib_peers != NULL);
+ LASSERT(kiblnd_data.kib_peers);
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
LASSERT(list_empty(&kiblnd_data.kib_peers[i]));
LASSERT(list_empty(&kiblnd_data.kib_connd_zombies));
LASSERT(list_empty(&kiblnd_data.kib_connd_conns));
+ LASSERT(list_empty(&kiblnd_data.kib_reconn_list));
+ LASSERT(list_empty(&kiblnd_data.kib_reconn_wait));
/* flag threads to terminate; wake and wait for them to die */
kiblnd_data.kib_shutdown = 1;
- /* NB: we really want to stop scheduler threads net by net
+ /*
+ * NB: we really want to stop scheduler threads net by net
* instead of the whole module, this should be improved
- * with dynamic configuration LNet */
+ * with dynamic configuration LNet
+ */
cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
wake_up_all(&sched->ibs_waitq);
@@ -2443,7 +2381,7 @@ static void kiblnd_base_shutdown(void)
wake_up_all(&kiblnd_data.kib_failover_waitq);
i = 2;
- while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
+ while (atomic_read(&kiblnd_data.kib_nthreads)) {
i++;
/* power of 2 ? */
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
@@ -2459,20 +2397,20 @@ static void kiblnd_base_shutdown(void)
break;
}
- if (kiblnd_data.kib_peers != NULL) {
+ if (kiblnd_data.kib_peers) {
LIBCFS_FREE(kiblnd_data.kib_peers,
sizeof(struct list_head) *
kiblnd_data.kib_peer_hash_size);
}
- if (kiblnd_data.kib_scheds != NULL)
+ if (kiblnd_data.kib_scheds)
cfs_percpt_free(kiblnd_data.kib_scheds);
kiblnd_data.kib_init = IBLND_INIT_NOTHING;
module_put(THIS_MODULE);
}
-void kiblnd_shutdown(lnet_ni_t *ni)
+static void kiblnd_shutdown(lnet_ni_t *ni)
{
kib_net_t *net = ni->ni_data;
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
@@ -2481,7 +2419,7 @@ void kiblnd_shutdown(lnet_ni_t *ni)
LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);
- if (net == NULL)
+ if (!net)
goto out;
write_lock_irqsave(g_lock, flags);
@@ -2498,7 +2436,7 @@ void kiblnd_shutdown(lnet_ni_t *ni)
/* Wait for all peer state to clean up */
i = 2;
- while (atomic_read(&net->ibn_npeers) != 0) {
+ while (atomic_read(&net->ibn_npeers)) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
"%s: waiting for %d peers to disconnect\n",
@@ -2519,10 +2457,9 @@ void kiblnd_shutdown(lnet_ni_t *ni)
/* fall through */
case IBLND_INIT_NOTHING:
- LASSERT(atomic_read(&net->ibn_nconns) == 0);
+ LASSERT(!atomic_read(&net->ibn_nconns));
- if (net->ibn_dev != NULL &&
- net->ibn_dev->ibd_nnets == 0)
+ if (net->ibn_dev && !net->ibn_dev->ibd_nnets)
kiblnd_destroy_dev(net->ibn_dev);
break;
@@ -2558,7 +2495,7 @@ static int kiblnd_base_startup(void)
kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
LIBCFS_ALLOC(kiblnd_data.kib_peers,
sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size);
- if (kiblnd_data.kib_peers == NULL)
+ if (!kiblnd_data.kib_peers)
goto failed;
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
@@ -2566,12 +2503,15 @@ static int kiblnd_base_startup(void)
spin_lock_init(&kiblnd_data.kib_connd_lock);
INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
+ INIT_LIST_HEAD(&kiblnd_data.kib_reconn_list);
+ INIT_LIST_HEAD(&kiblnd_data.kib_reconn_wait);
+
init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(*sched));
- if (kiblnd_data.kib_scheds == NULL)
+ if (!kiblnd_data.kib_scheds)
goto failed;
cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
@@ -2585,8 +2525,10 @@ static int kiblnd_base_startup(void)
if (*kiblnd_tunables.kib_nscheds > 0) {
nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds);
} else {
- /* max to half of CPUs, another half is reserved for
- * upper layer modules */
+ /*
+ * max to half of CPUs, another half is reserved for
+ * upper layer modules
+ */
nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
}
@@ -2601,16 +2543,16 @@ static int kiblnd_base_startup(void)
/*****************************************************/
rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd");
- if (rc != 0) {
+ if (rc) {
CERROR("Can't spawn o2iblnd connd: %d\n", rc);
goto failed;
}
- if (*kiblnd_tunables.kib_dev_failover != 0)
+ if (*kiblnd_tunables.kib_dev_failover)
rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
"kiblnd_failover");
- if (rc != 0) {
+ if (rc) {
CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
goto failed;
}
@@ -2632,7 +2574,7 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched)
int nthrs;
int i;
- if (sched->ibs_nthreads == 0) {
+ if (!sched->ibs_nthreads) {
if (*kiblnd_tunables.kib_nscheds > 0) {
nthrs = sched->ibs_nthreads_max;
} else {
@@ -2655,7 +2597,7 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched)
snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
- if (rc == 0)
+ if (!rc)
continue;
CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
@@ -2677,14 +2619,14 @@ static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts,
for (i = 0; i < ncpts; i++) {
struct kib_sched_info *sched;
- cpt = (cpts == NULL) ? i : cpts[i];
+ cpt = !cpts ? i : cpts[i];
sched = kiblnd_data.kib_scheds[cpt];
if (!newdev && sched->ibs_nthreads > 0)
continue;
rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]);
- if (rc != 0) {
+ if (rc) {
CERROR("Failed to start scheduler threads for %s\n",
dev->ibd_ifname);
return rc;
@@ -2702,30 +2644,30 @@ static kib_dev_t *kiblnd_dev_search(char *ifname)
colon = strchr(ifname, ':');
list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
- if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
+ if (!strcmp(&dev->ibd_ifname[0], ifname))
return dev;
- if (alias != NULL)
+ if (alias)
continue;
colon2 = strchr(dev->ibd_ifname, ':');
- if (colon != NULL)
+ if (colon)
*colon = 0;
- if (colon2 != NULL)
+ if (colon2)
*colon2 = 0;
- if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
+ if (!strcmp(&dev->ibd_ifname[0], ifname))
alias = dev;
- if (colon != NULL)
+ if (colon)
*colon = ':';
- if (colon2 != NULL)
+ if (colon2)
*colon2 = ':';
}
return alias;
}
-int kiblnd_startup(lnet_ni_t *ni)
+static int kiblnd_startup(lnet_ni_t *ni)
{
char *ifname;
kib_dev_t *ibdev = NULL;
@@ -2739,13 +2681,13 @@ int kiblnd_startup(lnet_ni_t *ni)
if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
rc = kiblnd_base_startup();
- if (rc != 0)
+ if (rc)
return rc;
}
LIBCFS_ALLOC(net, sizeof(*net));
ni->ni_data = net;
- if (net == NULL)
+ if (!net)
goto net_failed;
ktime_get_real_ts64(&tv);
@@ -2757,11 +2699,11 @@ int kiblnd_startup(lnet_ni_t *ni)
ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits;
ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits;
- if (ni->ni_interfaces[0] != NULL) {
+ if (ni->ni_interfaces[0]) {
/* Use the IPoIB interface specified in 'networks=' */
CLASSERT(LNET_MAX_INTERFACES > 1);
- if (ni->ni_interfaces[1] != NULL) {
+ if (ni->ni_interfaces[1]) {
CERROR("Multiple interfaces not supported\n");
goto failed;
}
@@ -2778,12 +2720,12 @@ int kiblnd_startup(lnet_ni_t *ni)
ibdev = kiblnd_dev_search(ifname);
- newdev = ibdev == NULL;
+ newdev = !ibdev;
/* hmm...create kib_dev even for alias */
- if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0)
+ if (!ibdev || strcmp(&ibdev->ibd_ifname[0], ifname))
ibdev = kiblnd_create_dev(ifname);
- if (ibdev == NULL)
+ if (!ibdev)
goto failed;
net->ibn_dev = ibdev;
@@ -2791,11 +2733,11 @@ int kiblnd_startup(lnet_ni_t *ni)
rc = kiblnd_dev_start_threads(ibdev, newdev,
ni->ni_cpts, ni->ni_ncpts);
- if (rc != 0)
+ if (rc)
goto failed;
rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts);
- if (rc != 0) {
+ if (rc) {
CERROR("Failed to initialize NI pools: %d\n", rc);
goto failed;
}
@@ -2810,7 +2752,7 @@ int kiblnd_startup(lnet_ni_t *ni)
return 0;
failed:
- if (net->ibn_dev == NULL && ibdev != NULL)
+ if (!net->ibn_dev && ibdev)
kiblnd_destroy_dev(ibdev);
net_failed:
@@ -2820,25 +2762,35 @@ net_failed:
return -ENETDOWN;
}
-static void __exit kiblnd_module_fini(void)
+static lnd_t the_o2iblnd = {
+ .lnd_type = O2IBLND,
+ .lnd_startup = kiblnd_startup,
+ .lnd_shutdown = kiblnd_shutdown,
+ .lnd_ctl = kiblnd_ctl,
+ .lnd_query = kiblnd_query,
+ .lnd_send = kiblnd_send,
+ .lnd_recv = kiblnd_recv,
+};
+
+static void __exit ko2iblnd_exit(void)
{
lnet_unregister_lnd(&the_o2iblnd);
}
-static int __init kiblnd_module_init(void)
+static int __init ko2iblnd_init(void)
{
int rc;
CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
CLASSERT(offsetof(kib_msg_t,
- ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
- <= IBLND_MSG_SIZE);
+ ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
+ <= IBLND_MSG_SIZE);
CLASSERT(offsetof(kib_msg_t,
- ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
- <= IBLND_MSG_SIZE);
+ ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
+ <= IBLND_MSG_SIZE);
rc = kiblnd_tunables_init();
- if (rc != 0)
+ if (rc)
return rc;
lnet_register_lnd(&the_o2iblnd);
@@ -2847,8 +2799,9 @@ static int __init kiblnd_module_init(void)
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v2.00");
+MODULE_DESCRIPTION("OpenIB gen2 LNet Network Driver");
+MODULE_VERSION("2.7.0");
MODULE_LICENSE("GPL");
-module_init(kiblnd_module_init);
-module_exit(kiblnd_module_fini);
+module_init(ko2iblnd_init);
+module_exit(ko2iblnd_exit);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index 025faa9f8..bfcbdd167 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -60,17 +60,17 @@
#include <net/sock.h>
#include <linux/in.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/ib_cm.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_fmr_pool.h>
+
#define DEBUG_SUBSYSTEM S_LND
#include "../../../include/linux/libcfs/libcfs.h"
#include "../../../include/linux/lnet/lnet.h"
#include "../../../include/linux/lnet/lib-lnet.h"
-#include <rdma/rdma_cm.h>
-#include <rdma/ib_cm.h>
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_fmr_pool.h>
-
#define IBLND_PEER_HASH_SIZE 101 /* # peer lists */
/* # scheduler loops before reschedule */
#define IBLND_RESCHED 100
@@ -146,9 +146,9 @@ kiblnd_concurrent_sends_v1(void)
#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1)
#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0)
-#define IBLND_MSG_SIZE (4<<10) /* max size of queued messages (inc hdr) */
+#define IBLND_MSG_SIZE (4 << 10) /* max size of queued messages (inc hdr) */
#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
-#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand != 0 ? \
+#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand ? \
*kiblnd_tunables.kib_map_on_demand : \
IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */
#define IBLND_RDMA_FRAGS(v) ((v) == IBLND_MSG_VERSION_1 ? \
@@ -162,18 +162,17 @@ kiblnd_concurrent_sends_v1(void)
#define IBLND_FMR_POOL 256
#define IBLND_FMR_POOL_FLUSH 192
-/* TX messages (shared by all connections) */
-#define IBLND_TX_MSGS() (*kiblnd_tunables.kib_ntx)
-
-/* RX messages (per connection) */
-#define IBLND_RX_MSGS(v) (IBLND_MSG_QUEUE_SIZE(v) * 2 + IBLND_OOB_MSGS(v))
-#define IBLND_RX_MSG_BYTES(v) (IBLND_RX_MSGS(v) * IBLND_MSG_SIZE)
-#define IBLND_RX_MSG_PAGES(v) ((IBLND_RX_MSG_BYTES(v) + PAGE_SIZE - 1) / PAGE_SIZE)
+#define IBLND_RX_MSGS(c) \
+ ((c->ibc_queue_depth) * 2 + IBLND_OOB_MSGS(c->ibc_version))
+#define IBLND_RX_MSG_BYTES(c) (IBLND_RX_MSGS(c) * IBLND_MSG_SIZE)
+#define IBLND_RX_MSG_PAGES(c) \
+ ((IBLND_RX_MSG_BYTES(c) + PAGE_SIZE - 1) / PAGE_SIZE)
/* WRs and CQEs (per connection) */
-#define IBLND_RECV_WRS(v) IBLND_RX_MSGS(v)
-#define IBLND_SEND_WRS(v) ((IBLND_RDMA_FRAGS(v) + 1) * IBLND_CONCURRENT_SENDS(v))
-#define IBLND_CQ_ENTRIES(v) (IBLND_RECV_WRS(v) + IBLND_SEND_WRS(v))
+#define IBLND_RECV_WRS(c) IBLND_RX_MSGS(c)
+#define IBLND_SEND_WRS(c) \
+ ((c->ibc_max_frags + 1) * IBLND_CONCURRENT_SENDS(c->ibc_version))
+#define IBLND_CQ_ENTRIES(c) (IBLND_RECV_WRS(c) + IBLND_SEND_WRS(c))
struct kib_hca_dev;
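Annotation: the RX/WR/CQE sizing macros are re-keyed from the protocol version (v) to the connection (c), so buffer counts now follow the queue depth and fragment count actually negotiated per connection (ibc_queue_depth, ibc_max_frags) rather than version-wide constants. A quick sanity check of the new arithmetic (a sketch; figures assume a connection that negotiated queue depth 8 on an OOB-capable version, so IBLND_OOB_MSGS() == 2, with 4 KiB pages):

/*
 * IBLND_RX_MSGS(conn)      = 8 * 2 + 2           = 18 buffers
 * IBLND_RX_MSG_BYTES(conn) = 18 * IBLND_MSG_SIZE = 18 * 4096 = 73728
 * IBLND_RX_MSG_PAGES(conn) = (73728 + 4095)/4096 = 18 pages
 */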
@@ -209,8 +208,7 @@ typedef struct kib_hca_dev {
__u64 ibh_page_mask; /* page mask of current HCA */
int ibh_mr_shift; /* bits shift of max MR size */
__u64 ibh_mr_size; /* size of MR */
- int ibh_nmrs; /* # of global MRs */
- struct ib_mr **ibh_mrs; /* global MR */
+ struct ib_mr *ibh_mrs; /* global MR */
struct ib_pd *ibh_pd; /* PD */
kib_dev_t *ibh_dev; /* owner */
atomic_t ibh_ref; /* refcount */
@@ -350,6 +348,16 @@ typedef struct {
void *kib_connd; /* the connd task (serialisation assertions) */
struct list_head kib_connd_conns; /* connections to setup/teardown */
struct list_head kib_connd_zombies; /* connections with zero refcount */
+ /* connections to reconnect */
+ struct list_head kib_reconn_list;
+ /* peers waiting for reconnection */
+ struct list_head kib_reconn_wait;
+ /**
+ * The time (in seconds) at which peers are pulled off
+ * \a kib_reconn_wait for reconnection.
+ */
+ time64_t kib_reconn_sec;
+
wait_queue_head_t kib_connd_waitq; /* connection daemon sleeps here */
spinlock_t kib_connd_lock; /* serialise */
struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
@@ -465,10 +473,10 @@ typedef struct {
#define IBLND_REJECT_FATAL 3 /* Anything else */
#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */
#define IBLND_REJECT_CONN_STALE 5 /* stale peer */
-#define IBLND_REJECT_RDMA_FRAGS 6 /* Fatal: peer's rdma frags can't match */
- /* mine */
-#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't */
- /* match mine */
+/* peer's rdma frags don't match mine */
+#define IBLND_REJECT_RDMA_FRAGS 6
+/* peer's msg queue size doesn't match mine */
+#define IBLND_REJECT_MSG_QUEUE_SIZE 7
/***********************************************************************/
@@ -527,6 +535,8 @@ typedef struct kib_conn {
struct list_head ibc_list; /* stash on peer's conn list */
struct list_head ibc_sched_list; /* schedule for attention */
__u16 ibc_version; /* version of connection */
+ /* reconnect later */
+ __u16 ibc_reconnect:1;
__u64 ibc_incarnation; /* which instance of the peer */
atomic_t ibc_refcount; /* # users */
int ibc_state; /* what's happening */
@@ -536,6 +546,10 @@ typedef struct kib_conn {
int ibc_outstanding_credits; /* # credits to return */
int ibc_reserved_credits; /* # ACK/DONE msg credits */
int ibc_comms_error; /* set on comms error */
+ /* connection's queue depth */
+ __u16 ibc_queue_depth;
+ /* connection's max frags */
+ __u16 ibc_max_frags;
unsigned int ibc_nrx:16; /* receive buffers owned */
unsigned int ibc_scheduled:1; /* scheduled for attention */
unsigned int ibc_ready:1; /* CQ callback fired */
@@ -572,18 +586,29 @@ typedef struct kib_peer {
struct list_head ibp_list; /* stash on global peer list */
lnet_nid_t ibp_nid; /* who's on the other end(s) */
lnet_ni_t *ibp_ni; /* LNet interface */
- atomic_t ibp_refcount; /* # users */
struct list_head ibp_conns; /* all active connections */
struct list_head ibp_tx_queue; /* msgs waiting for a conn */
- __u16 ibp_version; /* version of peer */
__u64 ibp_incarnation; /* incarnation of peer */
- int ibp_connecting; /* current active connection attempts
- */
- int ibp_accepting; /* current passive connection attempts
- */
- int ibp_error; /* errno on closing this peer */
- unsigned long ibp_last_alive; /* when (in jiffies) I was last alive
- */
+ /* when (in jiffies) I was last alive */
+ unsigned long ibp_last_alive;
+ /* # users */
+ atomic_t ibp_refcount;
+ /* version of peer */
+ __u16 ibp_version;
+ /* current passive connection attempts */
+ unsigned short ibp_accepting;
+ /* current active connection attempts */
+ unsigned short ibp_connecting;
+ /* reconnect this peer later */
+ unsigned short ibp_reconnecting:1;
+ /* # consecutive reconnection attempts to this peer */
+ unsigned int ibp_reconnected;
+ /* errno on closing this peer */
+ int ibp_error;
+ /* max map_on_demand */
+ __u16 ibp_max_frags;
+ /* max_peer_credits */
+ __u16 ibp_queue_depth;
} kib_peer_t;
extern kib_data_t kiblnd_data;
@@ -611,7 +636,7 @@ kiblnd_dev_can_failover(kib_dev_t *dev)
if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
return 0;
- if (*kiblnd_tunables.kib_dev_failover == 0) /* disabled */
+ if (!*kiblnd_tunables.kib_dev_failover) /* disabled */
return 0;
if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
@@ -661,6 +686,20 @@ do { \
kiblnd_destroy_peer(peer); \
} while (0)
+static inline bool
+kiblnd_peer_connecting(kib_peer_t *peer)
+{
+ return peer->ibp_connecting ||
+ peer->ibp_reconnecting ||
+ peer->ibp_accepting;
+}
+
+static inline bool
+kiblnd_peer_idle(kib_peer_t *peer)
+{
+ return !kiblnd_peer_connecting(peer) && list_empty(&peer->ibp_conns);
+}
+
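Annotation: these two helpers fold the new ibp_reconnecting flag into the existing liveness tests, and kiblnd_peer_idle() becomes the condition under which a peer may be reaped or its stored error reported. For instance, kiblnd_peer_notify() in o2iblnd_cb.c collapses to (a sketch of the check made later in this patch, under the global lock):

if (kiblnd_peer_idle(peer) && peer->ibp_error) {
	error = peer->ibp_error;
	peer->ibp_error = 0;
}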
static inline struct list_head *
kiblnd_nid2peerlist(lnet_nid_t nid)
{
@@ -691,7 +730,8 @@ kiblnd_send_keepalive(kib_conn_t *conn)
{
return (*kiblnd_tunables.kib_keepalive > 0) &&
cfs_time_after(jiffies, conn->ibc_last_send +
- *kiblnd_tunables.kib_keepalive*HZ);
+ msecs_to_jiffies(*kiblnd_tunables.kib_keepalive *
+ MSEC_PER_SEC));
}
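Annotation: here, and for the tx deadline in o2iblnd_cb.c below, the open-coded seconds-to-jiffies multiplication is replaced by the standard helper. A sketch of the equivalence for a keepalive tunable of N seconds:

/*
 * old: conn->ibc_last_send + N * HZ
 * new: conn->ibc_last_send + msecs_to_jiffies(N * MSEC_PER_SEC)
 *
 * Both evaluate to N seconds' worth of jiffies; msecs_to_jiffies()
 * keeps the rounding and overflow handling in one well-tested place.
 */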
static inline int
@@ -710,16 +750,16 @@ kiblnd_need_noop(kib_conn_t *conn)
/* No tx to piggyback NOOP onto or no credit to send a tx */
return (list_empty(&conn->ibc_tx_queue) ||
- conn->ibc_credits == 0);
+ !conn->ibc_credits);
}
if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
!list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
- conn->ibc_credits == 0) /* no credit */
+ !conn->ibc_credits) /* no credit */
return 0;
if (conn->ibc_credits == 1 && /* last credit reserved for */
- conn->ibc_outstanding_credits == 0) /* giving back credits */
+ !conn->ibc_outstanding_credits) /* giving back credits */
return 0;
/* No tx to piggyback NOOP onto or no credit to send a tx */
@@ -755,18 +795,19 @@ kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the */
/* lowest bits of the work request id to stash the work item type. */
-#define IBLND_WID_TX 0
-#define IBLND_WID_RDMA 1
-#define IBLND_WID_RX 2
-#define IBLND_WID_MASK 3UL
+#define IBLND_WID_INVAL 0
+#define IBLND_WID_TX 1
+#define IBLND_WID_RX 2
+#define IBLND_WID_RDMA 3
+#define IBLND_WID_MASK 3UL
static inline __u64
kiblnd_ptr2wreqid(void *ptr, int type)
{
unsigned long lptr = (unsigned long)ptr;
- LASSERT((lptr & IBLND_WID_MASK) == 0);
- LASSERT((type & ~IBLND_WID_MASK) == 0);
+ LASSERT(!(lptr & IBLND_WID_MASK));
+ LASSERT(!(type & ~IBLND_WID_MASK));
return (__u64)(lptr | type);
}
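Annotation: the encoding works because descriptor alignment leaves the two low bits of the pointer free, and reserving IBLND_WID_INVAL as 0 means a zeroed wr_id can never alias a real work item. The matching decode helpers (a sketch; the real ones live elsewhere in this header and are untouched by this patch) simply mask the bits back out:

static inline void *
kiblnd_wreqid2ptr(__u64 wreqid)
{
	return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
}

static inline int
kiblnd_wreqid2type(__u64 wreqid)
{
	return wreqid & IBLND_WID_MASK;
}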
@@ -907,9 +948,8 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
- kib_rdma_desc_t *rd);
-struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev,
- __u64 addr, __u64 size);
+ kib_rdma_desc_t *rd,
+ int negotiated_nfrags);
void kiblnd_map_rx_descs(kib_conn_t *conn);
void kiblnd_unmap_rx_descs(kib_conn_t *conn);
void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
@@ -919,11 +959,6 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
int npages, __u64 iov, kib_fmr_t *fmr);
void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
-int kiblnd_startup(lnet_ni_t *ni);
-void kiblnd_shutdown(lnet_ni_t *ni);
-int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg);
-void kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
-
int kiblnd_tunables_init(void);
void kiblnd_tunables_fini(void);
@@ -933,7 +968,6 @@ int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
int kiblnd_failover_thread(void *arg);
int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
-void kiblnd_free_pages(kib_pages_t *p);
int kiblnd_cm_callback(struct rdma_cm_id *cmid,
struct rdma_cm_event *event);
@@ -942,39 +976,30 @@ int kiblnd_translate_mtu(int value);
int kiblnd_dev_failover(kib_dev_t *dev);
int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
void kiblnd_destroy_peer(kib_peer_t *peer);
+bool kiblnd_reconnect_peer(kib_peer_t *peer);
void kiblnd_destroy_dev(kib_dev_t *dev);
void kiblnd_unlink_peer_locked(kib_peer_t *peer);
-void kiblnd_peer_alive(kib_peer_t *peer);
kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid);
-void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
- int version, __u64 incarnation);
+ int version, __u64 incarnation);
int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why);
-void kiblnd_connreq_done(kib_conn_t *conn, int status);
kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
- int state, int version);
-void kiblnd_destroy_conn(kib_conn_t *conn);
+ int state, int version);
+void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn);
void kiblnd_close_conn(kib_conn_t *conn, int error);
void kiblnd_close_conn_locked(kib_conn_t *conn, int error);
-int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
- int nob, kib_rdma_desc_t *dstrd, __u64 dstcookie);
-
void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
-void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
-void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
-void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob);
void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
- int status);
-void kiblnd_check_sends (kib_conn_t *conn);
+ int status);
void kiblnd_qp_event(struct ib_event *event, void *arg);
void kiblnd_cq_event(struct ib_event *event, void *arg);
void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
- int credits, lnet_nid_t dstnid, __u64 dststamp);
+ int credits, lnet_nid_t dstnid, __u64 dststamp);
int kiblnd_unpack_msg(kib_msg_t *msg, int nob);
int kiblnd_post_rx(kib_rx_t *rx, int credit);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index c7b9ccb13..2323e8d3a 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -40,6 +40,15 @@
#include "o2iblnd.h"
+static void kiblnd_peer_alive(kib_peer_t *peer);
+static void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
+static void kiblnd_check_sends(kib_conn_t *conn);
+static void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx,
+ int type, int body_nob);
+static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
+ int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie);
+static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
+static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
static void
@@ -50,12 +59,12 @@ kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
int rc;
int i;
- LASSERT(net != NULL);
+ LASSERT(net);
LASSERT(!in_interrupt());
LASSERT(!tx->tx_queued); /* mustn't be queued for sending */
- LASSERT(tx->tx_sending == 0); /* mustn't be awaiting sent callback */
+ LASSERT(!tx->tx_sending); /* mustn't be awaiting sent callback */
LASSERT(!tx->tx_waiting); /* mustn't be awaiting peer response */
- LASSERT(tx->tx_pool != NULL);
+ LASSERT(tx->tx_pool);
kiblnd_unmap_tx(ni, tx);
@@ -64,7 +73,7 @@ kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
rc = tx->tx_status;
- if (tx->tx_conn != NULL) {
+ if (tx->tx_conn) {
LASSERT(ni == tx->tx_conn->ibc_peer->ibp_ni);
kiblnd_conn_decref(tx->tx_conn);
@@ -78,7 +87,7 @@ kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
/* delay finalize until my descs have been freed */
for (i = 0; i < 2; i++) {
- if (lntmsg[i] == NULL)
+ if (!lntmsg[i])
continue;
lnet_finalize(ni, lntmsg[i], rc);
@@ -111,19 +120,19 @@ kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)];
node = kiblnd_pool_alloc_node(&tps->tps_poolset);
- if (node == NULL)
+ if (!node)
return NULL;
- tx = container_of(node, kib_tx_t, tx_list);
+ tx = list_entry(node, kib_tx_t, tx_list);
- LASSERT(tx->tx_nwrq == 0);
+ LASSERT(!tx->tx_nwrq);
LASSERT(!tx->tx_queued);
- LASSERT(tx->tx_sending == 0);
+ LASSERT(!tx->tx_sending);
LASSERT(!tx->tx_waiting);
- LASSERT(tx->tx_status == 0);
- LASSERT(tx->tx_conn == NULL);
- LASSERT(tx->tx_lntmsg[0] == NULL);
- LASSERT(tx->tx_lntmsg[1] == NULL);
- LASSERT(tx->tx_nfrags == 0);
+ LASSERT(!tx->tx_status);
+ LASSERT(!tx->tx_conn);
+ LASSERT(!tx->tx_lntmsg[0]);
+ LASSERT(!tx->tx_lntmsg[1]);
+ LASSERT(!tx->tx_nfrags);
return tx;
}
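Annotation: container_of() gives way to list_entry() when pulling the tx off the pool freelist; the two are identical, list_entry() being the list-specific spelling from <linux/list.h>:

#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)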
@@ -149,17 +158,15 @@ kiblnd_post_rx(kib_rx_t *rx, int credit)
kib_conn_t *conn = rx->rx_conn;
kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
struct ib_recv_wr *bad_wrq = NULL;
- struct ib_mr *mr;
+ struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
int rc;
- LASSERT(net != NULL);
+ LASSERT(net);
LASSERT(!in_interrupt());
LASSERT(credit == IBLND_POSTRX_NO_CREDIT ||
credit == IBLND_POSTRX_PEER_CREDIT ||
credit == IBLND_POSTRX_RSRVD_CREDIT);
-
- mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE);
- LASSERT(mr != NULL);
+ LASSERT(mr);
rx->rx_sge.lkey = mr->lkey;
rx->rx_sge.addr = rx->rx_msgaddr;
@@ -185,7 +192,7 @@ kiblnd_post_rx(kib_rx_t *rx, int credit)
*/
kiblnd_conn_addref(conn);
rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
- if (unlikely(rc != 0)) {
+ if (unlikely(rc)) {
CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
rx->rx_nob = 0;
@@ -194,7 +201,7 @@ kiblnd_post_rx(kib_rx_t *rx, int credit)
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
goto out;
- if (unlikely(rc != 0)) {
+ if (unlikely(rc)) {
kiblnd_close_conn(conn, rc);
kiblnd_drop_rx(rx); /* No more posts for this rx */
goto out;
@@ -225,7 +232,7 @@ kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
LASSERT(!tx->tx_queued);
- LASSERT(tx->tx_sending != 0 || tx->tx_waiting);
+ LASSERT(tx->tx_sending || tx->tx_waiting);
if (tx->tx_cookie != cookie)
continue;
@@ -251,7 +258,7 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
spin_lock(&conn->ibc_lock);
tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
- if (tx == NULL) {
+ if (!tx) {
spin_unlock(&conn->ibc_lock);
CWARN("Unmatched completion type %x cookie %#llx from %s\n",
@@ -260,7 +267,7 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
return;
}
- if (tx->tx_status == 0) { /* success so far */
+ if (!tx->tx_status) { /* success so far */
if (status < 0) /* failed? */
tx->tx_status = status;
else if (txtype == IBLND_MSG_GET_REQ)
@@ -269,7 +276,7 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
tx->tx_waiting = 0;
- idle = !tx->tx_queued && (tx->tx_sending == 0);
+ idle = !tx->tx_queued && !tx->tx_sending;
if (idle)
list_del(&tx->tx_list);
@@ -285,7 +292,7 @@ kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Can't get tx for completion %x for %s\n",
type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
return;
@@ -316,19 +323,18 @@ kiblnd_handle_rx(kib_rx_t *rx)
msg->ibm_type, credits,
libcfs_nid2str(conn->ibc_peer->ibp_nid));
- if (credits != 0) {
+ if (credits) {
/* Have I received credits that will let me send? */
spin_lock(&conn->ibc_lock);
if (conn->ibc_credits + credits >
- IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) {
+ conn->ibc_queue_depth) {
rc2 = conn->ibc_credits;
spin_unlock(&conn->ibc_lock);
CERROR("Bad credits from %s: %d + %d > %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid),
- rc2, credits,
- IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
+ rc2, credits, conn->ibc_queue_depth);
kiblnd_close_conn(conn, -EPROTO);
kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
@@ -360,7 +366,7 @@ kiblnd_handle_rx(kib_rx_t *rx)
break;
}
- if (credits != 0) /* credit already posted */
+ if (credits) /* credit already posted */
post_credit = IBLND_POSTRX_NO_CREDIT;
else /* a keepalive NOOP */
post_credit = IBLND_POSTRX_PEER_CREDIT;
@@ -396,12 +402,12 @@ kiblnd_handle_rx(kib_rx_t *rx)
spin_lock(&conn->ibc_lock);
tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
- msg->ibm_u.putack.ibpam_src_cookie);
- if (tx != NULL)
+ msg->ibm_u.putack.ibpam_src_cookie);
+ if (tx)
list_del(&tx->tx_list);
spin_unlock(&conn->ibc_lock);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Unmatched PUT_ACK from %s\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid));
rc = -EPROTO;
@@ -409,10 +415,11 @@ kiblnd_handle_rx(kib_rx_t *rx)
}
LASSERT(tx->tx_waiting);
- /* CAVEAT EMPTOR: I could be racing with tx_complete, but...
+ /*
+ * CAVEAT EMPTOR: I could be racing with tx_complete, but...
* (a) I can overwrite tx_msg since my peer has received it!
- * (b) tx_waiting set tells tx_complete() it's not done. */
-
+ * (b) tx_waiting set tells tx_complete() it's not done.
+ */
tx->tx_nwrq = 0; /* overwrite PUT_REQ */
rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
@@ -469,7 +476,7 @@ kiblnd_rx_complete(kib_rx_t *rx, int status, int nob)
int rc;
int err = -EIO;
- LASSERT(net != NULL);
+ LASSERT(net);
LASSERT(rx->rx_nob < 0); /* was posted */
rx->rx_nob = 0; /* isn't now */
@@ -486,9 +493,9 @@ kiblnd_rx_complete(kib_rx_t *rx, int status, int nob)
rx->rx_nob = nob;
rc = kiblnd_unpack_msg(msg, rx->rx_nob);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d unpacking rx from %s\n",
- rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
goto failed;
}
@@ -497,7 +504,7 @@ kiblnd_rx_complete(kib_rx_t *rx, int status, int nob)
msg->ibm_srcstamp != conn->ibc_incarnation ||
msg->ibm_dststamp != net->ibn_incarnation) {
CERROR("Stale rx from %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
err = -ESTALE;
goto failed;
}
@@ -537,7 +544,7 @@ kiblnd_kvaddr_to_page(unsigned long vaddr)
if (is_vmalloc_addr((void *)vaddr)) {
page = vmalloc_to_page((void *)vaddr);
- LASSERT(page != NULL);
+ LASSERT(page);
return page;
}
#ifdef CONFIG_HIGHMEM
@@ -549,7 +556,7 @@ kiblnd_kvaddr_to_page(unsigned long vaddr)
}
#endif
page = virt_to_page(vaddr);
- LASSERT(page != NULL);
+ LASSERT(page);
return page;
}
@@ -565,8 +572,8 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
int rc;
int i;
- LASSERT(tx->tx_pool != NULL);
- LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
+ LASSERT(tx->tx_pool);
+ LASSERT(tx->tx_pool->tpo_pool.po_owner);
hdev = tx->tx_pool->tpo_hdev;
@@ -582,13 +589,15 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
fps = net->ibn_fmr_ps[cpt];
rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't map %d pages: %d\n", npages, rc);
return rc;
}
- /* If rd is not tx_rd, it's going to get sent to a peer, who will need
- * the rkey */
+ /*
+ * If rd is not tx_rd, it's going to get sent to a peer, who will need
+ * the rkey
+ */
rd->rd_key = (rd != tx->tx_rd) ? tx->fmr.fmr_pfmr->fmr->rkey :
tx->fmr.fmr_pfmr->fmr->lkey;
rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
@@ -602,14 +611,14 @@ static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
{
kib_net_t *net = ni->ni_data;
- LASSERT(net != NULL);
+ LASSERT(net);
if (net->ibn_fmr_ps && tx->fmr.fmr_pfmr) {
kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);
tx->fmr.fmr_pfmr = NULL;
}
- if (tx->tx_nfrags != 0) {
+ if (tx->tx_nfrags) {
kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
tx->tx_nfrags = 0;
@@ -625,8 +634,10 @@ static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
__u32 nob;
int i;
- /* If rd is not tx_rd, it's going to get sent to a peer and I'm the
- * RDMA sink */
+ /*
+ * If rd is not tx_rd, it's going to get sent to a peer and I'm the
+ * RDMA sink
+ */
tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
tx->tx_nfrags = nfrags;
@@ -641,15 +652,15 @@ static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
nob += rd->rd_frags[i].rf_nob;
}
- /* looking for pre-mapping MR */
- mr = kiblnd_find_rd_dma_mr(hdev, rd);
- if (mr != NULL) {
+ mr = kiblnd_find_rd_dma_mr(hdev, rd, tx->tx_conn ?
+ tx->tx_conn->ibc_max_frags : -1);
+ if (mr) {
/* found pre-mapping MR */
rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
return 0;
}
- if (net->ibn_fmr_ps != NULL)
+ if (net->ibn_fmr_ps)
return kiblnd_fmr_map_tx(net, tx, rd, nob);
return -EINVAL;
@@ -668,7 +679,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
LASSERT(nob > 0);
LASSERT(niov > 0);
- LASSERT(net != NULL);
+ LASSERT(net);
while (offset >= iov->iov_len) {
offset -= iov->iov_len;
@@ -684,7 +695,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
vaddr = ((unsigned long)iov->iov_base) + offset;
page_offset = vaddr & (PAGE_SIZE - 1);
page = kiblnd_kvaddr_to_page(vaddr);
- if (page == NULL) {
+ if (!page) {
CERROR("Can't find page\n");
return -EFAULT;
}
@@ -710,7 +721,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
static int
kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
- int nkiov, lnet_kiov_t *kiov, int offset, int nob)
+ int nkiov, lnet_kiov_t *kiov, int offset, int nob)
{
kib_net_t *net = ni->ni_data;
struct scatterlist *sg;
@@ -720,7 +731,7 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
LASSERT(nob > 0);
LASSERT(nkiov > 0);
- LASSERT(net != NULL);
+ LASSERT(net);
while (offset >= kiov->kiov_len) {
offset -= kiov->kiov_len;
@@ -750,26 +761,24 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
static int
kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
- __releases(conn->ibc_lock)
- __acquires(conn->ibc_lock)
+ __must_hold(&conn->ibc_lock)
{
kib_msg_t *msg = tx->tx_msg;
kib_peer_t *peer = conn->ibc_peer;
int ver = conn->ibc_version;
int rc;
int done;
- struct ib_send_wr *bad_wrq;
LASSERT(tx->tx_queued);
/* We rely on this for QP sizing */
LASSERT(tx->tx_nwrq > 0);
- LASSERT(tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver));
+ LASSERT(tx->tx_nwrq <= 1 + conn->ibc_max_frags);
- LASSERT(credit == 0 || credit == 1);
+ LASSERT(!credit || credit == 1);
LASSERT(conn->ibc_outstanding_credits >= 0);
- LASSERT(conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver));
+ LASSERT(conn->ibc_outstanding_credits <= conn->ibc_queue_depth);
LASSERT(conn->ibc_credits >= 0);
- LASSERT(conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver));
+ LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);
if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) {
/* tx completions outstanding... */
@@ -778,13 +787,13 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
return -EAGAIN;
}
- if (credit != 0 && conn->ibc_credits == 0) { /* no credits */
+ if (credit && !conn->ibc_credits) { /* no credits */
CDEBUG(D_NET, "%s: no credits\n",
libcfs_nid2str(peer->ibp_nid));
return -EAGAIN;
}
- if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
+ if (credit && !IBLND_OOB_CAPABLE(ver) &&
conn->ibc_credits == 1 && /* last credit reserved */
msg->ibm_type != IBLND_MSG_NOOP) { /* for NOOP */
CDEBUG(D_NET, "%s: not using last credit\n",
@@ -800,9 +809,11 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
(!kiblnd_need_noop(conn) || /* redundant NOOP */
(IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
- /* OK to drop when posted enough NOOPs, since
+ /*
+ * OK to drop when posted enough NOOPs, since
* kiblnd_check_sends will queue NOOP again when
- * posted NOOPs complete */
+ * posted NOOPs complete
+ */
spin_unlock(&conn->ibc_lock);
kiblnd_tx_done(peer->ibp_ni, tx);
spin_lock(&conn->ibc_lock);
@@ -821,12 +832,14 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
if (msg->ibm_type == IBLND_MSG_NOOP)
conn->ibc_noops_posted++;
- /* CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA
+ /*
+ * CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA
* PUT. If so, it was first queued here as a PUT_REQ, sent and
* stashed on ibc_active_txs, matched by an incoming PUT_ACK,
* and then re-queued here. It's (just) possible that
* tx_sending is non-zero if we've not done the tx_complete()
- * from the first send; hence the ++ rather than = below. */
+ * from the first send; hence the ++ rather than = below.
+ */
tx->tx_sending++;
list_add(&tx->tx_list, &conn->ibc_active_txs);
@@ -838,16 +851,25 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
/* close_conn will launch failover */
rc = -ENETDOWN;
} else {
- rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &bad_wrq);
+ struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
+
+ LASSERTF(wrq->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
+ "bad wr_id %llx, opc %d, flags %d, peer: %s\n",
+ wrq->wr_id, wrq->opcode, wrq->send_flags,
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ wrq = NULL;
+ rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &wrq);
}
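Annotation: before posting, the chain's final WR is now sanity-checked: its wr_id must be the tx pointer tagged IBLND_WID_TX, since that is how the completion path will find the tx again; wrq is then reused as ib_post_send()'s bad-WR out parameter, replacing the old bad_wrq local. On the CQ side the dispatch is roughly this (a hypothetical sketch using the wreqid decode helpers assumed earlier; the real driver's RDMA case does error bookkeeping only):

static void
kiblnd_complete_sketch(struct ib_wc *wc)
{
	void *ptr = kiblnd_wreqid2ptr(wc->wr_id);

	switch (kiblnd_wreqid2type(wc->wr_id)) {
	case IBLND_WID_RDMA:
		/* only failed RDMA WRs are reported; bookkeeping only */
		break;
	case IBLND_WID_TX:
		kiblnd_tx_complete(ptr, wc->status);
		break;
	case IBLND_WID_RX:
		kiblnd_rx_complete(ptr, wc->status, wc->byte_len);
		break;
	}
}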
conn->ibc_last_send = jiffies;
- if (rc == 0)
+ if (!rc)
return 0;
- /* NB credits are transferred in the actual
- * message, which can only be the last work item */
+ /*
+ * NB credits are transferred in the actual
+ * message, which can only be the last work item
+ */
conn->ibc_credits += credit;
conn->ibc_outstanding_credits += msg->ibm_credits;
conn->ibc_nsends_posted--;
@@ -858,7 +880,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
tx->tx_waiting = 0;
tx->tx_sending--;
- done = (tx->tx_sending == 0);
+ done = !tx->tx_sending;
if (done)
list_del(&tx->tx_list);
@@ -881,7 +903,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
return -EIO;
}
-void
+static void
kiblnd_check_sends(kib_conn_t *conn)
{
int ver = conn->ibc_version;
@@ -899,13 +921,13 @@ kiblnd_check_sends(kib_conn_t *conn)
LASSERT(conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
LASSERT(!IBLND_OOB_CAPABLE(ver) ||
- conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
+ conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
LASSERT(conn->ibc_reserved_credits >= 0);
while (conn->ibc_reserved_credits > 0 &&
!list_empty(&conn->ibc_tx_queue_rsrvd)) {
tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
- kib_tx_t, tx_list);
+ kib_tx_t, tx_list);
list_del(&tx->tx_list);
list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
conn->ibc_reserved_credits--;
@@ -915,23 +937,21 @@ kiblnd_check_sends(kib_conn_t *conn)
spin_unlock(&conn->ibc_lock);
tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
- if (tx != NULL)
+ if (tx)
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
spin_lock(&conn->ibc_lock);
- if (tx != NULL)
+ if (tx)
kiblnd_queue_tx_locked(tx, conn);
}
- kiblnd_conn_addref(conn); /* 1 ref for me.... (see b21911) */
-
for (;;) {
int credit;
if (!list_empty(&conn->ibc_tx_queue_nocred)) {
credit = 0;
tx = list_entry(conn->ibc_tx_queue_nocred.next,
- kib_tx_t, tx_list);
+ kib_tx_t, tx_list);
} else if (!list_empty(&conn->ibc_tx_noops)) {
LASSERT(!IBLND_OOB_CAPABLE(ver));
credit = 1;
@@ -940,17 +960,16 @@ kiblnd_check_sends(kib_conn_t *conn)
} else if (!list_empty(&conn->ibc_tx_queue)) {
credit = 1;
tx = list_entry(conn->ibc_tx_queue.next,
- kib_tx_t, tx_list);
- } else
+ kib_tx_t, tx_list);
+ } else {
break;
+ }
- if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
+ if (kiblnd_post_tx_locked(conn, tx, credit))
break;
}
spin_unlock(&conn->ibc_lock);
-
- kiblnd_conn_decref(conn); /* ...until here */
}
static void
@@ -976,9 +995,10 @@ kiblnd_tx_complete(kib_tx_t *tx, int status)
spin_lock(&conn->ibc_lock);
- /* I could be racing with rdma completion. Whoever makes 'tx' idle
- * gets to free it, which also drops its ref on 'conn'. */
-
+ /*
+ * I could be racing with rdma completion. Whoever makes 'tx' idle
+ * gets to free it, which also drops its ref on 'conn'.
+ */
tx->tx_sending--;
conn->ibc_nsends_posted--;
if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
@@ -989,7 +1009,7 @@ kiblnd_tx_complete(kib_tx_t *tx, int status)
tx->tx_status = -EIO;
}
- idle = (tx->tx_sending == 0) && /* This is the final callback */
+ idle = !tx->tx_sending && /* This is the final callback */
!tx->tx_waiting && /* Not waiting for peer */
!tx->tx_queued; /* Not re-queued (PUT_DONE) */
if (idle)
@@ -1007,24 +1027,22 @@ kiblnd_tx_complete(kib_tx_t *tx, int status)
kiblnd_conn_decref(conn); /* ...until here */
}
-void
+static void
kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
{
kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
struct ib_rdma_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
- struct ib_mr *mr;
+ struct ib_mr *mr = hdev->ibh_mrs;
LASSERT(tx->tx_nwrq >= 0);
LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
LASSERT(nob <= IBLND_MSG_SIZE);
+ LASSERT(mr);
kiblnd_init_msg(tx->tx_msg, type, body_nob);
- mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob);
- LASSERT(mr != NULL);
-
sge->lkey = mr->lkey;
sge->addr = tx->tx_msgaddr;
sge->length = nob;
@@ -1041,25 +1059,23 @@ kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
tx->tx_nwrq++;
}
-int
+static int
kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
- int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
+ int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
{
kib_msg_t *ibmsg = tx->tx_msg;
kib_rdma_desc_t *srcrd = tx->tx_rd;
struct ib_sge *sge = &tx->tx_sge[0];
struct ib_rdma_wr *wrq = &tx->tx_wrq[0], *next;
int rc = resid;
- int srcidx;
- int dstidx;
+ int srcidx = 0;
+ int dstidx = 0;
int wrknob;
LASSERT(!in_interrupt());
- LASSERT(tx->tx_nwrq == 0);
+ LASSERT(!tx->tx_nwrq);
LASSERT(type == IBLND_MSG_GET_DONE ||
- type == IBLND_MSG_PUT_DONE);
-
- srcidx = dstidx = 0;
+ type == IBLND_MSG_PUT_DONE);
while (resid > 0) {
if (srcidx >= srcrd->rd_nfrags) {
@@ -1074,10 +1090,10 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
break;
}
- if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) {
- CERROR("RDMA too fragmented for %s (%d): %d/%d src %d/%d dst frags\n",
+ if (tx->tx_nwrq >= conn->ibc_max_frags) {
+ CERROR("RDMA has too many fragments for peer %s (%d), src idx/frags: %d/%d dst idx/frags: %d/%d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid),
- IBLND_RDMA_FRAGS(conn->ibc_version),
+ conn->ibc_max_frags,
srcidx, srcrd->rd_nfrags,
dstidx, dstrd->rd_nfrags);
rc = -EMSGSIZE;
@@ -1127,7 +1143,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
return rc;
}
-void
+static void
kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
{
struct list_head *q;
@@ -1137,9 +1153,11 @@ kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
tx->tx_queued = 1;
- tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);
+ tx->tx_deadline = jiffies +
+ msecs_to_jiffies(*kiblnd_tunables.kib_timeout *
+ MSEC_PER_SEC);
- if (tx->tx_conn == NULL) {
+ if (!tx->tx_conn) {
kiblnd_conn_addref(conn);
tx->tx_conn = conn;
LASSERT(tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
@@ -1180,7 +1198,7 @@ kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
list_add_tail(&tx->tx_list, q);
}
-void
+static void
kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn)
{
spin_lock(&conn->ibc_lock);
@@ -1200,19 +1218,19 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
/* allow the port to be reused */
rc = rdma_set_reuseaddr(cmid, 1);
- if (rc != 0) {
+ if (rc) {
CERROR("Unable to set reuse on cmid: %d\n", rc);
return rc;
}
/* look for a free privileged port */
- for (port = PROT_SOCK-1; port > 0; port--) {
+ for (port = PROT_SOCK - 1; port > 0; port--) {
srcaddr->sin_port = htons(port);
rc = rdma_resolve_addr(cmid,
(struct sockaddr *)srcaddr,
(struct sockaddr *)dstaddr,
timeout_ms);
- if (rc == 0) {
+ if (!rc) {
CDEBUG(D_NET, "bound to port %hu\n", port);
return 0;
} else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
@@ -1237,8 +1255,9 @@ kiblnd_connect_peer(kib_peer_t *peer)
struct sockaddr_in dstaddr;
int rc;
- LASSERT(net != NULL);
+ LASSERT(net);
LASSERT(peer->ibp_connecting > 0);
+ LASSERT(!peer->ibp_reconnecting);
cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP,
IB_QPT_RC);
@@ -1271,14 +1290,14 @@ kiblnd_connect_peer(kib_peer_t *peer)
(struct sockaddr *)&dstaddr,
*kiblnd_tunables.kib_timeout * 1000);
}
- if (rc != 0) {
+ if (rc) {
/* Can't initiate address resolution: */
CERROR("Can't resolve addr for %s: %d\n",
libcfs_nid2str(peer->ibp_nid), rc);
goto failed2;
}
- LASSERT(cmid->device != NULL);
+ LASSERT(cmid->device);
CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n",
libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
&dev->ibd_ifip, cmid->device->name);
@@ -1286,12 +1305,64 @@ kiblnd_connect_peer(kib_peer_t *peer)
return;
failed2:
+ kiblnd_peer_connect_failed(peer, 1, rc);
kiblnd_peer_decref(peer); /* cmid's ref */
rdma_destroy_id(cmid);
+ return;
failed:
kiblnd_peer_connect_failed(peer, 1, rc);
}
+bool
+kiblnd_reconnect_peer(kib_peer_t *peer)
+{
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ char *reason = NULL;
+ struct list_head txs;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&txs);
+
+ write_lock_irqsave(glock, flags);
+ if (!peer->ibp_reconnecting) {
+ if (peer->ibp_accepting)
+ reason = "accepting";
+ else if (peer->ibp_connecting)
+ reason = "connecting";
+ else if (!list_empty(&peer->ibp_conns))
+ reason = "connected";
+ else /* connected then closed */
+ reason = "closed";
+
+ goto no_reconnect;
+ }
+
+ LASSERT(!peer->ibp_accepting && !peer->ibp_connecting &&
+ list_empty(&peer->ibp_conns));
+ peer->ibp_reconnecting = 0;
+
+ if (!kiblnd_peer_active(peer)) {
+ list_splice_init(&peer->ibp_tx_queue, &txs);
+ reason = "unlinked";
+ goto no_reconnect;
+ }
+
+ peer->ibp_connecting++;
+ peer->ibp_reconnected++;
+ write_unlock_irqrestore(glock, flags);
+
+ kiblnd_connect_peer(peer);
+ return true;
+
+no_reconnect:
+ write_unlock_irqrestore(glock, flags);
+
+ CWARN("Abort reconnection of %s: %s\n",
+ libcfs_nid2str(peer->ibp_nid), reason);
+ kiblnd_txlist_done(peer->ibp_ni, &txs, -ECONNABORTED);
+ return false;
+}
+
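Annotation: kiblnd_reconnect_peer() re-arms a peer at most once: under the global lock it verifies that ibp_reconnecting is still the only live state, converts it into an ordinary ibp_connecting attempt (bumping ibp_reconnected for accounting), and only then dials out; if the retry is aborted, any queued txs fail with -ECONNABORTED. The intended consumer is the connd: zombie connections flagged ibc_reconnect park on kib_reconn_wait, are spliced onto kib_reconn_list once kib_reconn_sec ticks over, and are retried through their peer. A sketch of that loop body (hypothetical, locking elided; grounded only in the fields and signatures this patch adds):

conn = list_entry(kiblnd_data.kib_reconn_list.next, kib_conn_t, ibc_list);
list_del(&conn->ibc_list);

kiblnd_reconnect_peer(conn->ibc_peer);
kiblnd_peer_decref(conn->ibc_peer);	/* drop the conn's ref on the peer */
kiblnd_destroy_conn(conn, true);	/* then reap the zombie itself */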
void
kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
{
@@ -1302,25 +1373,28 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
unsigned long flags;
int rc;
- /* If I get here, I've committed to send, so I complete the tx with
- * failure on any problems */
-
- LASSERT(tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */
- LASSERT(tx == NULL || tx->tx_nwrq > 0); /* work items have been set up */
+ /*
+ * If I get here, I've committed to send, so I complete the tx with
+ * failure on any problems
+ */
+ LASSERT(!tx || !tx->tx_conn); /* only set when assigned a conn */
+ LASSERT(!tx || tx->tx_nwrq > 0); /* work items have been set up */
- /* First time, just use a read lock since I expect to find my peer
- * connected */
+ /*
+ * First time, just use a read lock since I expect to find my peer
+ * connected
+ */
read_lock_irqsave(g_lock, flags);
peer = kiblnd_find_peer_locked(nid);
- if (peer != NULL && !list_empty(&peer->ibp_conns)) {
+ if (peer && !list_empty(&peer->ibp_conns)) {
/* Found a peer with an established connection */
conn = kiblnd_get_conn_locked(peer);
kiblnd_conn_addref(conn); /* 1 ref for me... */
read_unlock_irqrestore(g_lock, flags);
- if (tx != NULL)
+ if (tx)
kiblnd_queue_tx(tx, conn);
kiblnd_conn_decref(conn); /* ...to here */
return;
@@ -1331,14 +1405,13 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
write_lock(g_lock);
peer = kiblnd_find_peer_locked(nid);
- if (peer != NULL) {
+ if (peer) {
if (list_empty(&peer->ibp_conns)) {
/* found a peer, but it's still connecting... */
- LASSERT(peer->ibp_connecting != 0 ||
- peer->ibp_accepting != 0);
- if (tx != NULL)
+ LASSERT(kiblnd_peer_connecting(peer));
+ if (tx)
list_add_tail(&tx->tx_list,
- &peer->ibp_tx_queue);
+ &peer->ibp_tx_queue);
write_unlock_irqrestore(g_lock, flags);
} else {
conn = kiblnd_get_conn_locked(peer);
@@ -1346,7 +1419,7 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
write_unlock_irqrestore(g_lock, flags);
- if (tx != NULL)
+ if (tx)
kiblnd_queue_tx(tx, conn);
kiblnd_conn_decref(conn); /* ...to here */
}
@@ -1357,9 +1430,9 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
/* Allocate a peer ready to add to the peer table and retry */
rc = kiblnd_create_peer(ni, &peer, nid);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create peer %s\n", libcfs_nid2str(nid));
- if (tx != NULL) {
+ if (tx) {
tx->tx_status = -EHOSTUNREACH;
tx->tx_waiting = 0;
kiblnd_tx_done(ni, tx);
@@ -1370,14 +1443,13 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
write_lock_irqsave(g_lock, flags);
peer2 = kiblnd_find_peer_locked(nid);
- if (peer2 != NULL) {
+ if (peer2) {
if (list_empty(&peer2->ibp_conns)) {
/* found a peer, but it's still connecting... */
- LASSERT(peer2->ibp_connecting != 0 ||
- peer2->ibp_accepting != 0);
- if (tx != NULL)
+ LASSERT(kiblnd_peer_connecting(peer2));
+ if (tx)
list_add_tail(&tx->tx_list,
- &peer2->ibp_tx_queue);
+ &peer2->ibp_tx_queue);
write_unlock_irqrestore(g_lock, flags);
} else {
conn = kiblnd_get_conn_locked(peer2);
@@ -1385,7 +1457,7 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
write_unlock_irqrestore(g_lock, flags);
- if (tx != NULL)
+ if (tx)
kiblnd_queue_tx(tx, conn);
kiblnd_conn_decref(conn); /* ...to here */
}
@@ -1395,13 +1467,13 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
}
/* Brand new peer */
- LASSERT(peer->ibp_connecting == 0);
+ LASSERT(!peer->ibp_connecting);
peer->ibp_connecting = 1;
/* always called with a ref on ni, which prevents ni being shutdown */
- LASSERT(((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);
+ LASSERT(!((kib_net_t *)ni->ni_data)->ibn_shutdown);
- if (tx != NULL)
+ if (tx)
list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
kiblnd_peer_addref(peer);
@@ -1437,13 +1509,13 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
payload_nob, payload_niov, libcfs_id2str(target));
- LASSERT(payload_nob == 0 || payload_niov > 0);
+ LASSERT(!payload_nob || payload_niov > 0);
LASSERT(payload_niov <= LNET_MAX_IOV);
/* Thread context */
LASSERT(!in_interrupt());
/* payload is either all vaddrs or all pages */
- LASSERT(!(payload_kiov != NULL && payload_iov != NULL));
+ LASSERT(!(payload_kiov && payload_iov));
switch (type) {
default:
@@ -1451,7 +1523,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
return -EIO;
case LNET_MSG_ACK:
- LASSERT(payload_nob == 0);
+ LASSERT(!payload_nob);
break;
case LNET_MSG_GET:
@@ -1464,7 +1536,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
break; /* send IMMEDIATE */
tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Can't allocate txd for GET to %s\n",
libcfs_nid2str(target.nid));
return -ENOMEM;
@@ -1472,7 +1544,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
ibmsg = tx->tx_msg;
rd = &ibmsg->ibm_u.get.ibgm_rd;
- if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
+ if (!(lntmsg->msg_md->md_options & LNET_MD_KIOV))
rc = kiblnd_setup_rd_iov(ni, tx, rd,
lntmsg->msg_md->md_niov,
lntmsg->msg_md->md_iov.iov,
@@ -1482,7 +1554,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
lntmsg->msg_md->md_niov,
lntmsg->msg_md->md_iov.kiov,
0, lntmsg->msg_md->md_length);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't setup GET sink for %s: %d\n",
libcfs_nid2str(target.nid), rc);
kiblnd_tx_done(ni, tx);
@@ -1496,7 +1568,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
- if (tx->tx_lntmsg[1] == NULL) {
+ if (!tx->tx_lntmsg[1]) {
CERROR("Can't create reply for GET -> %s\n",
libcfs_nid2str(target.nid));
kiblnd_tx_done(ni, tx);
@@ -1516,14 +1588,14 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
break; /* send IMMEDIATE */
tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Can't allocate %s txd for %s\n",
type == LNET_MSG_PUT ? "PUT" : "REPLY",
libcfs_nid2str(target.nid));
return -ENOMEM;
}
- if (payload_kiov == NULL)
+ if (!payload_kiov)
rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
payload_niov, payload_iov,
payload_offset, payload_nob);
@@ -1531,7 +1603,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
payload_niov, payload_kiov,
payload_offset, payload_nob);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't setup PUT src for %s: %d\n",
libcfs_nid2str(target.nid), rc);
kiblnd_tx_done(ni, tx);
@@ -1555,16 +1627,16 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
<= IBLND_MSG_SIZE);
tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Can't send %d to %s: tx descs exhausted\n",
- type, libcfs_nid2str(target.nid));
+ type, libcfs_nid2str(target.nid));
return -ENOMEM;
}
ibmsg = tx->tx_msg;
ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
- if (payload_kiov != NULL)
+ if (payload_kiov)
lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
payload_niov, payload_kiov,
@@ -1596,22 +1668,22 @@ kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
int rc;
tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Can't get tx for REPLY to %s\n",
libcfs_nid2str(target.nid));
goto failed_0;
}
- if (nob == 0)
+ if (!nob)
rc = 0;
- else if (kiov == NULL)
+ else if (!kiov)
rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
niov, iov, offset, nob);
else
rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
niov, kiov, offset, nob);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't setup GET src for %s: %d\n",
libcfs_nid2str(target.nid), rc);
goto failed_1;
@@ -1627,12 +1699,11 @@ kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
goto failed_1;
}
- if (nob == 0) {
+ if (!nob) {
/* No RDMA: local completion may happen now! */
lnet_finalize(ni, lntmsg, 0);
} else {
- /* RDMA: lnet_finalize(lntmsg) when it
- * completes */
+ /* RDMA: lnet_finalize(lntmsg) when it completes */
tx->tx_lntmsg[0] = lntmsg;
}
@@ -1647,8 +1718,8 @@ kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
int
kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
- unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen, unsigned int rlen)
+ unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
+ unsigned int offset, unsigned int mlen, unsigned int rlen)
{
kib_rx_t *rx = private;
kib_msg_t *rxmsg = rx->rx_msg;
@@ -1661,7 +1732,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
LASSERT(mlen <= rlen);
LASSERT(!in_interrupt());
/* Either all pages or all vaddrs */
- LASSERT(!(kiov != NULL && iov != NULL));
+ LASSERT(!(kiov && iov));
switch (rxmsg->ibm_type) {
default:
@@ -1671,13 +1742,13 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
if (nob > rx->rx_nob) {
CERROR("Immediate message from %s too big: %d(%d)\n",
- libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
- nob, rx->rx_nob);
+ libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
+ nob, rx->rx_nob);
rc = -EPROTO;
break;
}
- if (kiov != NULL)
+ if (kiov)
lnet_copy_flat2kiov(niov, kiov, offset,
IBLND_MSG_SIZE, rxmsg,
offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
@@ -1694,7 +1765,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
kib_msg_t *txmsg;
kib_rdma_desc_t *rd;
- if (mlen == 0) {
+ if (!mlen) {
lnet_finalize(ni, lntmsg, 0);
kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
rxmsg->ibm_u.putreq.ibprm_cookie);
@@ -1702,7 +1773,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
}
tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Can't allocate tx for %s\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid));
/* Not replying will break the connection */
@@ -1712,13 +1783,13 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
txmsg = tx->tx_msg;
rd = &txmsg->ibm_u.putack.ibpam_rd;
- if (kiov == NULL)
+ if (!kiov)
rc = kiblnd_setup_rd_iov(ni, tx, rd,
niov, iov, offset, mlen);
else
rc = kiblnd_setup_rd_kiov(ni, tx, rd,
niov, kiov, offset, mlen);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't setup PUT sink for %s: %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
kiblnd_tx_done(ni, tx);
@@ -1744,7 +1815,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
}
case IBLND_MSG_GET_REQ:
- if (lntmsg != NULL) {
+ if (lntmsg) {
/* Optimized GET; RDMA lntmsg's payload */
kiblnd_reply(ni, rx, lntmsg);
} else {
@@ -1778,7 +1849,7 @@ kiblnd_thread_fini(void)
atomic_dec(&kiblnd_data.kib_nthreads);
}
-void
+static void
kiblnd_peer_alive(kib_peer_t *peer)
{
/* This is racy, but everyone's only writing cfs_time_current() */
@@ -1795,10 +1866,7 @@ kiblnd_peer_notify(kib_peer_t *peer)
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (list_empty(&peer->ibp_conns) &&
- peer->ibp_accepting == 0 &&
- peer->ibp_connecting == 0 &&
- peer->ibp_error != 0) {
+ if (kiblnd_peer_idle(peer) && peer->ibp_error) {
error = peer->ibp_error;
peer->ibp_error = 0;
@@ -1807,7 +1875,7 @@ kiblnd_peer_notify(kib_peer_t *peer)
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- if (error != 0)
+ if (error)
lnet_notify(peer->ibp_ni,
peer->ibp_nid, 0, last_alive);
}
@@ -1815,25 +1883,27 @@ kiblnd_peer_notify(kib_peer_t *peer)
void
kiblnd_close_conn_locked(kib_conn_t *conn, int error)
{
- /* This just does the immediate housekeeping. 'error' is zero for a
+ /*
+ * This just does the immediate housekeeping. 'error' is zero for a
* normal shutdown which can happen only after the connection has been
* established. If the connection is established, schedule the
- * connection to be finished off by the connd. Otherwise the connd is
+ * connection to be finished off by the connd. Otherwise the connd is
* already dealing with it (either to set it up or tear it down).
- * Caller holds kib_global_lock exclusively in irq context */
+ * Caller holds kib_global_lock exclusively in irq context
+ */
kib_peer_t *peer = conn->ibc_peer;
kib_dev_t *dev;
unsigned long flags;
- LASSERT(error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+ LASSERT(error || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
- if (error != 0 && conn->ibc_comms_error == 0)
+ if (error && !conn->ibc_comms_error)
conn->ibc_comms_error = error;
if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
return; /* already being handled */
- if (error == 0 &&
+ if (!error &&
list_empty(&conn->ibc_tx_noops) &&
list_empty(&conn->ibc_tx_queue) &&
list_empty(&conn->ibc_tx_queue_rsrvd) &&
@@ -1843,12 +1913,12 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
libcfs_nid2str(peer->ibp_nid));
} else {
CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
- libcfs_nid2str(peer->ibp_nid), error,
- list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
- list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
- list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
- list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
- list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
+ libcfs_nid2str(peer->ibp_nid), error,
+ list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+ list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
+ list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
+ list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
+ list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
}
dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
@@ -1865,7 +1935,7 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
- if (error != 0 &&
+ if (error &&
kiblnd_dev_can_failover(dev)) {
list_add_tail(&dev->ibd_fail_list,
&kiblnd_data.kib_failed_devs);
@@ -1929,8 +1999,7 @@ kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
if (txs == &conn->ibc_active_txs) {
LASSERT(!tx->tx_queued);
- LASSERT(tx->tx_waiting ||
- tx->tx_sending != 0);
+ LASSERT(tx->tx_waiting || tx->tx_sending);
} else {
LASSERT(tx->tx_queued);
}
@@ -1938,7 +2007,7 @@ kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
tx->tx_status = -ECONNABORTED;
tx->tx_waiting = 0;
- if (tx->tx_sending == 0) {
+ if (!tx->tx_sending) {
tx->tx_queued = 0;
list_del(&tx->tx_list);
list_add(&tx->tx_list, &zombies);
@@ -1958,14 +2027,17 @@ kiblnd_finalise_conn(kib_conn_t *conn)
kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
- /* abort_receives moves QP state to IB_QPS_ERR. This is only required
+ /*
+ * abort_receives moves QP state to IB_QPS_ERR. This is only required
* for connections that didn't get as far as being connected, because
- * rdma_disconnect() does this for free. */
+ * rdma_disconnect() does this for free.
+ */
kiblnd_abort_receives(conn);
- /* Complete all tx descs not waiting for sends to complete.
- * NB we should be safe from RDMA now that the QP has changed state */
-
+ /*
+ * Complete all tx descs not waiting for sends to complete.
+ * NB we should be safe from RDMA now that the QP has changed state
+ */
kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
@@ -1975,13 +2047,13 @@ kiblnd_finalise_conn(kib_conn_t *conn)
kiblnd_handle_early_rxs(conn);
}
-void
+static void
kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
{
LIST_HEAD(zombies);
unsigned long flags;
- LASSERT(error != 0);
+ LASSERT(error);
LASSERT(!in_interrupt());
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
@@ -1994,14 +2066,14 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
peer->ibp_accepting--;
}
- if (peer->ibp_connecting != 0 ||
- peer->ibp_accepting != 0) {
+ if (kiblnd_peer_connecting(peer)) {
/* another connection attempt under way... */
write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
+ flags);
return;
}
+ peer->ibp_reconnected = 0;
if (list_empty(&peer->ibp_conns)) {
/* Take peer's blocked transmits to complete with error */
list_add(&zombies, &peer->ibp_tx_queue);
@@ -2029,7 +2101,7 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH);
}
-void
+static void
kiblnd_connreq_done(kib_conn_t *conn, int status)
{
kib_peer_t *peer = conn->ibc_peer;
@@ -2047,14 +2119,14 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
LASSERT(!in_interrupt());
LASSERT((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
- peer->ibp_connecting > 0) ||
+ peer->ibp_connecting > 0) ||
(conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
- peer->ibp_accepting > 0));
+ peer->ibp_accepting > 0));
LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
conn->ibc_connvars = NULL;
- if (status != 0) {
+ if (status) {
/* failed to establish connection */
kiblnd_peer_connect_failed(peer, active, status);
kiblnd_finalise_conn(conn);
@@ -2068,16 +2140,19 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
kiblnd_peer_alive(peer);
- /* Add conn to peer's list and nuke any dangling conns from a different
- * peer instance... */
+ /*
+ * Add conn to peer's list and nuke any dangling conns from a different
+ * peer instance...
+ */
kiblnd_conn_addref(conn); /* +1 ref for ibc_list */
list_add(&conn->ibc_list, &peer->ibp_conns);
+ peer->ibp_reconnected = 0;
if (active)
peer->ibp_connecting--;
else
peer->ibp_accepting--;
- if (peer->ibp_version == 0) {
+ if (!peer->ibp_version) {
peer->ibp_version = conn->ibc_version;
peer->ibp_incarnation = conn->ibc_incarnation;
}
@@ -2095,7 +2170,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
list_del_init(&peer->ibp_tx_queue);
if (!kiblnd_peer_active(peer) || /* peer has been deleted */
- conn->ibc_comms_error != 0) { /* error has happened already */
+ conn->ibc_comms_error) { /* error has happened already */
lnet_ni_t *ni = peer->ibp_ni;
/* start to shut down connection */
@@ -2107,6 +2182,16 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
return;
}
+ /*
+ * The refcount taken by cmid is not reliable after I released the
+ * glock because this connection is now visible to other threads:
+ * another thread can find and close it right after I released the
+ * glock, and if kiblnd_cm_callback for RDMA_CM_EVENT_DISCONNECTED
+ * is called, it can release the connection refcount taken by cmid.
+ * That means the connection could be destroyed before I finish my
+ * operations on it.
+ */
+ kiblnd_conn_addref(conn);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
/* Schedule blocked txs */
@@ -2122,6 +2207,8 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
/* schedule blocked rxs */
kiblnd_handle_early_rxs(conn);
+
+ kiblnd_conn_decref(conn);
}
static void
@@ -2131,7 +2218,7 @@ kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
rc = rdma_reject(cmid, rej, sizeof(*rej));
- if (rc != 0)
+ if (rc)
CWARN("Error %d sending reject\n", rc);
}
@@ -2159,14 +2246,14 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
/* cmid inherits 'context' from the corresponding listener id */
ibdev = (kib_dev_t *)cmid->context;
- LASSERT(ibdev != NULL);
+ LASSERT(ibdev);
memset(&rej, 0, sizeof(rej));
rej.ibr_magic = IBLND_MSG_MAGIC;
rej.ibr_why = IBLND_REJECT_FATAL;
rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
- peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
+ peer_addr = (struct sockaddr_in *)&cmid->route.addr.dst_addr;
if (*kiblnd_tunables.kib_require_priv_port &&
ntohs(peer_addr->sin_port) >= PROT_SOCK) {
__u32 ip = ntohl(peer_addr->sin_addr.s_addr);
@@ -2181,12 +2268,14 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
goto failed;
}
- /* Future protocol version compatibility support! If the
+ /*
+ * Future protocol version compatibility support! If the
* o2iblnd-specific protocol changes, or when LNET unifies
* protocols over all LNDs, the initial connection will
* negotiate a protocol version. I trap this here to avoid
* console errors; the reject tells the peer which protocol I
- * speak. */
+ * speak.
+ */
if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
goto failed;
@@ -2200,7 +2289,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
goto failed;
rc = kiblnd_unpack_msg(reqmsg, priv_nob);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't parse connection request: %d\n", rc);
goto failed;
}
@@ -2208,17 +2297,17 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
nid = reqmsg->ibm_srcnid;
ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
- if (ni != NULL) {
+ if (ni) {
net = (kib_net_t *)ni->ni_data;
rej.ibr_incarnation = net->ibn_incarnation;
}
- if (ni == NULL || /* no matching net */
+ if (!ni || /* no matching net */
ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
net->ibn_dev != ibdev) { /* wrong device */
- CERROR("Can't accept %s on %s (%s:%d:%pI4h): bad dst nid %s\n",
+ CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): bad dst nid %s\n",
libcfs_nid2str(nid),
- ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
+ !ni ? "NA" : libcfs_nid2str(ni->ni_nid),
ibdev->ibd_ifname, ibdev->ibd_nnets,
&ibdev->ibd_ifip,
libcfs_nid2str(reqmsg->ibm_dstnid));
@@ -2227,7 +2316,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
}
/* check time stamp as soon as possible */
- if (reqmsg->ibm_dststamp != 0 &&
+ if (reqmsg->ibm_dststamp &&
reqmsg->ibm_dststamp != net->ibn_incarnation) {
CWARN("Stale connection request\n");
rej.ibr_why = IBLND_REJECT_CONN_STALE;
@@ -2243,10 +2332,11 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
goto failed;
}
- if (reqmsg->ibm_u.connparams.ibcp_queue_depth !=
+ if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
IBLND_MSG_QUEUE_SIZE(version)) {
- CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n",
- libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth,
+ CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n",
+ libcfs_nid2str(nid),
+ reqmsg->ibm_u.connparams.ibcp_queue_depth,
IBLND_MSG_QUEUE_SIZE(version));
if (version == IBLND_MSG_VERSION)
@@ -2255,18 +2345,28 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
goto failed;
}
- if (reqmsg->ibm_u.connparams.ibcp_max_frags !=
+ if (reqmsg->ibm_u.connparams.ibcp_max_frags >
IBLND_RDMA_FRAGS(version)) {
- CERROR("Can't accept %s(version %x): incompatible max_frags %d (%d wanted)\n",
- libcfs_nid2str(nid), version,
- reqmsg->ibm_u.connparams.ibcp_max_frags,
- IBLND_RDMA_FRAGS(version));
+ CWARN("Can't accept conn from %s (version %x): max_frags %d too large (%d wanted)\n",
+ libcfs_nid2str(nid), version,
+ reqmsg->ibm_u.connparams.ibcp_max_frags,
+ IBLND_RDMA_FRAGS(version));
- if (version == IBLND_MSG_VERSION)
+ if (version >= IBLND_MSG_VERSION)
rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
goto failed;
+ } else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
+ IBLND_RDMA_FRAGS(version) && !net->ibn_fmr_ps) {
+ CWARN("Can't accept conn from %s (version %x): max_frags %d incompatible without FMR pool (%d wanted)\n",
+ libcfs_nid2str(nid), version,
+ reqmsg->ibm_u.connparams.ibcp_max_frags,
+ IBLND_RDMA_FRAGS(version));
+
+ if (version >= IBLND_MSG_VERSION)
+ rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
+ goto failed;
}
if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
@@ -2279,17 +2379,21 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
/* assume 'nid' is a new peer; create */
rc = kiblnd_create_peer(ni, &peer, nid);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't create peer for %s\n", libcfs_nid2str(nid));
rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
goto failed;
}
+ /* We have validated the peer's parameters so use those */
+ peer->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags;
+ peer->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth;
+
write_lock_irqsave(g_lock, flags);
peer2 = kiblnd_find_peer_locked(nid);
- if (peer2 != NULL) {
- if (peer2->ibp_version == 0) {
+ if (peer2) {
+ if (!peer2->ibp_version) {
peer2->ibp_version = version;
peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
}
@@ -2298,10 +2402,16 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
peer2->ibp_version != version) {
kiblnd_close_peer_conns_locked(peer2, -ESTALE);
+
+ if (kiblnd_peer_active(peer2)) {
+ peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
+ peer2->ibp_version = version;
+ }
write_unlock_irqrestore(g_lock, flags);
- CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
- libcfs_nid2str(nid), peer2->ibp_version, version);
+ CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n",
+ libcfs_nid2str(nid), peer2->ibp_version, version,
+ peer2->ibp_incarnation, reqmsg->ibm_srcstamp);
kiblnd_peer_decref(peer);
rej.ibr_why = IBLND_REJECT_CONN_STALE;
@@ -2309,7 +2419,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
}
/* tie-break connection race in favour of the higher NID */
- if (peer2->ibp_connecting != 0 &&
+ if (peer2->ibp_connecting &&
nid < ni->ni_nid) {
write_unlock_irqrestore(g_lock, flags);
@@ -2320,24 +2430,37 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
goto failed;
}
+ /*
+ * a passive connection is allowed even when this peer is still
+ * waiting for reconnection.
+ */
+ peer2->ibp_reconnecting = 0;
peer2->ibp_accepting++;
kiblnd_peer_addref(peer2);
+ /*
+ * This races with kiblnd_launch_tx (active connect) to create the
+ * peer, so copy the validated parameters now that we know what the
+ * peer's limits are.
+ */
+ peer2->ibp_max_frags = peer->ibp_max_frags;
+ peer2->ibp_queue_depth = peer->ibp_queue_depth;
+
write_unlock_irqrestore(g_lock, flags);
kiblnd_peer_decref(peer);
peer = peer2;
} else {
/* Brand new peer */
- LASSERT(peer->ibp_accepting == 0);
- LASSERT(peer->ibp_version == 0 &&
- peer->ibp_incarnation == 0);
+ LASSERT(!peer->ibp_accepting);
+ LASSERT(!peer->ibp_version &&
+ !peer->ibp_incarnation);
peer->ibp_accepting = 1;
peer->ibp_version = version;
peer->ibp_incarnation = reqmsg->ibm_srcstamp;
/* I have a ref on ni that prevents it being shutdown */
- LASSERT(net->ibn_shutdown == 0);
+ LASSERT(!net->ibn_shutdown);
kiblnd_peer_addref(peer);
list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
@@ -2345,31 +2468,33 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
write_unlock_irqrestore(g_lock, flags);
}
- conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
- if (conn == NULL) {
+ conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT,
+ version);
+ if (!conn) {
kiblnd_peer_connect_failed(peer, 0, -ENOMEM);
kiblnd_peer_decref(peer);
rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
goto failed;
}
- /* conn now "owns" cmid, so I return success from here on to ensure the
- * CM callback doesn't destroy cmid. */
-
+ /*
+ * conn now "owns" cmid, so I return success from here on to ensure the
+ * CM callback doesn't destroy cmid.
+ */
conn->ibc_incarnation = reqmsg->ibm_srcstamp;
- conn->ibc_credits = IBLND_MSG_QUEUE_SIZE(version);
- conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
- LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version)
- <= IBLND_RX_MSGS(version));
+ conn->ibc_credits = conn->ibc_queue_depth;
+ conn->ibc_reserved_credits = conn->ibc_queue_depth;
+ LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
+ IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn));
ackmsg = &conn->ibc_connvars->cv_msg;
memset(ackmsg, 0, sizeof(*ackmsg));
kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
sizeof(ackmsg->ibm_u.connparams));
- ackmsg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
+ ackmsg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth;
+ ackmsg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags;
ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
- ackmsg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
@@ -2385,7 +2510,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
rc = rdma_accept(cmid, &cp);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
rej.ibr_version = version;
rej.ibr_why = IBLND_REJECT_FATAL;
@@ -2399,7 +2524,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
return 0;
failed:
- if (ni != NULL)
+ if (ni)
lnet_ni_decref(ni);
rej.ibr_version = version;
@@ -2411,45 +2536,82 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
}
static void
-kiblnd_reconnect(kib_conn_t *conn, int version,
- __u64 incarnation, int why, kib_connparams_t *cp)
+kiblnd_check_reconnect(kib_conn_t *conn, int version,
+ __u64 incarnation, int why, kib_connparams_t *cp)
{
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
kib_peer_t *peer = conn->ibc_peer;
char *reason;
- int retry = 0;
+ int msg_size = IBLND_MSG_SIZE;
+ int frag_num = -1;
+ int queue_dep = -1;
+ bool reconnect;
unsigned long flags;
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
LASSERT(peer->ibp_connecting > 0); /* 'conn' at least */
+ LASSERT(!peer->ibp_reconnecting);
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ if (cp) {
+ msg_size = cp->ibcp_max_msg_size;
+ frag_num = cp->ibcp_max_frags;
+ queue_dep = cp->ibcp_queue_depth;
+ }
- /* retry connection if it's still needed and no other connection
+ write_lock_irqsave(glock, flags);
+ /*
+ * retry connection if it's still needed and no other connection
* attempts (active or passive) are in progress
* NB: reconnect is still needed even when ibp_tx_queue is
* empty if ibp_version != version because reconnect may be
- * initiated by kiblnd_query() */
- if ((!list_empty(&peer->ibp_tx_queue) ||
- peer->ibp_version != version) &&
- peer->ibp_connecting == 1 &&
- peer->ibp_accepting == 0) {
- retry = 1;
- peer->ibp_connecting++;
-
- peer->ibp_version = version;
- peer->ibp_incarnation = incarnation;
+ * initiated by kiblnd_query()
+ */
+ reconnect = (!list_empty(&peer->ibp_tx_queue) ||
+ peer->ibp_version != version) &&
+ peer->ibp_connecting == 1 &&
+ !peer->ibp_accepting;
+ if (!reconnect) {
+ reason = "no need";
+ goto out;
}
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- if (!retry)
- return;
-
switch (why) {
default:
reason = "Unknown";
break;
+ case IBLND_REJECT_RDMA_FRAGS:
+ if (!cp) {
+ reason = "can't negotiate max frags";
+ goto out;
+ }
+ if (!*kiblnd_tunables.kib_map_on_demand) {
+ reason = "map_on_demand must be enabled";
+ goto out;
+ }
+ if (conn->ibc_max_frags <= frag_num) {
+ reason = "unsupported max frags";
+ goto out;
+ }
+
+ peer->ibp_max_frags = frag_num;
+ reason = "rdma fragments";
+ break;
+
+ case IBLND_REJECT_MSG_QUEUE_SIZE:
+ if (!cp) {
+ reason = "can't negotiate queue depth";
+ goto out;
+ }
+ if (conn->ibc_queue_depth <= queue_dep) {
+ reason = "unsupported queue depth";
+ goto out;
+ }
+
+ peer->ibp_queue_depth = queue_dep;
+ reason = "queue depth";
+ break;
+
case IBLND_REJECT_CONN_STALE:
reason = "stale";
break;
@@ -2463,14 +2625,24 @@ kiblnd_reconnect(kib_conn_t *conn, int version,
break;
}
- CNETERR("%s: retrying (%s), %x, %x, queue_dep: %d, max_frag: %d, msg_size: %d\n",
- libcfs_nid2str(peer->ibp_nid),
- reason, IBLND_MSG_VERSION, version,
- cp != NULL ? cp->ibcp_queue_depth : IBLND_MSG_QUEUE_SIZE(version),
- cp != NULL ? cp->ibcp_max_frags : IBLND_RDMA_FRAGS(version),
- cp != NULL ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE);
+ conn->ibc_reconnect = 1;
+ peer->ibp_reconnecting = 1;
+ peer->ibp_version = version;
+ if (incarnation)
+ peer->ibp_incarnation = incarnation;
+out:
+ write_unlock_irqrestore(glock, flags);
- kiblnd_connect_peer(peer);
+ CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
+ libcfs_nid2str(peer->ibp_nid),
+ reconnect ? "reconnect" : "don't reconnect",
+ reason, IBLND_MSG_VERSION, version, msg_size,
+ conn->ibc_queue_depth, queue_dep,
+ conn->ibc_max_frags, frag_num);
+ /*
+ * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer
+ * while destroying the zombie
+ */
}
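For illustration (not part of the patch): kiblnd_check_reconnect() only
ever negotiates downwards. A reject carrying the peer's limit can
shrink our queue depth or max_frags; if the peer asks for at least what
we already offered, there is nothing to shrink and the connection
fails. A hypothetical condensed form of that rule:

	/* returns true if we should adopt 'theirs' and reconnect */
	static bool example_can_renegotiate(int ours, int theirs)
	{
		/* peer's limit is smaller: adopt it and retry */
		if (theirs < ours)
			return true;
		/* peer wants as much as or more than we offered: give up */
		return false;
	}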
static void
@@ -2483,8 +2655,8 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
switch (reason) {
case IB_CM_REJ_STALE_CONN:
- kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
- IBLND_REJECT_CONN_STALE, NULL);
+ kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
+ IBLND_REJECT_CONN_STALE, NULL);
break;
case IB_CM_REJ_INVALID_SERVICE_ID:
@@ -2521,9 +2693,11 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
if (priv_nob >= sizeof(kib_rej_t) &&
rej->ibr_version > IBLND_MSG_VERSION_1) {
- /* priv_nob is always 148 in current version
+ /*
+ * priv_nob is always 148 in current version
* of OFED, so we still need to check version.
- * (define of IB_CM_REJ_PRIVATE_DATA_SIZE) */
+ * (define of IB_CM_REJ_PRIVATE_DATA_SIZE)
+ */
cp = &rej->ibr_cp;
if (flip) {
@@ -2564,24 +2738,11 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
case IBLND_REJECT_CONN_RACE:
case IBLND_REJECT_CONN_STALE:
case IBLND_REJECT_CONN_UNCOMPAT:
- kiblnd_reconnect(conn, rej->ibr_version,
- incarnation, rej->ibr_why, cp);
- break;
-
case IBLND_REJECT_MSG_QUEUE_SIZE:
- CERROR("%s rejected: incompatible message queue depth %d, %d\n",
- libcfs_nid2str(peer->ibp_nid),
- cp != NULL ? cp->ibcp_queue_depth :
- IBLND_MSG_QUEUE_SIZE(rej->ibr_version),
- IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
- break;
-
case IBLND_REJECT_RDMA_FRAGS:
- CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n",
- libcfs_nid2str(peer->ibp_nid),
- cp != NULL ? cp->ibcp_max_frags :
- IBLND_RDMA_FRAGS(rej->ibr_version),
- IBLND_RDMA_FRAGS(conn->ibc_version));
+ kiblnd_check_reconnect(conn, rej->ibr_version,
+ incarnation,
+ rej->ibr_why, cp);
break;
case IBLND_REJECT_NO_RESOURCES:
@@ -2623,9 +2784,9 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
int rc = kiblnd_unpack_msg(msg, priv_nob);
unsigned long flags;
- LASSERT(net != NULL);
+ LASSERT(net);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't unpack connack from %s: %d\n",
libcfs_nid2str(peer->ibp_nid), rc);
goto failed;
@@ -2645,22 +2806,22 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
goto failed;
}
- if (msg->ibm_u.connparams.ibcp_queue_depth !=
- IBLND_MSG_QUEUE_SIZE(ver)) {
- CERROR("%s has incompatible queue depth %d(%d wanted)\n",
+ if (msg->ibm_u.connparams.ibcp_queue_depth >
+ conn->ibc_queue_depth) {
+ CERROR("%s has incompatible queue depth %d (<=%d wanted)\n",
libcfs_nid2str(peer->ibp_nid),
msg->ibm_u.connparams.ibcp_queue_depth,
- IBLND_MSG_QUEUE_SIZE(ver));
+ conn->ibc_queue_depth);
rc = -EPROTO;
goto failed;
}
- if (msg->ibm_u.connparams.ibcp_max_frags !=
- IBLND_RDMA_FRAGS(ver)) {
- CERROR("%s has incompatible max_frags %d (%d wanted)\n",
+ if (msg->ibm_u.connparams.ibcp_max_frags >
+ conn->ibc_max_frags) {
+ CERROR("%s has incompatible max_frags %d (<=%d wanted)\n",
libcfs_nid2str(peer->ibp_nid),
msg->ibm_u.connparams.ibcp_max_frags,
- IBLND_RDMA_FRAGS(ver));
+ conn->ibc_max_frags);
rc = -EPROTO;
goto failed;
}
@@ -2682,7 +2843,7 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
rc = -ESTALE;
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- if (rc != 0) {
+ if (rc) {
CERROR("Bad connection reply from %s, rc = %d, version: %x max_frags: %d\n",
libcfs_nid2str(peer->ibp_nid), rc,
msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
@@ -2690,21 +2851,24 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
}
conn->ibc_incarnation = msg->ibm_srcstamp;
- conn->ibc_credits =
- conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver);
- LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver)
- <= IBLND_RX_MSGS(ver));
+ conn->ibc_credits = msg->ibm_u.connparams.ibcp_queue_depth;
+ conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth;
+ conn->ibc_queue_depth = msg->ibm_u.connparams.ibcp_queue_depth;
+ conn->ibc_max_frags = msg->ibm_u.connparams.ibcp_max_frags;
+ LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
+ IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(conn));
kiblnd_connreq_done(conn, 0);
return;
failed:
- /* NB My QP has already established itself, so I handle anything going
+ /*
+ * NB My QP has already established itself, so I handle anything going
* wrong here by setting ibc_comms_error.
* kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then
- * immediately tears it down. */
-
- LASSERT(rc != 0);
+ * immediately tears it down.
+ */
+ LASSERT(rc);
conn->ibc_comms_error = rc;
kiblnd_connreq_done(conn, 0);
}
@@ -2724,28 +2888,30 @@ kiblnd_active_connect(struct rdma_cm_id *cmid)
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
incarnation = peer->ibp_incarnation;
- version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION :
- peer->ibp_version;
+ version = !peer->ibp_version ? IBLND_MSG_VERSION :
+ peer->ibp_version;
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
- if (conn == NULL) {
+ conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT,
+ version);
+ if (!conn) {
kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
kiblnd_peer_decref(peer); /* lose cmid's ref */
return -ENOMEM;
}
- /* conn "owns" cmid now, so I return success from here on to ensure the
+ /*
+ * conn "owns" cmid now, so I return success from here on to ensure the
* CM callback doesn't destroy cmid. conn also takes over cmid's ref
- * on peer */
-
+ * on peer
+ */
msg = &conn->ibc_connvars->cv_msg;
memset(msg, 0, sizeof(*msg));
kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
- msg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
- msg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
+ msg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth;
+ msg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags;
msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
kiblnd_pack_msg(peer->ibp_ni, msg, version,
@@ -2764,7 +2930,7 @@ kiblnd_active_connect(struct rdma_cm_id *cmid)
LASSERT(conn->ibc_cmid == cmid);
rc = rdma_connect(cmid, &cp);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't connect to %s: %d\n",
libcfs_nid2str(peer->ibp_nid), rc);
kiblnd_connreq_done(conn, rc);
@@ -2798,10 +2964,10 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
case RDMA_CM_EVENT_ADDR_ERROR:
peer = (kib_peer_t *)cmid->context;
CNETERR("%s: ADDR ERROR %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
+ libcfs_nid2str(peer->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
kiblnd_peer_decref(peer);
- return -EHOSTUNREACH; /* rc != 0 destroys cmid */
+ return -EHOSTUNREACH; /* rc destroys cmid */
case RDMA_CM_EVENT_ADDR_RESOLVED:
peer = (kib_peer_t *)cmid->context;
@@ -2809,14 +2975,14 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
CDEBUG(D_NET, "%s Addr resolved: %d\n",
libcfs_nid2str(peer->ibp_nid), event->status);
- if (event->status != 0) {
+ if (event->status) {
CNETERR("Can't resolve address for %s: %d\n",
libcfs_nid2str(peer->ibp_nid), event->status);
rc = event->status;
} else {
rc = rdma_resolve_route(
cmid, *kiblnd_tunables.kib_timeout * 1000);
- if (rc == 0)
+ if (!rc)
return 0;
/* Can't initiate route resolution */
CERROR("Can't resolve route for %s: %d\n",
@@ -2824,7 +2990,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
}
kiblnd_peer_connect_failed(peer, 1, rc);
kiblnd_peer_decref(peer);
- return rc; /* rc != 0 destroys cmid */
+ return rc; /* rc destroys cmid */
case RDMA_CM_EVENT_ROUTE_ERROR:
peer = (kib_peer_t *)cmid->context;
@@ -2832,28 +2998,28 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
libcfs_nid2str(peer->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
kiblnd_peer_decref(peer);
- return -EHOSTUNREACH; /* rc != 0 destroys cmid */
+ return -EHOSTUNREACH; /* rc destroys cmid */
case RDMA_CM_EVENT_ROUTE_RESOLVED:
peer = (kib_peer_t *)cmid->context;
CDEBUG(D_NET, "%s Route resolved: %d\n",
libcfs_nid2str(peer->ibp_nid), event->status);
- if (event->status == 0)
+ if (!event->status)
return kiblnd_active_connect(cmid);
CNETERR("Can't resolve route for %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
+ libcfs_nid2str(peer->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer, 1, event->status);
kiblnd_peer_decref(peer);
- return event->status; /* rc != 0 destroys cmid */
+ return event->status; /* rc destroys cmid */
case RDMA_CM_EVENT_UNREACHABLE:
conn = (kib_conn_t *)cmid->context;
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
CNETERR("%s: UNREACHABLE %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
kiblnd_connreq_done(conn, -ENETDOWN);
kiblnd_conn_decref(conn);
return 0;
@@ -2876,8 +3042,8 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
case IBLND_CONN_PASSIVE_WAIT:
CERROR("%s: REJECTED %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- event->status);
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ event->status);
kiblnd_connreq_done(conn, -ECONNRESET);
break;
@@ -2933,8 +3099,10 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
LCONSOLE_ERROR_MSG(0x131,
"Received notification of device removal\n"
"Please shutdown LNET to allow this to proceed\n");
- /* Can't remove network from underneath LNET for now, so I have
- * to ignore this */
+ /*
+ * Can't remove network from underneath LNET for now, so I have
+ * to ignore this
+ */
return 0;
case RDMA_CM_EVENT_ADDR_CHANGE:
@@ -2956,7 +3124,7 @@ kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
LASSERT(tx->tx_queued);
} else {
LASSERT(!tx->tx_queued);
- LASSERT(tx->tx_waiting || tx->tx_sending != 0);
+ LASSERT(tx->tx_waiting || tx->tx_sending);
}
if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
@@ -2989,13 +3157,16 @@ kiblnd_check_conns(int idx)
struct list_head *ptmp;
kib_peer_t *peer;
kib_conn_t *conn;
+ kib_conn_t *temp;
kib_conn_t *tmp;
struct list_head *ctmp;
unsigned long flags;
- /* NB. We expect to have a look at all the peers and not find any
+ /*
+ * NB. We expect to have a look at all the peers and not find any
* RDMAs to time out, so we just use a shared lock while we
- * take a look... */
+ * take a look...
+ */
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
list_for_each(ptmp, peers) {
@@ -3028,8 +3199,7 @@ kiblnd_check_conns(int idx)
conn->ibc_reserved_credits);
list_add(&conn->ibc_connd_list, &closes);
} else {
- list_add(&conn->ibc_connd_list,
- &checksends);
+ list_add(&conn->ibc_connd_list, &checksends);
}
/* +ref for 'closes' or 'checksends' */
kiblnd_conn_addref(conn);
@@ -3040,21 +3210,23 @@ kiblnd_check_conns(int idx)
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- /* Handle timeout by closing the whole
+ /*
+ * Handle timeout by closing the whole
* connection. We can only be sure RDMA activity
- * has ceased once the QP has been modified. */
+ * has ceased once the QP has been modified.
+ */
list_for_each_entry_safe(conn, tmp, &closes, ibc_connd_list) {
list_del(&conn->ibc_connd_list);
kiblnd_close_conn(conn, -ETIMEDOUT);
kiblnd_conn_decref(conn);
}
- /* In case we have enough credits to return via a
+ /*
+ * In case we have enough credits to return via a
* NOOP, but there were no non-blocking tx descs
- * free to do it last time... */
- while (!list_empty(&checksends)) {
- conn = list_entry(checksends.next,
- kib_conn_t, ibc_connd_list);
+ * free to do it last time...
+ */
+ list_for_each_entry_safe(conn, temp, &checksends, ibc_connd_list) {
list_del(&conn->ibc_connd_list);
kiblnd_check_sends(conn);
kiblnd_conn_decref(conn);
@@ -3074,9 +3246,21 @@ kiblnd_disconnect_conn(kib_conn_t *conn)
kiblnd_peer_notify(conn->ibc_peer);
}
+/*
+ * High-water mark for connection races with the same peer; reconnection
+ * attempts should be delayed after more than KIB_RECONN_HIGH_RACE
+ * consecutive tries.
+ */
+#define KIB_RECONN_HIGH_RACE 10
+/*
+ * Allow connd to take a break and handle other things after this many
+ * consecutive reconnection attempts.
+ */
+#define KIB_RECONN_BREAK 100
+
int
kiblnd_connd(void *arg)
{
+ spinlock_t *lock = &kiblnd_data.kib_connd_lock;
wait_queue_t wait;
unsigned long flags;
kib_conn_t *conn;
@@ -3091,39 +3275,79 @@ kiblnd_connd(void *arg)
init_waitqueue_entry(&wait, current);
kiblnd_data.kib_connd = current;
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ spin_lock_irqsave(lock, flags);
while (!kiblnd_data.kib_shutdown) {
+ int reconn = 0;
dropped_lock = 0;
if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
+ kib_peer_t *peer = NULL;
+
conn = list_entry(kiblnd_data.kib_connd_zombies.next,
- kib_conn_t, ibc_list);
+ kib_conn_t, ibc_list);
list_del(&conn->ibc_list);
+ if (conn->ibc_reconnect) {
+ peer = conn->ibc_peer;
+ kiblnd_peer_addref(peer);
+ }
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
- flags);
+ spin_unlock_irqrestore(lock, flags);
dropped_lock = 1;
- kiblnd_destroy_conn(conn);
+ kiblnd_destroy_conn(conn, !peer);
+
+ spin_lock_irqsave(lock, flags);
+ if (!peer)
+ continue;
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ conn->ibc_peer = peer;
+ if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE)
+ list_add_tail(&conn->ibc_list,
+ &kiblnd_data.kib_reconn_list);
+ else
+ list_add_tail(&conn->ibc_list,
+ &kiblnd_data.kib_reconn_wait);
}
if (!list_empty(&kiblnd_data.kib_connd_conns)) {
conn = list_entry(kiblnd_data.kib_connd_conns.next,
- kib_conn_t, ibc_list);
+ kib_conn_t, ibc_list);
list_del(&conn->ibc_list);
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
- flags);
+ spin_unlock_irqrestore(lock, flags);
dropped_lock = 1;
kiblnd_disconnect_conn(conn);
kiblnd_conn_decref(conn);
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ spin_lock_irqsave(lock, flags);
+ }
+
+ while (reconn < KIB_RECONN_BREAK) {
+ if (kiblnd_data.kib_reconn_sec !=
+ ktime_get_real_seconds()) {
+ kiblnd_data.kib_reconn_sec = ktime_get_real_seconds();
+ list_splice_init(&kiblnd_data.kib_reconn_wait,
+ &kiblnd_data.kib_reconn_list);
+ }
+
+ if (list_empty(&kiblnd_data.kib_reconn_list))
+ break;
+
+ conn = list_entry(kiblnd_data.kib_reconn_list.next,
+ kib_conn_t, ibc_list);
+ list_del(&conn->ibc_list);
+
+ spin_unlock_irqrestore(lock, flags);
+ dropped_lock = 1;
+
+ reconn += kiblnd_reconnect_peer(conn->ibc_peer);
+ kiblnd_peer_decref(conn->ibc_peer);
+ LIBCFS_FREE(conn, sizeof(*conn));
+
+ spin_lock_irqsave(lock, flags);
}
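For illustration (not part of the patch): the loop above implements a
two-level throttle. Peers parked on kib_reconn_wait are released into
kib_reconn_list at most once per wall-clock second, and connd services
at most KIB_RECONN_BREAK of them before returning to its other duties.
A hypothetical condensed form:

	static int example_reconn_pass(struct list_head *active,
				       struct list_head *wait,
				       time64_t *last_sec)
	{
		int done = 0;

		if (*last_sec != ktime_get_real_seconds()) {
			/* a new second: parked peers get another shot */
			*last_sec = ktime_get_real_seconds();
			list_splice_init(wait, active);
		}

		while (done < KIB_RECONN_BREAK && !list_empty(active)) {
			struct list_head *e = active->next;

			list_del(e);	/* pop one parked entry */
			/* ... reconnect the peer it refers to ... */
			done++;
		}
		return done;
	}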
/* careful with the jiffy wrap... */
@@ -3133,21 +3357,22 @@ kiblnd_connd(void *arg)
const int p = 1;
int chunk = kiblnd_data.kib_peer_hash_size;
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
dropped_lock = 1;
- /* Time to check for RDMA timeouts on a few more
+ /*
+ * Time to check for RDMA timeouts on a few more
* peers: I do checks every 'p' seconds on a
* proportion of the peer table and I need to check
* every connection 'n' times within a timeout
* interval, to ensure I detect a timeout on any
* connection within (n+1)/n times the timeout
- * interval. */
-
+ * interval.
+ */
if (*kiblnd_tunables.kib_timeout > n * p)
chunk = (chunk * n * p) /
*kiblnd_tunables.kib_timeout;
- if (chunk == 0)
+ if (!chunk)
chunk = 1;
for (i = 0; i < chunk; i++) {
@@ -3156,8 +3381,8 @@ kiblnd_connd(void *arg)
kiblnd_data.kib_peer_hash_size;
}
- deadline += p * HZ;
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ deadline += msecs_to_jiffies(p * MSEC_PER_SEC);
+ spin_lock_irqsave(lock, flags);
}
if (dropped_lock)
@@ -3166,15 +3391,15 @@ kiblnd_connd(void *arg)
/* Nothing to do for 'timeout' */
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
schedule_timeout(timeout);
remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ spin_lock_irqsave(lock, flags);
}
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
kiblnd_thread_fini();
return 0;
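For illustration (not part of the patch): the chunk computation in the
connd loop above spreads the peer-table timeout scan so that every
connection is still checked n times per timeout interval. With assumed
values (n and the hash size are not shown in this hunk; timeout
defaults to 50 per the modparams diff below), the arithmetic works out
as follows:

	int hash_size = 101;	/* kib_peer_hash_size, assumed */
	int timeout = 50;	/* *kiblnd_tunables.kib_timeout default */
	int n = 3;		/* checks per timeout interval, assumed */
	int p = 1;		/* scan period in seconds */
	int chunk = hash_size;

	if (timeout > n * p)
		chunk = hash_size * n * p / timeout;	/* 303 / 50 = 6 */
	if (!chunk)
		chunk = 1;
	/*
	 * 6 buckets per second covers 101 buckets in about 17 seconds,
	 * i.e. roughly 3 sweeps within the 50 second timeout.
	 */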
@@ -3206,12 +3431,14 @@ kiblnd_complete(struct ib_wc *wc)
LBUG();
case IBLND_WID_RDMA:
- /* We only get RDMA completion notification if it fails. All
+ /*
+ * We only get RDMA completion notification if it fails. All
* subsequent work items, including the final SEND will fail
* too. However we can't print out any more info about the
* failing RDMA because 'tx' might be back on the idle list or
* even reused already if we didn't manage to post all our work
- * items */
+ * items
+ */
CNETERR("RDMA (tx: %p) failed: %d\n",
kiblnd_wreqid2ptr(wc->wr_id), wc->status);
return;
@@ -3230,11 +3457,13 @@ kiblnd_complete(struct ib_wc *wc)
void
kiblnd_cq_completion(struct ib_cq *cq, void *arg)
{
- /* NB I'm not allowed to schedule this conn once its refcount has
+ /*
+ * NB I'm not allowed to schedule this conn once its refcount has
* reached 0. Since fundamentally I'm racing with scheduler threads
* consuming my CQ I could be called after all completions have
- * occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
- * and this CQ is about to be destroyed so I NOOP. */
+ * occurred. But in this case, !ibc_nrx && !ibc_nsends_posted
+ * and this CQ is about to be destroyed so I NOOP.
+ */
kib_conn_t *conn = arg;
struct kib_sched_info *sched = conn->ibc_sched;
unsigned long flags;
@@ -3288,7 +3517,7 @@ kiblnd_scheduler(void *arg)
sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
- if (rc != 0) {
+ if (rc) {
CWARN("Failed to bind on CPT %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n",
sched->ibs_cpt);
}
@@ -3308,8 +3537,8 @@ kiblnd_scheduler(void *arg)
did_something = 0;
if (!list_empty(&sched->ibs_conns)) {
- conn = list_entry(sched->ibs_conns.next,
- kib_conn_t, ibc_sched_list);
+ conn = list_entry(sched->ibs_conns.next, kib_conn_t,
+ ibc_sched_list);
/* take over kib_sched_conns' ref on conn... */
LASSERT(conn->ibc_scheduled);
list_del(&conn->ibc_sched_list);
@@ -3317,8 +3546,10 @@ kiblnd_scheduler(void *arg)
spin_unlock_irqrestore(&sched->ibs_lock, flags);
+ wc.wr_id = IBLND_WID_INVAL;
+
rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
- if (rc == 0) {
+ if (!rc) {
rc = ib_req_notify_cq(conn->ibc_cq,
IB_CQ_NEXT_COMP);
if (rc < 0) {
@@ -3327,13 +3558,22 @@ kiblnd_scheduler(void *arg)
kiblnd_close_conn(conn, -EIO);
kiblnd_conn_decref(conn);
spin_lock_irqsave(&sched->ibs_lock,
- flags);
+ flags);
continue;
}
rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
}
+ if (unlikely(rc > 0 && wc.wr_id == IBLND_WID_INVAL)) {
+ LCONSOLE_ERROR("ib_poll_cq (rc: %d) returned invalid wr_id, opcode %d, status: %d, vendor_err: %d, conn: %s status: %d\nplease upgrade firmware and OFED or contact vendor.\n",
+ rc, wc.opcode, wc.status,
+ wc.vendor_err,
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ conn->ibc_state);
+ rc = -EINVAL;
+ }
+
if (rc < 0) {
CWARN("%s: ib_poll_cq failed: %d, closing connection\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid),
@@ -3346,21 +3586,23 @@ kiblnd_scheduler(void *arg)
spin_lock_irqsave(&sched->ibs_lock, flags);
- if (rc != 0 || conn->ibc_ready) {
- /* There may be another completion waiting; get
+ if (rc || conn->ibc_ready) {
+ /*
+ * There may be another completion waiting; get
* another scheduler to check while I handle
- * this one... */
+ * this one...
+ */
/* +1 ref for sched_conns */
kiblnd_conn_addref(conn);
list_add_tail(&conn->ibc_sched_list,
- &sched->ibs_conns);
+ &sched->ibs_conns);
if (waitqueue_active(&sched->ibs_waitq))
wake_up(&sched->ibs_waitq);
} else {
conn->ibc_scheduled = 0;
}
- if (rc != 0) {
+ if (rc) {
spin_unlock_irqrestore(&sched->ibs_lock, flags);
kiblnd_complete(&wc);
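For illustration (not part of the patch): the IBLND_WID_INVAL change in
the scheduler hunk above is a sentinel check. The work completion is
poisoned before polling so a broken HCA or firmware that signals a
completion without filling in wr_id is detected rather than
dereferenced. The shape of the check, assuming a valid struct ib_cq *cq:

	struct ib_wc wc;
	int rc;

	wc.wr_id = IBLND_WID_INVAL;	/* poison before polling */
	rc = ib_poll_cq(cq, 1, &wc);
	if (rc > 0 && wc.wr_id == IBLND_WID_INVAL) {
		/*
		 * A completion arrived with wr_id untouched: treat it
		 * as a hard error rather than trusting the garbage.
		 */
		rc = -EINVAL;
	}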
@@ -3400,7 +3642,7 @@ kiblnd_failover_thread(void *arg)
unsigned long flags;
int rc;
- LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
+ LASSERT(*kiblnd_tunables.kib_dev_failover);
cfs_block_allsigs();
@@ -3459,13 +3701,15 @@ kiblnd_failover_thread(void *arg)
remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
write_lock_irqsave(glock, flags);
- if (!long_sleep || rc != 0)
+ if (!long_sleep || rc)
continue;
- /* have a long sleep, routine check all active devices,
+ /*
+ * have a long sleep: routinely check all active devices;
 * we need a check like this because if there is no active
 * connection on the dev and no SEND from local, we may listen
- * on wrong HCA for ever while there is a bonding failover */
+ * on the wrong HCA forever while there is a bonding failover
+ */
list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
if (kiblnd_dev_can_failover(dev)) {
list_add_tail(&dev->ibd_fail_list,
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index 1d4e7efb5..b4607dad3 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -52,8 +52,10 @@ static int timeout = 50;
module_param(timeout, int, 0644);
MODULE_PARM_DESC(timeout, "timeout (seconds)");
-/* Number of threads in each scheduler pool which is percpt,
- * we will estimate reasonable value based on CPUs if it's set to zero. */
+/*
+ * Number of threads in each scheduler pool which is percpt,
+ * we will estimate reasonable value based on CPUs if it's set to zero.
+ */
static int nscheds;
module_param(nscheds, int, 0444);
MODULE_PARM_DESC(nscheds, "number of threads in each scheduler pool");
@@ -200,7 +202,7 @@ kiblnd_tunables_init(void)
if (*kiblnd_tunables.kib_map_on_demand == 1)
*kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */
- if (*kiblnd_tunables.kib_concurrent_sends == 0) {
+ if (!*kiblnd_tunables.kib_concurrent_sends) {
if (*kiblnd_tunables.kib_map_on_demand > 0 &&
*kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
*kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2;
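For illustration (not part of the patch): the hunk above is truncated
at the file boundary, but the defaulting rule it belongs to can be
restated as a standalone sketch. The final fallback branch below is an
assumption, not visible in this diff:

	static int example_default_concurrent_sends(int concurrent_sends,
						    int map_on_demand,
						    int max_rdma_frags,
						    int peertxcredits)
	{
		if (concurrent_sends)
			return concurrent_sends;  /* explicitly tuned */
		/*
		 * Small map-on-demand threshold: mapping is cheap, so
		 * allow twice the per-peer credits in flight.
		 */
		if (map_on_demand > 0 && map_on_demand <= max_rdma_frags / 8)
			return peertxcredits * 2;
		return peertxcredits;		  /* assumed fallback */
	}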
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index 05aa90ea5..cca7b2f7f 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -70,7 +70,7 @@ ksocknal_create_route(__u32 ipaddr, int port)
ksock_route_t *route;
LIBCFS_ALLOC(route, sizeof(*route));
- if (route == NULL)
+ if (!route)
return NULL;
atomic_set(&route->ksnr_refcount, 1);
@@ -91,9 +91,9 @@ ksocknal_create_route(__u32 ipaddr, int port)
void
ksocknal_destroy_route(ksock_route_t *route)
{
- LASSERT(atomic_read(&route->ksnr_refcount) == 0);
+ LASSERT(!atomic_read(&route->ksnr_refcount));
- if (route->ksnr_peer != NULL)
+ if (route->ksnr_peer)
ksocknal_peer_decref(route->ksnr_peer);
LIBCFS_FREE(route, sizeof(*route));
@@ -102,6 +102,7 @@ ksocknal_destroy_route(ksock_route_t *route)
static int
ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
{
+ int cpt = lnet_cpt_of_nid(id.nid);
ksock_net_t *net = ni->ni_data;
ksock_peer_t *peer;
@@ -109,8 +110,8 @@ ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
LASSERT(id.pid != LNET_PID_ANY);
LASSERT(!in_interrupt());
- LIBCFS_ALLOC(peer, sizeof(*peer));
- if (peer == NULL)
+ LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
+ if (!peer)
return -ENOMEM;
peer->ksnp_ni = ni;
@@ -152,10 +153,10 @@ ksocknal_destroy_peer(ksock_peer_t *peer)
ksock_net_t *net = peer->ksnp_ni->ni_data;
CDEBUG(D_NET, "peer %s %p deleted\n",
- libcfs_id2str(peer->ksnp_id), peer);
+ libcfs_id2str(peer->ksnp_id), peer);
- LASSERT(atomic_read(&peer->ksnp_refcount) == 0);
- LASSERT(peer->ksnp_accepting == 0);
+ LASSERT(!atomic_read(&peer->ksnp_refcount));
+ LASSERT(!peer->ksnp_accepting);
LASSERT(list_empty(&peer->ksnp_conns));
LASSERT(list_empty(&peer->ksnp_routes));
LASSERT(list_empty(&peer->ksnp_tx_queue));
@@ -163,10 +164,12 @@ ksocknal_destroy_peer(ksock_peer_t *peer)
LIBCFS_FREE(peer, sizeof(*peer));
- /* NB a peer's connections and routes keep a reference on their peer
+ /*
+ * NB a peer's connections and routes keep a reference on their peer
* until they are destroyed, so we can be assured that _all_ state to
* do with this peer has been cleaned up when its refcount drops to
- * zero. */
+ * zero.
+ */
spin_lock_bh(&net->ksnn_lock);
net->ksnn_npeers--;
spin_unlock_bh(&net->ksnn_lock);
@@ -180,7 +183,6 @@ ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
ksock_peer_t *peer;
list_for_each(tmp, peer_list) {
-
peer = list_entry(tmp, ksock_peer_t, ksnp_list);
LASSERT(!peer->ksnp_closing);
@@ -207,7 +209,7 @@ ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id)
read_lock(&ksocknal_data.ksnd_global_lock);
peer = ksocknal_find_peer_locked(ni, id);
- if (peer != NULL) /* +1 ref for caller? */
+ if (peer) /* +1 ref for caller? */
ksocknal_peer_addref(peer);
read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -226,9 +228,11 @@ ksocknal_unlink_peer_locked(ksock_peer_t *peer)
ip = peer->ksnp_passive_ips[i];
iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
- /* All IPs in peer->ksnp_passive_ips[] come from the
- * interface list, therefore the call must succeed. */
- LASSERT(iface != NULL);
+ /*
+ * All IPs in peer->ksnp_passive_ips[] come from the
+ * interface list, therefore the call must succeed.
+ */
+ LASSERT(iface);
CDEBUG(D_NET, "peer=%p iface=%p ksni_nroutes=%d\n",
peer, iface, iface->ksni_nroutes);
@@ -246,8 +250,8 @@ ksocknal_unlink_peer_locked(ksock_peer_t *peer)
static int
ksocknal_get_peer_info(lnet_ni_t *ni, int index,
- lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
- int *port, int *conn_count, int *share_count)
+ lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
+ int *port, int *conn_count, int *share_count)
{
ksock_peer_t *peer;
struct list_head *ptmp;
@@ -260,14 +264,13 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
-
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
if (peer->ksnp_ni != ni)
continue;
- if (peer->ksnp_n_passive_ips == 0 &&
+ if (!peer->ksnp_n_passive_ips &&
list_empty(&peer->ksnp_routes)) {
if (index-- > 0)
continue;
@@ -301,7 +304,7 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
continue;
route = list_entry(rtmp, ksock_route_t,
- ksnr_list);
+ ksnr_list);
*id = peer->ksnp_id;
*myip = route->ksnr_myipaddr;
@@ -330,7 +333,7 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
ksocknal_route_addref(route);
if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
- if (route->ksnr_myipaddr == 0) {
+ if (!route->ksnr_myipaddr) {
/* route wasn't bound locally yet (the initial route) */
CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
libcfs_id2str(peer->ksnp_id),
@@ -345,21 +348,23 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
route->ksnr_myipaddr);
- if (iface != NULL)
+ if (iface)
iface->ksni_nroutes--;
}
route->ksnr_myipaddr = conn->ksnc_myipaddr;
iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
route->ksnr_myipaddr);
- if (iface != NULL)
+ if (iface)
iface->ksni_nroutes++;
}
- route->ksnr_connected |= (1<<type);
+ route->ksnr_connected |= (1 << type);
route->ksnr_conn_count++;
- /* Successful connection => further attempts can
- * proceed immediately */
+ /*
+ * Successful connection => further attempts can
+ * proceed immediately
+ */
route->ksnr_retry_interval = 0;
}
@@ -371,10 +376,10 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
ksock_route_t *route2;
LASSERT(!peer->ksnp_closing);
- LASSERT(route->ksnr_peer == NULL);
+ LASSERT(!route->ksnr_peer);
LASSERT(!route->ksnr_scheduled);
LASSERT(!route->ksnr_connecting);
- LASSERT(route->ksnr_connected == 0);
+ LASSERT(!route->ksnr_connected);
/* LASSERT(unique) */
list_for_each(tmp, &peer->ksnp_routes) {
@@ -382,8 +387,8 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
CERROR("Duplicate route %s %pI4h\n",
- libcfs_id2str(peer->ksnp_id),
- &route->ksnr_ipaddr);
+ libcfs_id2str(peer->ksnp_id),
+ &route->ksnr_ipaddr);
LBUG();
}
}
@@ -425,10 +430,10 @@ ksocknal_del_route_locked(ksock_route_t *route)
ksocknal_close_conn_locked(conn, 0);
}
- if (route->ksnr_myipaddr != 0) {
+ if (route->ksnr_myipaddr) {
iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
route->ksnr_myipaddr);
- if (iface != NULL)
+ if (iface)
iface->ksni_nroutes--;
}
@@ -438,8 +443,10 @@ ksocknal_del_route_locked(ksock_route_t *route)
if (list_empty(&peer->ksnp_routes) &&
list_empty(&peer->ksnp_conns)) {
- /* I've just removed the last route to a peer with no active
- * connections */
+ /*
+ * I've just removed the last route to a peer with no active
+ * connections
+ */
ksocknal_unlink_peer_locked(peer);
}
}
@@ -460,11 +467,11 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
/* Have a brand new peer ready... */
rc = ksocknal_create_peer(&peer, ni, id);
- if (rc != 0)
+ if (rc)
return rc;
route = ksocknal_create_route(ipaddr, port);
- if (route == NULL) {
+ if (!route) {
ksocknal_peer_decref(peer);
return -ENOMEM;
}
@@ -472,16 +479,16 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
write_lock_bh(&ksocknal_data.ksnd_global_lock);
/* always called with a ref on ni, so shutdown can't have started */
- LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
+ LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown);
peer2 = ksocknal_find_peer_locked(ni, id);
- if (peer2 != NULL) {
+ if (peer2) {
ksocknal_peer_decref(peer);
peer = peer2;
} else {
/* peer table takes my ref on peer */
list_add_tail(&peer->ksnp_list,
- ksocknal_nid2peerlist(id.nid));
+ ksocknal_nid2peerlist(id.nid));
}
route2 = NULL;
@@ -493,7 +500,7 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
route2 = NULL;
}
- if (route2 == NULL) {
+ if (!route2) {
ksocknal_add_route_locked(peer, route);
route->ksnr_share_count++;
} else {
@@ -524,7 +531,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
route = list_entry(tmp, ksock_route_t, ksnr_list);
/* no match */
- if (!(ip == 0 || route->ksnr_ipaddr == ip))
+ if (!(!ip || route->ksnr_ipaddr == ip))
continue;
route->ksnr_share_count = 0;
@@ -538,15 +545,16 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
nshared += route->ksnr_share_count;
}
- if (nshared == 0) {
- /* remove everything else if there are no explicit entries
- * left */
-
+ if (!nshared) {
+ /*
+ * remove everything else if there are no explicit entries
+ * left
+ */
list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
route = list_entry(tmp, ksock_route_t, ksnr_list);
/* we should only be removing auto-entries */
- LASSERT(route->ksnr_share_count == 0);
+ LASSERT(!route->ksnr_share_count);
ksocknal_del_route_locked(route);
}
@@ -575,16 +583,16 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- if (id.nid != LNET_NID_ANY)
- lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
- else {
+ if (id.nid != LNET_NID_ANY) {
+ lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
+ hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
+ } else {
lo = 0;
hi = ksocknal_data.ksnd_peer_hash_size - 1;
}
for (i = lo; i <= hi; i++) {
- list_for_each_safe(ptmp, pnxt,
- &ksocknal_data.ksnd_peers[i]) {
+ list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
if (peer->ksnp_ni != ni)
@@ -604,7 +612,7 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
LASSERT(list_empty(&peer->ksnp_routes));
list_splice_init(&peer->ksnp_tx_queue,
- &zombies);
+ &zombies);
}
ksocknal_peer_decref(peer); /* ...till here */
@@ -645,7 +653,7 @@ ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
continue;
conn = list_entry(ctmp, ksock_conn_t,
- ksnc_list);
+ ksnc_list);
ksocknal_conn_addref(conn);
read_unlock(&ksocknal_data.ksnd_global_lock);
return conn;
@@ -692,8 +700,10 @@ ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
nip = net->ksnn_ninterfaces;
LASSERT(nip <= LNET_MAX_INTERFACES);
- /* Only offer interfaces for additional connections if I have
- * more than one. */
+ /*
+ * Only offer interfaces for additional connections if I have
+ * more than one.
+ */
if (nip < 2) {
read_unlock(&ksocknal_data.ksnd_global_lock);
return 0;
@@ -701,7 +711,7 @@ ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
for (i = 0; i < nip; i++) {
ipaddrs[i] = net->ksnn_interfaces[i].ksni_ipaddr;
- LASSERT(ipaddrs[i] != 0);
+ LASSERT(ipaddrs[i]);
}
read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -719,11 +729,11 @@ ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips)
int i;
for (i = 0; i < nips; i++) {
- if (ips[i] == 0)
+ if (!ips[i])
continue;
this_xor = ips[i] ^ iface->ksni_ipaddr;
- this_netmatch = ((this_xor & iface->ksni_netmask) == 0) ? 1 : 0;
+ this_netmatch = !(this_xor & iface->ksni_netmask) ? 1 : 0;
if (!(best < 0 ||
best_netmatch < this_netmatch ||
@@ -757,38 +767,45 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
int best_netmatch;
int best_npeers;
- /* CAVEAT EMPTOR: We do all our interface matching with an
+ /*
+ * CAVEAT EMPTOR: We do all our interface matching with an
* exclusive hold of global lock at IRQ priority. We're only
* expecting to be dealing with small numbers of interfaces, so the
- * O(n**3)-ness shouldn't matter */
-
- /* Also note that I'm not going to return more than n_peerips
- * interfaces, even if I have more myself */
-
+ * O(n**3)-ness shouldn't matter
+ */
+ /*
+ * Also note that I'm not going to return more than n_peerips
+ * interfaces, even if I have more myself
+ */
write_lock_bh(global_lock);
LASSERT(n_peerips <= LNET_MAX_INTERFACES);
LASSERT(net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
- /* Only match interfaces for additional connections
- * if I have > 1 interface */
+ /*
+ * Only match interfaces for additional connections
+ * if I have > 1 interface
+ */
n_ips = (net->ksnn_ninterfaces < 2) ? 0 :
min(n_peerips, net->ksnn_ninterfaces);
for (i = 0; peer->ksnp_n_passive_ips < n_ips; i++) {
/* ^ yes really... */
- /* If we have any new interfaces, first tick off all the
+ /*
+ * If we have any new interfaces, first tick off all the
* peer IPs that match old interfaces, then choose new
* interfaces to match the remaining peer IPS.
* We don't forget interfaces we've stopped using; we might
- * start using them again... */
-
+ * start using them again...
+ */
if (i < peer->ksnp_n_passive_ips) {
/* Old interface. */
ip = peer->ksnp_passive_ips[i];
best_iface = ksocknal_ip2iface(peer->ksnp_ni, ip);
+ /* peer passive ips are kept up to date */
+ LASSERT(best_iface);
} else {
/* choose a new interface */
LASSERT(i == peer->ksnp_n_passive_ips);
@@ -810,9 +827,9 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
k = ksocknal_match_peerip(iface, peerips, n_peerips);
xor = ip ^ peerips[k];
- this_netmatch = ((xor & iface->ksni_netmask) == 0) ? 1 : 0;
+ this_netmatch = !(xor & iface->ksni_netmask) ? 1 : 0;
- if (!(best_iface == NULL ||
+ if (!(!best_iface ||
best_netmatch < this_netmatch ||
(best_netmatch == this_netmatch &&
best_npeers > iface->ksni_npeers)))
@@ -823,10 +840,12 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
best_npeers = iface->ksni_npeers;
}
+ LASSERT(best_iface);
+
best_iface->ksni_npeers++;
ip = best_iface->ksni_ipaddr;
peer->ksnp_passive_ips[i] = ip;
- peer->ksnp_n_passive_ips = i+1;
+ peer->ksnp_n_passive_ips = i + 1;
}
/* mark the best matching peer IP used */
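For illustration (not part of the patch): the this_netmatch tests in
this function reduce "same IP subnet" to one XOR and one mask, since
two addresses share a subnet exactly when all of their differing bits
fall outside the netmask. A self-contained sketch:

	#include <stdbool.h>
	#include <stdint.h>

	/* true if a and b are on the same subnet under netmask */
	static bool same_subnet(uint32_t a, uint32_t b, uint32_t netmask)
	{
		return !((a ^ b) & netmask);
	}

	/* e.g. 192.168.1.10 and 192.168.1.77 match under a /24 mask */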
@@ -860,16 +879,19 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
int i;
int j;
- /* CAVEAT EMPTOR: We do all our interface matching with an
+ /*
+ * CAVEAT EMPTOR: We do all our interface matching with an
* exclusive hold of global lock at IRQ priority. We're only
* expecting to be dealing with small numbers of interfaces, so the
- * O(n**3)-ness here shouldn't matter */
-
+ * O(n**3)-ness here shouldn't matter
+ */
write_lock_bh(global_lock);
if (net->ksnn_ninterfaces < 2) {
- /* Only create additional connections
- * if I have > 1 interface */
+ /*
+ * Only create additional connections
+ * if I have > 1 interface
+ */
write_unlock_bh(global_lock);
return;
}
@@ -877,13 +899,13 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
LASSERT(npeer_ipaddrs <= LNET_MAX_INTERFACES);
for (i = 0; i < npeer_ipaddrs; i++) {
- if (newroute != NULL) {
+ if (newroute) {
newroute->ksnr_ipaddr = peer_ipaddrs[i];
} else {
write_unlock_bh(global_lock);
newroute = ksocknal_create_route(peer_ipaddrs[i], port);
- if (newroute == NULL)
+ if (!newroute)
return;
write_lock_bh(global_lock);
@@ -904,7 +926,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
route = NULL;
}
- if (route != NULL)
+ if (route)
continue;
best_iface = NULL;
@@ -920,21 +942,21 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
/* Using this interface already? */
list_for_each(rtmp, &peer->ksnp_routes) {
route = list_entry(rtmp, ksock_route_t,
- ksnr_list);
+ ksnr_list);
if (route->ksnr_myipaddr == iface->ksni_ipaddr)
break;
route = NULL;
}
- if (route != NULL)
+ if (route)
continue;
- this_netmatch = (((iface->ksni_ipaddr ^
+ this_netmatch = (!((iface->ksni_ipaddr ^
newroute->ksnr_ipaddr) &
- iface->ksni_netmask) == 0) ? 1 : 0;
+ iface->ksni_netmask)) ? 1 : 0;
- if (!(best_iface == NULL ||
+ if (!(!best_iface ||
best_netmatch < this_netmatch ||
(best_netmatch == this_netmatch &&
best_nroutes > iface->ksni_nroutes)))
@@ -945,7 +967,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
best_nroutes = iface->ksni_nroutes;
}
- if (best_iface == NULL)
+ if (!best_iface)
continue;
newroute->ksnr_myipaddr = best_iface->ksni_ipaddr;
@@ -956,7 +978,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
}
write_unlock_bh(global_lock);
- if (newroute != NULL)
+ if (newroute)
ksocknal_route_decref(newroute);
}
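For illustration (not part of the patch): ksocknal_create_routes() and
ksocknal_select_ips() both pick a local interface with the same two-key
preference, first a subnet match, then the least-loaded interface. A
hypothetical helper expressing that ordering:

	/* returns true if the candidate should replace the current best */
	static bool example_better_iface(bool have_best,
					 int best_netmatch, int best_load,
					 int this_netmatch, int this_load)
	{
		if (!have_best)
			return true;
		if (this_netmatch != best_netmatch)
			return this_netmatch > best_netmatch;
		return this_load < best_load;
	}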
@@ -969,10 +991,10 @@ ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
int peer_port;
rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port);
- LASSERT(rc == 0); /* we succeeded before */
+ LASSERT(!rc); /* we succeeded before */
LIBCFS_ALLOC(cr, sizeof(*cr));
- if (cr == NULL) {
+ if (!cr) {
LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from %pI4h: memory exhausted\n",
&peer_ip);
return -ENOMEM;
@@ -997,7 +1019,6 @@ ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr)
ksock_route_t *route;
list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) {
-
if (route->ksnr_ipaddr == ipaddr)
return route->ksnr_connecting;
}
@@ -1006,7 +1027,7 @@ ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr)
int
ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
- struct socket *sock, int type)
+ struct socket *sock, int type)
{
rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
LIST_HEAD(zombies);
@@ -1026,12 +1047,12 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
int active;
char *warn = NULL;
- active = (route != NULL);
+ active = !!route;
LASSERT(active == (type != SOCKLND_CONN_NONE));
LIBCFS_ALLOC(conn, sizeof(*conn));
- if (conn == NULL) {
+ if (!conn) {
rc = -ENOMEM;
goto failed_0;
}
@@ -1039,8 +1060,10 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
conn->ksnc_peer = NULL;
conn->ksnc_route = NULL;
conn->ksnc_sock = sock;
- /* 2 ref, 1 for conn, another extra ref prevents socket
- * being closed before establishment of connection */
+ /*
+ * 2 ref, 1 for conn, another extra ref prevents socket
+ * being closed before establishment of connection
+ */
atomic_set(&conn->ksnc_sock_refcount, 2);
conn->ksnc_type = type;
ksocknal_lib_save_callback(sock, conn);
@@ -1057,21 +1080,22 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
LIBCFS_ALLOC(hello, offsetof(ksock_hello_msg_t,
kshm_ips[LNET_MAX_INTERFACES]));
- if (hello == NULL) {
+ if (!hello) {
rc = -ENOMEM;
goto failed_1;
}
/* stash conn's local and remote addrs */
rc = ksocknal_lib_get_conn_addrs(conn);
- if (rc != 0)
+ if (rc)
goto failed_1;
- /* Find out/confirm peer's NID and connection type and get the
+ /*
+ * Find out/confirm peer's NID and connection type and get the
* vector of interfaces she's willing to let me connect to.
* Passive connections use the listener timeout since the peer sends
- * eagerly */
-
+ * eagerly
+ */
if (active) {
peer = route->ksnr_peer;
LASSERT(ni == peer->ksnp_ni);
@@ -1084,7 +1108,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
conn->ksnc_proto = peer->ksnp_proto;
write_unlock_bh(global_lock);
- if (conn->ksnc_proto == NULL) {
+ if (!conn->ksnc_proto) {
conn->ksnc_proto = &ksocknal_protocol_v3x;
#if SOCKNAL_VERSION_DEBUG
if (*ksocknal_tunables.ksnd_protocol == 2)
@@ -1095,7 +1119,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
}
rc = ksocknal_send_hello(ni, conn, peerid.nid, hello);
- if (rc != 0)
+ if (rc)
goto failed_1;
} else {
peerid.nid = LNET_NID_ANY;
@@ -1109,8 +1133,8 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
if (rc < 0)
goto failed_1;
- LASSERT(rc == 0 || active);
- LASSERT(conn->ksnc_proto != NULL);
+ LASSERT(!rc || active);
+ LASSERT(conn->ksnc_proto);
LASSERT(peerid.nid != LNET_NID_ANY);
cpt = lnet_cpt_of_nid(peerid.nid);
@@ -1120,20 +1144,22 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
write_lock_bh(global_lock);
} else {
rc = ksocknal_create_peer(&peer, ni, peerid);
- if (rc != 0)
+ if (rc)
goto failed_1;
write_lock_bh(global_lock);
/* called with a ref on ni, so shutdown can't have started */
- LASSERT(((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
+ LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown);
peer2 = ksocknal_find_peer_locked(ni, peerid);
- if (peer2 == NULL) {
- /* NB this puts an "empty" peer in the peer
- * table (which takes my ref) */
+ if (!peer2) {
+ /*
+ * NB this puts an "empty" peer in the peer
+ * table (which takes my ref)
+ */
list_add_tail(&peer->ksnp_list,
- ksocknal_nid2peerlist(peerid.nid));
+ ksocknal_nid2peerlist(peerid.nid));
} else {
ksocknal_peer_decref(peer);
peer = peer2;
@@ -1143,8 +1169,10 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
ksocknal_peer_addref(peer);
peer->ksnp_accepting++;
- /* Am I already connecting to this guy? Resolve in
- * favour of higher NID... */
+ /*
+ * Am I already connecting to this guy? Resolve in
+ * favour of higher NID...
+ */
if (peerid.nid < ni->ni_nid &&
ksocknal_connecting(peer, conn->ksnc_ipaddr)) {
rc = EALREADY;
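For illustration (not part of the patch): when two peers dial each
other at once, the race is settled deterministically in favour of the
higher NID. On the passive side that means refusing the incoming
connection, with EALREADY as above, only when the local node outranks
the peer and its own active attempt is already in flight. A
hypothetical restatement:

	/* on the passive side: should this incoming conn be refused? */
	static bool example_refuse_incoming(lnet_nid_t peer_nid,
					    lnet_nid_t my_nid,
					    bool i_am_connecting)
	{
		return peer_nid < my_nid && i_am_connecting;
	}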
@@ -1161,8 +1189,9 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
goto failed_2;
}
- if (peer->ksnp_proto == NULL) {
- /* Never connected before.
+ if (!peer->ksnp_proto) {
+ /*
+ * Never connected before.
* NB recv_hello may have returned EPROTO to signal my peer
* wants a different protocol than the one I asked for.
*/
@@ -1198,8 +1227,10 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
goto failed_2;
}
- /* Refuse to duplicate an existing connection, unless this is a
- * loopback connection */
+ /*
+ * Refuse to duplicate an existing connection, unless this is a
+ * loopback connection
+ */
if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
list_for_each(tmp, &peer->ksnp_conns) {
conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
@@ -1209,9 +1240,11 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
conn2->ksnc_type != conn->ksnc_type)
continue;
- /* Reply on a passive connection attempt so the peer
- * realises we're connected. */
- LASSERT(rc == 0);
+ /*
+ * Reply on a passive connection attempt so the peer
+ * realises we're connected.
+ */
+ LASSERT(!rc);
if (!active)
rc = EALREADY;
@@ -1220,9 +1253,11 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
}
}
- /* If the connection created by this route didn't bind to the IP
+ /*
+ * If the connection created by this route didn't bind to the IP
* address the route connected to, the connection/route matching
- * code below probably isn't going to work. */
+ * code below probably isn't going to work.
+ */
if (active &&
route->ksnr_ipaddr != conn->ksnc_ipaddr) {
CERROR("Route %s %pI4h connected to %pI4h\n",
@@ -1231,10 +1266,12 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
&conn->ksnc_ipaddr);
}
- /* Search for a route corresponding to the new connection and
+ /*
+ * Search for a route corresponding to the new connection and
* create an association. This allows incoming connections created
* by routes in my peer to match my own route entries so I don't
- * continually create duplicate routes. */
+ * continually create duplicate routes.
+ */
list_for_each(tmp, &peer->ksnp_routes) {
route = list_entry(tmp, ksock_route_t, ksnr_list);
@@ -1278,14 +1315,14 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
write_unlock_bh(global_lock);
- /* We've now got a new connection. Any errors from here on are just
+ /*
+ * We've now got a new connection. Any errors from here on are just
* like "normal" comms errors and we close the connection normally.
* NB (a) we still have to send the reply HELLO for passive
* connections,
* (b) normal I/O on the conn is blocked until I setup and call the
* socket callbacks.
*/
-
CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d incarnation:%lld sched[%d:%d]\n",
libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
&conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
@@ -1305,12 +1342,14 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
kshm_ips[LNET_MAX_INTERFACES]));
- /* setup the socket AFTER I've received hello (it disables
+ /*
+ * setup the socket AFTER I've received hello (it disables
* SO_LINGER). I might call back to the acceptor who may want
* to send a protocol version response and then close the
* socket; this ensures the socket only tears down after the
- * response has been sent. */
- if (rc == 0)
+ * response has been sent.
+ */
+ if (!rc)
rc = ksocknal_lib_setup_sock(sock);
write_lock_bh(global_lock);
@@ -1323,14 +1362,14 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
write_unlock_bh(global_lock);
- if (rc != 0) {
+ if (rc) {
write_lock_bh(global_lock);
if (!conn->ksnc_closing) {
/* could be closed by another thread */
ksocknal_close_conn_locked(conn, rc);
}
write_unlock_bh(global_lock);
- } else if (ksocknal_connsock_addref(conn) == 0) {
+ } else if (!ksocknal_connsock_addref(conn)) {
/* Allow I/O to proceed. */
ksocknal_read_callback(conn);
ksocknal_write_callback(conn);
@@ -1352,19 +1391,21 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
write_unlock_bh(global_lock);
- if (warn != NULL) {
+ if (warn) {
if (rc < 0)
CERROR("Not creating conn %s type %d: %s\n",
libcfs_id2str(peerid), conn->ksnc_type, warn);
else
CDEBUG(D_NET, "Not creating conn %s type %d: %s\n",
- libcfs_id2str(peerid), conn->ksnc_type, warn);
+ libcfs_id2str(peerid), conn->ksnc_type, warn);
}
if (!active) {
if (rc > 0) {
- /* Request retry by replying with CONN_NONE
- * ksnc_proto has been set already */
+ /*
+ * Request retry by replying with CONN_NONE
+ * ksnc_proto has been set already
+ */
conn->ksnc_type = SOCKLND_CONN_NONE;
hello->kshm_nips = 0;
ksocknal_send_hello(ni, conn, peerid.nid, hello);
@@ -1379,7 +1420,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
ksocknal_peer_decref(peer);
failed_1:
- if (hello != NULL)
+ if (hello)
LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t,
kshm_ips[LNET_MAX_INTERFACES]));
@@ -1393,15 +1434,17 @@ failed_0:
void
ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
{
- /* This just does the immmediate housekeeping, and queues the
+ /*
+ * This just does the immediate housekeeping, and queues the
* connection for the reaper to terminate.
- * Caller holds ksnd_global_lock exclusively in irq context */
+ * Caller holds ksnd_global_lock exclusively in irq context
+ */
ksock_peer_t *peer = conn->ksnc_peer;
ksock_route_t *route;
ksock_conn_t *conn2;
struct list_head *tmp;
- LASSERT(peer->ksnp_error == 0);
+ LASSERT(!peer->ksnp_error);
LASSERT(!conn->ksnc_closing);
conn->ksnc_closing = 1;
@@ -1409,10 +1452,10 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
list_del(&conn->ksnc_list);
route = conn->ksnc_route;
- if (route != NULL) {
+ if (route) {
/* dissociate conn from route... */
LASSERT(!route->ksnr_deleted);
- LASSERT((route->ksnr_connected & (1 << conn->ksnc_type)) != 0);
+ LASSERT(route->ksnr_connected & (1 << conn->ksnc_type));
conn2 = NULL;
list_for_each(tmp, &peer->ksnp_conns) {
@@ -1424,7 +1467,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
conn2 = NULL;
}
- if (conn2 == NULL)
+ if (!conn2)
route->ksnr_connected &= ~(1 << conn->ksnc_type);
conn->ksnc_route = NULL;
@@ -1445,15 +1488,17 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
- /* throw them to the last connection...,
- * these TXs will be send to /dev/null by scheduler */
+ /*
+ * throw them to the last connection...,
+ * these TXs will be sent to /dev/null by the scheduler
+ */
list_for_each_entry(tx, &peer->ksnp_tx_queue,
- tx_list)
+ tx_list)
ksocknal_tx_prep(conn, tx);
spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
list_splice_init(&peer->ksnp_tx_queue,
- &conn->ksnc_tx_queue);
+ &conn->ksnc_tx_queue);
spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
}
@@ -1461,8 +1506,10 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
peer->ksnp_error = error; /* stash last conn close reason */
if (list_empty(&peer->ksnp_routes)) {
- /* I've just closed last conn belonging to a
- * peer with no routes to it */
+ /*
+ * I've just closed last conn belonging to a
+ * peer with no routes to it
+ */
ksocknal_unlink_peer_locked(peer);
}
}
@@ -1470,7 +1517,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
list_add_tail(&conn->ksnc_list,
- &ksocknal_data.ksnd_deathrow_conns);
+ &ksocknal_data.ksnd_deathrow_conns);
wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -1482,16 +1529,17 @@ ksocknal_peer_failed(ksock_peer_t *peer)
int notify = 0;
unsigned long last_alive = 0;
- /* There has been a connection failure or comms error; but I'll only
+ /*
+ * There has been a connection failure or comms error; but I'll only
* tell LNET I think the peer is dead if it's to another kernel and
- * there are no connections or connection attempts in existence. */
-
+ * there are no connections or connection attempts in existence.
+ */
read_lock(&ksocknal_data.ksnd_global_lock);
- if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
+ if (!(peer->ksnp_id.pid & LNET_PID_USERFLAG) &&
list_empty(&peer->ksnp_conns) &&
- peer->ksnp_accepting == 0 &&
- ksocknal_find_connecting_route_locked(peer) == NULL) {
+ !peer->ksnp_accepting &&
+ !ksocknal_find_connecting_route_locked(peer)) {
notify = 1;
last_alive = peer->ksnp_last_alive;
}
@@ -1500,7 +1548,7 @@ ksocknal_peer_failed(ksock_peer_t *peer)
if (notify)
lnet_notify(peer->ksnp_ni, peer->ksnp_id.nid, 0,
- last_alive);
+ last_alive);
}
void
@@ -1508,12 +1556,15 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
{
ksock_peer_t *peer = conn->ksnc_peer;
ksock_tx_t *tx;
+ ksock_tx_t *temp;
ksock_tx_t *tmp;
LIST_HEAD(zlist);
- /* NB safe to finalize TXs because closing of socket will
- * abort all buffered data */
- LASSERT(conn->ksnc_sock == NULL);
+ /*
+ * NB safe to finalize TXs because closing of socket will
+ * abort all buffered data
+ */
+ LASSERT(!conn->ksnc_sock);
spin_lock(&peer->ksnp_lock);
@@ -1521,7 +1572,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
if (tx->tx_conn != conn)
continue;
- LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0);
+ LASSERT(tx->tx_msg.ksm_zc_cookies[0]);
tx->tx_msg.ksm_zc_cookies[0] = 0;
tx->tx_zc_aborted = 1; /* mark it as not-acked */
@@ -1531,9 +1582,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
spin_unlock(&peer->ksnp_lock);
- while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
-
+ list_for_each_entry_safe(tx, temp, &zlist, tx_zc_list) {
list_del(&tx->tx_zc_list);
ksocknal_tx_decref(tx);
}
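The hunk above swaps an open-coded drain loop for list_for_each_entry_safe(), whose extra cursor caches the next entry before the body runs, so the current entry can be deleted and freed mid-walk. A minimal userspace sketch of the same idea, using a plain singly linked list rather than the kernel's list macros:

#include <stdio.h>
#include <stdlib.h>

struct node {
        int val;
        struct node *next;
};

int main(void)
{
        struct node *head = NULL;
        struct node *n, *next;
        int i;

        for (i = 0; i < 3; i++) {       /* build a small list */
                n = malloc(sizeof(*n));
                if (!n)
                        return 1;
                n->val = i;
                n->next = head;
                head = n;
        }

        /*
         * "safe" traversal: cache ->next before freeing the current
         * node, which is exactly what the extra cursor of
         * list_for_each_entry_safe() is for
         */
        for (n = head; n; n = next) {
                next = n->next;
                printf("freeing %d\n", n->val);
                free(n);
        }
        return 0;
}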
@@ -1542,10 +1591,12 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
void
ksocknal_terminate_conn(ksock_conn_t *conn)
{
- /* This gets called by the reaper (guaranteed thread context) to
+ /*
+ * This gets called by the reaper (guaranteed thread context) to
* disengage the socket from its callbacks and close it.
* ksnc_refcount will eventually hit zero, and then the reaper will
- * destroy it. */
+ * destroy it.
+ */
ksock_peer_t *peer = conn->ksnc_peer;
ksock_sched_t *sched = conn->ksnc_scheduler;
int failed = 0;
@@ -1561,7 +1612,7 @@ ksocknal_terminate_conn(ksock_conn_t *conn)
if (!conn->ksnc_tx_scheduled &&
!list_empty(&conn->ksnc_tx_queue)) {
list_add_tail(&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
+ &sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
/* extra ref for scheduler */
ksocknal_conn_addref(conn);
@@ -1576,11 +1627,13 @@ ksocknal_terminate_conn(ksock_conn_t *conn)
ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
- /* OK, so this conn may not be completely disengaged from its
- * scheduler yet, but it _has_ committed to terminate... */
+ /*
+ * OK, so this conn may not be completely disengaged from its
+ * scheduler yet, but it _has_ committed to terminate...
+ */
conn->ksnc_scheduler->kss_nconns--;
- if (peer->ksnp_error != 0) {
+ if (peer->ksnp_error) {
/* peer's last conn closed in error */
LASSERT(list_empty(&peer->ksnp_conns));
failed = 1;
@@ -1592,11 +1645,13 @@ ksocknal_terminate_conn(ksock_conn_t *conn)
if (failed)
ksocknal_peer_failed(peer);
- /* The socket is closed on the final put; either here, or in
+ /*
+ * The socket is closed on the final put; either here, or in
* ksocknal_{send,recv}msg(). Since we set up the linger2 option
* when the connection was established, this will close the socket
* immediately, aborting anything buffered in it. Any hung
- * zero-copy transmits will therefore complete in finite time. */
+ * zero-copy transmits will therefore complete in finite time.
+ */
ksocknal_connsock_decref(conn);
}
@@ -1605,7 +1660,7 @@ ksocknal_queue_zombie_conn(ksock_conn_t *conn)
{
/* Queue the conn for the reaper to destroy */
- LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
+ LASSERT(!atomic_read(&conn->ksnc_conn_refcount));
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
@@ -1622,10 +1677,10 @@ ksocknal_destroy_conn(ksock_conn_t *conn)
/* Final coup-de-grace of the reaper */
CDEBUG(D_NET, "connection %p\n", conn);
- LASSERT(atomic_read(&conn->ksnc_conn_refcount) == 0);
- LASSERT(atomic_read(&conn->ksnc_sock_refcount) == 0);
- LASSERT(conn->ksnc_sock == NULL);
- LASSERT(conn->ksnc_route == NULL);
+ LASSERT(!atomic_read(&conn->ksnc_conn_refcount));
+ LASSERT(!atomic_read(&conn->ksnc_sock_refcount));
+ LASSERT(!conn->ksnc_sock);
+ LASSERT(!conn->ksnc_route);
LASSERT(!conn->ksnc_tx_scheduled);
LASSERT(!conn->ksnc_rx_scheduled);
LASSERT(list_empty(&conn->ksnc_tx_queue));
@@ -1642,7 +1697,7 @@ ksocknal_destroy_conn(ksock_conn_t *conn)
cfs_duration_sec(cfs_time_sub(cfs_time_current(),
last_rcv)));
lnet_finalize(conn->ksnc_peer->ksnp_ni,
- conn->ksnc_cookie, -EIO);
+ conn->ksnc_cookie, -EIO);
break;
case SOCKNAL_RX_LNET_HEADER:
if (conn->ksnc_rx_started)
@@ -1685,8 +1740,7 @@ ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why)
list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
- if (ipaddr == 0 ||
- conn->ksnc_ipaddr == ipaddr) {
+ if (!ipaddr || conn->ksnc_ipaddr == ipaddr) {
count++;
ksocknal_close_conn_locked(conn, why);
}
@@ -1724,17 +1778,17 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- if (id.nid != LNET_NID_ANY)
- lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
- else {
+ if (id.nid != LNET_NID_ANY) {
+ lo = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
+ hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
+ } else {
lo = 0;
hi = ksocknal_data.ksnd_peer_hash_size - 1;
}
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt,
- &ksocknal_data.ksnd_peers[i]) {
-
+ &ksocknal_data.ksnd_peers[i]) {
peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
@@ -1748,10 +1802,10 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
/* wildcards always succeed */
- if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
+ if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || !ipaddr)
return 0;
- if (count == 0)
+ if (!count)
return -ENOENT;
else
return 0;
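The lo/hi setup rewritten above narrows the scan to a single hash bucket when a concrete NID is given and widens it to the whole peer table for LNET_NID_ANY. A small sketch of that pattern, with a hypothetical hash_of() standing in for ksocknal_nid2peerlist():

#include <stdio.h>

#define HASH_SIZE 16
#define ANY_ID -1

static int hash_of(int id)
{
        return id % HASH_SIZE;  /* stand-in for the real bucket lookup */
}

static void scan(int id)
{
        int lo, hi, i;

        if (id != ANY_ID) {
                lo = hash_of(id);       /* one specific bucket */
                hi = hash_of(id);
        } else {
                lo = 0;                 /* wildcard: walk the whole table */
                hi = HASH_SIZE - 1;
        }

        for (i = lo; i <= hi; i++)
                printf("scanning bucket %d\n", i);
}

int main(void)
{
        scan(42);
        scan(ANY_ID);
        return 0;
}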
@@ -1760,15 +1814,17 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
void
ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
{
- /* The router is telling me she's been notified of a change in
- * gateway state.... */
+ /*
+ * The router is telling me she's been notified of a change in
+ * gateway state....
+ */
lnet_process_id_t id = {0};
id.nid = gw_nid;
id.pid = LNET_PID_ANY;
CDEBUG(D_NET, "gw %s %s\n", libcfs_nid2str(gw_nid),
- alive ? "up" : "down");
+ alive ? "up" : "down");
if (!alive) {
/* If the gateway crashed, close all open connections... */
@@ -1776,8 +1832,10 @@ ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive)
return;
}
- /* ...otherwise do nothing. We can only establish new connections
- * if we have autroutes, and these connect on demand. */
+ /*
+ * ...otherwise do nothing. We can only establish new connections
+ * if we have autoroutes, and these connect on demand.
+ */
}
void
@@ -1788,12 +1846,15 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
unsigned long now = cfs_time_current();
ksock_peer_t *peer = NULL;
rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
- lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
+ lnet_process_id_t id = {
+ .nid = nid,
+ .pid = LNET_PID_LUSTRE,
+ };
read_lock(glock);
peer = ksocknal_find_peer_locked(ni, id);
- if (peer != NULL) {
+ if (peer) {
struct list_head *tmp;
ksock_conn_t *conn;
int bufnob;
@@ -1812,13 +1873,13 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
}
last_alive = peer->ksnp_last_alive;
- if (ksocknal_find_connectable_route_locked(peer) == NULL)
+ if (!ksocknal_find_connectable_route_locked(peer))
connect = 0;
}
read_unlock(glock);
- if (last_alive != 0)
+ if (last_alive)
*when = last_alive;
CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago, connect %d\n",
@@ -1834,7 +1895,7 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
write_lock_bh(glock);
peer = ksocknal_find_peer_locked(ni, id);
- if (peer != NULL)
+ if (peer)
ksocknal_launch_all_connections_locked(peer);
write_unlock_bh(glock);
@@ -1857,7 +1918,7 @@ ksocknal_push_peer(ksock_peer_t *peer)
list_for_each(tmp, &peer->ksnp_conns) {
if (i++ == index) {
conn = list_entry(tmp, ksock_conn_t,
- ksnc_list);
+ ksnc_list);
ksocknal_conn_addref(conn);
break;
}
@@ -1865,7 +1926,7 @@ ksocknal_push_peer(ksock_peer_t *peer)
read_unlock(&ksocknal_data.ksnd_global_lock);
- if (conn == NULL)
+ if (!conn)
break;
ksocknal_lib_push_conn(conn);
@@ -1885,7 +1946,8 @@ static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
start = &ksocknal_data.ksnd_peers[0];
end = &ksocknal_data.ksnd_peers[hsize - 1];
} else {
- start = end = ksocknal_nid2peerlist(id.nid);
+ start = ksocknal_nid2peerlist(id.nid);
+ end = ksocknal_nid2peerlist(id.nid);
}
for (tmp = start; tmp <= end; tmp++) {
@@ -1910,7 +1972,7 @@ static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
}
read_unlock(&ksocknal_data.ksnd_global_lock);
- if (i == 0) /* no match */
+ if (!i) /* no match */
break;
rc = 0;
@@ -1934,14 +1996,13 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
struct list_head *rtmp;
ksock_route_t *route;
- if (ipaddress == 0 ||
- netmask == 0)
+ if (!ipaddress || !netmask)
return -EINVAL;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
iface = ksocknal_ip2iface(ni, ipaddress);
- if (iface != NULL) {
+ if (iface) {
/* silently ignore dups */
rc = 0;
} else if (net->ksnn_ninterfaces == LNET_MAX_INTERFACES) {
@@ -1957,16 +2018,15 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
peer = list_entry(ptmp, ksock_peer_t,
- ksnp_list);
+ ksnp_list);
for (j = 0; j < peer->ksnp_n_passive_ips; j++)
if (peer->ksnp_passive_ips[j] == ipaddress)
iface->ksni_npeers++;
list_for_each(rtmp, &peer->ksnp_routes) {
- route = list_entry(rtmp,
- ksock_route_t,
- ksnr_list);
+ route = list_entry(rtmp, ksock_route_t,
+ ksnr_list);
if (route->ksnr_myipaddr == ipaddress)
iface->ksni_nroutes++;
@@ -1995,8 +2055,8 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
for (i = 0; i < peer->ksnp_n_passive_ips; i++)
if (peer->ksnp_passive_ips[i] == ipaddr) {
- for (j = i+1; j < peer->ksnp_n_passive_ips; j++)
- peer->ksnp_passive_ips[j-1] =
+ for (j = i + 1; j < peer->ksnp_n_passive_ips; j++)
+ peer->ksnp_passive_ips[j - 1] =
peer->ksnp_passive_ips[j];
peer->ksnp_n_passive_ips--;
break;
@@ -2008,7 +2068,7 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
if (route->ksnr_myipaddr != ipaddr)
continue;
- if (route->ksnr_share_count != 0) {
+ if (route->ksnr_share_count) {
/* Manually created; keep, but unbind */
route->ksnr_myipaddr = 0;
} else {
@@ -2041,23 +2101,21 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
for (i = 0; i < net->ksnn_ninterfaces; i++) {
this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
- if (!(ipaddress == 0 ||
- ipaddress == this_ip))
+ if (!(!ipaddress || ipaddress == this_ip))
continue;
rc = 0;
- for (j = i+1; j < net->ksnn_ninterfaces; j++)
- net->ksnn_interfaces[j-1] =
+ for (j = i + 1; j < net->ksnn_ninterfaces; j++)
+ net->ksnn_interfaces[j - 1] =
net->ksnn_interfaces[j];
net->ksnn_ninterfaces--;
for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
list_for_each_safe(tmp, nxt,
- &ksocknal_data.ksnd_peers[j]) {
- peer = list_entry(tmp, ksock_peer_t,
- ksnp_list);
+ &ksocknal_data.ksnd_peers[j]) {
+ peer = list_entry(tmp, ksock_peer_t, ksnp_list);
if (peer->ksnp_ni != ni)
continue;
@@ -2121,7 +2179,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
rc = ksocknal_get_peer_info(ni, data->ioc_count,
&id, &myip, &ip, &port,
&conn_count, &share_count);
- if (rc != 0)
+ if (rc)
return rc;
data->ioc_nid = id.nid;
@@ -2136,7 +2194,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
case IOC_LIBCFS_ADD_PEER:
id.nid = data->ioc_nid;
- id.pid = LUSTRE_SRV_LNET_PID;
+ id.pid = LNET_PID_LUSTRE;
return ksocknal_add_peer(ni, id,
data->ioc_u32[0], /* IP */
data->ioc_u32[1]); /* port */
@@ -2153,7 +2211,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
int nagle;
ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
- if (conn == NULL)
+ if (!conn)
return -ENOENT;
ksocknal_lib_get_conn_tunables(conn, &txmem, &rxmem, &nagle);
@@ -2202,14 +2260,14 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
static void
ksocknal_free_buffers(void)
{
- LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0);
+ LASSERT(!atomic_read(&ksocknal_data.ksnd_nactive_txs));
- if (ksocknal_data.ksnd_sched_info != NULL) {
+ if (ksocknal_data.ksnd_sched_info) {
struct ksock_sched_info *info;
int i;
cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
- if (info->ksi_scheds != NULL) {
+ if (info->ksi_scheds) {
LIBCFS_FREE(info->ksi_scheds,
info->ksi_nthreads_max *
sizeof(info->ksi_scheds[0]));
@@ -2219,21 +2277,21 @@ ksocknal_free_buffers(void)
}
LIBCFS_FREE(ksocknal_data.ksnd_peers,
- sizeof(struct list_head) *
- ksocknal_data.ksnd_peer_hash_size);
+ sizeof(struct list_head) *
+ ksocknal_data.ksnd_peer_hash_size);
spin_lock(&ksocknal_data.ksnd_tx_lock);
if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
struct list_head zlist;
ksock_tx_t *tx;
+ ksock_tx_t *temp;
list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
spin_unlock(&ksocknal_data.ksnd_tx_lock);
- while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, ksock_tx_t, tx_list);
+ list_for_each_entry_safe(tx, temp, &zlist, tx_list) {
list_del(&tx->tx_list);
LIBCFS_FREE(tx, tx->tx_desc_size);
}
@@ -2250,7 +2308,7 @@ ksocknal_base_shutdown(void)
int i;
int j;
- LASSERT(ksocknal_data.ksnd_nnets == 0);
+ LASSERT(!ksocknal_data.ksnd_nnets);
switch (ksocknal_data.ksnd_init) {
default:
@@ -2258,7 +2316,7 @@ ksocknal_base_shutdown(void)
case SOCKNAL_INIT_ALL:
case SOCKNAL_INIT_DATA:
- LASSERT(ksocknal_data.ksnd_peers != NULL);
+ LASSERT(ksocknal_data.ksnd_peers);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
LASSERT(list_empty(&ksocknal_data.ksnd_peers[i]));
@@ -2268,14 +2326,13 @@ ksocknal_base_shutdown(void)
LASSERT(list_empty(&ksocknal_data.ksnd_connd_connreqs));
LASSERT(list_empty(&ksocknal_data.ksnd_connd_routes));
- if (ksocknal_data.ksnd_sched_info != NULL) {
+ if (ksocknal_data.ksnd_sched_info) {
cfs_percpt_for_each(info, i,
ksocknal_data.ksnd_sched_info) {
- if (info->ksi_scheds == NULL)
+ if (!info->ksi_scheds)
continue;
for (j = 0; j < info->ksi_nthreads_max; j++) {
-
sched = &info->ksi_scheds[j];
LASSERT(list_empty(
&sched->kss_tx_conns));
@@ -2283,7 +2340,7 @@ ksocknal_base_shutdown(void)
&sched->kss_rx_conns));
LASSERT(list_empty(
&sched->kss_zombie_noop_txs));
- LASSERT(sched->kss_nconns == 0);
+ LASSERT(!sched->kss_nconns);
}
}
}
@@ -2293,10 +2350,10 @@ ksocknal_base_shutdown(void)
wake_up_all(&ksocknal_data.ksnd_connd_waitq);
wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
- if (ksocknal_data.ksnd_sched_info != NULL) {
+ if (ksocknal_data.ksnd_sched_info) {
cfs_percpt_for_each(info, i,
ksocknal_data.ksnd_sched_info) {
- if (info->ksi_scheds == NULL)
+ if (!info->ksi_scheds)
continue;
for (j = 0; j < info->ksi_nthreads_max; j++) {
@@ -2308,7 +2365,7 @@ ksocknal_base_shutdown(void)
i = 4;
read_lock(&ksocknal_data.ksnd_global_lock);
- while (ksocknal_data.ksnd_nthreads != 0) {
+ while (ksocknal_data.ksnd_nthreads) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"waiting for %d threads to terminate\n",
@@ -2332,7 +2389,6 @@ ksocknal_base_shutdown(void)
static __u64
ksocknal_new_incarnation(void)
{
-
/* The incarnation number is the time this module loaded and it
* identifies this particular instance of the socknal.
*/
@@ -2347,15 +2403,15 @@ ksocknal_base_startup(void)
int i;
LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING);
- LASSERT(ksocknal_data.ksnd_nnets == 0);
+ LASSERT(!ksocknal_data.ksnd_nnets);
memset(&ksocknal_data, 0, sizeof(ksocknal_data)); /* zero pointers */
ksocknal_data.ksnd_peer_hash_size = SOCKNAL_PEER_HASH_SIZE;
LIBCFS_ALLOC(ksocknal_data.ksnd_peers,
- sizeof(struct list_head) *
- ksocknal_data.ksnd_peer_hash_size);
- if (ksocknal_data.ksnd_peers == NULL)
+ sizeof(struct list_head) *
+ ksocknal_data.ksnd_peer_hash_size);
+ if (!ksocknal_data.ksnd_peers)
return -ENOMEM;
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
@@ -2386,7 +2442,7 @@ ksocknal_base_startup(void)
ksocknal_data.ksnd_sched_info = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(*info));
- if (ksocknal_data.ksnd_sched_info == NULL)
+ if (!ksocknal_data.ksnd_sched_info)
goto failed;
cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
@@ -2397,8 +2453,10 @@ ksocknal_base_startup(void)
if (*ksocknal_tunables.ksnd_nscheds > 0) {
nthrs = min(nthrs, *ksocknal_tunables.ksnd_nscheds);
} else {
- /* max to half of CPUs, assume another half should be
- * reserved for upper layer modules */
+ /*
+ * cap at half of the CPUs; assume the other half should be
+ * reserved for upper-layer modules
+ */
nthrs = min(max(SOCKNAL_NSCHEDS, nthrs >> 1), nthrs);
}
@@ -2407,7 +2465,7 @@ ksocknal_base_startup(void)
LIBCFS_CPT_ALLOC(info->ksi_scheds, lnet_cpt_table(), i,
info->ksi_nthreads_max * sizeof(*sched));
- if (info->ksi_scheds == NULL)
+ if (!info->ksi_scheds)
goto failed;
for (; nthrs > 0; nthrs--) {
@@ -2425,8 +2483,10 @@ ksocknal_base_startup(void)
ksocknal_data.ksnd_connd_starting = 0;
ksocknal_data.ksnd_connd_failed_stamp = 0;
ksocknal_data.ksnd_connd_starting_stamp = ktime_get_real_seconds();
- /* must have at least 2 connds to remain responsive to accepts while
- * connecting */
+ /*
+ * must have at least 2 connds to remain responsive to accepts while
+ * connecting
+ */
if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1)
*ksocknal_tunables.ksnd_nconnds = SOCKNAL_CONND_RESV + 1;
@@ -2446,7 +2506,7 @@ ksocknal_base_startup(void)
snprintf(name, sizeof(name), "socknal_cd%02d", i);
rc = ksocknal_thread_start(ksocknal_connd,
(void *)((ulong_ptr_t)i), name);
- if (rc != 0) {
+ if (rc) {
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
ksocknal_data.ksnd_connd_starting--;
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
@@ -2456,7 +2516,7 @@ ksocknal_base_startup(void)
}
rc = ksocknal_thread_start(ksocknal_reaper, NULL, "socknal_reaper");
- if (rc != 0) {
+ if (rc) {
CERROR("Can't spawn socknal reaper: %d\n", rc);
goto failed;
}
@@ -2491,7 +2551,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
}
}
- if (peer != NULL) {
+ if (peer) {
ksock_route_t *route;
ksock_conn_t *conn;
@@ -2515,9 +2575,9 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
list_for_each(tmp, &peer->ksnp_conns) {
conn = list_entry(tmp, ksock_conn_t, ksnc_list);
CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
- atomic_read(&conn->ksnc_conn_refcount),
- atomic_read(&conn->ksnc_sock_refcount),
- conn->ksnc_type, conn->ksnc_closing);
+ atomic_read(&conn->ksnc_conn_refcount),
+ atomic_read(&conn->ksnc_sock_refcount),
+ conn->ksnc_type, conn->ksnc_closing);
}
}
@@ -2548,7 +2608,7 @@ ksocknal_shutdown(lnet_ni_t *ni)
/* Wait for all peer state to clean up */
i = 2;
spin_lock_bh(&net->ksnn_lock);
- while (net->ksnn_npeers != 0) {
+ while (net->ksnn_npeers) {
spin_unlock_bh(&net->ksnn_lock);
i++;
@@ -2565,15 +2625,15 @@ ksocknal_shutdown(lnet_ni_t *ni)
spin_unlock_bh(&net->ksnn_lock);
for (i = 0; i < net->ksnn_ninterfaces; i++) {
- LASSERT(net->ksnn_interfaces[i].ksni_npeers == 0);
- LASSERT(net->ksnn_interfaces[i].ksni_nroutes == 0);
+ LASSERT(!net->ksnn_interfaces[i].ksni_npeers);
+ LASSERT(!net->ksnn_interfaces[i].ksni_nroutes);
}
list_del(&net->ksnn_list);
LIBCFS_FREE(net, sizeof(*net));
ksocknal_data.ksnd_nnets--;
- if (ksocknal_data.ksnd_nnets == 0)
+ if (!ksocknal_data.ksnd_nnets)
ksocknal_base_shutdown();
}
@@ -2601,7 +2661,7 @@ ksocknal_enumerate_interfaces(ksock_net_t *net)
continue;
rc = lnet_ipif_query(names[i], &up, &ip, &mask);
- if (rc != 0) {
+ if (rc) {
CWARN("Can't get interface %s info: %d\n",
names[i], rc);
continue;
@@ -2628,7 +2688,7 @@ ksocknal_enumerate_interfaces(ksock_net_t *net)
lnet_ipif_free_enumeration(names, n);
- if (j == 0)
+ if (!j)
CERROR("Can't find any usable interfaces\n");
return j;
@@ -2647,21 +2707,20 @@ ksocknal_search_new_ipif(ksock_net_t *net)
ksock_net_t *tmp;
int j;
- if (colon != NULL) /* ignore alias device */
+ if (colon) /* ignore alias device */
*colon = 0;
- list_for_each_entry(tmp, &ksocknal_data.ksnd_nets,
- ksnn_list) {
+ list_for_each_entry(tmp, &ksocknal_data.ksnd_nets, ksnn_list) {
for (j = 0; !found && j < tmp->ksnn_ninterfaces; j++) {
char *ifnam2 =
&tmp->ksnn_interfaces[j].ksni_name[0];
char *colon2 = strchr(ifnam2, ':');
- if (colon2 != NULL)
+ if (colon2)
*colon2 = 0;
- found = strcmp(ifnam, ifnam2) == 0;
- if (colon2 != NULL)
+ found = !strcmp(ifnam, ifnam2);
+ if (colon2)
*colon2 = ':';
}
if (found)
@@ -2669,7 +2728,7 @@ ksocknal_search_new_ipif(ksock_net_t *net)
}
new_ipif += !found;
- if (colon != NULL)
+ if (colon)
*colon = ':';
}
@@ -2683,7 +2742,7 @@ ksocknal_start_schedulers(struct ksock_sched_info *info)
int rc = 0;
int i;
- if (info->ksi_nthreads == 0) {
+ if (!info->ksi_nthreads) {
if (*ksocknal_tunables.ksnd_nscheds > 0) {
nthrs = info->ksi_nthreads_max;
} else {
@@ -2711,7 +2770,7 @@ ksocknal_start_schedulers(struct ksock_sched_info *info)
rc = ksocknal_thread_start(ksocknal_scheduler,
(void *)id, name);
- if (rc == 0)
+ if (!rc)
continue;
CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
@@ -2734,7 +2793,7 @@ ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
for (i = 0; i < ncpts; i++) {
struct ksock_sched_info *info;
- int cpt = (cpts == NULL) ? i : cpts[i];
+ int cpt = !cpts ? i : cpts[i];
LASSERT(cpt < cfs_cpt_number(lnet_cpt_table()));
info = ksocknal_data.ksnd_sched_info[cpt];
@@ -2743,7 +2802,7 @@ ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
continue;
rc = ksocknal_start_schedulers(info);
- if (rc != 0)
+ if (rc)
return rc;
}
return 0;
@@ -2760,12 +2819,12 @@ ksocknal_startup(lnet_ni_t *ni)
if (ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING) {
rc = ksocknal_base_startup();
- if (rc != 0)
+ if (rc)
return rc;
}
LIBCFS_ALLOC(net, sizeof(*net));
- if (net == NULL)
+ if (!net)
goto fail_0;
spin_lock_init(&net->ksnn_lock);
@@ -2776,7 +2835,7 @@ ksocknal_startup(lnet_ni_t *ni)
ni->ni_peertxcredits = *ksocknal_tunables.ksnd_peertxcredits;
ni->ni_peerrtrcredits = *ksocknal_tunables.ksnd_peerrtrcredits;
- if (ni->ni_interfaces[0] == NULL) {
+ if (!ni->ni_interfaces[0]) {
rc = ksocknal_enumerate_interfaces(net);
if (rc <= 0)
goto fail_1;
@@ -2786,14 +2845,14 @@ ksocknal_startup(lnet_ni_t *ni)
for (i = 0; i < LNET_MAX_INTERFACES; i++) {
int up;
- if (ni->ni_interfaces[i] == NULL)
+ if (!ni->ni_interfaces[i])
break;
rc = lnet_ipif_query(ni->ni_interfaces[i], &up,
- &net->ksnn_interfaces[i].ksni_ipaddr,
- &net->ksnn_interfaces[i].ksni_netmask);
+ &net->ksnn_interfaces[i].ksni_ipaddr,
+ &net->ksnn_interfaces[i].ksni_netmask);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't get interface %s info: %d\n",
ni->ni_interfaces[i], rc);
goto fail_1;
@@ -2814,7 +2873,7 @@ ksocknal_startup(lnet_ni_t *ni)
/* call it before add it to ksocknal_data.ksnd_nets */
rc = ksocknal_net_start_threads(net, ni->ni_cpts, ni->ni_ncpts);
- if (rc != 0)
+ if (rc)
goto fail_1;
ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
@@ -2828,20 +2887,18 @@ ksocknal_startup(lnet_ni_t *ni)
fail_1:
LIBCFS_FREE(net, sizeof(*net));
fail_0:
- if (ksocknal_data.ksnd_nnets == 0)
+ if (!ksocknal_data.ksnd_nnets)
ksocknal_base_shutdown();
return -ENETDOWN;
}
-static void __exit
-ksocknal_module_fini(void)
+static void __exit ksocklnd_exit(void)
{
lnet_unregister_lnd(&the_ksocklnd);
}
-static int __init
-ksocknal_module_init(void)
+static int __init ksocklnd_init(void)
{
int rc;
@@ -2861,7 +2918,7 @@ ksocknal_module_init(void)
the_ksocklnd.lnd_accept = ksocknal_accept;
rc = ksocknal_tunables_init();
- if (rc != 0)
+ if (rc)
return rc;
lnet_register_lnd(&the_ksocklnd);
@@ -2870,9 +2927,9 @@ ksocknal_module_init(void)
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Kernel TCP Socket LND v3.0.0");
+MODULE_DESCRIPTION("TCP Socket LNet Network Driver");
+MODULE_VERSION("2.7.0");
MODULE_LICENSE("GPL");
-MODULE_VERSION("3.0.0");
-module_init(ksocknal_module_init);
-module_exit(ksocknal_module_fini);
+module_init(ksocklnd_init);
+module_exit(ksocklnd_exit);
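The rename above follows the common <name>_init/<name>_exit convention for module hooks and retags the module metadata. For reference, a generic skeleton with the same hook and metadata layout (an illustrative module, not socklnd):

#include <linux/init.h>
#include <linux/module.h>

static int __init example_init(void)
{
        pr_info("example: loaded\n");
        return 0;               /* a negative return aborts the load */
}

static void __exit example_exit(void)
{
        pr_info("example: unloaded\n");
}

module_init(example_init);
module_exit(example_exit);

MODULE_AUTHOR("Example Author");
MODULE_DESCRIPTION("Minimal module hook skeleton");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");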
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
index f4fa72550..a60d72f94 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
@@ -19,10 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#ifndef _SOCKLND_SOCKLND_H_
@@ -69,8 +65,10 @@
#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */
-/* risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled).
- * no risk if we're not running on a CONFIG_HIGHMEM platform. */
+/*
+ * risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled).
+ * no risk if we're not running on a CONFIG_HIGHMEM platform.
+ */
#ifdef CONFIG_HIGHMEM
# define SOCKNAL_RISK_KMAP_DEADLOCK 0
#else
@@ -237,15 +235,16 @@ typedef struct {
#define SOCKNAL_INIT_DATA 1
#define SOCKNAL_INIT_ALL 2
-/* A packet just assembled for transmission is represented by 1 or more
+/*
+ * A packet just assembled for transmission is represented by 1 or more
* struct iovec fragments (the first frag contains the portals header),
* followed by 0 or more lnet_kiov_t fragments.
*
* On the receive side, initially 1 struct iovec fragment is posted for
* receive (the header). Once the header has been received, the payload is
* received into either struct iovec or lnet_kiov_t fragments, depending on
- * what the header matched or whether the message needs forwarding. */
-
+ * what the header matched or whether the message needs forwarding.
+ */
struct ksock_conn; /* forward ref */
struct ksock_peer; /* forward ref */
struct ksock_route; /* forward ref */
@@ -284,12 +283,14 @@ typedef struct /* transmit packet */
} tx_frags;
} ksock_tx_t;
-#define KSOCK_NOOP_TX_SIZE ((int)offsetof(ksock_tx_t, tx_frags.paged.kiov[0]))
+#define KSOCK_NOOP_TX_SIZE (offsetof(ksock_tx_t, tx_frags.paged.kiov[0]))
/* network zero copy callback descriptor embedded in ksock_tx_t */
-/* space for the rx frag descriptors; we either read a single contiguous
- * header, or up to LNET_MAX_IOV frags of payload of either type. */
+/*
+ * space for the rx frag descriptors; we either read a single contiguous
+ * header, or up to LNET_MAX_IOV frags of payload of either type.
+ */
typedef union {
struct kvec iov[LNET_MAX_IOV];
lnet_kiov_t kiov[LNET_MAX_IOV];
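KSOCK_NOOP_TX_SIZE above sizes a NOOP tx descriptor with offsetof() so the allocation stops right before the payload fragment arrays it will never use; the (int) cast is dropped because offsetof() already yields a constant of a usable type. A userspace sketch of the truncated-allocation trick with a hypothetical struct pkt:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct pkt {
        int hdr;                /* always present */
        int frags[16];          /* payload frag slots, unused by a noop */
};

#define PKT_NOOP_SIZE offsetof(struct pkt, frags[0])

int main(void)
{
        /* a noop packet needs no frag slots, so allocate only up to frags[] */
        struct pkt *p = malloc(PKT_NOOP_SIZE);

        if (!p)
                return 1;
        p->hdr = 42;            /* fields before frags[] are safe to touch */
        printf("full %zu bytes, noop-sized %zu bytes\n",
               sizeof(struct pkt), PKT_NOOP_SIZE);
        free(p);
        return 0;
}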
@@ -463,11 +464,13 @@ typedef struct ksock_proto {
/* handle ZC ACK */
int (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64);
- /* msg type matches the connection type:
+ /*
+ * msg type matches the connection type:
* return value:
* return MATCH_NO : no
* return MATCH_YES : matching type
- * return MATCH_MAY : can be backup */
+ * return MATCH_MAY : can be backup
+ */
int (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int);
} ksock_proto_t;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 477b385f1..976fd7892 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -19,9 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "socklnd.h"
@@ -47,10 +44,10 @@ ksocknal_alloc_tx(int type, int size)
spin_unlock(&ksocknal_data.ksnd_tx_lock);
}
- if (tx == NULL)
+ if (!tx)
LIBCFS_ALLOC(tx, size);
- if (tx == NULL)
+ if (!tx)
return NULL;
atomic_set(&tx->tx_refcount, 1);
@@ -70,7 +67,7 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
ksock_tx_t *tx;
tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Can't allocate noop tx desc\n");
return NULL;
}
@@ -90,11 +87,11 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
}
void
-ksocknal_free_tx (ksock_tx_t *tx)
+ksocknal_free_tx(ksock_tx_t *tx)
{
atomic_dec(&ksocknal_data.ksnd_nactive_txs);
- if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
+ if (!tx->tx_lnetmsg && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
/* it's a noop tx */
spin_lock(&ksocknal_data.ksnd_tx_lock);
@@ -107,7 +104,7 @@ ksocknal_free_tx (ksock_tx_t *tx)
}
static int
-ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
{
struct kvec *iov = tx->tx_iov;
int nob;
@@ -122,7 +119,7 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
return rc;
nob = rc;
- LASSERT (nob <= tx->tx_resid);
+ LASSERT(nob <= tx->tx_resid);
tx->tx_resid -= nob;
/* "consume" iov */
@@ -138,19 +135,19 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
nob -= iov->iov_len;
tx->tx_iov = ++iov;
tx->tx_niov--;
- } while (nob != 0);
+ } while (nob);
return rc;
}
static int
-ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
{
lnet_kiov_t *kiov = tx->tx_kiov;
int nob;
int rc;
- LASSERT(tx->tx_niov == 0);
+ LASSERT(!tx->tx_niov);
LASSERT(tx->tx_nkiov > 0);
/* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
@@ -160,7 +157,7 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
return rc;
nob = rc;
- LASSERT (nob <= tx->tx_resid);
+ LASSERT(nob <= tx->tx_resid);
tx->tx_resid -= nob;
/* "consume" kiov */
@@ -176,27 +173,27 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
nob -= (int)kiov->kiov_len;
tx->tx_kiov = ++kiov;
tx->tx_nkiov--;
- } while (nob != 0);
+ } while (nob);
return rc;
}
static int
-ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
{
int rc;
int bufnob;
- if (ksocknal_data.ksnd_stall_tx != 0) {
+ if (ksocknal_data.ksnd_stall_tx) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
}
- LASSERT(tx->tx_resid != 0);
+ LASSERT(tx->tx_resid);
rc = ksocknal_connsock_addref(conn);
- if (rc != 0) {
- LASSERT (conn->ksnc_closing);
+ if (rc) {
+ LASSERT(conn->ksnc_closing);
return -ESHUTDOWN;
}
@@ -205,10 +202,10 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
/* testing... */
ksocknal_data.ksnd_enomem_tx--;
rc = -EAGAIN;
- } else if (tx->tx_niov != 0) {
- rc = ksocknal_send_iov (conn, tx);
+ } else if (tx->tx_niov) {
+ rc = ksocknal_send_iov(conn, tx);
} else {
- rc = ksocknal_send_kiov (conn, tx);
+ rc = ksocknal_send_kiov(conn, tx);
}
bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
@@ -216,8 +213,10 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
conn->ksnc_tx_bufnob += rc; /* account it */
if (bufnob < conn->ksnc_tx_bufnob) {
- /* allocated send buffer bytes < computed; infer
- * something got ACKed */
+ /*
+ * allocated send buffer bytes < computed; infer
+ * something got ACKed
+ */
conn->ksnc_tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
@@ -227,7 +226,7 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
if (rc <= 0) { /* Didn't write anything? */
- if (rc == 0) /* some stacks return 0 instead of -EAGAIN */
+ if (!rc) /* some stacks return 0 instead of -EAGAIN */
rc = -EAGAIN;
/* Check if EAGAIN is due to memory pressure */
@@ -238,17 +237,17 @@ ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
}
/* socket's wmem_queued now includes 'rc' bytes */
- atomic_sub (rc, &conn->ksnc_tx_nob);
+ atomic_sub(rc, &conn->ksnc_tx_nob);
rc = 0;
- } while (tx->tx_resid != 0);
+ } while (tx->tx_resid);
ksocknal_connsock_decref(conn);
return rc;
}
static int
-ksocknal_recv_iov (ksock_conn_t *conn)
+ksocknal_recv_iov(ksock_conn_t *conn)
{
struct kvec *iov = conn->ksnc_rx_iov;
int nob;
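ksocknal_transmit() above loops until tx_resid reaches zero, treating a 0 return from the socket layer as -EAGAIN (some stacks return 0 instead) and consuming whatever was partially written. A minimal userspace sketch of that partial-write loop, with fake_send() standing in for the socket call:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* stand-in for a nonblocking socket send that makes partial progress */
static int fake_send(const char *buf, int len)
{
        (void)buf;
        return len > 4 ? 4 : len;       /* bytes written; a real stack may
                                         * also return 0 or a -errno value */
}

int main(void)
{
        const char *msg = "hello, socklnd";
        int resid = (int)strlen(msg);
        int off = 0;

        do {
                int rc = fake_send(msg + off, resid);

                if (!rc)                /* map 0 to -EAGAIN, as above */
                        rc = -EAGAIN;
                if (rc < 0)
                        return 1;       /* a real caller retries on -EAGAIN */
                off += rc;              /* "consume" what was written */
                resid -= rc;
        } while (resid);

        printf("sent %d bytes\n", off);
        return 0;
}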
@@ -256,8 +255,10 @@ ksocknal_recv_iov (ksock_conn_t *conn)
LASSERT(conn->ksnc_rx_niov > 0);
- /* Never touch conn->ksnc_rx_iov or change connection
- * status inside ksocknal_lib_recv_iov */
+ /*
+ * Never touch conn->ksnc_rx_iov or change connection
+ * status inside ksocknal_lib_recv_iov
+ */
rc = ksocknal_lib_recv_iov(conn);
if (rc <= 0)
@@ -287,13 +288,13 @@ ksocknal_recv_iov (ksock_conn_t *conn)
nob -= iov->iov_len;
conn->ksnc_rx_iov = ++iov;
conn->ksnc_rx_niov--;
- } while (nob != 0);
+ } while (nob);
return rc;
}
static int
-ksocknal_recv_kiov (ksock_conn_t *conn)
+ksocknal_recv_kiov(ksock_conn_t *conn)
{
lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
int nob;
@@ -301,8 +302,10 @@ ksocknal_recv_kiov (ksock_conn_t *conn)
LASSERT(conn->ksnc_rx_nkiov > 0);
- /* Never touch conn->ksnc_rx_kiov or change connection
- * status inside ksocknal_lib_recv_iov */
+ /*
+ * Never touch conn->ksnc_rx_kiov or change connection
+ * status inside ksocknal_lib_recv_kiov
+ */
rc = ksocknal_lib_recv_kiov(conn);
if (rc <= 0)
@@ -332,41 +335,43 @@ ksocknal_recv_kiov (ksock_conn_t *conn)
nob -= kiov->kiov_len;
conn->ksnc_rx_kiov = ++kiov;
conn->ksnc_rx_nkiov--;
- } while (nob != 0);
+ } while (nob);
return 1;
}
static int
-ksocknal_receive (ksock_conn_t *conn)
+ksocknal_receive(ksock_conn_t *conn)
{
- /* Return 1 on success, 0 on EOF, < 0 on error.
+ /*
+ * Return 1 on success, 0 on EOF, < 0 on error.
* Caller checks ksnc_rx_nob_wanted to determine
- * progress/completion. */
+ * progress/completion.
+ */
int rc;
- if (ksocknal_data.ksnd_stall_rx != 0) {
+ if (ksocknal_data.ksnd_stall_rx) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
}
rc = ksocknal_connsock_addref(conn);
- if (rc != 0) {
- LASSERT (conn->ksnc_closing);
+ if (rc) {
+ LASSERT(conn->ksnc_closing);
return -ESHUTDOWN;
}
for (;;) {
- if (conn->ksnc_rx_niov != 0)
- rc = ksocknal_recv_iov (conn);
+ if (conn->ksnc_rx_niov)
+ rc = ksocknal_recv_iov(conn);
else
- rc = ksocknal_recv_kiov (conn);
+ rc = ksocknal_recv_kiov(conn);
if (rc <= 0) {
/* error/EOF or partial receive */
if (rc == -EAGAIN) {
rc = 1;
- } else if (rc == 0 && conn->ksnc_rx_started) {
+ } else if (!rc && conn->ksnc_rx_started) {
/* EOF in the middle of a message */
rc = -EPROTO;
}
@@ -375,7 +380,7 @@ ksocknal_receive (ksock_conn_t *conn)
/* Completed a fragment */
- if (conn->ksnc_rx_nob_wanted == 0) {
+ if (!conn->ksnc_rx_nob_wanted) {
rc = 1;
break;
}
@@ -386,36 +391,36 @@ ksocknal_receive (ksock_conn_t *conn)
}
void
-ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
+ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx)
{
lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
- int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
+ int rc = (!tx->tx_resid && !tx->tx_zc_aborted) ? 0 : -EIO;
- LASSERT(ni != NULL || tx->tx_conn != NULL);
+ LASSERT(ni || tx->tx_conn);
- if (tx->tx_conn != NULL)
+ if (tx->tx_conn)
ksocknal_conn_decref(tx->tx_conn);
- if (ni == NULL && tx->tx_conn != NULL)
+ if (!ni && tx->tx_conn)
ni = tx->tx_conn->ksnc_peer->ksnp_ni;
- ksocknal_free_tx (tx);
- if (lnetmsg != NULL) /* KSOCK_MSG_NOOP go without lnetmsg */
- lnet_finalize (ni, lnetmsg, rc);
+ ksocknal_free_tx(tx);
+ if (lnetmsg) /* KSOCK_MSG_NOOP go without lnetmsg */
+ lnet_finalize(ni, lnetmsg, rc);
}
void
-ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error)
+ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
{
ksock_tx_t *tx;
- while (!list_empty (txlist)) {
+ while (!list_empty(txlist)) {
tx = list_entry(txlist->next, ksock_tx_t, tx_list);
- if (error && tx->tx_lnetmsg != NULL) {
+ if (error && tx->tx_lnetmsg) {
CNETERR("Deleting packet type %d len %d %s->%s\n",
- le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type),
- le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length),
+ le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
+ le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length),
libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
} else if (error) {
@@ -435,12 +440,14 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
ksock_conn_t *conn = tx->tx_conn;
ksock_peer_t *peer = conn->ksnc_peer;
- /* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
+ /*
+ * Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
* to ksnp_zc_req_list if some fragment of this message should be sent
* zero-copy. Our peer will send an ACK containing this cookie when
* she has received this message to tell us we can signal completion.
* tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
- * ksnp_zc_req_list. */
+ * ksnp_zc_req_list.
+ */
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
LASSERT(tx->tx_zc_capable);
@@ -450,9 +457,10 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
!conn->ksnc_zc_capable)
return;
- /* assign cookie and queue tx to pending list, it will be released when
- * a matching ack is received. See ksocknal_handle_zcack() */
-
+ /*
+ * assign cookie and queue tx to pending list; it will be released when
+ * a matching ack is received. See ksocknal_handle_zcack()
+ */
ksocknal_tx_addref(tx);
spin_lock(&peer->ksnp_lock);
@@ -461,11 +469,11 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
tx->tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
- LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);
+ LASSERT(!tx->tx_msg.ksm_zc_cookies[0]);
tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;
- if (peer->ksnp_zc_next_cookie == 0)
+ if (!peer->ksnp_zc_next_cookie)
peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;
list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
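The zero-copy cookie assignment above increments ksnp_zc_next_cookie and, on wraparound, skips past zero (which means no ACK is pending) and the values reserved for keepalive pings. A small sketch of such an allocator; the reserved constant's value of 1 is assumed for illustration:

#include <stdio.h>

#define KEEPALIVE_PING 1ULL     /* assumed reserved value */

static unsigned long long next_cookie = ~0ULL;  /* start near wraparound */

static unsigned long long alloc_cookie(void)
{
        unsigned long long cookie = next_cookie++;

        /*
         * zero means "no ACK pending" and low values are reserved for
         * keepalive pings, so skip past both when the counter wraps
         */
        if (!next_cookie)
                next_cookie = KEEPALIVE_PING + 1;
        return cookie;
}

int main(void)
{
        int i;

        for (i = 0; i < 4; i++)
                printf("cookie %llu\n", alloc_cookie());
        return 0;
}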
@@ -485,7 +493,7 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx)
spin_lock(&peer->ksnp_lock);
- if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
+ if (!tx->tx_msg.ksm_zc_cookies[0]) {
/* Not waiting for an ACK */
spin_unlock(&peer->ksnp_lock);
return;
@@ -500,20 +508,20 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx)
}
static int
-ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
{
int rc;
if (tx->tx_zc_capable && !tx->tx_zc_checked)
ksocknal_check_zc_req(tx);
- rc = ksocknal_transmit (conn, tx);
+ rc = ksocknal_transmit(conn, tx);
CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);
- if (tx->tx_resid == 0) {
+ if (!tx->tx_resid) {
/* Sent everything OK */
- LASSERT (rc == 0);
+ LASSERT(!rc);
return 0;
}
@@ -532,13 +540,13 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
/* enomem list takes over scheduler's ref... */
- LASSERT (conn->ksnc_tx_scheduled);
+ LASSERT(conn->ksnc_tx_scheduled);
list_add_tail(&conn->ksnc_tx_list,
- &ksocknal_data.ksnd_enomem_conns);
+ &ksocknal_data.ksnd_enomem_conns);
if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
SOCKNAL_ENOMEM_RETRY),
ksocknal_data.ksnd_reaper_waketime))
- wake_up (&ksocknal_data.ksnd_reaper_waitq);
+ wake_up(&ksocknal_data.ksnd_reaper_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
return rc;
@@ -569,21 +577,19 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
ksocknal_uncheck_zc_req(tx);
/* it's not an error if conn is being closed */
- ksocknal_close_conn_and_siblings (conn,
- (conn->ksnc_closing) ? 0 : rc);
+ ksocknal_close_conn_and_siblings(conn, (conn->ksnc_closing) ? 0 : rc);
return rc;
}
static void
-ksocknal_launch_connection_locked (ksock_route_t *route)
+ksocknal_launch_connection_locked(ksock_route_t *route)
{
-
/* called holding write lock on ksnd_global_lock */
LASSERT(!route->ksnr_scheduled);
LASSERT(!route->ksnr_connecting);
- LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0);
+ LASSERT(ksocknal_route_mask() & ~route->ksnr_connected);
route->ksnr_scheduled = 1; /* scheduling conn for connd */
ksocknal_route_addref(route); /* extra ref for connd */
@@ -591,14 +597,14 @@ ksocknal_launch_connection_locked (ksock_route_t *route)
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
list_add_tail(&route->ksnr_connd_list,
- &ksocknal_data.ksnd_connd_routes);
+ &ksocknal_data.ksnd_connd_routes);
wake_up(&ksocknal_data.ksnd_connd_waitq);
spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
}
void
-ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
+ksocknal_launch_all_connections_locked(ksock_peer_t *peer)
{
ksock_route_t *route;
@@ -606,7 +612,7 @@ ksocknal_launch_all_connections_locked (ksock_peer_t *peer)
for (;;) {
/* launch any/all connections that need it */
route = ksocknal_find_connectable_route_locked(peer);
- if (route == NULL)
+ if (!route)
return;
ksocknal_launch_connection_locked(route);
@@ -623,15 +629,15 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
int tnob = 0;
int fnob = 0;
- list_for_each (tmp, &peer->ksnp_conns) {
+ list_for_each(tmp, &peer->ksnp_conns) {
ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list);
int nob = atomic_read(&c->ksnc_tx_nob) +
c->ksnc_sock->sk->sk_wmem_queued;
int rc;
LASSERT(!c->ksnc_closing);
- LASSERT(c->ksnc_proto != NULL &&
- c->ksnc_proto->pro_match_tx != NULL);
+ LASSERT(c->ksnc_proto &&
+ c->ksnc_proto->pro_match_tx);
rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);
@@ -642,7 +648,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
continue;
case SOCKNAL_MATCH_YES: /* typed connection */
- if (typed == NULL || tnob > nob ||
+ if (!typed || tnob > nob ||
(tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
typed = c;
@@ -651,7 +657,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
break;
case SOCKNAL_MATCH_MAY: /* fallback connection */
- if (fallback == NULL || fnob > nob ||
+ if (!fallback || fnob > nob ||
(fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
fallback = c;
@@ -662,9 +668,9 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
}
/* prefer the typed selection */
- conn = (typed != NULL) ? typed : fallback;
+ conn = (typed) ? typed : fallback;
- if (conn != NULL)
+ if (conn)
conn->ksnc_tx_last_post = cfs_time_current();
return conn;
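The selection logic above prefers a typed match over a MATCH_MAY fallback and, within each class, the connection with the fewest queued bytes (the round-robin tiebreak is omitted here). A compact userspace sketch of the choice, with hypothetical struct conn fields:

#include <stdio.h>

enum match { MATCH_NO, MATCH_YES, MATCH_MAY };

struct conn {
        int queued;             /* bytes queued; lower is better */
        enum match match;
};

int main(void)
{
        struct conn conns[] = {
                { 300, MATCH_MAY },
                { 100, MATCH_YES },
                {  50, MATCH_YES },
        };
        struct conn *typed = NULL, *fallback = NULL, *best;
        unsigned int i;

        for (i = 0; i < sizeof(conns) / sizeof(conns[0]); i++) {
                struct conn *c = &conns[i];

                switch (c->match) {
                case MATCH_YES:         /* right type: keep least-queued */
                        if (!typed || typed->queued > c->queued)
                                typed = c;
                        break;
                case MATCH_MAY:         /* only usable as a backup */
                        if (!fallback || fallback->queued > c->queued)
                                fallback = c;
                        break;
                default:
                        break;
                }
        }

        best = typed ? typed : fallback;        /* typed always wins */
        if (best)
                printf("picked conn with %d queued bytes\n", best->queued);
        return 0;
}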
@@ -675,48 +681,51 @@ ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
{
conn->ksnc_proto->pro_pack(tx);
- atomic_add (tx->tx_nob, &conn->ksnc_tx_nob);
+ atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
ksocknal_conn_addref(conn); /* +1 ref for tx */
tx->tx_conn = conn;
}
void
-ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
+ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn)
{
ksock_sched_t *sched = conn->ksnc_scheduler;
ksock_msg_t *msg = &tx->tx_msg;
ksock_tx_t *ztx = NULL;
int bufnob = 0;
- /* called holding global lock (read or irq-write) and caller may
+ /*
+ * called holding global lock (read or irq-write) and caller may
* not have dropped this lock between finding conn and calling me,
* so we don't need the {get,put}connsock dance to deref
- * ksnc_sock... */
+ * ksnc_sock...
+ */
LASSERT(!conn->ksnc_closing);
CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
- libcfs_id2str(conn->ksnc_peer->ksnp_id),
- &conn->ksnc_ipaddr,
- conn->ksnc_port);
+ libcfs_id2str(conn->ksnc_peer->ksnp_id),
+ &conn->ksnc_ipaddr, conn->ksnc_port);
ksocknal_tx_prep(conn, tx);
- /* Ensure the frags we've been given EXACTLY match the number of
+ /*
+ * Ensure the frags we've been given EXACTLY match the number of
* bytes we want to send. Many TCP/IP stacks disregard any total
* size parameters passed to them and just look at the frags.
*
* We always expect at least 1 mapped fragment containing the
- * complete ksocknal message header. */
- LASSERT(lnet_iov_nob (tx->tx_niov, tx->tx_iov) +
+ * complete ksocknal message header.
+ */
+ LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) +
lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
(unsigned int)tx->tx_nob);
LASSERT(tx->tx_niov >= 1);
LASSERT(tx->tx_resid == tx->tx_nob);
- CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
- tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type :
- KSOCK_MSG_NOOP,
- tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
+ CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
+ tx, (tx->tx_lnetmsg) ? tx->tx_lnetmsg->msg_hdr.type :
+ KSOCK_MSG_NOOP,
+ tx->tx_nob, tx->tx_niov, tx->tx_nkiov);
/*
* FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__
@@ -725,7 +734,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
spin_lock_bh(&sched->kss_lock);
- if (list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
+ if (list_empty(&conn->ksnc_tx_queue) && !bufnob) {
/* First packet starts the timeout */
conn->ksnc_tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
@@ -736,26 +745,30 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
}
if (msg->ksm_type == KSOCK_MSG_NOOP) {
- /* The packet is noop ZC ACK, try to piggyback the ack_cookie
- * on a normal packet so I don't need to send it */
- LASSERT(msg->ksm_zc_cookies[1] != 0);
- LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
+ /*
+ * The packet is a noop ZC ACK; try to piggyback the ack_cookie
+ * on a normal packet so I don't need to send it
+ */
+ LASSERT(msg->ksm_zc_cookies[1]);
+ LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
ztx = tx; /* ZC ACK piggybacked on ztx release tx later */
} else {
- /* It's a normal packet - can it piggback a noop zc-ack that
- * has been queued already? */
- LASSERT(msg->ksm_zc_cookies[1] == 0);
- LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL);
+ /*
+ * It's a normal packet - can it piggyback a noop zc-ack that
+ * has been queued already?
+ */
+ LASSERT(!msg->ksm_zc_cookies[1]);
+ LASSERT(conn->ksnc_proto->pro_queue_tx_msg);
ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
/* ztx will be released later */
}
- if (ztx != NULL) {
- atomic_sub (ztx->tx_nob, &conn->ksnc_tx_nob);
+ if (ztx) {
+ atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
}
@@ -763,24 +776,23 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
!conn->ksnc_tx_scheduled) { /* not scheduled to send */
/* +1 ref for scheduler */
ksocknal_conn_addref(conn);
- list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
+ list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
- wake_up (&sched->kss_waitq);
+ wake_up(&sched->kss_waitq);
}
spin_unlock_bh(&sched->kss_lock);
}
ksock_route_t *
-ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
+ksocknal_find_connectable_route_locked(ksock_peer_t *peer)
{
unsigned long now = cfs_time_current();
struct list_head *tmp;
ksock_route_t *route;
- list_for_each (tmp, &peer->ksnp_routes) {
- route = list_entry (tmp, ksock_route_t, ksnr_list);
+ list_for_each(tmp, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
@@ -788,10 +800,10 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
continue;
/* all route types connected ? */
- if ((ksocknal_route_mask() & ~route->ksnr_connected) == 0)
+ if (!(ksocknal_route_mask() & ~route->ksnr_connected))
continue;
- if (!(route->ksnr_retry_interval == 0 || /* first attempt */
+ if (!(!route->ksnr_retry_interval || /* first attempt */
cfs_time_aftereq(now, route->ksnr_timeout))) {
CDEBUG(D_NET,
"Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n",
@@ -809,13 +821,13 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
}
ksock_route_t *
-ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
+ksocknal_find_connecting_route_locked(ksock_peer_t *peer)
{
struct list_head *tmp;
ksock_route_t *route;
- list_for_each (tmp, &peer->ksnp_routes) {
- route = list_entry (tmp, ksock_route_t, ksnr_list);
+ list_for_each(tmp, &peer->ksnp_routes) {
+ route = list_entry(tmp, ksock_route_t, ksnr_list);
LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
@@ -827,7 +839,7 @@ ksocknal_find_connecting_route_locked (ksock_peer_t *peer)
}
int
-ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
+ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
{
ksock_peer_t *peer;
ksock_conn_t *conn;
@@ -835,21 +847,23 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
int retry;
int rc;
- LASSERT(tx->tx_conn == NULL);
+ LASSERT(!tx->tx_conn);
g_lock = &ksocknal_data.ksnd_global_lock;
for (retry = 0;; retry = 1) {
read_lock(g_lock);
peer = ksocknal_find_peer_locked(ni, id);
- if (peer != NULL) {
- if (ksocknal_find_connectable_route_locked(peer) == NULL) {
+ if (peer) {
+ if (!ksocknal_find_connectable_route_locked(peer)) {
conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
- if (conn != NULL) {
- /* I've got no routes that need to be
+ if (conn) {
+ /*
+ * I've got no routes that need to be
* connecting and I do have an actual
- * connection... */
- ksocknal_queue_tx_locked (tx, conn);
+ * connection...
+ */
+ ksocknal_queue_tx_locked(tx, conn);
read_unlock(g_lock);
return 0;
}
@@ -862,12 +876,12 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
write_lock_bh(g_lock);
peer = ksocknal_find_peer_locked(ni, id);
- if (peer != NULL)
+ if (peer)
break;
write_unlock_bh(g_lock);
- if ((id.pid & LNET_PID_USERFLAG) != 0) {
+ if (id.pid & LNET_PID_USERFLAG) {
CERROR("Refusing to create a connection to userspace process %s\n",
libcfs_id2str(id));
return -EHOSTUNREACH;
@@ -881,7 +895,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
rc = ksocknal_add_peer(ni, id,
LNET_NIDADDR(id.nid),
lnet_acceptor_port());
- if (rc != 0) {
+ if (rc) {
CERROR("Can't add peer %s: %d\n",
libcfs_id2str(id), rc);
return rc;
@@ -891,21 +905,21 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
ksocknal_launch_all_connections_locked(peer);
conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
- if (conn != NULL) {
+ if (conn) {
/* Connection exists; queue message on it */
- ksocknal_queue_tx_locked (tx, conn);
+ ksocknal_queue_tx_locked(tx, conn);
write_unlock_bh(g_lock);
return 0;
}
if (peer->ksnp_accepting > 0 ||
- ksocknal_find_connecting_route_locked (peer) != NULL) {
+ ksocknal_find_connecting_route_locked(peer)) {
/* the message is going to be pinned to the peer */
tx->tx_deadline =
cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
/* Queue the message until a connection is established */
- list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
+ list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
write_unlock_bh(g_lock);
return 0;
}
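
Nearly every change in these hunks applies a single rule: in C a scalar or a pointer already has a truth value, so explicit `!= 0` / `== NULL` comparisons are redundant. A minimal userspace sketch of the equivalences the patch relies on (the function and names here are illustrative only):

    #include <stdio.h>

    static const char *status(int rc, const void *ptr)
    {
        if (rc)        /* identical truth value to: rc != 0 */
            return "error";
        if (!ptr)      /* identical truth value to: ptr == NULL */
            return "no data";
        return "ok";
    }

    int main(void)
    {
        printf("%s\n", status(0, "payload"));  /* prints "ok" */
        return 0;
    }
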
@@ -932,19 +946,20 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
int desc_size;
int rc;
- /* NB 'private' is different depending on what we're sending.
- * Just ignore it... */
-
+ /*
+ * NB 'private' is different depending on what we're sending.
+ * Just ignore it...
+ */
CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
payload_nob, payload_niov, libcfs_id2str(target));
- LASSERT(payload_nob == 0 || payload_niov > 0);
+ LASSERT(!payload_nob || payload_niov > 0);
LASSERT(payload_niov <= LNET_MAX_IOV);
/* payload is either all vaddrs or all pages */
- LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
- LASSERT (!in_interrupt ());
+ LASSERT(!(payload_kiov && payload_iov));
+ LASSERT(!in_interrupt());
- if (payload_iov != NULL)
+ if (payload_iov)
desc_size = offsetof(ksock_tx_t,
tx_frags.virt.iov[1 + payload_niov]);
else
@@ -954,7 +969,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
if (lntmsg->msg_vmflush)
mpflag = cfs_memory_pressure_get_and_set();
tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
- if (tx == NULL) {
+ if (!tx) {
CERROR("Can't allocate tx desc type %d size %d\n",
type, desc_size);
if (lntmsg->msg_vmflush)
@@ -965,7 +980,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
tx->tx_conn = NULL; /* set when assigned a conn */
tx->tx_lnetmsg = lntmsg;
- if (payload_iov != NULL) {
+ if (payload_iov) {
tx->tx_kiov = NULL;
tx->tx_nkiov = 0;
tx->tx_iov = tx->tx_frags.virt.iov;
@@ -992,7 +1007,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
if (!mpflag)
cfs_memory_pressure_restore(mpflag);
- if (rc == 0)
+ if (!rc)
return 0;
ksocknal_free_tx(tx);
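
The `desc_size` computation above sizes the tx descriptor by taking `offsetof()` into the trailing fragment array, so the header plus exactly `1 + payload_niov` entries are allocated in one shot. A hedged userspace sketch of the same trick; the struct and field names below are made up:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct iovec_desc {
        void  *base;
        size_t len;
    };

    struct tx_desc {
        int niov;                 /* header fields ... */
        struct iovec_desc iov[];  /* trailing variable-size array */
    };

    int main(void)
    {
        int niov = 4;
        /* offsetof() into the trailing array yields the exact size of
         * the header plus niov descriptors; GCC and clang accept a
         * variable index here via __builtin_offsetof. */
        size_t sz = offsetof(struct tx_desc, iov[niov]);
        struct tx_desc *tx = malloc(sz);

        if (!tx)
            return 1;
        tx->niov = niov;
        printf("allocated %zu bytes for %d frags\n", sz, niov);
        free(tx);
        return 0;
    }
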
@@ -1014,7 +1029,7 @@ ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
}
void
-ksocknal_thread_fini (void)
+ksocknal_thread_fini(void)
{
write_lock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_data.ksnd_nthreads--;
@@ -1022,7 +1037,7 @@ ksocknal_thread_fini (void)
}
int
-ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
+ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip)
{
static char ksocknal_slop_buffer[4096];
@@ -1030,14 +1045,14 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
unsigned int niov;
int skipped;
- LASSERT(conn->ksnc_proto != NULL);
+ LASSERT(conn->ksnc_proto);
- if ((*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) != 0) {
+ if (*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) {
/* Remind the socket to ack eagerly... */
ksocknal_lib_eager_ack(conn);
}
- if (nob_to_skip == 0) { /* right at next packet boundary now */
+ if (!nob_to_skip) { /* right at next packet boundary now */
conn->ksnc_rx_started = 0;
mb(); /* racing with timeout thread */
@@ -1061,11 +1076,11 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
- conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t);
+ conn->ksnc_rx_iov[0].iov_len = sizeof(lnet_hdr_t);
break;
default:
- LBUG ();
+ LBUG();
}
conn->ksnc_rx_niov = 1;
@@ -1075,9 +1090,10 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
return 1;
}
- /* Set up to skip as much as possible now. If there's more left
- * (ran out of iov entries) we'll get called again */
-
+ /*
+ * Set up to skip as much as possible now. If there's more left
+ * (ran out of iov entries) we'll get called again
+ */
conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
conn->ksnc_rx_nob_left = nob_to_skip;
conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
@@ -1093,8 +1109,8 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
skipped += nob;
nob_to_skip -= nob;
- } while (nob_to_skip != 0 && /* mustn't overflow conn's rx iov */
- niov < sizeof(conn->ksnc_rx_iov_space) / sizeof (struct iovec));
+ } while (nob_to_skip && /* mustn't overflow conn's rx iov */
+ niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec));
conn->ksnc_rx_niov = niov;
conn->ksnc_rx_kiov = NULL;
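
The slop path above discards unwanted bytes by pointing several iov entries at one static scratch buffer, so a single receive can swallow multiple buffer-sizes worth of junk. A standalone sketch of that iov construction (sizes and limits are illustrative, not socklnd's):

    #include <stdio.h>
    #include <sys/uio.h>

    #define SLOP_SIZE 4096
    #define MAX_IOV   8

    static char slop[SLOP_SIZE];

    /* Build an iov that can absorb up to MAX_IOV * SLOP_SIZE bytes in
     * one read; returns how many bytes it covers. If more remains, the
     * caller simply gets invoked again, as ksocknal_new_packet() is. */
    static size_t build_skip_iov(struct iovec *iov, int *niov, size_t to_skip)
    {
        size_t covered = 0;
        int n = 0;

        while (to_skip && n < MAX_IOV) {
            size_t nob = to_skip < SLOP_SIZE ? to_skip : SLOP_SIZE;

            iov[n].iov_base = slop;  /* every entry aliases one buffer */
            iov[n].iov_len = nob;
            covered += nob;
            to_skip -= nob;
            n++;
        }
        *niov = n;
        return covered;
    }

    int main(void)
    {
        struct iovec iov[MAX_IOV];
        int niov;
        size_t got = build_skip_iov(iov, &niov, 10000);

        printf("%d iovs cover %zu bytes\n", niov, got);
        return 0;
    }
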
@@ -1104,13 +1120,13 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip)
}
static int
-ksocknal_process_receive (ksock_conn_t *conn)
+ksocknal_process_receive(ksock_conn_t *conn)
{
lnet_hdr_t *lhdr;
lnet_process_id_t *id;
int rc;
- LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
+ LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
/* NB: sched lock NOT held */
/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
@@ -1119,13 +1135,13 @@ ksocknal_process_receive (ksock_conn_t *conn)
conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
again:
- if (conn->ksnc_rx_nob_wanted != 0) {
+ if (conn->ksnc_rx_nob_wanted) {
rc = ksocknal_receive(conn);
if (rc <= 0) {
- LASSERT (rc != -EAGAIN);
+ LASSERT(rc != -EAGAIN);
- if (rc == 0)
+ if (!rc)
CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n",
conn,
libcfs_id2str(conn->ksnc_peer->ksnp_id),
@@ -1139,12 +1155,12 @@ ksocknal_process_receive (ksock_conn_t *conn)
conn->ksnc_port);
/* it's not an error if conn is being closed */
- ksocknal_close_conn_and_siblings (conn,
- (conn->ksnc_closing) ? 0 : rc);
- return (rc == 0 ? -ESHUTDOWN : rc);
+ ksocknal_close_conn_and_siblings(conn,
+ (conn->ksnc_closing) ? 0 : rc);
+ return (!rc ? -ESHUTDOWN : rc);
}
- if (conn->ksnc_rx_nob_wanted != 0) {
+ if (conn->ksnc_rx_nob_wanted) {
/* short read */
return -EAGAIN;
}
@@ -1169,7 +1185,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
}
if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
- conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
+ conn->ksnc_msg.ksm_csum && /* has checksum */
conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
/* NOOP Checksum error */
CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
@@ -1180,10 +1196,10 @@ ksocknal_process_receive (ksock_conn_t *conn)
return -EIO;
}
- if (conn->ksnc_msg.ksm_zc_cookies[1] != 0) {
+ if (conn->ksnc_msg.ksm_zc_cookies[1]) {
__u64 cookie = 0;
- LASSERT (conn->ksnc_proto != &ksocknal_protocol_v1x);
+ LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
cookie = conn->ksnc_msg.ksm_zc_cookies[0];
@@ -1191,7 +1207,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
conn->ksnc_msg.ksm_zc_cookies[1]);
- if (rc != 0) {
+ if (rc) {
CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
@@ -1202,7 +1218,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
}
if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
- ksocknal_new_packet (conn, 0);
+ ksocknal_new_packet(conn, 0);
return 0; /* NOOP is done and just return */
}
@@ -1224,7 +1240,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
/* unpack message header */
conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);
- if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) {
+ if (conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) {
/* Userspace peer */
lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
id = &conn->ksnc_peer->ksnp_id;
@@ -1243,14 +1259,14 @@ ksocknal_process_receive (ksock_conn_t *conn)
if (rc < 0) {
/* I just received garbage: give up on this conn */
ksocknal_new_packet(conn, 0);
- ksocknal_close_conn_and_siblings (conn, rc);
+ ksocknal_close_conn_and_siblings(conn, rc);
ksocknal_conn_decref(conn);
return -EPROTO;
}
/* I'm racing with ksocknal_recv() */
- LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
- conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
+ LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
+ conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
return 0;
@@ -1262,8 +1278,8 @@ ksocknal_process_receive (ksock_conn_t *conn)
/* payload all received */
rc = 0;
- if (conn->ksnc_rx_nob_left == 0 && /* not truncating */
- conn->ksnc_msg.ksm_csum != 0 && /* has checksum */
+ if (!conn->ksnc_rx_nob_left && /* not truncating */
+ conn->ksnc_msg.ksm_csum && /* has checksum */
conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
@@ -1271,7 +1287,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
rc = -EIO;
}
- if (rc == 0 && conn->ksnc_msg.ksm_zc_cookies[0] != 0) {
+ if (!rc && conn->ksnc_msg.ksm_zc_cookies[0]) {
LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
@@ -1285,16 +1301,16 @@ ksocknal_process_receive (ksock_conn_t *conn)
lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);
- if (rc != 0) {
+ if (rc) {
ksocknal_new_packet(conn, 0);
- ksocknal_close_conn_and_siblings (conn, rc);
+ ksocknal_close_conn_and_siblings(conn, rc);
return -EPROTO;
}
/* Fall through */
case SOCKNAL_RX_SLOP:
/* starting new packet? */
- if (ksocknal_new_packet (conn, conn->ksnc_rx_nob_left))
+ if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
return 0; /* come back later */
goto again; /* try to finish reading slop now */
@@ -1308,9 +1324,9 @@ ksocknal_process_receive (ksock_conn_t *conn)
}
int
-ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
- unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen, unsigned int rlen)
+ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
+ unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
+ unsigned int offset, unsigned int mlen, unsigned int rlen)
{
ksock_conn_t *conn = private;
ksock_sched_t *sched = conn->ksnc_scheduler;
@@ -1322,7 +1338,7 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
conn->ksnc_rx_nob_wanted = mlen;
conn->ksnc_rx_nob_left = rlen;
- if (mlen == 0 || iov != NULL) {
+ if (!mlen || iov) {
conn->ksnc_rx_nkiov = 0;
conn->ksnc_rx_kiov = NULL;
conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
@@ -1349,8 +1365,8 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
switch (conn->ksnc_rx_state) {
case SOCKNAL_RX_PARSE_WAIT:
list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
- wake_up (&sched->kss_waitq);
- LASSERT (conn->ksnc_rx_ready);
+ wake_up(&sched->kss_waitq);
+ LASSERT(conn->ksnc_rx_ready);
break;
case SOCKNAL_RX_PARSE:
@@ -1396,7 +1412,7 @@ int ksocknal_scheduler(void *arg)
cfs_block_allsigs();
rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't set CPT affinity to %d: %d\n",
info->ksi_cpt, rc);
}
@@ -1408,18 +1424,20 @@ int ksocknal_scheduler(void *arg)
/* Ensure I progress everything semi-fairly */
- if (!list_empty (&sched->kss_rx_conns)) {
+ if (!list_empty(&sched->kss_rx_conns)) {
conn = list_entry(sched->kss_rx_conns.next,
- ksock_conn_t, ksnc_rx_list);
+ ksock_conn_t, ksnc_rx_list);
list_del(&conn->ksnc_rx_list);
LASSERT(conn->ksnc_rx_scheduled);
LASSERT(conn->ksnc_rx_ready);
- /* clear rx_ready in case receive isn't complete.
+ /*
+ * clear rx_ready in case receive isn't complete.
* Do it BEFORE we call process_recv, since
* data_ready can set it any time after we release
- * kss_lock. */
+ * kss_lock.
+ */
conn->ksnc_rx_ready = 0;
spin_unlock_bh(&sched->kss_lock);
@@ -1431,18 +1449,20 @@ int ksocknal_scheduler(void *arg)
LASSERT(conn->ksnc_rx_scheduled);
/* Did process_receive get everything it wanted? */
- if (rc == 0)
+ if (!rc)
conn->ksnc_rx_ready = 1;
if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
- /* Conn blocked waiting for ksocknal_recv()
+ /*
+ * Conn blocked waiting for ksocknal_recv()
* I change its state (under lock) to signal
- * it can be rescheduled */
+ * it can be rescheduled
+ */
conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
} else if (conn->ksnc_rx_ready) {
/* reschedule for rx */
- list_add_tail (&conn->ksnc_rx_list,
- &sched->kss_rx_conns);
+ list_add_tail(&conn->ksnc_rx_list,
+ &sched->kss_rx_conns);
} else {
conn->ksnc_rx_scheduled = 0;
/* drop my ref */
@@ -1452,25 +1472,24 @@ int ksocknal_scheduler(void *arg)
did_something = 1;
}
- if (!list_empty (&sched->kss_tx_conns)) {
+ if (!list_empty(&sched->kss_tx_conns)) {
LIST_HEAD(zlist);
if (!list_empty(&sched->kss_zombie_noop_txs)) {
- list_add(&zlist,
- &sched->kss_zombie_noop_txs);
+ list_add(&zlist, &sched->kss_zombie_noop_txs);
list_del_init(&sched->kss_zombie_noop_txs);
}
conn = list_entry(sched->kss_tx_conns.next,
- ksock_conn_t, ksnc_tx_list);
- list_del (&conn->ksnc_tx_list);
+ ksock_conn_t, ksnc_tx_list);
+ list_del(&conn->ksnc_tx_list);
LASSERT(conn->ksnc_tx_scheduled);
LASSERT(conn->ksnc_tx_ready);
LASSERT(!list_empty(&conn->ksnc_tx_queue));
tx = list_entry(conn->ksnc_tx_queue.next,
- ksock_tx_t, tx_list);
+ ksock_tx_t, tx_list);
if (conn->ksnc_tx_carrier == tx)
ksocknal_next_tx_carrier(conn);
@@ -1478,16 +1497,20 @@ int ksocknal_scheduler(void *arg)
/* dequeue now so empty list => more to send */
list_del(&tx->tx_list);
- /* Clear tx_ready in case send isn't complete. Do
+ /*
+ * Clear tx_ready in case send isn't complete. Do
* it BEFORE we call process_transmit, since
* write_space can set it any time after we release
- * kss_lock. */
+ * kss_lock.
+ */
conn->ksnc_tx_ready = 0;
spin_unlock_bh(&sched->kss_lock);
if (!list_empty(&zlist)) {
- /* free zombie noop txs, it's fast because
- * noop txs are just put in freelist */
+ /*
+ * free zombie noop txs; it's fast because
+ * noop txs are simply put back on a freelist
+ */
ksocknal_txlist_done(NULL, &zlist, 0);
}
@@ -1496,8 +1519,7 @@ int ksocknal_scheduler(void *arg)
if (rc == -ENOMEM || rc == -EAGAIN) {
/* Incomplete send: replace tx on HEAD of tx_queue */
spin_lock_bh(&sched->kss_lock);
- list_add(&tx->tx_list,
- &conn->ksnc_tx_queue);
+ list_add(&tx->tx_list, &conn->ksnc_tx_queue);
} else {
/* Complete send; tx -ref */
ksocknal_tx_decref(tx);
@@ -1508,13 +1530,15 @@ int ksocknal_scheduler(void *arg)
}
if (rc == -ENOMEM) {
- /* Do nothing; after a short timeout, this
- * conn will be reposted on kss_tx_conns. */
+ /*
+ * Do nothing; after a short timeout, this
+ * conn will be reposted on kss_tx_conns.
+ */
} else if (conn->ksnc_tx_ready &&
!list_empty(&conn->ksnc_tx_queue)) {
/* reschedule for tx */
list_add_tail(&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
+ &sched->kss_tx_conns);
} else {
conn->ksnc_tx_scheduled = 0;
/* drop my ref */
@@ -1533,7 +1557,7 @@ int ksocknal_scheduler(void *arg)
rc = wait_event_interruptible_exclusive(
sched->kss_waitq,
!ksocknal_sched_cansleep(sched));
- LASSERT (rc == 0);
+ LASSERT(!rc);
} else {
cond_resched();
}
@@ -1551,7 +1575,7 @@ int ksocknal_scheduler(void *arg)
* Add connection to kss_rx_conns of scheduler
* and wakeup the scheduler.
*/
-void ksocknal_read_callback (ksock_conn_t *conn)
+void ksocknal_read_callback(ksock_conn_t *conn)
{
ksock_sched_t *sched;
@@ -1562,13 +1586,12 @@ void ksocknal_read_callback (ksock_conn_t *conn)
conn->ksnc_rx_ready = 1;
if (!conn->ksnc_rx_scheduled) { /* not being progressed */
- list_add_tail(&conn->ksnc_rx_list,
- &sched->kss_rx_conns);
+ list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
conn->ksnc_rx_scheduled = 1;
/* extra ref for scheduler */
ksocknal_conn_addref(conn);
- wake_up (&sched->kss_waitq);
+ wake_up(&sched->kss_waitq);
}
spin_unlock_bh(&sched->kss_lock);
}
@@ -1577,7 +1600,7 @@ void ksocknal_read_callback (ksock_conn_t *conn)
* Add connection to kss_tx_conns of scheduler
* and wakeup the scheduler.
*/
-void ksocknal_write_callback (ksock_conn_t *conn)
+void ksocknal_write_callback(ksock_conn_t *conn)
{
ksock_sched_t *sched;
@@ -1589,20 +1612,19 @@ void ksocknal_write_callback (ksock_conn_t *conn)
if (!conn->ksnc_tx_scheduled && /* not being progressed */
!list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
- list_add_tail (&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
+ list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
conn->ksnc_tx_scheduled = 1;
/* extra ref for scheduler */
ksocknal_conn_addref(conn);
- wake_up (&sched->kss_waitq);
+ wake_up(&sched->kss_waitq);
}
spin_unlock_bh(&sched->kss_lock);
}
static ksock_proto_t *
-ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
+ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
{
__u32 version = 0;
@@ -1611,7 +1633,7 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
version = __swab32(hello->kshm_version);
- if (version != 0) {
+ if (version) {
#if SOCKNAL_VERSION_DEBUG
if (*ksocknal_tunables.ksnd_protocol == 1)
return NULL;
@@ -1632,11 +1654,11 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;
- CLASSERT(sizeof (lnet_magicversion_t) ==
- offsetof (ksock_hello_msg_t, kshm_src_nid));
+ CLASSERT(sizeof(lnet_magicversion_t) ==
+ offsetof(ksock_hello_msg_t, kshm_src_nid));
- if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) &&
- hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR))
+ if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
+ hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
return &ksocknal_protocol_v1x;
}
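
ksocknal_parse_proto_version(), cleaned up above, identifies the peer's endianness from the magic: if the magic only matches after byte-swapping, every multi-byte field in the hello must be swapped before use. A standalone sketch of that dispatch; the magic value below is made up, not LNET's:

    #include <stdint.h>
    #include <stdio.h>

    #define PROTO_MAGIC 0x45726963u  /* illustrative value only */

    static uint32_t swab32(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0xff00) |
               ((v << 8) & 0xff0000) | (v << 24);
    }

    /* If the magic matches byte-swapped, the peer is opposite-endian
     * and its other fields must be swapped too before use. */
    static int parse_version(uint32_t magic, uint32_t wire_version)
    {
        if (magic == PROTO_MAGIC)
            return (int)wire_version;
        if (magic == swab32(PROTO_MAGIC))
            return (int)swab32(wire_version);
        return -1;  /* unknown peer */
    }

    int main(void)
    {
        /* an opposite-endian peer speaking version 3 */
        printf("%d\n", parse_version(swab32(PROTO_MAGIC), swab32(3)));
        return 0;
    }
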
@@ -1644,8 +1666,8 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello)
}
int
-ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
- lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
+ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+ lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
{
/* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
ksock_net_t *net = (ksock_net_t *)ni->ni_data;
@@ -1653,7 +1675,7 @@ ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn,
LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES);
/* rely on caller to hold a ref on socket so it wouldn't disappear */
- LASSERT(conn->ksnc_proto != NULL);
+ LASSERT(conn->ksnc_proto);
hello->kshm_src_nid = ni->ni_nid;
hello->kshm_dst_nid = peer_nid;
@@ -1682,9 +1704,9 @@ ksocknal_invert_type(int type)
}
int
-ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
- ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
- __u64 *incarnation)
+ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+ ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
+ __u64 *incarnation)
{
/* Return < 0 fatal error
* 0 success
@@ -1692,7 +1714,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
* EPROTO protocol version mismatch
*/
struct socket *sock = conn->ksnc_sock;
- int active = (conn->ksnc_proto != NULL);
+ int active = !!conn->ksnc_proto;
int timeout;
int proto_match;
int rc;
@@ -1705,20 +1727,20 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
timeout = active ? *ksocknal_tunables.ksnd_timeout :
lnet_acceptor_timeout();
- rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
- if (rc != 0) {
+ rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout);
+ if (rc) {
CERROR("Error %d reading HELLO from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
- LASSERT (rc < 0);
+ rc, &conn->ksnc_ipaddr);
+ LASSERT(rc < 0);
return rc;
}
if (hello->kshm_magic != LNET_PROTO_MAGIC &&
hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
- hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
+ hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
/* Unexpected magic! */
CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n",
- __cpu_to_le32 (hello->kshm_magic),
+ __cpu_to_le32(hello->kshm_magic),
LNET_PROTO_TCP_MAGIC,
&conn->ksnc_ipaddr);
return -EPROTO;
@@ -1726,15 +1748,15 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
rc = lnet_sock_read(sock, &hello->kshm_version,
sizeof(hello->kshm_version), timeout);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d reading HELLO from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
+ rc, &conn->ksnc_ipaddr);
LASSERT(rc < 0);
return rc;
}
proto = ksocknal_parse_proto_version(hello);
- if (proto == NULL) {
+ if (!proto) {
if (!active) {
/* unknown protocol from peer, tell peer my protocol */
conn->ksnc_proto = &ksocknal_protocol_v3x;
@@ -1760,7 +1782,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
/* receive the rest of hello message anyway */
rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d reading or checking hello from from %pI4h\n",
rc, &conn->ksnc_ipaddr);
LASSERT(rc < 0);
@@ -1792,8 +1814,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
if (conn->ksnc_type == SOCKLND_CONN_NONE) {
CERROR("Unexpected type %d from %s ip %pI4h\n",
- hello->kshm_ctype, libcfs_id2str(*peerid),
- &conn->ksnc_ipaddr);
+ hello->kshm_ctype, libcfs_id2str(*peerid),
+ &conn->ksnc_ipaddr);
return -EPROTO;
}
@@ -1816,9 +1838,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
- conn->ksnc_type, libcfs_id2str(*peerid),
- &conn->ksnc_ipaddr,
- hello->kshm_ctype);
+ conn->ksnc_type, libcfs_id2str(*peerid),
+ &conn->ksnc_ipaddr, hello->kshm_ctype);
return -EPROTO;
}
@@ -1826,7 +1847,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
}
static int
-ksocknal_connect (ksock_route_t *route)
+ksocknal_connect(ksock_route_t *route)
{
LIST_HEAD(zombies);
ksock_peer_t *peer = route->ksnr_peer;
@@ -1850,10 +1871,12 @@ ksocknal_connect (ksock_route_t *route)
for (;;) {
wanted = ksocknal_route_mask() & ~route->ksnr_connected;
- /* stop connecting if peer/route got closed under me, or
- * route got connected while queued */
+ /*
+ * stop connecting if peer/route got closed under me, or
+ * route got connected while queued
+ */
if (peer->ksnp_closing || route->ksnr_deleted ||
- wanted == 0) {
+ !wanted) {
retry_later = 0;
break;
}
@@ -1869,14 +1892,14 @@ ksocknal_connect (ksock_route_t *route)
if (retry_later) /* needs reschedule */
break;
- if ((wanted & (1 << SOCKLND_CONN_ANY)) != 0) {
+ if (wanted & (1 << SOCKLND_CONN_ANY)) {
type = SOCKLND_CONN_ANY;
- } else if ((wanted & (1 << SOCKLND_CONN_CONTROL)) != 0) {
+ } else if (wanted & (1 << SOCKLND_CONN_CONTROL)) {
type = SOCKLND_CONN_CONTROL;
- } else if ((wanted & (1 << SOCKLND_CONN_BULK_IN)) != 0) {
+ } else if (wanted & (1 << SOCKLND_CONN_BULK_IN)) {
type = SOCKLND_CONN_BULK_IN;
} else {
- LASSERT ((wanted & (1 << SOCKLND_CONN_BULK_OUT)) != 0);
+ LASSERT(wanted & (1 << SOCKLND_CONN_BULK_OUT));
type = SOCKLND_CONN_BULK_OUT;
}
@@ -1893,7 +1916,7 @@ ksocknal_connect (ksock_route_t *route)
rc = lnet_connect(&sock, peer->ksnp_id.nid,
route->ksnr_myipaddr,
route->ksnr_ipaddr, route->ksnr_port);
- if (rc != 0)
+ if (rc)
goto failed;
rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
@@ -1904,9 +1927,11 @@ ksocknal_connect (ksock_route_t *route)
goto failed;
}
- /* A +ve RC means I have to retry because I lost the connection
- * race or I have to renegotiate protocol version */
- retry_later = (rc != 0);
+ /*
+ * A +ve RC means I have to retry because I lost the connection
+ * race or I have to renegotiate protocol version
+ */
+	retry_later = !!rc;
if (retry_later)
CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
libcfs_nid2str(peer->ksnp_id.nid));
@@ -1918,17 +1943,20 @@ ksocknal_connect (ksock_route_t *route)
route->ksnr_connecting = 0;
if (retry_later) {
- /* re-queue for attention; this frees me up to handle
- * the peer's incoming connection request */
-
+ /*
+ * re-queue for attention; this frees me up to handle
+ * the peer's incoming connection request
+ */
if (rc == EALREADY ||
- (rc == 0 && peer->ksnp_accepting > 0)) {
- /* We want to introduce a delay before next
+ (!rc && peer->ksnp_accepting > 0)) {
+ /*
+ * We want to introduce a delay before next
* attempt to connect if we lost conn race,
* but the race is resolved quickly usually,
- * so min_reconnectms should be good heuristic */
+ * so min_reconnectms should be good heuristic
+ */
route->ksnr_retry_interval =
- cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000;
+ cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000;
route->ksnr_timeout = cfs_time_add(cfs_time_current(),
route->ksnr_retry_interval);
}
@@ -1949,30 +1977,34 @@ ksocknal_connect (ksock_route_t *route)
route->ksnr_retry_interval *= 2;
route->ksnr_retry_interval =
max(route->ksnr_retry_interval,
- cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms)/1000);
+ cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000);
route->ksnr_retry_interval =
min(route->ksnr_retry_interval,
- cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms)/1000);
+ cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms) / 1000);
- LASSERT (route->ksnr_retry_interval != 0);
+ LASSERT(route->ksnr_retry_interval);
route->ksnr_timeout = cfs_time_add(cfs_time_current(),
route->ksnr_retry_interval);
if (!list_empty(&peer->ksnp_tx_queue) &&
- peer->ksnp_accepting == 0 &&
- ksocknal_find_connecting_route_locked(peer) == NULL) {
+ !peer->ksnp_accepting &&
+ !ksocknal_find_connecting_route_locked(peer)) {
ksock_conn_t *conn;
- /* ksnp_tx_queue is queued on a conn on successful
- * connection for V1.x and V2.x */
- if (!list_empty (&peer->ksnp_conns)) {
+ /*
+ * ksnp_tx_queue is queued on a conn on successful
+ * connection for V1.x and V2.x
+ */
+ if (!list_empty(&peer->ksnp_conns)) {
conn = list_entry(peer->ksnp_conns.next,
- ksock_conn_t, ksnc_list);
- LASSERT (conn->ksnc_proto == &ksocknal_protocol_v3x);
+ ksock_conn_t, ksnc_list);
+ LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
}
- /* take all the blocked packets while I've got the lock and
- * complete below... */
+ /*
+ * take all the blocked packets while I've got the lock and
+ * complete below...
+ */
list_splice_init(&peer->ksnp_tx_queue, &zombies);
}
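
The failure path above is a classic doubling backoff clamped to a [min, max] window: the interval is multiplied by two, raised to at least the minimum reconnect delay, then capped at the maximum. A standalone sketch of the arithmetic; the tunable values are made up:

    #include <stdio.h>

    #define MIN_RECONNECT_S 1
    #define MAX_RECONNECT_S 60

    static long next_interval(long cur)
    {
        long next = cur * 2;

        if (next < MIN_RECONNECT_S)
            next = MIN_RECONNECT_S;
        if (next > MAX_RECONNECT_S)
            next = MAX_RECONNECT_S;
        return next;
    }

    int main(void)
    {
        long iv = 0;
        int i;

        /* 0 -> 1 -> 2 -> 4 -> ... then pinned at 60 */
        for (i = 0; i < 10; i++) {
            iv = next_interval(iv);
            printf("attempt %d: wait %lds\n", i + 1, iv);
        }
        return 0;
    }
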
@@ -2011,8 +2043,10 @@ ksocknal_connd_check_start(time64_t sec, long *timeout)
if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
- /* can't create more connd, or still have enough
- * threads to handle more connecting */
+ /*
+ * can't create more connds, or we still have enough
+ * threads to handle more connecting
+ */
return 0;
}
@@ -2041,7 +2075,7 @@ ksocknal_connd_check_start(time64_t sec, long *timeout)
rc = ksocknal_thread_start(ksocknal_connd, NULL, name);
spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- if (rc == 0)
+ if (!rc)
return 1;
/* we tried ... */
@@ -2093,8 +2127,10 @@ ksocknal_connd_check_stop(time64_t sec, long *timeout)
ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
}
-/* Go through connd_routes queue looking for a route that we can process
- * right now, @timeout_p can be updated if we need to come back later */
+/*
+ * Go through the connd_routes queue looking for a route that we can process
+ * right now; @timeout_p can be updated if we need to come back later
+ */
static ksock_route_t *
ksocknal_connd_get_route_locked(signed long *timeout_p)
{
@@ -2104,10 +2140,9 @@ ksocknal_connd_get_route_locked(signed long *timeout_p)
now = cfs_time_current();
/* connd_routes can contain both pending and ordinary routes */
- list_for_each_entry (route, &ksocknal_data.ksnd_connd_routes,
- ksnr_connd_list) {
-
- if (route->ksnr_retry_interval == 0 ||
+ list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
+ ksnr_connd_list) {
+ if (!route->ksnr_retry_interval ||
cfs_time_aftereq(now, route->ksnr_timeout))
return route;
@@ -2120,7 +2155,7 @@ ksocknal_connd_get_route_locked(signed long *timeout_p)
}
int
-ksocknal_connd (void *arg)
+ksocknal_connd(void *arg)
{
spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
ksock_connreq_t *cr;
@@ -2172,15 +2207,17 @@ ksocknal_connd (void *arg)
spin_lock_bh(connd_lock);
}
- /* Only handle an outgoing connection request if there
+ /*
+ * Only handle an outgoing connection request if there
* is a thread left to handle incoming connections and
- * create new connd */
+ * create new connd
+ */
if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
ksocknal_data.ksnd_connd_running) {
route = ksocknal_connd_get_route_locked(&timeout);
}
- if (route != NULL) {
- list_del (&route->ksnr_connd_list);
+ if (route) {
+ list_del(&route->ksnr_connd_list);
ksocknal_data.ksnd_connd_connecting++;
spin_unlock_bh(connd_lock);
dropped_lock = 1;
@@ -2231,24 +2268,26 @@ ksocknal_connd (void *arg)
}
static ksock_conn_t *
-ksocknal_find_timed_out_conn (ksock_peer_t *peer)
+ksocknal_find_timed_out_conn(ksock_peer_t *peer)
{
/* We're called with a shared lock on ksnd_global_lock */
ksock_conn_t *conn;
struct list_head *ctmp;
- list_for_each (ctmp, &peer->ksnp_conns) {
+ list_for_each(ctmp, &peer->ksnp_conns) {
int error;
- conn = list_entry (ctmp, ksock_conn_t, ksnc_list);
+ conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
/* Don't need the {get,put}connsock dance to deref ksnc_sock */
LASSERT(!conn->ksnc_closing);
- /* SOCK_ERROR will reset error code of socket in
- * some platform (like Darwin8.x) */
+ /*
+ * SOCK_ERROR will reset the error code of the socket on
+ * some platforms (like Darwin 8.x)
+ */
error = conn->ksnc_sock->sk->sk_err;
- if (error != 0) {
+ if (error) {
ksocknal_conn_addref(conn);
switch (error) {
@@ -2292,11 +2331,13 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
}
if ((!list_empty(&conn->ksnc_tx_queue) ||
- conn->ksnc_sock->sk->sk_wmem_queued != 0) &&
+ conn->ksnc_sock->sk->sk_wmem_queued) &&
cfs_time_aftereq(cfs_time_current(),
conn->ksnc_tx_deadline)) {
- /* Timed out messages queued for sending or
- * buffered in the socket's send buffer */
+ /*
+ * Timed out messages queued for sending or
+ * buffered in the socket's send buffer
+ */
ksocknal_conn_addref(conn);
CNETERR("Timeout sending data to %s (%pI4h:%d) the network or that node may be down.\n",
libcfs_id2str(peer->ksnp_id),
@@ -2313,20 +2354,18 @@ static inline void
ksocknal_flush_stale_txs(ksock_peer_t *peer)
{
ksock_tx_t *tx;
+ ksock_tx_t *tmp;
LIST_HEAD(stale_txs);
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- while (!list_empty (&peer->ksnp_tx_queue)) {
- tx = list_entry (peer->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
-
+ list_for_each_entry_safe(tx, tmp, &peer->ksnp_tx_queue, tx_list) {
if (!cfs_time_aftereq(cfs_time_current(),
tx->tx_deadline))
break;
- list_del (&tx->tx_list);
- list_add_tail (&tx->tx_list, &stale_txs);
+ list_del(&tx->tx_list);
+ list_add_tail(&tx->tx_list, &stale_txs);
}
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
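
The rewritten loop above is the idiomatic conversion for walking a list while detaching entries from it: `list_for_each_entry_safe()` caches the next node in `tmp` before the body can `list_del()` the current one. A kernel-context sketch of the pattern, assuming a deadline-ordered queue; the types here are invented for illustration:

    #include <linux/jiffies.h>
    #include <linux/list.h>

    struct item {
        struct list_head link;
        unsigned long deadline;  /* in jiffies */
    };

    /* Move every expired item onto 'stale' without tripping over our
     * own deletion: 'tmp' already points at the next node when the
     * body runs. */
    static void drain_expired(struct list_head *queue,
                              struct list_head *stale, unsigned long now)
    {
        struct item *it, *tmp;

        list_for_each_entry_safe(it, tmp, queue, link) {
            if (time_before(now, it->deadline))
                break;  /* deadline-ordered; the rest are still live */
            list_move_tail(&it->link, stale);  /* del + add_tail */
        }
    }

list_move_tail() folds the list_del()/list_add_tail() pair used in the hunk into one call; the open-coded pair in the patch is equivalent.
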
@@ -2336,6 +2375,7 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer)
static int
ksocknal_send_keepalive_locked(ksock_peer_t *peer)
+ __must_hold(&ksocknal_data.ksnd_global_lock)
{
ksock_sched_t *sched;
ksock_conn_t *conn;
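
`__must_hold()`, added above, is a sparse annotation: it documents that the function is entered and exited with the named lock held, and running sparse (`make C=1`) warns when a caller's lock context does not balance. A minimal kernel-context sketch; the spinlock and counter are invented:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(q_lock);
    static int q_len;

    /* Must only be called with q_lock held; sparse will complain if a
     * caller's lock context doesn't match the annotation. */
    static int q_len_locked(void)
        __must_hold(&q_lock)
    {
        return q_len;
    }

    static int q_len_get(void)
    {
        int len;

        spin_lock(&q_lock);
        len = q_len_locked();
        spin_unlock(&q_lock);
        return len;
    }
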
@@ -2356,12 +2396,14 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
if (time_before(cfs_time_current(), peer->ksnp_send_keepalive))
return 0;
- /* retry 10 secs later, so we wouldn't put pressure
- * on this peer if we failed to send keepalive this time */
+ /*
+ * retry 10 secs later, so we don't put pressure
+ * on this peer if we fail to send a keepalive this time
+ */
peer->ksnp_send_keepalive = cfs_time_shift(10);
conn = ksocknal_find_conn_locked(peer, NULL, 1);
- if (conn != NULL) {
+ if (conn) {
sched = conn->ksnc_scheduler;
spin_lock_bh(&sched->kss_lock);
@@ -2378,12 +2420,12 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
/* cookie = 1 is reserved for keepalive PING */
tx = ksocknal_alloc_tx_noop(1, 1);
- if (tx == NULL) {
+ if (!tx) {
read_lock(&ksocknal_data.ksnd_global_lock);
return -ENOMEM;
}
- if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
+ if (!ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id)) {
read_lock(&ksocknal_data.ksnd_global_lock);
return 1;
}
@@ -2395,7 +2437,7 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
}
static void
-ksocknal_check_peer_timeouts (int idx)
+ksocknal_check_peer_timeouts(int idx)
{
struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
ksock_peer_t *peer;
@@ -2403,9 +2445,11 @@ ksocknal_check_peer_timeouts (int idx)
ksock_tx_t *tx;
again:
- /* NB. We expect to have a look at all the peers and not find any
+ /*
+ * NB. We expect to have a look at all the peers and not find any
* connections to time out, so we just use a shared lock while we
- * take a look... */
+ * take a look...
+ */
read_lock(&ksocknal_data.ksnd_global_lock);
list_for_each_entry(peer, peers, ksnp_list) {
@@ -2413,35 +2457,37 @@ ksocknal_check_peer_timeouts (int idx)
int resid = 0;
int n = 0;
- if (ksocknal_send_keepalive_locked(peer) != 0) {
+ if (ksocknal_send_keepalive_locked(peer)) {
read_unlock(&ksocknal_data.ksnd_global_lock);
goto again;
}
- conn = ksocknal_find_timed_out_conn (peer);
+ conn = ksocknal_find_timed_out_conn(peer);
- if (conn != NULL) {
+ if (conn) {
read_unlock(&ksocknal_data.ksnd_global_lock);
- ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+ ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
- /* NB we won't find this one again, but we can't
+ /*
+ * NB we won't find this one again, but we can't
* just proceed with the next peer, since we dropped
- * ksnd_global_lock and it might be dead already! */
+ * ksnd_global_lock and it might be dead already!
+ */
ksocknal_conn_decref(conn);
goto again;
}
- /* we can't process stale txs right here because we're
- * holding only shared lock */
- if (!list_empty (&peer->ksnp_tx_queue)) {
- ksock_tx_t *tx =
- list_entry (peer->ksnp_tx_queue.next,
- ksock_tx_t, tx_list);
+ /*
+ * we can't process stale txs right here because we're
+ * holding only shared lock
+ */
+ if (!list_empty(&peer->ksnp_tx_queue)) {
+ ksock_tx_t *tx = list_entry(peer->ksnp_tx_queue.next,
+ ksock_tx_t, tx_list);
if (cfs_time_aftereq(cfs_time_current(),
tx->tx_deadline)) {
-
ksocknal_peer_addref(peer);
read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -2466,13 +2512,13 @@ ksocknal_check_peer_timeouts (int idx)
n++;
}
- if (n == 0) {
+ if (!n) {
spin_unlock(&peer->ksnp_lock);
continue;
}
tx = list_entry(peer->ksnp_zc_req_list.next,
- ksock_tx_t, tx_zc_list);
+ ksock_tx_t, tx_zc_list);
deadline = tx->tx_deadline;
resid = tx->tx_resid;
conn = tx->tx_conn;
@@ -2486,7 +2532,7 @@ ksocknal_check_peer_timeouts (int idx)
cfs_duration_sec(cfs_time_current() - deadline),
resid, conn->ksnc_sock->sk->sk_wmem_queued);
- ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
+ ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
ksocknal_conn_decref(conn);
goto again;
}
@@ -2495,7 +2541,7 @@ ksocknal_check_peer_timeouts (int idx)
}
int
-ksocknal_reaper (void *arg)
+ksocknal_reaper(void *arg)
{
wait_queue_t wait;
ksock_conn_t *conn;
@@ -2515,12 +2561,10 @@ ksocknal_reaper (void *arg)
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
-
- if (!list_empty (&ksocknal_data.ksnd_deathrow_conns)) {
- conn = list_entry (ksocknal_data. \
- ksnd_deathrow_conns.next,
- ksock_conn_t, ksnc_list);
- list_del (&conn->ksnc_list);
+ if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
+ conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
+ ksock_conn_t, ksnc_list);
+ list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -2531,10 +2575,10 @@ ksocknal_reaper (void *arg)
continue;
}
- if (!list_empty (&ksocknal_data.ksnd_zombie_conns)) {
- conn = list_entry (ksocknal_data.ksnd_zombie_conns.\
- next, ksock_conn_t, ksnc_list);
- list_del (&conn->ksnc_list);
+ if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
+ conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
+ ksock_conn_t, ksnc_list);
+ list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -2544,9 +2588,9 @@ ksocknal_reaper (void *arg)
continue;
}
- if (!list_empty (&ksocknal_data.ksnd_enomem_conns)) {
+ if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
list_add(&enomem_conns,
- &ksocknal_data.ksnd_enomem_conns);
+ &ksocknal_data.ksnd_enomem_conns);
list_del_init(&ksocknal_data.ksnd_enomem_conns);
}
@@ -2554,10 +2598,10 @@ ksocknal_reaper (void *arg)
/* reschedule all the connections that stalled with ENOMEM... */
nenomem_conns = 0;
- while (!list_empty (&enomem_conns)) {
- conn = list_entry (enomem_conns.next,
- ksock_conn_t, ksnc_tx_list);
- list_del (&conn->ksnc_tx_list);
+ while (!list_empty(&enomem_conns)) {
+ conn = list_entry(enomem_conns.next, ksock_conn_t,
+ ksnc_tx_list);
+ list_del(&conn->ksnc_tx_list);
sched = conn->ksnc_scheduler;
@@ -2566,7 +2610,7 @@ ksocknal_reaper (void *arg)
LASSERT(conn->ksnc_tx_scheduled);
conn->ksnc_tx_ready = 1;
list_add_tail(&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
+ &sched->kss_tx_conns);
wake_up(&sched->kss_waitq);
spin_unlock_bh(&sched->kss_lock);
@@ -2580,21 +2624,22 @@ ksocknal_reaper (void *arg)
const int p = 1;
int chunk = ksocknal_data.ksnd_peer_hash_size;
- /* Time to check for timeouts on a few more peers: I do
+ /*
+ * Time to check for timeouts on a few more peers: I do
* checks every 'p' seconds on a proportion of the peer
* table and I need to check every connection 'n' times
* within a timeout interval, to ensure I detect a
* timeout on any connection within (n+1)/n times the
- * timeout interval. */
-
+ * timeout interval.
+ */
if (*ksocknal_tunables.ksnd_timeout > n * p)
chunk = (chunk * n * p) /
*ksocknal_tunables.ksnd_timeout;
- if (chunk == 0)
+ if (!chunk)
chunk = 1;
for (i = 0; i < chunk; i++) {
- ksocknal_check_peer_timeouts (peer_index);
+ ksocknal_check_peer_timeouts(peer_index);
peer_index = (peer_index + 1) %
ksocknal_data.ksnd_peer_hash_size;
}
@@ -2602,25 +2647,27 @@ ksocknal_reaper (void *arg)
deadline = cfs_time_add(deadline, cfs_time_seconds(p));
}
- if (nenomem_conns != 0) {
- /* Reduce my timeout if I rescheduled ENOMEM conns.
+ if (nenomem_conns) {
+ /*
+ * Reduce my timeout if I rescheduled ENOMEM conns.
* This also prevents me getting woken immediately
- * if any go back on my enomem list. */
+ * if any go back on my enomem list.
+ */
timeout = SOCKNAL_ENOMEM_RETRY;
}
ksocknal_data.ksnd_reaper_waketime =
cfs_time_add(cfs_time_current(), timeout);
- set_current_state (TASK_INTERRUPTIBLE);
- add_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
if (!ksocknal_data.ksnd_shuttingdown &&
- list_empty (&ksocknal_data.ksnd_deathrow_conns) &&
- list_empty (&ksocknal_data.ksnd_zombie_conns))
+ list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
+ list_empty(&ksocknal_data.ksnd_zombie_conns))
schedule_timeout(timeout);
- set_current_state (TASK_RUNNING);
- remove_wait_queue (&ksocknal_data.ksnd_reaper_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
}
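
The reaper's proportional scan deserves a worked example: with p seconds between passes and n desired checks per timeout interval, scanning `hash_size * n * p / timeout` buckets per pass visits every peer roughly n times per timeout. A standalone sketch with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        int hash_size = 256;  /* peer hash buckets */
        int n = 3;            /* checks wanted per timeout interval */
        int p = 1;            /* seconds between scan passes */
        int timeout = 50;     /* configured timeout, seconds */
        int chunk = hash_size;

        if (timeout > n * p)
            chunk = (chunk * n * p) / timeout;
        if (!chunk)
            chunk = 1;

        /* 256 * 3 * 1 / 50 = 15 buckets per pass: a full sweep takes
         * ~17 passes, so each peer is seen roughly every 17s, about
         * 3 times per 50s timeout. */
        printf("scan %d of %d buckets per pass\n", chunk, hash_size);
        return 0;
    }
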
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index cf8e43bd3..d4ce06d0a 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -45,13 +45,13 @@ ksocknal_lib_get_conn_addrs(ksock_conn_t *conn)
/* Didn't need the {get,put}connsock dance to deref ksnc_sock... */
LASSERT(!conn->ksnc_closing);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d getting sock peer IP\n", rc);
return rc;
}
rc = lnet_sock_getaddr(conn->ksnc_sock, 0, &conn->ksnc_myipaddr, NULL);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d getting sock local IP\n", rc);
return rc;
}
@@ -67,9 +67,11 @@ ksocknal_lib_zc_capable(ksock_conn_t *conn)
if (conn->ksnc_proto == &ksocknal_protocol_v1x)
return 0;
- /* ZC if the socket supports scatter/gather and doesn't need software
- * checksums */
- return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_CSUM_MASK) != 0);
+ /*
+ * ZC if the socket supports scatter/gather and doesn't need software
+ * checksums
+ */
+ return ((caps & NETIF_F_SG) && (caps & NETIF_F_CSUM_MASK));
}
int
@@ -82,12 +84,13 @@ ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */
conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */
tx->tx_nob == tx->tx_resid && /* first sending */
- tx->tx_msg.ksm_csum == 0) /* not checksummed */
+ !tx->tx_msg.ksm_csum) /* not checksummed */
ksocknal_lib_csum_tx(tx);
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone. */
-
+ /*
+ * NB we can't trust socket ops to either consume our iovs
+ * or leave them alone.
+ */
{
#if SOCKNAL_SINGLE_FRAG_TX
struct kvec scratch;
@@ -123,11 +126,13 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
int nob;
/* Not NOOP message */
- LASSERT(tx->tx_lnetmsg != NULL);
+ LASSERT(tx->tx_lnetmsg);
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone. */
- if (tx->tx_msg.ksm_zc_cookies[0] != 0) {
+ /*
+ * NB we can't trust socket ops to either consume our iovs
+ * or leave them alone.
+ */
+ if (tx->tx_msg.ksm_zc_cookies[0]) {
/* Zero copy is enabled */
struct sock *sk = sock->sk;
struct page *page = kiov->kiov_page;
@@ -136,13 +141,13 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
int msgflg = MSG_DONTWAIT;
CDEBUG(D_NET, "page %p + offset %x for %d\n",
- page, offset, kiov->kiov_len);
+ page, offset, kiov->kiov_len);
if (!list_empty(&conn->ksnc_tx_queue) ||
fragsize < tx->tx_resid)
msgflg |= MSG_MORE;
- if (sk->sk_prot->sendpage != NULL) {
+ if (sk->sk_prot->sendpage) {
rc = sk->sk_prot->sendpage(sk, page,
offset, fragsize, msgflg);
} else {
@@ -187,13 +192,14 @@ ksocknal_lib_eager_ack(ksock_conn_t *conn)
int opt = 1;
struct socket *sock = conn->ksnc_sock;
- /* Remind the socket to ACK eagerly. If I don't, the socket might
+ /*
+ * Remind the socket to ACK eagerly. If I don't, the socket might
* think I'm about to send something it could piggy-back the ACK
* on, introducing delay in completing zero-copy sends in my
- * peer. */
-
- kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
- (char *)&opt, sizeof(opt));
+ * peer.
+ */
+ kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK, (char *)&opt,
+ sizeof(opt));
}
int
@@ -218,8 +224,10 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn)
int sum;
__u32 saved_csum;
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone. */
+ /*
+ * NB we can't trust socket ops to either consume our iovs
+ * or leave them alone.
+ */
LASSERT(niov > 0);
for (nob = i = 0; i < niov; i++) {
@@ -228,8 +236,8 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn)
}
LASSERT(nob <= conn->ksnc_rx_nob_wanted);
- rc = kernel_recvmsg(conn->ksnc_sock, &msg,
- scratchiov, niov, nob, MSG_DONTWAIT);
+ rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, niov, nob,
+ MSG_DONTWAIT);
saved_csum = 0;
if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
@@ -237,7 +245,7 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn)
conn->ksnc_msg.ksm_csum = 0;
}
- if (saved_csum != 0) {
+ if (saved_csum) {
/* accumulate checksum */
for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
LASSERT(i < niov);
@@ -258,7 +266,7 @@ ksocknal_lib_recv_iov(ksock_conn_t *conn)
static void
ksocknal_lib_kiov_vunmap(void *addr)
{
- if (addr == NULL)
+ if (!addr)
return;
vunmap(addr);
@@ -272,7 +280,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
int nob;
int i;
- if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL)
+ if (!*ksocknal_tunables.ksnd_zc_recv || !pages)
return NULL;
LASSERT(niov <= LNET_MAX_IOV);
@@ -282,8 +290,8 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
return NULL;
for (nob = i = 0; i < niov; i++) {
- if ((kiov[i].kiov_offset != 0 && i > 0) ||
- (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
+ if ((kiov[i].kiov_offset && i > 0) ||
+ (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1))
return NULL;
pages[i] = kiov[i].kiov_page;
@@ -291,7 +299,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
}
addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
- if (addr == NULL)
+ if (!addr)
return NULL;
iov->iov_base = addr + kiov[0].kiov_offset;
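
ksocknal_lib_kiov_vmap() stitches physically scattered pages into one virtually contiguous buffer so the whole payload can be received through a single iov; the alignment check above is what makes that legal (interior fragments must cover whole pages). A kernel-context sketch of the map/unmap pair; the helper names are invented:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Map 'npages' scattered pages contiguously; 'first_off' is the
     * payload offset within the first page. Caller must later undo
     * this with uncoalesce(). */
    static void *coalesce_pages(struct page **pages, int npages,
                                unsigned int first_off)
    {
        void *addr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);

        return addr ? addr + first_off : NULL;
    }

    static void uncoalesce(void *payload, unsigned int first_off)
    {
        if (payload)
            vunmap(payload - first_off);  /* back to the vmap base */
    }
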
@@ -329,10 +337,12 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn)
int fragnob;
int n;
- /* NB we can't trust socket ops to either consume our iovs
- * or leave them alone. */
+ /*
+ * NB we can't trust socket ops to either consume our iovs
+ * or leave them alone.
+ */
addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
- if (addr != NULL) {
+ if (addr) {
nob = scratchiov[0].iov_len;
n = 1;
@@ -347,17 +357,19 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn)
LASSERT(nob <= conn->ksnc_rx_nob_wanted);
- rc = kernel_recvmsg(conn->ksnc_sock, &msg,
- (struct kvec *)scratchiov, n, nob, MSG_DONTWAIT);
+ rc = kernel_recvmsg(conn->ksnc_sock, &msg, (struct kvec *)scratchiov,
+ n, nob, MSG_DONTWAIT);
- if (conn->ksnc_msg.ksm_csum != 0) {
+ if (conn->ksnc_msg.ksm_csum) {
for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
LASSERT(i < niov);
- /* Dang! have to kmap again because I have nowhere to
+ /*
+ * Dang! have to kmap again because I have nowhere to
* stash the mapped address. But by doing it while the
* page is still mapped, the kernel just bumps the map
- * count and returns me the address it stashed. */
+ * count and returns me the address it stashed.
+ */
base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
fragnob = kiov[i].kiov_len;
if (fragnob > sum)
@@ -370,7 +382,7 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn)
}
}
- if (addr != NULL) {
+ if (addr) {
ksocknal_lib_kiov_vunmap(addr);
} else {
for (i = 0; i < niov; i++)
@@ -388,7 +400,7 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx)
void *base;
LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg);
- LASSERT(tx->tx_conn != NULL);
+ LASSERT(tx->tx_conn);
LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x);
tx->tx_msg.ksm_csum = 0;
@@ -396,7 +408,7 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx)
csum = ksocknal_csum(~0, tx->tx_iov[0].iov_base,
tx->tx_iov[0].iov_len);
- if (tx->tx_kiov != NULL) {
+ if (tx->tx_kiov) {
for (i = 0; i < tx->tx_nkiov; i++) {
base = kmap(tx->tx_kiov[i].kiov_page) +
tx->tx_kiov[i].kiov_offset;
@@ -427,22 +439,22 @@ ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *
int rc;
rc = ksocknal_connsock_addref(conn);
- if (rc != 0) {
+ if (rc) {
LASSERT(conn->ksnc_closing);
*txmem = *rxmem = *nagle = 0;
return -ESHUTDOWN;
}
rc = lnet_sock_getbuf(sock, txmem, rxmem);
- if (rc == 0) {
+ if (!rc) {
len = sizeof(*nagle);
rc = kernel_getsockopt(sock, SOL_TCP, TCP_NODELAY,
- (char *)nagle, &len);
+ (char *)nagle, &len);
}
ksocknal_connsock_decref(conn);
- if (rc == 0)
+ if (!rc)
*nagle = !*nagle;
else
*txmem = *rxmem = *nagle = 0;
@@ -463,23 +475,24 @@ ksocknal_lib_setup_sock(struct socket *sock)
sock->sk->sk_allocation = GFP_NOFS;
- /* Ensure this socket aborts active sends immediately when we close
- * it. */
-
+ /*
+ * Ensure this socket aborts active sends immediately when we close
+ * it.
+ */
linger.l_onoff = 0;
linger.l_linger = 0;
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
- (char *)&linger, sizeof(linger));
- if (rc != 0) {
+ rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, (char *)&linger,
+ sizeof(linger));
+ if (rc) {
CERROR("Can't set SO_LINGER: %d\n", rc);
return rc;
}
option = -1;
- rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2,
- (char *)&option, sizeof(option));
- if (rc != 0) {
+ rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2, (char *)&option,
+ sizeof(option));
+ if (rc) {
CERROR("Can't set SO_LINGER2: %d\n", rc);
return rc;
}
@@ -488,8 +501,8 @@ ksocknal_lib_setup_sock(struct socket *sock)
option = 1;
rc = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
- (char *)&option, sizeof(option));
- if (rc != 0) {
+ (char *)&option, sizeof(option));
+ if (rc) {
CERROR("Can't disable nagle: %d\n", rc);
return rc;
}
@@ -497,10 +510,10 @@ ksocknal_lib_setup_sock(struct socket *sock)
rc = lnet_sock_setbuf(sock, *ksocknal_tunables.ksnd_tx_buffer_size,
*ksocknal_tunables.ksnd_rx_buffer_size);
- if (rc != 0) {
+ if (rc) {
CERROR("Can't set buffer tx %d, rx %d buffers: %d\n",
- *ksocknal_tunables.ksnd_tx_buffer_size,
- *ksocknal_tunables.ksnd_rx_buffer_size, rc);
+ *ksocknal_tunables.ksnd_tx_buffer_size,
+ *ksocknal_tunables.ksnd_rx_buffer_size, rc);
return rc;
}
@@ -514,9 +527,9 @@ ksocknal_lib_setup_sock(struct socket *sock)
do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0);
option = (do_keepalive ? 1 : 0);
- rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
- (char *)&option, sizeof(option));
- if (rc != 0) {
+ rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *)&option,
+ sizeof(option));
+ if (rc) {
CERROR("Can't set SO_KEEPALIVE: %d\n", rc);
return rc;
}
@@ -524,23 +537,23 @@ ksocknal_lib_setup_sock(struct socket *sock)
if (!do_keepalive)
return 0;
- rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
- (char *)&keep_idle, sizeof(keep_idle));
- if (rc != 0) {
+ rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, (char *)&keep_idle,
+ sizeof(keep_idle));
+ if (rc) {
CERROR("Can't set TCP_KEEPIDLE: %d\n", rc);
return rc;
}
rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
- (char *)&keep_intvl, sizeof(keep_intvl));
- if (rc != 0) {
+ (char *)&keep_intvl, sizeof(keep_intvl));
+ if (rc) {
CERROR("Can't set TCP_KEEPINTVL: %d\n", rc);
return rc;
}
- rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
- (char *)&keep_count, sizeof(keep_count));
- if (rc != 0) {
+ rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, (char *)&keep_count,
+ sizeof(keep_count));
+ if (rc) {
CERROR("Can't set TCP_KEEPCNT: %d\n", rc);
return rc;
}
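
The keepalive block above is the in-kernel spelling of a familiar socket recipe; the userspace analogue uses plain setsockopt() with the same TCP options. A sketch with arbitrary idle/interval/count values:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <stdio.h>
    #include <sys/socket.h>

    /* Userspace analogue of the kernel_setsockopt() sequence above;
     * the numeric values are illustrative, not socklnd's defaults. */
    static int tune_keepalive(int fd)
    {
        int on = 1, idle = 30, intvl = 5, cnt = 4;

        if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) ||
            setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)) ||
            setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)) ||
            setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)))
            return -1;
        return 0;
    }

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0 || tune_keepalive(fd))
            perror("keepalive");
        return 0;
    }
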
@@ -558,7 +571,7 @@ ksocknal_lib_push_conn(ksock_conn_t *conn)
int rc;
rc = ksocknal_connsock_addref(conn);
- if (rc != 0) /* being shut down */
+ if (rc) /* being shut down */
return;
sk = conn->ksnc_sock->sk;
@@ -570,8 +583,8 @@ ksocknal_lib_push_conn(ksock_conn_t *conn)
release_sock(sk);
rc = kernel_setsockopt(conn->ksnc_sock, SOL_TCP, TCP_NODELAY,
- (char *)&val, sizeof(val));
- LASSERT(rc == 0);
+ (char *)&val, sizeof(val));
+ LASSERT(!rc);
lock_sock(sk);
tp->nonagle = nonagle;
@@ -593,11 +606,12 @@ ksocknal_data_ready(struct sock *sk)
read_lock(&ksocknal_data.ksnd_global_lock);
conn = sk->sk_user_data;
- if (conn == NULL) { /* raced with ksocknal_terminate_conn */
+ if (!conn) { /* raced with ksocknal_terminate_conn */
LASSERT(sk->sk_data_ready != &ksocknal_data_ready);
sk->sk_data_ready(sk);
- } else
+ } else {
ksocknal_read_callback(conn);
+ }
read_unlock(&ksocknal_data.ksnd_global_lock);
}
@@ -619,14 +633,14 @@ ksocknal_write_space(struct sock *sk)
CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n",
sk, wspace, min_wpace, conn,
- (conn == NULL) ? "" : (conn->ksnc_tx_ready ?
+ !conn ? "" : (conn->ksnc_tx_ready ?
" ready" : " blocked"),
- (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ?
+ !conn ? "" : (conn->ksnc_tx_scheduled ?
" scheduled" : " idle"),
- (conn == NULL) ? "" : (list_empty(&conn->ksnc_tx_queue) ?
+ !conn ? "" : (list_empty(&conn->ksnc_tx_queue) ?
" empty" : " queued"));
- if (conn == NULL) { /* raced with ksocknal_terminate_conn */
+ if (!conn) { /* raced with ksocknal_terminate_conn */
LASSERT(sk->sk_write_space != &ksocknal_write_space);
sk->sk_write_space(sk);
@@ -637,10 +651,11 @@ ksocknal_write_space(struct sock *sk)
if (wspace >= min_wpace) { /* got enough space */
ksocknal_write_callback(conn);
- /* Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
+ /*
+ * Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the
* ENOMEM check in ksocknal_transmit is race-free (think about
- * it). */
-
+ * it).
+ */
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}
@@ -666,15 +681,19 @@ ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
void
ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
{
- /* Remove conn's network callbacks.
+ /*
+ * Remove conn's network callbacks.
* NB I _have_ to restore the callback, rather than storing a noop,
- * since the socket could survive past this module being unloaded!! */
+ * since the socket could survive past this module being unloaded!!
+ */
sock->sk->sk_data_ready = conn->ksnc_saved_data_ready;
sock->sk->sk_write_space = conn->ksnc_saved_write_space;
- /* A callback could be in progress already; they hold a read lock
+ /*
+ * A callback could be in progress already; they hold a read lock
* on ksnd_global_lock (to serialise with me) and NOOP if
- * sk_user_data is NULL. */
+ * sk_user_data is NULL.
+ */
sock->sk->sk_user_data = NULL;
return;
@@ -691,14 +710,16 @@ ksocknal_lib_memory_pressure(ksock_conn_t *conn)
if (!test_bit(SOCK_NOSPACE, &conn->ksnc_sock->flags) &&
!conn->ksnc_tx_ready) {
- /* SOCK_NOSPACE is set when the socket fills
+ /*
+ * SOCK_NOSPACE is set when the socket fills
* and cleared in the write_space callback
* (which also sets ksnc_tx_ready). If
* SOCK_NOSPACE and ksnc_tx_ready are BOTH
* zero, I didn't fill the socket and
* write_space won't reschedule me, so I
* return -ENOMEM to get my caller to retry
- * after a timeout */
+ * after a timeout
+ */
rc = -ENOMEM;
}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
index fdb2b23e2..6329cbe66 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
@@ -14,9 +14,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "socklnd.h"
@@ -41,8 +38,10 @@ static int peer_timeout = 180;
module_param(peer_timeout, int, 0444);
MODULE_PARM_DESC(peer_timeout, "Seconds without aliveness news to declare peer dead (<=0 to disable)");
-/* Number of daemons in each thread pool which is percpt,
- * we will estimate reasonable value based on CPUs if it's not set. */
+/*
+ * Number of daemons in each thread pool (pools are per-CPT);
+ * we will estimate a reasonable value based on CPUs if it's not set.
+ */
static unsigned int nscheds;
module_param(nscheds, int, 0444);
MODULE_PARM_DESC(nscheds, "# scheduler daemons in each pool while starting");
@@ -72,7 +71,7 @@ static int typed_conns = 1;
module_param(typed_conns, int, 0444);
MODULE_PARM_DESC(typed_conns, "use different sockets for bulk");
-static int min_bulk = 1<<10;
+static int min_bulk = 1 << 10;
module_param(min_bulk, int, 0644);
MODULE_PARM_DESC(min_bulk, "smallest 'large' message");
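
Every tunable in this file follows the same three-line pattern: a static variable carrying the default, module_param() with a sysfs mode, and MODULE_PARM_DESC(). A hedged sketch of a minimal module exposing one such knob:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static int min_bulk = 1 << 10;
    module_param(min_bulk, int, 0644);  /* root-writable via sysfs */
    MODULE_PARM_DESC(min_bulk, "smallest 'large' message");

    static int __init demo_init(void)
    {
        pr_info("min_bulk=%d\n", min_bulk);
        return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
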
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index 986bce4c9..32cc31e4c 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -19,9 +19,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with Portals; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "socklnd.h"
@@ -56,15 +53,14 @@ ksocknal_next_tx_carrier(ksock_conn_t *conn)
/* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
LASSERT(!list_empty(&conn->ksnc_tx_queue));
- LASSERT(tx != NULL);
+ LASSERT(tx);
/* Next TX that can carry ZC-ACK or LNet message */
if (tx->tx_list.next == &conn->ksnc_tx_queue) {
/* no more packets queued */
conn->ksnc_tx_carrier = NULL;
} else {
- conn->ksnc_tx_carrier = list_entry(tx->tx_list.next,
- ksock_tx_t, tx_list);
+ conn->ksnc_tx_carrier = list_next_entry(tx, tx_list);
LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type);
}
}
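
`list_next_entry(pos, member)`, adopted above, is shorthand for `list_entry(pos->member.next, typeof(*pos), member)`; the caller must still check for the list head before advancing. A kernel-context sketch of the carrier-advance idiom; the `struct tx` here is invented:

    #include <linux/list.h>

    struct tx {
        struct list_head tx_list;
        int id;
    };

    static struct tx *advance_carrier(struct list_head *queue,
                                      struct tx *cur)
    {
        /* at the tail? then there is no next message */
        if (cur->tx_list.next == queue)
            return NULL;
        /* same as list_entry(cur->tx_list.next, struct tx, tx_list) */
        return list_next_entry(cur, tx_list);
    }
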
@@ -75,8 +71,8 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
{
ksock_tx_t *tx = conn->ksnc_tx_carrier;
- LASSERT(tx_ack == NULL ||
- tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
+ LASSERT(!tx_ack ||
+ tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
/*
* Enqueue or piggyback tx_ack / cookie
@@ -85,10 +81,10 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
* . There is tx can piggyback cookie of tx_ack (or cookie),
* piggyback the cookie and return the tx.
*/
- if (tx == NULL) {
- if (tx_ack != NULL) {
+ if (!tx) {
+ if (tx_ack) {
list_add_tail(&tx_ack->tx_list,
- &conn->ksnc_tx_queue);
+ &conn->ksnc_tx_queue);
conn->ksnc_tx_carrier = tx_ack;
}
return 0;
@@ -96,16 +92,16 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) {
/* tx is noop zc-ack, can't piggyback zc-ack cookie */
- if (tx_ack != NULL)
+ if (tx_ack)
list_add_tail(&tx_ack->tx_list,
- &conn->ksnc_tx_queue);
+ &conn->ksnc_tx_queue);
return 0;
}
LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET);
- LASSERT(tx->tx_msg.ksm_zc_cookies[1] == 0);
+ LASSERT(!tx->tx_msg.ksm_zc_cookies[1]);
- if (tx_ack != NULL)
+ if (tx_ack)
cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
/* piggyback the zc-ack cookie */
@@ -128,7 +124,7 @@ ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg)
 * . If there is a NOOP on the connection, piggyback the cookie,
 *   replace the NOOP tx, and return the NOOP tx.
*/
- if (tx == NULL) { /* nothing on queue */
+ if (!tx) { /* nothing on queue */
list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
conn->ksnc_tx_carrier = tx_msg;
return NULL;
@@ -162,22 +158,22 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie);
/* non-blocking ZC-ACK (to router) */
- LASSERT(tx_ack == NULL ||
- tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
+ LASSERT(!tx_ack ||
+ tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
tx = conn->ksnc_tx_carrier;
- if (tx == NULL) {
- if (tx_ack != NULL) {
+ if (!tx) {
+ if (tx_ack) {
list_add_tail(&tx_ack->tx_list,
- &conn->ksnc_tx_queue);
+ &conn->ksnc_tx_queue);
conn->ksnc_tx_carrier = tx_ack;
}
return 0;
}
- /* conn->ksnc_tx_carrier != NULL */
+	/* conn->ksnc_tx_carrier is set */
- if (tx_ack != NULL)
+ if (tx_ack)
cookie = tx_ack->tx_msg.ksm_zc_cookies[1];
if (cookie == SOCKNAL_KEEPALIVE_PING) /* ignore keepalive PING */
@@ -185,7 +181,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) {
/* replace the keepalive PING with a real ACK */
- LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0);
+ LASSERT(!tx->tx_msg.ksm_zc_cookies[0]);
tx->tx_msg.ksm_zc_cookies[1] = cookie;
return 1;
}
@@ -197,7 +193,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
return 1; /* XXX return error in the future */
}
- if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
+ if (!tx->tx_msg.ksm_zc_cookies[0]) {
		/* a NOOP tx holds only one ZC-ACK cookie, so it can carry at least one more */
if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
@@ -233,7 +229,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
tmp = tx->tx_msg.ksm_zc_cookies[0];
}
- if (tmp != 0) {
+ if (tmp) {
/* range of cookies */
tx->tx_msg.ksm_zc_cookies[0] = tmp - 1;
tx->tx_msg.ksm_zc_cookies[1] = tmp + 1;
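The two cookie slots of a NOOP double as a range encoding, so one ACK can cover many consecutive zero-copy cookies. A rough standalone model of absorbing a new cookie into an inclusive range (the merge policy shown is an assumption; the exact rules are in the function above):

#include <stdio.h>

/* Hedged sketch: grow an inclusive cookie range [lo, hi] when the new
 * cookie is adjacent or already covered; a gap needs its own carrier. */
static int range_absorb(unsigned long long *lo, unsigned long long *hi,
			unsigned long long cookie)
{
	if (cookie + 1 == *lo) {
		*lo = cookie;
		return 1;
	}
	if (cookie == *hi + 1) {
		*hi = cookie;
		return 1;
	}
	return cookie >= *lo && cookie <= *hi;
}

int main(void)
{
	unsigned long long lo = 5, hi = 5;

	range_absorb(&lo, &hi, 6);	/* extends to [5, 6] */
	range_absorb(&lo, &hi, 4);	/* extends to [4, 6] */
	printf("range [%llu, %llu]\n", lo, hi);
	return 0;
}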
@@ -261,7 +257,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
}
/* failed to piggyback ZC-ACK */
- if (tx_ack != NULL) {
+ if (tx_ack) {
list_add_tail(&tx_ack->tx_list, &conn->ksnc_tx_queue);
/* the next tx can piggyback at least 1 ACK */
ksocknal_next_tx_carrier(conn);
@@ -280,7 +276,7 @@ ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
return SOCKNAL_MATCH_YES;
#endif
- if (tx == NULL || tx->tx_lnetmsg == NULL) {
+ if (!tx || !tx->tx_lnetmsg) {
/* noop packet */
nob = offsetof(ksock_msg_t, ksm_u);
} else {
@@ -319,7 +315,7 @@ ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
{
int nob;
- if (tx == NULL || tx->tx_lnetmsg == NULL)
+ if (!tx || !tx->tx_lnetmsg)
nob = offsetof(ksock_msg_t, ksm_u);
else
nob = tx->tx_lnetmsg->msg_len + sizeof(ksock_msg_t);
@@ -334,7 +330,7 @@ ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
case SOCKLND_CONN_ACK:
if (nonblk)
return SOCKNAL_MATCH_YES;
- else if (tx == NULL || tx->tx_lnetmsg == NULL)
+ else if (!tx || !tx->tx_lnetmsg)
return SOCKNAL_MATCH_MAY;
else
return SOCKNAL_MATCH_NO;
@@ -369,10 +365,10 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
read_lock(&ksocknal_data.ksnd_global_lock);
conn = ksocknal_find_conn_locked(peer, NULL, !!remote);
- if (conn != NULL) {
+ if (conn) {
ksock_sched_t *sched = conn->ksnc_scheduler;
- LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
+ LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
spin_lock_bh(&sched->kss_lock);
@@ -390,11 +386,11 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
/* ACK connection is not ready, or can't piggyback the ACK */
tx = ksocknal_alloc_tx_noop(cookie, !!remote);
- if (tx == NULL)
+ if (!tx)
return -ENOMEM;
rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id);
- if (rc == 0)
+ if (!rc)
return 0;
ksocknal_free_tx(tx);
@@ -407,11 +403,12 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
{
ksock_peer_t *peer = conn->ksnc_peer;
ksock_tx_t *tx;
+ ksock_tx_t *temp;
ksock_tx_t *tmp;
LIST_HEAD(zlist);
int count;
- if (cookie1 == 0)
+ if (!cookie1)
cookie1 = cookie2;
count = (cookie1 > cookie2) ? 2 : (cookie2 - cookie1 + 1);
@@ -424,8 +421,8 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
spin_lock(&peer->ksnp_lock);
- list_for_each_entry_safe(tx, tmp,
- &peer->ksnp_zc_req_list, tx_zc_list) {
+ list_for_each_entry_safe(tx, tmp, &peer->ksnp_zc_req_list,
+ tx_zc_list) {
__u64 c = tx->tx_msg.ksm_zc_cookies[0];
if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
@@ -433,20 +430,19 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
list_del(&tx->tx_zc_list);
list_add(&tx->tx_zc_list, &zlist);
- if (--count == 0)
+ if (!--count)
break;
}
}
spin_unlock(&peer->ksnp_lock);
- while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list);
+ list_for_each_entry_safe(tx, temp, &zlist, tx_zc_list) {
list_del(&tx->tx_zc_list);
ksocknal_tx_decref(tx);
}
- return count == 0 ? 0 : -EPROTO;
+ return !count ? 0 : -EPROTO;
}
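On the receive side the pair decodes as follows: cookie1 > cookie2 names exactly those two cookies, otherwise the pair covers the inclusive range, which is why count is computed as cookie2 - cookie1 + 1. A worked model reusing the same predicate shape as the list walk above:

#include <stdio.h>

static int cookie_matches(unsigned long long c,
			  unsigned long long cookie1,
			  unsigned long long cookie2)
{
	/* same test as the ksnp_zc_req_list walk above */
	return c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2);
}

int main(void)
{
	/* range form: acks cookies 3..6, count = 6 - 3 + 1 */
	printf("range 3..6 covers 5: %d\n", cookie_matches(5, 3, 6));
	/* two-cookie form: cookie1 > cookie2, count = 2 */
	printf("pair (9,2) covers 5: %d\n", cookie_matches(5, 9, 2));
	return 0;
}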
static int
@@ -461,58 +457,59 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello)
CLASSERT(sizeof(lnet_magicversion_t) == offsetof(lnet_hdr_t, src_nid));
LIBCFS_ALLOC(hdr, sizeof(*hdr));
- if (hdr == NULL) {
+ if (!hdr) {
CERROR("Can't allocate lnet_hdr_t\n");
return -ENOMEM;
}
hmv = (lnet_magicversion_t *)&hdr->dest_nid;
- /* Re-organize V2.x message header to V1.x (lnet_hdr_t)
- * header and send out */
- hmv->magic = cpu_to_le32 (LNET_PROTO_TCP_MAGIC);
- hmv->version_major = cpu_to_le16 (KSOCK_PROTO_V1_MAJOR);
- hmv->version_minor = cpu_to_le16 (KSOCK_PROTO_V1_MINOR);
+ /*
+	 * Re-organize the V2.x message header into a V1.x (lnet_hdr_t)
+	 * header and send it out
+ */
+ hmv->magic = cpu_to_le32(LNET_PROTO_TCP_MAGIC);
+ hmv->version_major = cpu_to_le16(KSOCK_PROTO_V1_MAJOR);
+ hmv->version_minor = cpu_to_le16(KSOCK_PROTO_V1_MINOR);
- if (the_lnet.ln_testprotocompat != 0) {
+ if (the_lnet.ln_testprotocompat) {
/* single-shot proto check */
LNET_LOCK();
- if ((the_lnet.ln_testprotocompat & 1) != 0) {
+ if (the_lnet.ln_testprotocompat & 1) {
hmv->version_major++; /* just different! */
the_lnet.ln_testprotocompat &= ~1;
}
- if ((the_lnet.ln_testprotocompat & 2) != 0) {
+ if (the_lnet.ln_testprotocompat & 2) {
hmv->magic = LNET_PROTO_MAGIC;
the_lnet.ln_testprotocompat &= ~2;
}
LNET_UNLOCK();
}
- hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid);
- hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid);
- hdr->type = cpu_to_le32 (LNET_MSG_HELLO);
- hdr->payload_length = cpu_to_le32 (hello->kshm_nips * sizeof(__u32));
- hdr->msg.hello.type = cpu_to_le32 (hello->kshm_ctype);
- hdr->msg.hello.incarnation = cpu_to_le64 (hello->kshm_src_incarnation);
+ hdr->src_nid = cpu_to_le64(hello->kshm_src_nid);
+ hdr->src_pid = cpu_to_le32(hello->kshm_src_pid);
+ hdr->type = cpu_to_le32(LNET_MSG_HELLO);
+ hdr->payload_length = cpu_to_le32(hello->kshm_nips * sizeof(__u32));
+ hdr->msg.hello.type = cpu_to_le32(hello->kshm_ctype);
+ hdr->msg.hello.incarnation = cpu_to_le64(hello->kshm_src_incarnation);
rc = lnet_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout());
- if (rc != 0) {
+ if (rc) {
CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
rc, &conn->ksnc_ipaddr, conn->ksnc_port);
goto out;
}
- if (hello->kshm_nips == 0)
+ if (!hello->kshm_nips)
goto out;
- for (i = 0; i < (int) hello->kshm_nips; i++) {
- hello->kshm_ips[i] = __cpu_to_le32 (hello->kshm_ips[i]);
- }
+	for (i = 0; i < (int)hello->kshm_nips; i++)
+ hello->kshm_ips[i] = __cpu_to_le32(hello->kshm_ips[i]);
rc = lnet_sock_write(sock, hello->kshm_ips,
hello->kshm_nips * sizeof(__u32),
lnet_acceptor_timeout());
- if (rc != 0) {
+ if (rc) {
CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
rc, hello->kshm_nips,
&conn->ksnc_ipaddr, conn->ksnc_port);
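The ln_testprotocompat checks above are single-shot fault injection: each armed bit corrupts exactly one HELLO and is then cleared under the lock, so later handshakes proceed normally. A minimal model of the test-and-clear:

#include <stdio.h>

static unsigned int testprotocompat = 1;	/* assume fault bit 1 armed */

static int consume_fault(unsigned int bit)
{
	/* the driver does this test-and-clear under LNET_LOCK() */
	if (testprotocompat & bit) {
		testprotocompat &= ~bit;
		return 1;			/* fire exactly once */
	}
	return 0;
}

int main(void)
{
	int first = consume_fault(1);
	int second = consume_fault(1);

	printf("fault fires: %d then %d\n", first, second);
	return 0;
}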
@@ -532,10 +529,10 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello)
hello->kshm_magic = LNET_PROTO_MAGIC;
hello->kshm_version = conn->ksnc_proto->pro_version;
- if (the_lnet.ln_testprotocompat != 0) {
+ if (the_lnet.ln_testprotocompat) {
/* single-shot proto check */
LNET_LOCK();
- if ((the_lnet.ln_testprotocompat & 1) != 0) {
+ if (the_lnet.ln_testprotocompat & 1) {
hello->kshm_version++; /* just different! */
the_lnet.ln_testprotocompat &= ~1;
}
@@ -544,19 +541,19 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello)
rc = lnet_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips),
lnet_acceptor_timeout());
- if (rc != 0) {
+ if (rc) {
CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
rc, &conn->ksnc_ipaddr, conn->ksnc_port);
return rc;
}
- if (hello->kshm_nips == 0)
+ if (!hello->kshm_nips)
return 0;
rc = lnet_sock_write(sock, hello->kshm_ips,
hello->kshm_nips * sizeof(__u32),
lnet_acceptor_timeout());
- if (rc != 0) {
+ if (rc) {
CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n",
rc, hello->kshm_nips,
&conn->ksnc_ipaddr, conn->ksnc_port);
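The V2 HELLO goes out in two writes: the fixed prefix up to the kshm_ips[] array, then kshm_nips 32-bit addresses. A sketch of the framing arithmetic with simplified stand-in types (not the driver's ksock_hello_msg_t):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct hello {
	uint32_t magic, version, nips;
	uint32_t ips[16];
};

int main(void)
{
	struct hello h = { .nips = 2 };
	size_t fixed = offsetof(struct hello, ips);
	size_t payload = h.nips * sizeof(uint32_t);

	/* two writes on the wire: fixed header, then the IP list */
	printf("header %zu bytes + payload %zu bytes\n", fixed, payload);
	return 0;
}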
@@ -575,7 +572,7 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
int i;
LIBCFS_ALLOC(hdr, sizeof(*hdr));
- if (hdr == NULL) {
+ if (!hdr) {
CERROR("Can't allocate lnet_hdr_t\n");
return -ENOMEM;
}
@@ -583,15 +580,15 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
rc = lnet_sock_read(sock, &hdr->src_nid,
sizeof(*hdr) - offsetof(lnet_hdr_t, src_nid),
timeout);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d reading rest of HELLO hdr from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
+ rc, &conn->ksnc_ipaddr);
LASSERT(rc < 0 && rc != -EALREADY);
goto out;
}
/* ...and check we got what we expected */
- if (hdr->type != cpu_to_le32 (LNET_MSG_HELLO)) {
+ if (hdr->type != cpu_to_le32(LNET_MSG_HELLO)) {
CERROR("Expecting a HELLO hdr, but got type %d from %pI4h\n",
le32_to_cpu(hdr->type),
&conn->ksnc_ipaddr);
@@ -613,14 +610,14 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
goto out;
}
- if (hello->kshm_nips == 0)
+ if (!hello->kshm_nips)
goto out;
rc = lnet_sock_read(sock, hello->kshm_ips,
hello->kshm_nips * sizeof(__u32), timeout);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d reading IPs from ip %pI4h\n",
- rc, &conn->ksnc_ipaddr);
+ rc, &conn->ksnc_ipaddr);
LASSERT(rc < 0 && rc != -EALREADY);
goto out;
}
@@ -628,7 +625,7 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
for (i = 0; i < (int) hello->kshm_nips; i++) {
hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);
- if (hello->kshm_ips[i] == 0) {
+ if (!hello->kshm_ips[i]) {
CERROR("Zero IP[%d] from ip %pI4h\n",
i, &conn->ksnc_ipaddr);
rc = -EPROTO;
@@ -657,9 +654,9 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout
offsetof(ksock_hello_msg_t, kshm_ips) -
offsetof(ksock_hello_msg_t, kshm_src_nid),
timeout);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d reading HELLO from %pI4h\n",
- rc, &conn->ksnc_ipaddr);
+ rc, &conn->ksnc_ipaddr);
LASSERT(rc < 0 && rc != -EALREADY);
return rc;
}
@@ -681,14 +678,14 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout
return -EPROTO;
}
- if (hello->kshm_nips == 0)
+ if (!hello->kshm_nips)
return 0;
rc = lnet_sock_read(sock, hello->kshm_ips,
hello->kshm_nips * sizeof(__u32), timeout);
- if (rc != 0) {
+ if (rc) {
CERROR("Error %d reading IPs from ip %pI4h\n",
- rc, &conn->ksnc_ipaddr);
+ rc, &conn->ksnc_ipaddr);
LASSERT(rc < 0 && rc != -EALREADY);
return rc;
}
@@ -697,7 +694,7 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout
if (conn->ksnc_flip)
__swab32s(&hello->kshm_ips[i]);
- if (hello->kshm_ips[i] == 0) {
+ if (!hello->kshm_ips[i]) {
CERROR("Zero IP[%d] from ip %pI4h\n",
i, &conn->ksnc_ipaddr);
return -EPROTO;
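When the peer's magic arrived byte-swapped (conn->ksnc_flip), every received multi-byte field must be swabbed before use, and a zero IP is rejected as -EPROTO. A portable sketch of that fixup loop (helper names are stand-ins for the driver's __swab32s()):

#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0xff00) |
	       ((v << 8) & 0xff0000) | (v << 24);
}

/* Returns 0 on success, -1 on a zero (invalid) IP */
static int fixup_ips(uint32_t *ips, int nips, int flip)
{
	int i;

	for (i = 0; i < nips; i++) {
		if (flip)
			ips[i] = swab32(ips[i]);
		if (!ips[i])
			return -1;	/* mirrors the -EPROTO path above */
	}
	return 0;
}

int main(void)
{
	uint32_t ips[] = { 0x0100007f };	/* 127.0.0.1, byte-swapped */
	int rc = fixup_ips(ips, 1, 1);

	printf("fixup: %d, ip word 0x%08x\n", rc, ips[0]);
	return 0;
}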
@@ -712,12 +709,13 @@ ksocknal_pack_msg_v1(ksock_tx_t *tx)
{
/* V1.x has no KSOCK_MSG_NOOP */
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
- LASSERT(tx->tx_lnetmsg != NULL);
+ LASSERT(tx->tx_lnetmsg);
tx->tx_iov[0].iov_base = &tx->tx_lnetmsg->msg_hdr;
tx->tx_iov[0].iov_len = sizeof(lnet_hdr_t);
- tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t);
+ tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t);
+ tx->tx_resid = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t);
}
static void
@@ -725,17 +723,19 @@ ksocknal_pack_msg_v2(ksock_tx_t *tx)
{
tx->tx_iov[0].iov_base = &tx->tx_msg;
- if (tx->tx_lnetmsg != NULL) {
+ if (tx->tx_lnetmsg) {
LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr;
tx->tx_iov[0].iov_len = sizeof(ksock_msg_t);
- tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len;
+ tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len;
+ tx->tx_resid = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len;
} else {
LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP);
tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
- tx->tx_resid = tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
+ tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
+ tx->tx_resid = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
}
	/* Don't checksum before sending starts, because the packet can still be piggybacked with a ZC-ACK */
}
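Splitting the chained assignment makes the two fields' roles explicit: tx_nob is the fixed wire size of the message, while tx_resid starts equal to it and drains to zero as partial socket writes complete. A tiny model of that accounting:

#include <stdio.h>

struct tx {
	int tx_nob;	/* total bytes to put on the wire */
	int tx_resid;	/* bytes still unsent; starts equal to tx_nob */
};

static void tx_advance(struct tx *tx, int sent)
{
	tx->tx_resid -= sent;	/* drain as partial writes complete */
}

int main(void)
{
	struct tx tx = { 4096, 4096 };

	tx_advance(&tx, 1500);
	printf("sent %d of %d, %d left\n",
	       tx.tx_nob - tx.tx_resid, tx.tx_nob, tx.tx_resid);
	return 0;
}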
@@ -745,7 +745,8 @@ ksocknal_unpack_msg_v1(ksock_msg_t *msg)
{
msg->ksm_csum = 0;
msg->ksm_type = KSOCK_MSG_LNET;
- msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0;
+ msg->ksm_zc_cookies[0] = 0;
+ msg->ksm_zc_cookies[1] = 0;
}
static void