From e5fd91f1ef340da553f7a79da9540c3db711c937 Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Tue, 8 Sep 2015 01:01:14 -0300
Subject: Linux-libre 4.2-gnu
---
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c | 474 +++++++-------
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h | 681 ++++++++++----------
 .../staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | 484 +++++++-------
 .../lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c | 48 +-
 drivers/staging/lustre/lnet/klnds/socklnd/Makefile | 2 +-
 .../staging/lustre/lnet/klnds/socklnd/socklnd.c | 485 +++++++-------
 .../staging/lustre/lnet/klnds/socklnd/socklnd.h | 601 +++++++++--------
 .../staging/lustre/lnet/klnds/socklnd/socklnd_cb.c | 402 ++++++------
 .../lustre/lnet/klnds/socklnd/socklnd_lib-linux.c | 714 ---------------------
 .../lustre/lnet/klnds/socklnd/socklnd_lib-linux.h | 86 ---
 .../lustre/lnet/klnds/socklnd/socklnd_lib.c | 710 ++++++++++++++++++++
 .../lustre/lnet/klnds/socklnd/socklnd_modparams.c | 33 +-
 .../lustre/lnet/klnds/socklnd/socklnd_proto.c | 171 +++--
 drivers/staging/lustre/lnet/lnet/Makefile | 7 +-
 drivers/staging/lustre/lnet/lnet/acceptor.c | 91 ++-
 drivers/staging/lustre/lnet/lnet/api-ni.c | 325 ++++------
 drivers/staging/lustre/lnet/lnet/config.c | 310 ++++-----
 drivers/staging/lustre/lnet/lnet/lib-eq.c | 46 +-
 drivers/staging/lustre/lnet/lnet/lib-md.c | 32 +-
 drivers/staging/lustre/lnet/lnet/lib-me.c | 26 +-
 drivers/staging/lustre/lnet/lnet/lib-move.c | 264 ++++----
 drivers/staging/lustre/lnet/lnet/lib-msg.c | 52 +-
 drivers/staging/lustre/lnet/lnet/lib-ptl.c | 116 ++--
 drivers/staging/lustre/lnet/lnet/lib-socket.c | 594 +++++++++++++++++
 drivers/staging/lustre/lnet/lnet/lo.c | 2 +-
 drivers/staging/lustre/lnet/lnet/module.c | 28 +-
 drivers/staging/lustre/lnet/lnet/peer.c | 50 +-
 drivers/staging/lustre/lnet/lnet/router.c | 355 +++-------
 drivers/staging/lustre/lnet/lnet/router_proc.c | 210 +++---
 drivers/staging/lustre/lnet/selftest/brw_test.c | 60 +-
 drivers/staging/lustre/lnet/selftest/conctl.c | 54 +-
 drivers/staging/lustre/lnet/selftest/conrpc.c | 124 ++--
 drivers/staging/lustre/lnet/selftest/conrpc.h | 34 +-
 drivers/staging/lustre/lnet/selftest/console.c | 246 +++----
 drivers/staging/lustre/lnet/selftest/console.h | 186 +++---
 drivers/staging/lustre/lnet/selftest/framework.c | 160 ++---
 drivers/staging/lustre/lnet/selftest/module.c | 10 +-
 drivers/staging/lustre/lnet/selftest/ping_test.c | 24 +-
 drivers/staging/lustre/lnet/selftest/rpc.c | 152 ++---
 drivers/staging/lustre/lnet/selftest/rpc.h | 141 ++--
 drivers/staging/lustre/lnet/selftest/selftest.h | 311 ++++-----
 drivers/staging/lustre/lnet/selftest/timer.c | 20 +-
 drivers/staging/lustre/lnet/selftest/timer.h | 16 +-
 43 files changed, 4610 insertions(+), 4327 deletions(-)
 delete mode 100644 drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
 delete mode 100644 drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
 create mode 100644 drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
 create mode 100644 drivers/staging/lustre/lnet/lnet/lib-socket.c

diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 3bad441de..4eb24a11b 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -38,25 +38,26 @@
 * Author: Eric Barton
 */

-#include "o2iblnd.h"
 #include 
+#include 
+#include "o2iblnd.h"

 static lnd_t the_o2iblnd = {
- .lnd_type = O2IBLND,
- .lnd_startup = 
kiblnd_startup, - .lnd_shutdown = kiblnd_shutdown, - .lnd_ctl = kiblnd_ctl, - .lnd_query = kiblnd_query, - .lnd_send = kiblnd_send, - .lnd_recv = kiblnd_recv, + .lnd_type = O2IBLND, + .lnd_startup = kiblnd_startup, + .lnd_shutdown = kiblnd_shutdown, + .lnd_ctl = kiblnd_ctl, + .lnd_query = kiblnd_query, + .lnd_send = kiblnd_send, + .lnd_recv = kiblnd_recv, }; -kib_data_t kiblnd_data; +kib_data_t kiblnd_data; static __u32 kiblnd_cksum(void *ptr, int nob) { - char *c = ptr; - __u32 sum = 0; + char *c = ptr; + __u32 sum = 0; while (nob-- > 0) sum = ((sum << 1) | (sum >> 31)) + *c++; @@ -138,10 +139,10 @@ static int kiblnd_msgtype2size(int type) static int kiblnd_unpack_rd(kib_msg_t *msg, int flip) { - kib_rdma_desc_t *rd; - int nob; - int n; - int i; + kib_rdma_desc_t *rd; + int nob; + int n; + int i; LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ || msg->ibm_type == IBLND_MSG_PUT_ACK); @@ -210,10 +211,10 @@ void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version, int kiblnd_unpack_msg(kib_msg_t *msg, int nob) { const int hdr_size = offsetof(kib_msg_t, ibm_u); - __u32 msg_cksum; - __u16 version; - int msg_nob; - int flip; + __u32 msg_cksum; + __u16 version; + int msg_nob; + int flip; /* 6 bytes are enough to have received magic + version */ if (nob < 6) { @@ -320,10 +321,10 @@ int kiblnd_unpack_msg(kib_msg_t *msg, int nob) int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) { - kib_peer_t *peer; - kib_net_t *net = ni->ni_data; - int cpt = lnet_cpt_of_nid(nid); - unsigned long flags; + kib_peer_t *peer; + kib_net_t *net = ni->ni_data; + int cpt = lnet_cpt_of_nid(nid); + unsigned long flags; LASSERT(net != NULL); LASSERT(nid != LNET_NID_ANY); @@ -385,9 +386,9 @@ kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid) { /* the caller is responsible for accounting the additional reference * that this creates */ - struct list_head *peer_list = kiblnd_nid2peerlist(nid); - struct list_head *tmp; - kib_peer_t *peer; + struct list_head *peer_list = kiblnd_nid2peerlist(nid); + struct list_head *tmp; + kib_peer_t *peer; list_for_each(tmp, peer_list) { @@ -422,10 +423,10 @@ void kiblnd_unlink_peer_locked(kib_peer_t *peer) static int kiblnd_get_peer_info(lnet_ni_t *ni, int index, lnet_nid_t *nidp, int *count) { - kib_peer_t *peer; - struct list_head *ptmp; - int i; - unsigned long flags; + kib_peer_t *peer; + struct list_head *ptmp; + int i; + unsigned long flags; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -459,9 +460,9 @@ static int kiblnd_get_peer_info(lnet_ni_t *ni, int index, static void kiblnd_del_peer_locked(kib_peer_t *peer) { - struct list_head *ctmp; - struct list_head *cnxt; - kib_conn_t *conn; + struct list_head *ctmp; + struct list_head *cnxt; + kib_conn_t *conn; if (list_empty(&peer->ibp_conns)) { kiblnd_unlink_peer_locked(peer); @@ -480,14 +481,14 @@ static void kiblnd_del_peer_locked(kib_peer_t *peer) static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) { LIST_HEAD(zombies); - struct list_head *ptmp; - struct list_head *pnxt; - kib_peer_t *peer; - int lo; - int hi; - int i; - unsigned long flags; - int rc = -ENOENT; + struct list_head *ptmp; + struct list_head *pnxt; + kib_peer_t *peer; + int lo; + int hi; + int i; + unsigned long flags; + int rc = -ENOENT; write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -532,12 +533,12 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) { - kib_peer_t *peer; - struct list_head *ptmp; - kib_conn_t *conn; - struct list_head 
*ctmp; - int i; - unsigned long flags; + kib_peer_t *peer; + struct list_head *ptmp; + kib_conn_t *conn; + struct list_head *ctmp; + int i; + unsigned long flags; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -593,7 +594,7 @@ int kiblnd_translate_mtu(int value) static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid) { - int mtu; + int mtu; /* XXX There is no path record for iWARP, set by netdev->change_mtu? */ if (cmid->route.path_rec == NULL) @@ -607,11 +608,11 @@ static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid) static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) { - cpumask_t *mask; - int vectors; - int off; - int i; - lnet_nid_t nid = conn->ibc_peer->ibp_nid; + cpumask_t *mask; + int vectors; + int off; + int i; + lnet_nid_t nid = conn->ibc_peer->ibp_nid; vectors = conn->ibc_cmid->device->num_comp_vectors; if (vectors <= 1) @@ -642,17 +643,18 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, * she must dispose of 'cmid'. (Actually I'd block forever if I tried * to destroy 'cmid' here since I'm called from the CM which still has * its ref on 'cmid'). */ - rwlock_t *glock = &kiblnd_data.kib_global_lock; - kib_net_t *net = peer->ibp_ni->ni_data; - kib_dev_t *dev; + rwlock_t *glock = &kiblnd_data.kib_global_lock; + kib_net_t *net = peer->ibp_ni->ni_data; + kib_dev_t *dev; struct ib_qp_init_attr *init_qp_attr; - struct kib_sched_info *sched; - kib_conn_t *conn; - struct ib_cq *cq; - unsigned long flags; - int cpt; - int rc; - int i; + struct kib_sched_info *sched; + struct ib_cq_init_attr cq_attr = {}; + kib_conn_t *conn; + struct ib_cq *cq; + unsigned long flags; + int cpt; + int rc; + int i; LASSERT(net != NULL); LASSERT(!in_interrupt()); @@ -742,10 +744,11 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, kiblnd_map_rx_descs(conn); + cq_attr.cqe = IBLND_CQ_ENTRIES(version); + cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt); cq = ib_create_cq(cmid->device, kiblnd_cq_completion, kiblnd_cq_event, conn, - IBLND_CQ_ENTRIES(version), - kiblnd_get_completion_vector(conn, cpt)); + &cq_attr); if (IS_ERR(cq)) { CERROR("Can't create CQ: %ld, cqe: %d\n", PTR_ERR(cq), IBLND_CQ_ENTRIES(version)); @@ -837,8 +840,8 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, void kiblnd_destroy_conn(kib_conn_t *conn) { struct rdma_cm_id *cmid = conn->ibc_cmid; - kib_peer_t *peer = conn->ibc_peer; - int rc; + kib_peer_t *peer = conn->ibc_peer; + int rc; LASSERT(!in_interrupt()); LASSERT(atomic_read(&conn->ibc_refcount) == 0); @@ -904,10 +907,10 @@ void kiblnd_destroy_conn(kib_conn_t *conn) int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why) { - kib_conn_t *conn; - struct list_head *ctmp; - struct list_head *cnxt; - int count = 0; + kib_conn_t *conn; + struct list_head *ctmp; + struct list_head *cnxt; + int count = 0; list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { conn = list_entry(ctmp, kib_conn_t, ibc_list); @@ -926,10 +929,10 @@ int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why) int kiblnd_close_stale_conns_locked(kib_peer_t *peer, int version, __u64 incarnation) { - kib_conn_t *conn; - struct list_head *ctmp; - struct list_head *cnxt; - int count = 0; + kib_conn_t *conn; + struct list_head *ctmp; + struct list_head *cnxt; + int count = 0; list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { conn = list_entry(ctmp, kib_conn_t, ibc_list); @@ -953,14 +956,14 @@ int kiblnd_close_stale_conns_locked(kib_peer_t *peer, static int 
kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) { - kib_peer_t *peer; - struct list_head *ptmp; - struct list_head *pnxt; - int lo; - int hi; - int i; - unsigned long flags; - int count = 0; + kib_peer_t *peer; + struct list_head *ptmp; + struct list_head *pnxt; + int lo; + int hi; + int i; + unsigned long flags; + int count = 0; write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -1001,17 +1004,17 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) { struct libcfs_ioctl_data *data = arg; - int rc = -EINVAL; + int rc = -EINVAL; switch (cmd) { case IOC_LIBCFS_GET_PEER: { - lnet_nid_t nid = 0; - int count = 0; + lnet_nid_t nid = 0; + int count = 0; rc = kiblnd_get_peer_info(ni, data->ioc_count, &nid, &count); - data->ioc_nid = nid; - data->ioc_count = count; + data->ioc_nid = nid; + data->ioc_count = count; break; } @@ -1053,11 +1056,11 @@ int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) { - unsigned long last_alive = 0; - unsigned long now = cfs_time_current(); - rwlock_t *glock = &kiblnd_data.kib_global_lock; - kib_peer_t *peer; - unsigned long flags; + unsigned long last_alive = 0; + unsigned long now = cfs_time_current(); + rwlock_t *glock = &kiblnd_data.kib_global_lock; + kib_peer_t *peer; + unsigned long flags; read_lock_irqsave(glock, flags); @@ -1086,8 +1089,8 @@ void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) void kiblnd_free_pages(kib_pages_t *p) { - int npages = p->ibp_npages; - int i; + int npages = p->ibp_npages; + int i; for (i = 0; i < npages; i++) { if (p->ibp_pages[i] != NULL) @@ -1099,8 +1102,8 @@ void kiblnd_free_pages(kib_pages_t *p) int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) { - kib_pages_t *p; - int i; + kib_pages_t *p; + int i; LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt, offsetof(kib_pages_t, ibp_pages[npages])); @@ -1130,7 +1133,7 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) void kiblnd_unmap_rx_descs(kib_conn_t *conn) { kib_rx_t *rx; - int i; + int i; LASSERT(conn->ibc_rxs != NULL); LASSERT(conn->ibc_hdev != NULL); @@ -1153,14 +1156,13 @@ void kiblnd_unmap_rx_descs(kib_conn_t *conn) void kiblnd_map_rx_descs(kib_conn_t *conn) { - kib_rx_t *rx; - struct page *pg; - int pg_off; - int ipg; - int i; + kib_rx_t *rx; + struct page *pg; + int pg_off; + int ipg; + int i; - for (pg_off = ipg = i = 0; - i < IBLND_RX_MSGS(conn->ibc_version); i++) { + for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) { pg = conn->ibc_rx_pages->ibp_pages[ipg]; rx = &conn->ibc_rxs[i]; @@ -1177,7 +1179,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn) CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n", i, rx->rx_msg, rx->rx_msgaddr, - lnet_page2phys(pg) + pg_off); + (__u64)(page_to_phys(pg) + pg_off)); pg_off += IBLND_MSG_SIZE; LASSERT(pg_off <= PAGE_SIZE); @@ -1192,9 +1194,9 @@ void kiblnd_map_rx_descs(kib_conn_t *conn) static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo) { - kib_hca_dev_t *hdev = tpo->tpo_hdev; - kib_tx_t *tx; - int i; + kib_hca_dev_t *hdev = tpo->tpo_hdev; + kib_tx_t *tx; + int i; LASSERT(tpo->tpo_pool.po_allocated == 0); @@ -1216,8 +1218,8 @@ static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo) static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev) { kib_hca_dev_t *hdev; - unsigned long flags; - int i = 0; + unsigned long flags; + int i = 0; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); while (dev->ibd_failover) { @@ 
-1240,15 +1242,15 @@ static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev) static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) { - kib_pages_t *txpgs = tpo->tpo_tx_pages; - kib_pool_t *pool = &tpo->tpo_pool; - kib_net_t *net = pool->po_owner->ps_net; - kib_dev_t *dev; - struct page *page; - kib_tx_t *tx; - int page_offset; - int ipage; - int i; + kib_pages_t *txpgs = tpo->tpo_tx_pages; + kib_pool_t *pool = &tpo->tpo_pool; + kib_net_t *net = pool->po_owner->ps_net; + kib_dev_t *dev; + struct page *page; + kib_tx_t *tx; + int page_offset; + int ipage; + int i; LASSERT(net != NULL); @@ -1291,7 +1293,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size) { - __u64 index; + __u64 index; LASSERT(hdev->ibh_mrs[0] != NULL); @@ -1311,7 +1313,7 @@ struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd) { struct ib_mr *prev_mr; struct ib_mr *mr; - int i; + int i; LASSERT(hdev->ibh_mrs[0] != NULL); @@ -1382,18 +1384,18 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t **pp_fpo) { /* FMR pool for RDMA */ - kib_dev_t *dev = fps->fps_net->ibn_dev; - kib_fmr_pool_t *fpo; + kib_dev_t *dev = fps->fps_net->ibn_dev; + kib_fmr_pool_t *fpo; struct ib_fmr_pool_param param = { .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE, - .page_shift = PAGE_SHIFT, - .access = (IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE), - .pool_size = fps->fps_pool_size, + .page_shift = PAGE_SHIFT, + .access = (IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE), + .pool_size = fps->fps_pool_size, .dirty_watermark = fps->fps_flush_trigger, .flush_function = NULL, - .flush_arg = NULL, - .cache = !!*kiblnd_tunables.kib_fmr_cache}; + .flush_arg = NULL, + .cache = !!*kiblnd_tunables.kib_fmr_cache}; int rc; LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo)); @@ -1454,7 +1456,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int flush_trigger) { kib_fmr_pool_t *fpo; - int rc; + int rc; memset(fps, 0, sizeof(kib_fmr_poolset_t)); @@ -1485,11 +1487,11 @@ static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now) void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status) { LIST_HEAD(zombies); - kib_fmr_pool_t *fpo = fmr->fmr_pool; + kib_fmr_pool_t *fpo = fmr->fmr_pool; kib_fmr_poolset_t *fps = fpo->fpo_owner; - unsigned long now = cfs_time_current(); - kib_fmr_pool_t *tmp; - int rc; + unsigned long now = cfs_time_current(); + kib_fmr_pool_t *tmp; + int rc; rc = ib_fmr_pool_unmap(fmr->fmr_pfmr); LASSERT(rc == 0); @@ -1525,9 +1527,9 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, __u64 iov, kib_fmr_t *fmr) { struct ib_pool_fmr *pfmr; - kib_fmr_pool_t *fpo; - __u64 version; - int rc; + kib_fmr_pool_t *fpo; + __u64 version; + int rc; again: spin_lock(&fps->fps_lock); @@ -1658,13 +1660,13 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt, kib_ps_node_init_t nd_init, kib_ps_node_fini_t nd_fini) { - kib_pool_t *pool; - int rc; + kib_pool_t *pool; + int rc; memset(ps, 0, sizeof(kib_poolset_t)); - ps->ps_cpt = cpt; - ps->ps_net = net; + ps->ps_cpt = cpt; + ps->ps_net = net; ps->ps_pool_create = po_create; ps->ps_pool_destroy = po_destroy; ps->ps_node_init = nd_init; @@ -1698,9 +1700,9 @@ static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now) void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node) { LIST_HEAD(zombies); - kib_poolset_t *ps = pool->po_owner; - kib_pool_t *tmp; - unsigned long now = 
cfs_time_current(); + kib_poolset_t *ps = pool->po_owner; + kib_pool_t *tmp; + unsigned long now = cfs_time_current(); spin_lock(&ps->ps_lock); @@ -1727,9 +1729,9 @@ void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node) struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) { - struct list_head *node; - kib_pool_t *pool; - int rc; + struct list_head *node; + kib_pool_t *pool; + int rc; again: spin_lock(&ps->ps_lock); @@ -1789,8 +1791,8 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr) { - kib_pmr_pool_t *ppo = pmr->pmr_pool; - struct ib_mr *mr = pmr->pmr_mr; + kib_pmr_pool_t *ppo = pmr->pmr_pool; + struct ib_mr *mr = pmr->pmr_mr; pmr->pmr_mr = NULL; kiblnd_pool_free_node(&ppo->ppo_pool, &pmr->pmr_list); @@ -1802,9 +1804,9 @@ int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev, kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr) { kib_phys_mr_t *pmr; - struct list_head *node; - int rc; - int i; + struct list_head *node; + int rc; + int i; node = kiblnd_pool_alloc_node(&pps->pps_poolset); if (node == NULL) { @@ -1846,7 +1848,7 @@ int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev, static void kiblnd_destroy_pmr_pool(kib_pool_t *pool) { kib_pmr_pool_t *ppo = container_of(pool, kib_pmr_pool_t, ppo_pool); - kib_phys_mr_t *pmr; + kib_phys_mr_t *pmr; kib_phys_mr_t *tmp; LASSERT(pool->po_allocated == 0); @@ -1881,10 +1883,10 @@ static inline int kiblnd_pmr_pool_size(int ncpts) static int kiblnd_create_pmr_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po) { - struct kib_pmr_pool *ppo; - struct kib_pool *pool; - kib_phys_mr_t *pmr; - int i; + struct kib_pmr_pool *ppo; + struct kib_pool *pool; + kib_phys_mr_t *pmr; + int i; LIBCFS_CPT_ALLOC(ppo, lnet_cpt_table(), ps->ps_cpt, sizeof(kib_pmr_pool_t)); @@ -1923,8 +1925,8 @@ static int kiblnd_create_pmr_pool(kib_poolset_t *ps, int size, static void kiblnd_destroy_tx_pool(kib_pool_t *pool) { - kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool); - int i; + kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool); + int i; LASSERT(pool->po_allocated == 0); @@ -1979,9 +1981,9 @@ static int kiblnd_tx_pool_size(int ncpts) static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po) { - int i; - int npg; - kib_pool_t *pool; + int i; + int npg; + kib_pool_t *pool; kib_tx_pool_t *tpo; LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo)); @@ -2064,19 +2066,19 @@ static void kiblnd_tx_init(kib_pool_t *pool, struct list_head *node) { kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t, tps_poolset); - kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list); + kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list); tx->tx_cookie = tps->tps_next_tx_cookie++; } static void kiblnd_net_fini_pools(kib_net_t *net) { - int i; + int i; cfs_cpt_for_each(i, lnet_cpt_table()) { - kib_tx_poolset_t *tps; - kib_fmr_poolset_t *fps; - kib_pmr_poolset_t *pps; + kib_tx_poolset_t *tps; + kib_fmr_poolset_t *fps; + kib_pmr_poolset_t *pps; if (net->ibn_tx_ps != NULL) { tps = net->ibn_tx_ps[i]; @@ -2112,16 +2114,15 @@ static void kiblnd_net_fini_pools(kib_net_t *net) static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) { - unsigned long flags; - int cpt; - int rc; - int i; + unsigned long flags; + int cpt; + int rc; + int i; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); if (*kiblnd_tunables.kib_map_on_demand == 0 && net->ibn_dev->ibd_hdev->ibh_nmrs == 1) { - 
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, - flags); + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); goto create_tx_pool; } @@ -2241,7 +2242,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev) { struct ib_device_attr *attr; - int rc; + int rc; /* It's safe to assume a HCA can handle a page size * matching that of the native system */ @@ -2284,7 +2285,7 @@ static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev) static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev) { - int i; + int i; if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL) return; @@ -2317,12 +2318,11 @@ void kiblnd_hdev_destroy(kib_hca_dev_t *hdev) static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev) { struct ib_mr *mr; - int i; - int rc; - __u64 mm_size; - __u64 mr_size; - int acflags = IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE; + int i; + int rc; + __u64 mm_size; + __u64 mr_size; + int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE; rc = kiblnd_hdev_get_attr(hdev); if (rc != 0) @@ -2371,11 +2371,11 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev) for (i = 0; i < hdev->ibh_nmrs; i++) { struct ib_phys_buf ipb; - __u64 iova; + __u64 iova; ipb.size = hdev->ibh_mr_size; ipb.addr = i * mr_size; - iova = ipb.addr; + iova = ipb.addr; mr = ib_reg_phys_mr(hdev->ibh_pd, &ipb, 1, acflags, &iova); if (IS_ERR(mr)) { @@ -2406,10 +2406,10 @@ static int kiblnd_dummy_callback(struct rdma_cm_id *cmid, static int kiblnd_dev_need_failover(kib_dev_t *dev) { - struct rdma_cm_id *cmid; - struct sockaddr_in srcaddr; - struct sockaddr_in dstaddr; - int rc; + struct rdma_cm_id *cmid; + struct sockaddr_in srcaddr; + struct sockaddr_in dstaddr; + int rc; if (dev->ibd_hdev == NULL || /* initializing */ dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */ @@ -2435,7 +2435,7 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev) } memset(&srcaddr, 0, sizeof(srcaddr)); - srcaddr.sin_family = AF_INET; + srcaddr.sin_family = AF_INET; srcaddr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip); memset(&dstaddr, 0, sizeof(dstaddr)); @@ -2464,15 +2464,14 @@ int kiblnd_dev_failover(kib_dev_t *dev) LIST_HEAD(zombie_tpo); LIST_HEAD(zombie_ppo); LIST_HEAD(zombie_fpo); - struct rdma_cm_id *cmid = NULL; - kib_hca_dev_t *hdev = NULL; - kib_hca_dev_t *old; - struct ib_pd *pd; - kib_net_t *net; - struct sockaddr_in addr; - unsigned long flags; - int rc = 0; - int i; + struct rdma_cm_id *cmid = NULL; + kib_hca_dev_t *hdev = NULL; + struct ib_pd *pd; + kib_net_t *net; + struct sockaddr_in addr; + unsigned long flags; + int rc = 0; + int i; LASSERT(*kiblnd_tunables.kib_dev_failover > 1 || dev->ibd_can_failover || @@ -2558,9 +2557,7 @@ int kiblnd_dev_failover(kib_dev_t *dev) write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - old = dev->ibd_hdev; - dev->ibd_hdev = hdev; /* take over the refcount */ - hdev = old; + swap(dev->ibd_hdev, hdev); /* take over the refcount */ list_for_each_entry(net, &dev->ibd_nets, ibn_list) { cfs_cpt_for_each(i, lnet_cpt_table()) { @@ -2614,13 +2611,13 @@ void kiblnd_destroy_dev(kib_dev_t *dev) static kib_dev_t *kiblnd_create_dev(char *ifname) { struct net_device *netdev; - kib_dev_t *dev; - __u32 netmask; - __u32 ip; - int up; - int rc; + kib_dev_t *dev; + __u32 netmask; + __u32 ip; + int up; + int rc; - rc = libcfs_ipif_query(ifname, &up, &ip, &netmask); + rc = lnet_ipif_query(ifname, &up, &ip, &netmask); if (rc != 0) { CERROR("Can't query IPoIB interface %s: %d\n", ifname, rc); @@ -2665,8 +2662,8 @@ static 
kib_dev_t *kiblnd_create_dev(char *ifname) static void kiblnd_base_shutdown(void) { - struct kib_sched_info *sched; - int i; + struct kib_sched_info *sched; + int i; LASSERT(list_empty(&kiblnd_data.kib_devs)); @@ -2732,10 +2729,10 @@ static void kiblnd_base_shutdown(void) void kiblnd_shutdown(lnet_ni_t *ni) { - kib_net_t *net = ni->ni_data; - rwlock_t *g_lock = &kiblnd_data.kib_global_lock; - int i; - unsigned long flags; + kib_net_t *net = ni->ni_data; + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + int i; + unsigned long flags; LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL); @@ -2804,9 +2801,9 @@ out: static int kiblnd_base_startup(void) { - struct kib_sched_info *sched; - int rc; - int i; + struct kib_sched_info *sched; + int rc; + int i; LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING); @@ -2821,8 +2818,7 @@ static int kiblnd_base_startup(void) kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE; LIBCFS_ALLOC(kiblnd_data.kib_peers, - sizeof(struct list_head) * - kiblnd_data.kib_peer_hash_size); + sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size); if (kiblnd_data.kib_peers == NULL) goto failed; for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) @@ -2840,7 +2836,7 @@ static int kiblnd_base_startup(void) goto failed; cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) { - int nthrs; + int nthrs; spin_lock_init(&sched->ibs_lock); INIT_LIST_HEAD(&sched->ibs_conns); @@ -2893,9 +2889,9 @@ static int kiblnd_base_startup(void) static int kiblnd_start_schedulers(struct kib_sched_info *sched) { - int rc = 0; - int nthrs; - int i; + int rc = 0; + int nthrs; + int i; if (sched->ibs_nthreads == 0) { if (*kiblnd_tunables.kib_nscheds > 0) { @@ -2913,8 +2909,8 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched) } for (i = 0; i < nthrs; i++) { - long id; - char name[20]; + long id; + char name[20]; id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i); snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld", @@ -2935,9 +2931,9 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched) static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, int ncpts) { - int cpt; - int rc; - int i; + int cpt; + int rc; + int i; for (i = 0; i < ncpts; i++) { struct kib_sched_info *sched; @@ -2960,10 +2956,10 @@ static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, static kib_dev_t *kiblnd_dev_search(char *ifname) { - kib_dev_t *alias = NULL; - kib_dev_t *dev; - char *colon; - char *colon2; + kib_dev_t *alias = NULL; + kib_dev_t *dev; + char *colon; + char *colon2; colon = strchr(ifname, ':'); list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) { @@ -2992,13 +2988,13 @@ static kib_dev_t *kiblnd_dev_search(char *ifname) int kiblnd_startup(lnet_ni_t *ni) { - char *ifname; - kib_dev_t *ibdev = NULL; - kib_net_t *net; - struct timeval tv; - unsigned long flags; - int rc; - int newdev; + char *ifname; + kib_dev_t *ibdev = NULL; + kib_net_t *net; + struct timeval tv; + unsigned long flags; + int rc; + int newdev; LASSERT(ni->ni_lnd == &the_o2iblnd); @@ -3091,7 +3087,7 @@ static void __exit kiblnd_module_fini(void) static int __init kiblnd_module_init(void) { - int rc; + int rc; CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE); CLASSERT(offsetof(kib_msg_t, diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h index cd664d025..f5d1d9f8f 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h @@ -65,7 
+65,6 @@ #include "../../../include/linux/libcfs/libcfs.h" #include "../../../include/linux/lnet/lnet.h" #include "../../../include/linux/lnet/lib-lnet.h" -#include "../../../include/linux/lnet/lnet-sysctl.h" #include #include @@ -80,42 +79,47 @@ #define IBLND_N_SCHED_HIGH 4 typedef struct { - int *kib_dev_failover; /* HCA failover */ - unsigned int *kib_service; /* IB service number */ - int *kib_min_reconnect_interval; /* first failed connection retry... */ - int *kib_max_reconnect_interval; /* ...exponentially increasing to this */ - int *kib_cksum; /* checksum kib_msg_t? */ - int *kib_timeout; /* comms timeout (seconds) */ - int *kib_keepalive; /* keepalive timeout (seconds) */ - int *kib_ntx; /* # tx descs */ - int *kib_credits; /* # concurrent sends */ - int *kib_peertxcredits; /* # concurrent sends to 1 peer */ - int *kib_peerrtrcredits; /* # per-peer router buffer credits */ - int *kib_peercredits_hiw; /* # when eagerly to return credits */ - int *kib_peertimeout; /* seconds to consider peer dead */ - char **kib_default_ipif; /* default IPoIB interface */ - int *kib_retry_count; - int *kib_rnr_retry_count; - int *kib_concurrent_sends; /* send work queue sizing */ - int *kib_ib_mtu; /* IB MTU */ - int *kib_map_on_demand; /* map-on-demand if RD has more fragments - * than this value, 0 disable map-on-demand */ - int *kib_pmr_pool_size; /* # physical MR in pool */ - int *kib_fmr_pool_size; /* # FMRs in pool */ - int *kib_fmr_flush_trigger; /* When to trigger FMR flush */ - int *kib_fmr_cache; /* enable FMR pool cache? */ - int *kib_require_priv_port;/* accept only privileged ports */ - int *kib_use_priv_port; /* use privileged port for active connect */ - /* # threads on each CPT */ - int *kib_nscheds; + int *kib_dev_failover; /* HCA failover */ + unsigned int *kib_service; /* IB service number */ + int *kib_min_reconnect_interval; /* first failed connection + * retry... */ + int *kib_max_reconnect_interval; /* ...exponentially increasing + * to this */ + int *kib_cksum; /* checksum kib_msg_t? */ + int *kib_timeout; /* comms timeout (seconds) */ + int *kib_keepalive; /* keepalive timeout (seconds) */ + int *kib_ntx; /* # tx descs */ + int *kib_credits; /* # concurrent sends */ + int *kib_peertxcredits; /* # concurrent sends to 1 peer */ + int *kib_peerrtrcredits; /* # per-peer router buffer + * credits */ + int *kib_peercredits_hiw; /* # when eagerly to return + * credits */ + int *kib_peertimeout; /* seconds to consider peer dead */ + char **kib_default_ipif; /* default IPoIB interface */ + int *kib_retry_count; + int *kib_rnr_retry_count; + int *kib_concurrent_sends; /* send work queue sizing */ + int *kib_ib_mtu; /* IB MTU */ + int *kib_map_on_demand; /* map-on-demand if RD has more + * fragments than this value, 0 + * disable map-on-demand */ + int *kib_pmr_pool_size; /* # physical MR in pool */ + int *kib_fmr_pool_size; /* # FMRs in pool */ + int *kib_fmr_flush_trigger; /* When to trigger FMR flush */ + int *kib_fmr_cache; /* enable FMR pool cache? 
*/ + int *kib_require_priv_port; /* accept only privileged ports */ + int *kib_use_priv_port; /* use privileged port for active + * connect */ + int *kib_nscheds; /* # threads on each CPT */ } kib_tunables_t; extern kib_tunables_t kiblnd_tunables; -#define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */ -#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */ +#define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */ +#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */ -#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */ +#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */ #define IBLND_CREDITS_MAX ((typeof(((kib_msg_t*) 0)->ibm_credits)) - 1) /* Max # of peer credits */ #define IBLND_MSG_QUEUE_SIZE(v) ((v) == IBLND_MSG_VERSION_1 ? \ @@ -186,34 +190,36 @@ struct kib_hca_dev; #endif typedef struct { - struct list_head ibd_list; /* chain on kib_devs */ - struct list_head ibd_fail_list; /* chain on kib_failed_devs */ - __u32 ibd_ifip; /* IPoIB interface IP */ - /** IPoIB interface name */ - char ibd_ifname[KIB_IFNAME_SIZE]; - int ibd_nnets; /* # nets extant */ - - unsigned long ibd_next_failover; - int ibd_failed_failover; /* # failover failures */ - unsigned int ibd_failover; /* failover in progress */ - unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */ - struct list_head ibd_nets; - struct kib_hca_dev *ibd_hdev; + struct list_head ibd_list; /* chain on kib_devs */ + struct list_head ibd_fail_list; /* chain on kib_failed_devs */ + __u32 ibd_ifip; /* IPoIB interface IP */ + + /* IPoIB interface name */ + char ibd_ifname[KIB_IFNAME_SIZE]; + int ibd_nnets; /* # nets extant */ + + unsigned long ibd_next_failover; + int ibd_failed_failover; /* # failover failures */ + unsigned int ibd_failover; /* failover in progress */ + unsigned int ibd_can_failover; /* IPoIB interface is a bonding + * master */ + struct list_head ibd_nets; + struct kib_hca_dev *ibd_hdev; } kib_dev_t; typedef struct kib_hca_dev { - struct rdma_cm_id *ibh_cmid; /* listener cmid */ - struct ib_device *ibh_ibdev; /* IB device */ - int ibh_page_shift; /* page shift of current HCA */ - int ibh_page_size; /* page size of current HCA */ - __u64 ibh_page_mask; /* page mask of current HCA */ - int ibh_mr_shift; /* bits shift of max MR size */ - __u64 ibh_mr_size; /* size of MR */ - int ibh_nmrs; /* # of global MRs */ - struct ib_mr **ibh_mrs; /* global MR */ - struct ib_pd *ibh_pd; /* PD */ - kib_dev_t *ibh_dev; /* owner */ - atomic_t ibh_ref; /* refcount */ + struct rdma_cm_id *ibh_cmid; /* listener cmid */ + struct ib_device *ibh_ibdev; /* IB device */ + int ibh_page_shift; /* page shift of current HCA */ + int ibh_page_size; /* page size of current HCA */ + __u64 ibh_page_mask; /* page mask of current HCA */ + int ibh_mr_shift; /* bits shift of max MR size */ + __u64 ibh_mr_size; /* size of MR */ + int ibh_nmrs; /* # of global MRs */ + struct ib_mr **ibh_mrs; /* global MR */ + struct ib_pd *ibh_pd; /* PD */ + kib_dev_t *ibh_dev; /* owner */ + atomic_t ibh_ref; /* refcount */ } kib_hca_dev_t; /** # of seconds to keep pool alive */ @@ -222,19 +228,19 @@ typedef struct kib_hca_dev { #define IBLND_POOL_RETRY 1 typedef struct { - int ibp_npages; /* # pages */ - struct page *ibp_pages[0]; /* page array */ + int ibp_npages; /* # pages */ + struct page *ibp_pages[0]; /* page array */ } kib_pages_t; struct kib_pmr_pool; typedef struct { - struct list_head pmr_list; /* chain node */ - struct 
ib_phys_buf *pmr_ipb; /* physical buffer */ - struct ib_mr *pmr_mr; /* IB MR */ - struct kib_pmr_pool *pmr_pool; /* owner of this MR */ - __u64 pmr_iova; /* Virtual I/O address */ - int pmr_refcount; /* reference count */ + struct list_head pmr_list; /* chain node */ + struct ib_phys_buf *pmr_ipb; /* physical buffer */ + struct ib_mr *pmr_mr; /* IB MR */ + struct kib_pmr_pool *pmr_pool; /* owner of this MR */ + __u64 pmr_iova; /* Virtual I/O address */ + int pmr_refcount; /* reference count */ } kib_phys_mr_t; struct kib_pool; @@ -251,97 +257,99 @@ struct kib_net; #define IBLND_POOL_NAME_LEN 32 typedef struct kib_poolset { - spinlock_t ps_lock; /* serialize */ - struct kib_net *ps_net; /* network it belongs to */ - char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */ - struct list_head ps_pool_list; /* list of pools */ - struct list_head ps_failed_pool_list; /* failed pool list */ - unsigned long ps_next_retry; /* time stamp for retry if failed to allocate */ - int ps_increasing; /* is allocating new pool */ - int ps_pool_size; /* new pool size */ - int ps_cpt; /* CPT id */ - - kib_ps_pool_create_t ps_pool_create; /* create a new pool */ - kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */ - kib_ps_node_init_t ps_node_init; /* initialize new allocated node */ - kib_ps_node_fini_t ps_node_fini; /* finalize node */ + spinlock_t ps_lock; /* serialize */ + struct kib_net *ps_net; /* network it belongs to */ + char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */ + struct list_head ps_pool_list; /* list of pools */ + struct list_head ps_failed_pool_list;/* failed pool list */ + unsigned long ps_next_retry; /* time stamp for retry if + * failed to allocate */ + int ps_increasing; /* is allocating new pool */ + int ps_pool_size; /* new pool size */ + int ps_cpt; /* CPT id */ + + kib_ps_pool_create_t ps_pool_create; /* create a new pool */ + kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */ + kib_ps_node_init_t ps_node_init; /* initialize new allocated + * node */ + kib_ps_node_fini_t ps_node_fini; /* finalize node */ } kib_poolset_t; typedef struct kib_pool { - struct list_head po_list; /* chain on pool list */ - struct list_head po_free_list; /* pre-allocated node */ - kib_poolset_t *po_owner; /* pool_set of this pool */ - unsigned long po_deadline; /* deadline of this pool */ - int po_allocated; /* # of elements in use */ - int po_failed; /* pool is created on failed HCA */ - int po_size; /* # of pre-allocated elements */ + struct list_head po_list; /* chain on pool list */ + struct list_head po_free_list; /* pre-allocated node */ + kib_poolset_t *po_owner; /* pool_set of this pool */ + unsigned long po_deadline; /* deadline of this pool */ + int po_allocated; /* # of elements in use */ + int po_failed; /* pool is created on failed + * HCA */ + int po_size; /* # of pre-allocated elements */ } kib_pool_t; typedef struct { - kib_poolset_t tps_poolset; /* pool-set */ - __u64 tps_next_tx_cookie; /* cookie of TX */ + kib_poolset_t tps_poolset; /* pool-set */ + __u64 tps_next_tx_cookie; /* cookie of TX */ } kib_tx_poolset_t; typedef struct { - kib_pool_t tpo_pool; /* pool */ - struct kib_hca_dev *tpo_hdev; /* device for this pool */ - struct kib_tx *tpo_tx_descs; /* all the tx descriptors */ - kib_pages_t *tpo_tx_pages; /* premapped tx msg pages */ + kib_pool_t tpo_pool; /* pool */ + struct kib_hca_dev *tpo_hdev; /* device for this pool */ + struct kib_tx *tpo_tx_descs; /* all the tx descriptors */ + kib_pages_t *tpo_tx_pages; /* premapped tx msg pages */ } kib_tx_pool_t; 
typedef struct { - kib_poolset_t pps_poolset; /* pool-set */ + kib_poolset_t pps_poolset; /* pool-set */ } kib_pmr_poolset_t; typedef struct kib_pmr_pool { - struct kib_hca_dev *ppo_hdev; /* device for this pool */ - kib_pool_t ppo_pool; /* pool */ + struct kib_hca_dev *ppo_hdev; /* device for this pool */ + kib_pool_t ppo_pool; /* pool */ } kib_pmr_pool_t; typedef struct { - spinlock_t fps_lock; /* serialize */ - struct kib_net *fps_net; /* IB network */ - struct list_head fps_pool_list; /* FMR pool list */ - struct list_head fps_failed_pool_list; /* FMR pool list */ - __u64 fps_version; /* validity stamp */ - int fps_cpt; /* CPT id */ - int fps_pool_size; - int fps_flush_trigger; - /* is allocating new pool */ - int fps_increasing; - /* time stamp for retry if failed to allocate */ - unsigned long fps_next_retry; + spinlock_t fps_lock; /* serialize */ + struct kib_net *fps_net; /* IB network */ + struct list_head fps_pool_list; /* FMR pool list */ + struct list_head fps_failed_pool_list;/* FMR pool list */ + __u64 fps_version; /* validity stamp */ + int fps_cpt; /* CPT id */ + int fps_pool_size; + int fps_flush_trigger; + int fps_increasing; /* is allocating new pool */ + unsigned long fps_next_retry; /* time stamp for retry if + * failed to allocate */ } kib_fmr_poolset_t; typedef struct { - struct list_head fpo_list; /* chain on pool list */ - struct kib_hca_dev *fpo_hdev; /* device for this pool */ - kib_fmr_poolset_t *fpo_owner; /* owner of this pool */ - struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */ - unsigned long fpo_deadline; /* deadline of this pool */ - int fpo_failed; /* fmr pool is failed */ - int fpo_map_count; /* # of mapped FMR */ + struct list_head fpo_list; /* chain on pool list */ + struct kib_hca_dev *fpo_hdev; /* device for this pool */ + kib_fmr_poolset_t *fpo_owner; /* owner of this pool */ + struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */ + unsigned long fpo_deadline; /* deadline of this pool */ + int fpo_failed; /* fmr pool is failed */ + int fpo_map_count; /* # of mapped FMR */ } kib_fmr_pool_t; typedef struct { - struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */ - kib_fmr_pool_t *fmr_pool; /* pool of FMR */ + struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */ + kib_fmr_pool_t *fmr_pool; /* pool of FMR */ } kib_fmr_t; typedef struct kib_net { - struct list_head ibn_list; /* chain on kib_dev_t::ibd_nets */ - __u64 ibn_incarnation; /* my epoch */ - int ibn_init; /* initialisation state */ - int ibn_shutdown; /* shutting down? */ + struct list_head ibn_list; /* chain on kib_dev_t::ibd_nets */ + __u64 ibn_incarnation;/* my epoch */ + int ibn_init; /* initialisation state */ + int ibn_shutdown; /* shutting down? 
*/ - atomic_t ibn_npeers; /* # peers extant */ - atomic_t ibn_nconns; /* # connections extant */ + atomic_t ibn_npeers; /* # peers extant */ + atomic_t ibn_nconns; /* # connections extant */ - kib_tx_poolset_t **ibn_tx_ps; /* tx pool-set */ - kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */ - kib_pmr_poolset_t **ibn_pmr_ps; /* pmr pool-set */ + kib_tx_poolset_t **ibn_tx_ps; /* tx pool-set */ + kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */ + kib_pmr_poolset_t **ibn_pmr_ps; /* pmr pool-set */ - kib_dev_t *ibn_dev; /* underlying IB device */ + kib_dev_t *ibn_dev; /* underlying IB device */ } kib_net_t; #define KIB_THREAD_SHIFT 16 @@ -350,51 +358,45 @@ typedef struct kib_net { #define KIB_THREAD_TID(id) ((id) & ((1UL << KIB_THREAD_SHIFT) - 1)) struct kib_sched_info { - /* serialise */ - spinlock_t ibs_lock; - /* schedulers sleep here */ - wait_queue_head_t ibs_waitq; - /* conns to check for rx completions */ - struct list_head ibs_conns; - /* number of scheduler threads */ - int ibs_nthreads; - /* max allowed scheduler threads */ - int ibs_nthreads_max; - int ibs_cpt; /* CPT id */ + spinlock_t ibs_lock; /* serialise */ + wait_queue_head_t ibs_waitq; /* schedulers sleep here */ + struct list_head ibs_conns; /* conns to check for rx completions */ + int ibs_nthreads; /* number of scheduler threads */ + int ibs_nthreads_max; /* max allowed scheduler threads */ + int ibs_cpt; /* CPT id */ }; typedef struct { - int kib_init; /* initialisation state */ - int kib_shutdown; /* shut down? */ - struct list_head kib_devs; /* IB devices extant */ - /* list head of failed devices */ - struct list_head kib_failed_devs; - /* schedulers sleep here */ - wait_queue_head_t kib_failover_waitq; - atomic_t kib_nthreads; /* # live threads */ - /* stabilize net/dev/peer/conn ops */ - rwlock_t kib_global_lock; - /* hash table of all my known peers */ - struct list_head *kib_peers; - /* size of kib_peers */ - int kib_peer_hash_size; - /* the connd task (serialisation assertions) */ - void *kib_connd; - /* connections to setup/teardown */ - struct list_head kib_connd_conns; - /* connections with zero refcount */ - struct list_head kib_connd_zombies; - /* connection daemon sleeps here */ - wait_queue_head_t kib_connd_waitq; - spinlock_t kib_connd_lock; /* serialise */ - struct ib_qp_attr kib_error_qpa; /* QP->ERROR */ - /* percpt data for schedulers */ - struct kib_sched_info **kib_scheds; + int kib_init; /* initialisation state */ + int kib_shutdown; /* shut down? 
*/ + struct list_head kib_devs; /* IB devices extant */ + struct list_head kib_failed_devs; /* list head of failed + * devices */ + wait_queue_head_t kib_failover_waitq; /* schedulers sleep here */ + atomic_t kib_nthreads; /* # live threads */ + rwlock_t kib_global_lock; /* stabilize net/dev/peer/conn + * ops */ + struct list_head *kib_peers; /* hash table of all my known + * peers */ + int kib_peer_hash_size; /* size of kib_peers */ + void *kib_connd; /* the connd task + * (serialisation assertions) + */ + struct list_head kib_connd_conns; /* connections to + * setup/teardown */ + struct list_head kib_connd_zombies; /* connections with zero + * refcount */ + wait_queue_head_t kib_connd_waitq; /* connection daemon sleeps + * here */ + spinlock_t kib_connd_lock; /* serialise */ + struct ib_qp_attr kib_error_qpa; /* QP->ERROR */ + struct kib_sched_info **kib_scheds; /* percpt data for schedulers + */ } kib_data_t; -#define IBLND_INIT_NOTHING 0 -#define IBLND_INIT_DATA 1 -#define IBLND_INIT_ALL 2 +#define IBLND_INIT_NOTHING 0 +#define IBLND_INIT_DATA 1 +#define IBLND_INIT_ALL 2 /************************************************************************ * IB Wire message format. @@ -402,228 +404,243 @@ typedef struct { */ typedef struct kib_connparams { - __u16 ibcp_queue_depth; - __u16 ibcp_max_frags; - __u32 ibcp_max_msg_size; + __u16 ibcp_queue_depth; + __u16 ibcp_max_frags; + __u32 ibcp_max_msg_size; } WIRE_ATTR kib_connparams_t; typedef struct { - lnet_hdr_t ibim_hdr; /* portals header */ - char ibim_payload[0]; /* piggy-backed payload */ + lnet_hdr_t ibim_hdr; /* portals header */ + char ibim_payload[0]; /* piggy-backed payload */ } WIRE_ATTR kib_immediate_msg_t; typedef struct { - __u32 rf_nob; /* # bytes this frag */ - __u64 rf_addr; /* CAVEAT EMPTOR: misaligned!! */ + __u32 rf_nob; /* # bytes this frag */ + __u64 rf_addr; /* CAVEAT EMPTOR: misaligned!! 
*/ } WIRE_ATTR kib_rdma_frag_t; typedef struct { - __u32 rd_key; /* local/remote key */ - __u32 rd_nfrags; /* # fragments */ - kib_rdma_frag_t rd_frags[0]; /* buffer frags */ + __u32 rd_key; /* local/remote key */ + __u32 rd_nfrags; /* # fragments */ + kib_rdma_frag_t rd_frags[0]; /* buffer frags */ } WIRE_ATTR kib_rdma_desc_t; typedef struct { - lnet_hdr_t ibprm_hdr; /* portals header */ - __u64 ibprm_cookie; /* opaque completion cookie */ + lnet_hdr_t ibprm_hdr; /* portals header */ + __u64 ibprm_cookie; /* opaque completion cookie */ } WIRE_ATTR kib_putreq_msg_t; typedef struct { - __u64 ibpam_src_cookie; /* reflected completion cookie */ - __u64 ibpam_dst_cookie; /* opaque completion cookie */ - kib_rdma_desc_t ibpam_rd; /* sender's sink buffer */ + __u64 ibpam_src_cookie; /* reflected completion cookie */ + __u64 ibpam_dst_cookie; /* opaque completion cookie */ + kib_rdma_desc_t ibpam_rd; /* sender's sink buffer */ } WIRE_ATTR kib_putack_msg_t; typedef struct { - lnet_hdr_t ibgm_hdr; /* portals header */ - __u64 ibgm_cookie; /* opaque completion cookie */ - kib_rdma_desc_t ibgm_rd; /* rdma descriptor */ + lnet_hdr_t ibgm_hdr; /* portals header */ + __u64 ibgm_cookie; /* opaque completion cookie */ + kib_rdma_desc_t ibgm_rd; /* rdma descriptor */ } WIRE_ATTR kib_get_msg_t; typedef struct { - __u64 ibcm_cookie; /* opaque completion cookie */ - __s32 ibcm_status; /* < 0 failure: >= 0 length */ + __u64 ibcm_cookie; /* opaque completion cookie */ + __s32 ibcm_status; /* < 0 failure: >= 0 length */ } WIRE_ATTR kib_completion_msg_t; typedef struct { /* First 2 fields fixed FOR ALL TIME */ - __u32 ibm_magic; /* I'm an ibnal message */ - __u16 ibm_version; /* this is my version number */ - - __u8 ibm_type; /* msg type */ - __u8 ibm_credits; /* returned credits */ - __u32 ibm_nob; /* # bytes in whole message */ - __u32 ibm_cksum; /* checksum (0 == no checksum) */ - __u64 ibm_srcnid; /* sender's NID */ - __u64 ibm_srcstamp; /* sender's incarnation */ - __u64 ibm_dstnid; /* destination's NID */ - __u64 ibm_dststamp; /* destination's incarnation */ + __u32 ibm_magic; /* I'm an ibnal message */ + __u16 ibm_version; /* this is my version number */ + + __u8 ibm_type; /* msg type */ + __u8 ibm_credits; /* returned credits */ + __u32 ibm_nob; /* # bytes in whole message */ + __u32 ibm_cksum; /* checksum (0 == no checksum) */ + __u64 ibm_srcnid; /* sender's NID */ + __u64 ibm_srcstamp; /* sender's incarnation */ + __u64 ibm_dstnid; /* destination's NID */ + __u64 ibm_dststamp; /* destination's incarnation */ union { - kib_connparams_t connparams; - kib_immediate_msg_t immediate; - kib_putreq_msg_t putreq; - kib_putack_msg_t putack; - kib_get_msg_t get; - kib_completion_msg_t completion; + kib_connparams_t connparams; + kib_immediate_msg_t immediate; + kib_putreq_msg_t putreq; + kib_putack_msg_t putack; + kib_get_msg_t get; + kib_completion_msg_t completion; } WIRE_ATTR ibm_u; } WIRE_ATTR kib_msg_t; -#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC /* unique magic */ +#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC /* unique magic */ -#define IBLND_MSG_VERSION_1 0x11 -#define IBLND_MSG_VERSION_2 0x12 -#define IBLND_MSG_VERSION IBLND_MSG_VERSION_2 +#define IBLND_MSG_VERSION_1 0x11 +#define IBLND_MSG_VERSION_2 0x12 +#define IBLND_MSG_VERSION IBLND_MSG_VERSION_2 -#define IBLND_MSG_CONNREQ 0xc0 /* connection request */ -#define IBLND_MSG_CONNACK 0xc1 /* connection acknowledge */ -#define IBLND_MSG_NOOP 0xd0 /* nothing (just credits) */ -#define IBLND_MSG_IMMEDIATE 0xd1 /* immediate */ -#define IBLND_MSG_PUT_REQ 0xd2 
/* putreq (src->sink) */ -#define IBLND_MSG_PUT_NAK 0xd3 /* completion (sink->src) */ -#define IBLND_MSG_PUT_ACK 0xd4 /* putack (sink->src) */ -#define IBLND_MSG_PUT_DONE 0xd5 /* completion (src->sink) */ -#define IBLND_MSG_GET_REQ 0xd6 /* getreq (sink->src) */ -#define IBLND_MSG_GET_DONE 0xd7 /* completion (src->sink: all OK) */ +#define IBLND_MSG_CONNREQ 0xc0 /* connection request */ +#define IBLND_MSG_CONNACK 0xc1 /* connection acknowledge */ +#define IBLND_MSG_NOOP 0xd0 /* nothing (just credits) */ +#define IBLND_MSG_IMMEDIATE 0xd1 /* immediate */ +#define IBLND_MSG_PUT_REQ 0xd2 /* putreq (src->sink) */ +#define IBLND_MSG_PUT_NAK 0xd3 /* completion (sink->src) */ +#define IBLND_MSG_PUT_ACK 0xd4 /* putack (sink->src) */ +#define IBLND_MSG_PUT_DONE 0xd5 /* completion (src->sink) */ +#define IBLND_MSG_GET_REQ 0xd6 /* getreq (sink->src) */ +#define IBLND_MSG_GET_DONE 0xd7 /* completion (src->sink: all OK) */ typedef struct { - __u32 ibr_magic; /* sender's magic */ - __u16 ibr_version; /* sender's version */ - __u8 ibr_why; /* reject reason */ - __u8 ibr_padding; /* padding */ - __u64 ibr_incarnation; /* incarnation of peer */ - kib_connparams_t ibr_cp; /* connection parameters */ + __u32 ibr_magic; /* sender's magic */ + __u16 ibr_version; /* sender's version */ + __u8 ibr_why; /* reject reason */ + __u8 ibr_padding; /* padding */ + __u64 ibr_incarnation; /* incarnation of peer */ + kib_connparams_t ibr_cp; /* connection parameters */ } WIRE_ATTR kib_rej_t; /* connection rejection reasons */ -#define IBLND_REJECT_CONN_RACE 1 /* You lost connection race */ -#define IBLND_REJECT_NO_RESOURCES 2 /* Out of memory/conns etc */ -#define IBLND_REJECT_FATAL 3 /* Anything else */ - -#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */ -#define IBLND_REJECT_CONN_STALE 5 /* stale peer */ - -#define IBLND_REJECT_RDMA_FRAGS 6 /* Fatal: peer's rdma frags can't match mine */ -#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't match mine */ +#define IBLND_REJECT_CONN_RACE 1 /* You lost connection race */ +#define IBLND_REJECT_NO_RESOURCES 2 /* Out of memory/conns etc */ +#define IBLND_REJECT_FATAL 3 /* Anything else */ +#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */ +#define IBLND_REJECT_CONN_STALE 5 /* stale peer */ +#define IBLND_REJECT_RDMA_FRAGS 6 /* Fatal: peer's rdma frags can't match + * mine */ +#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't + * match mine */ /***********************************************************************/ -typedef struct kib_rx /* receive message */ +typedef struct kib_rx /* receive message */ { - struct list_head rx_list; /* queue for attention */ - struct kib_conn *rx_conn; /* owning conn */ - int rx_nob; /* # bytes received (-1 while posted) */ - enum ib_wc_status rx_status; /* completion status */ - kib_msg_t *rx_msg; /* message buffer (host vaddr) */ - __u64 rx_msgaddr; /* message buffer (I/O addr) */ - DECLARE_PCI_UNMAP_ADDR (rx_msgunmap); /* for dma_unmap_single() */ - struct ib_recv_wr rx_wrq; /* receive work item... 
*/ - struct ib_sge rx_sge; /* ...and its memory */ + struct list_head rx_list; /* queue for attention */ + struct kib_conn *rx_conn; /* owning conn */ + int rx_nob; /* # bytes received (-1 while + * posted) */ + enum ib_wc_status rx_status; /* completion status */ + kib_msg_t *rx_msg; /* message buffer (host vaddr) */ + __u64 rx_msgaddr; /* message buffer (I/O addr) */ + DECLARE_PCI_UNMAP_ADDR (rx_msgunmap); /* for dma_unmap_single() */ + struct ib_recv_wr rx_wrq; /* receive work item... */ + struct ib_sge rx_sge; /* ...and its memory */ } kib_rx_t; -#define IBLND_POSTRX_DONT_POST 0 /* don't post */ -#define IBLND_POSTRX_NO_CREDIT 1 /* post: no credits */ -#define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer back 1 credit */ -#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give myself back 1 reserved credit */ +#define IBLND_POSTRX_DONT_POST 0 /* don't post */ +#define IBLND_POSTRX_NO_CREDIT 1 /* post: no credits */ +#define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer back 1 credit */ +#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give myself back 1 reserved + * credit */ -typedef struct kib_tx /* transmit message */ +typedef struct kib_tx /* transmit message */ { - struct list_head tx_list; /* queue on idle_txs ibc_tx_queue etc. */ - kib_tx_pool_t *tx_pool; /* pool I'm from */ - struct kib_conn *tx_conn; /* owning conn */ - short tx_sending; /* # tx callbacks outstanding */ - short tx_queued; /* queued for sending */ - short tx_waiting; /* waiting for peer */ - int tx_status; /* LNET completion status */ - unsigned long tx_deadline; /* completion deadline */ - __u64 tx_cookie; /* completion cookie */ - lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on completion */ - kib_msg_t *tx_msg; /* message buffer (host vaddr) */ - __u64 tx_msgaddr; /* message buffer (I/O addr) */ - DECLARE_PCI_UNMAP_ADDR (tx_msgunmap); /* for dma_unmap_single() */ - int tx_nwrq; /* # send work items */ - struct ib_send_wr *tx_wrq; /* send work items... */ - struct ib_sge *tx_sge; /* ...and their memory */ - kib_rdma_desc_t *tx_rd; /* rdma descriptor */ - int tx_nfrags; /* # entries in... */ - struct scatterlist *tx_frags; /* dma_map_sg descriptor */ - __u64 *tx_pages; /* rdma phys page addrs */ + struct list_head tx_list; /* queue on idle_txs ibc_tx_queue + * etc. */ + kib_tx_pool_t *tx_pool; /* pool I'm from */ + struct kib_conn *tx_conn; /* owning conn */ + short tx_sending; /* # tx callbacks outstanding */ + short tx_queued; /* queued for sending */ + short tx_waiting; /* waiting for peer */ + int tx_status; /* LNET completion status */ + unsigned long tx_deadline; /* completion deadline */ + __u64 tx_cookie; /* completion cookie */ + lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on + * completion */ + kib_msg_t *tx_msg; /* message buffer (host vaddr) */ + __u64 tx_msgaddr; /* message buffer (I/O addr) */ + DECLARE_PCI_UNMAP_ADDR (tx_msgunmap); /* for dma_unmap_single() */ + int tx_nwrq; /* # send work items */ + struct ib_send_wr *tx_wrq; /* send work items... */ + struct ib_sge *tx_sge; /* ...and their memory */ + kib_rdma_desc_t *tx_rd; /* rdma descriptor */ + int tx_nfrags; /* # entries in... 
*/ + struct scatterlist *tx_frags; /* dma_map_sg descriptor */ + __u64 *tx_pages; /* rdma phys page addrs */ union { - kib_phys_mr_t *pmr; /* MR for physical buffer */ - kib_fmr_t fmr; /* FMR */ - } tx_u; - int tx_dmadir; /* dma direction */ + kib_phys_mr_t *pmr; /* MR for physical buffer */ + kib_fmr_t fmr; /* FMR */ + } tx_u; + int tx_dmadir; /* dma direction */ } kib_tx_t; typedef struct kib_connvars { - /* connection-in-progress variables */ - kib_msg_t cv_msg; + kib_msg_t cv_msg; /* connection-in-progress variables */ } kib_connvars_t; typedef struct kib_conn { - struct kib_sched_info *ibc_sched; /* scheduler information */ - struct kib_peer *ibc_peer; /* owning peer */ - kib_hca_dev_t *ibc_hdev; /* HCA bound on */ - struct list_head ibc_list; /* stash on peer's conn list */ - struct list_head ibc_sched_list; /* schedule for attention */ - __u16 ibc_version; /* version of connection */ - __u64 ibc_incarnation; /* which instance of the peer */ - atomic_t ibc_refcount; /* # users */ - int ibc_state; /* what's happening */ - int ibc_nsends_posted; /* # uncompleted sends */ - int ibc_noops_posted; /* # uncompleted NOOPs */ - int ibc_credits; /* # credits I have */ - int ibc_outstanding_credits; /* # credits to return */ - int ibc_reserved_credits;/* # ACK/DONE msg credits */ - int ibc_comms_error; /* set on comms error */ - unsigned int ibc_nrx:16; /* receive buffers owned */ - unsigned int ibc_scheduled:1; /* scheduled for attention */ - unsigned int ibc_ready:1; /* CQ callback fired */ - /* time of last send */ - unsigned long ibc_last_send; - /** link chain for kiblnd_check_conns only */ - struct list_head ibc_connd_list; - /** rxs completed before ESTABLISHED */ - struct list_head ibc_early_rxs; - /** IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */ - struct list_head ibc_tx_noops; - struct list_head ibc_tx_queue; /* sends that need a credit */ - struct list_head ibc_tx_queue_nocred;/* sends that don't need a credit */ - struct list_head ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */ - struct list_head ibc_active_txs; /* active tx awaiting completion */ - spinlock_t ibc_lock; /* serialise */ - kib_rx_t *ibc_rxs; /* the rx descs */ - kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */ - - struct rdma_cm_id *ibc_cmid; /* CM id */ - struct ib_cq *ibc_cq; /* completion queue */ - - kib_connvars_t *ibc_connvars; /* in-progress connection state */ + struct kib_sched_info *ibc_sched; /* scheduler information */ + struct kib_peer *ibc_peer; /* owning peer */ + kib_hca_dev_t *ibc_hdev; /* HCA bound on */ + struct list_head ibc_list; /* stash on peer's conn + * list */ + struct list_head ibc_sched_list; /* schedule for attention */ + __u16 ibc_version; /* version of connection */ + __u64 ibc_incarnation; /* which instance of the + * peer */ + atomic_t ibc_refcount; /* # users */ + int ibc_state; /* what's happening */ + int ibc_nsends_posted; /* # uncompleted sends */ + int ibc_noops_posted; /* # uncompleted NOOPs */ + int ibc_credits; /* # credits I have */ + int ibc_outstanding_credits; /* # credits to return */ + int ibc_reserved_credits; /* # ACK/DONE msg credits */ + int ibc_comms_error; /* set on comms error */ + unsigned int ibc_nrx:16; /* receive buffers owned */ + unsigned int ibc_scheduled:1; /* scheduled for attention + */ + unsigned int ibc_ready:1; /* CQ callback fired */ + unsigned long ibc_last_send; /* time of last send */ + struct list_head ibc_connd_list; /* link chain for + * kiblnd_check_conns only + */ + struct list_head ibc_early_rxs; /* rxs completed 
before
+ * ESTABLISHED */
+ struct list_head ibc_tx_noops; /* IBLND_MSG_NOOPs for
+ * IBLND_MSG_VERSION_1 */
+ struct list_head ibc_tx_queue; /* sends that need a credit
+ */
+ struct list_head ibc_tx_queue_nocred; /* sends that don't need a
+ * credit */
+ struct list_head ibc_tx_queue_rsrvd; /* sends that need to
+ * reserve an ACK/DONE msg
+ */
+ struct list_head ibc_active_txs; /* active tx awaiting
+ * completion */
+ spinlock_t ibc_lock; /* serialise */
+ kib_rx_t *ibc_rxs; /* the rx descs */
+ kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */
+
+ struct rdma_cm_id *ibc_cmid; /* CM id */
+ struct ib_cq *ibc_cq; /* completion queue */
+
+ kib_connvars_t *ibc_connvars; /* in-progress connection
+ * state */
 } kib_conn_t;
-#define IBLND_CONN_INIT 0 /* being initialised */
-#define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */
-#define IBLND_CONN_PASSIVE_WAIT 2 /* passive waiting for rtu */
-#define IBLND_CONN_ESTABLISHED 3 /* connection established */
-#define IBLND_CONN_CLOSING 4 /* being closed */
-#define IBLND_CONN_DISCONNECTED 5 /* disconnected */
+#define IBLND_CONN_INIT 0 /* being initialised */
+#define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */
+#define IBLND_CONN_PASSIVE_WAIT 2 /* passive waiting for rtu */
+#define IBLND_CONN_ESTABLISHED 3 /* connection established */
+#define IBLND_CONN_CLOSING 4 /* being closed */
+#define IBLND_CONN_DISCONNECTED 5 /* disconnected */
 typedef struct kib_peer {
- struct list_head ibp_list; /* stash on global peer list */
- lnet_nid_t ibp_nid; /* who's on the other end(s) */
- lnet_ni_t *ibp_ni; /* LNet interface */
- atomic_t ibp_refcount; /* # users */
- struct list_head ibp_conns; /* all active connections */
- struct list_head ibp_tx_queue; /* msgs waiting for a conn */
- __u16 ibp_version; /* version of peer */
- __u64 ibp_incarnation; /* incarnation of peer */
- int ibp_connecting; /* current active connection attempts */
- int ibp_accepting; /* current passive connection attempts */
- int ibp_error; /* errno on closing this peer */
- unsigned long ibp_last_alive; /* when (in jiffies) I was last alive */
+ struct list_head ibp_list; /* stash on global peer list */
+ lnet_nid_t ibp_nid; /* who's on the other end(s) */
+ lnet_ni_t *ibp_ni; /* LNet interface */
+ atomic_t ibp_refcount; /* # users */
+ struct list_head ibp_conns; /* all active connections */
+ struct list_head ibp_tx_queue; /* msgs waiting for a conn */
+ __u16 ibp_version; /* version of peer */
+ __u64 ibp_incarnation; /* incarnation of peer */
+ int ibp_connecting; /* current active connection attempts
+ */
+ int ibp_accepting; /* current passive connection attempts
+ */
+ int ibp_error; /* errno on closing this peer */
+ unsigned long ibp_last_alive; /* when (in jiffies) I was last alive
+ */
 } kib_peer_t;
-extern kib_data_t kiblnd_data;
+extern kib_data_t kiblnd_data;
 extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
@@ -941,8 +958,8 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
 * right because OFED1.2 defines it as const, to use it we have to add
 * (void *) cast to overcome "const" */
-#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
-#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
+#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
+#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
 struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
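For orientation, a minimal sketch (not part of this patch; the helper name is hypothetical) of how the IBLND_POSTRX_* dispositions defined above map onto the kib_conn_t credit counters when a receive buffer is recycled:

	/* Sketch only: the credit accounting implied by the
	 * IBLND_POSTRX_* values; not code from this patch. */
	static void
	kiblnd_postrx_credit_sketch(kib_conn_t *conn, int post)
	{
		if (post == IBLND_POSTRX_DONT_POST ||	/* rx not reposted */
		    post == IBLND_POSTRX_NO_CREDIT)	/* repost, no credit */
			return;

		spin_lock(&conn->ibc_lock);
		if (post == IBLND_POSTRX_PEER_CREDIT)
			conn->ibc_outstanding_credits++; /* owe the peer one */
		else	/* IBLND_POSTRX_RSRVD_CREDIT */
			conn->ibc_reserved_credits++;	/* reclaim an ACK/DONE slot */
		spin_unlock(&conn->ibc_lock);
	}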
index dbf374983..477aa8b76 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -44,9 +44,9 @@ static void kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx) { lnet_msg_t *lntmsg[2]; - kib_net_t *net = ni->ni_data; - int rc; - int i; + kib_net_t *net = ni->ni_data; + int rc; + int i; LASSERT(net != NULL); LASSERT(!in_interrupt()); @@ -102,10 +102,10 @@ kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status) static kib_tx_t * kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target) { - kib_net_t *net = (kib_net_t *)ni->ni_data; - struct list_head *node; - kib_tx_t *tx; - kib_tx_poolset_t *tps; + kib_net_t *net = (kib_net_t *)ni->ni_data; + struct list_head *node; + kib_tx_t *tx; + kib_tx_poolset_t *tps; tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)]; node = kiblnd_pool_alloc_node(&tps->tps_poolset); @@ -130,9 +130,9 @@ kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target) static void kiblnd_drop_rx(kib_rx_t *rx) { - kib_conn_t *conn = rx->rx_conn; - struct kib_sched_info *sched = conn->ibc_sched; - unsigned long flags; + kib_conn_t *conn = rx->rx_conn; + struct kib_sched_info *sched = conn->ibc_sched; + unsigned long flags; spin_lock_irqsave(&sched->ibs_lock, flags); LASSERT(conn->ibc_nrx > 0); @@ -145,11 +145,11 @@ kiblnd_drop_rx(kib_rx_t *rx) int kiblnd_post_rx(kib_rx_t *rx, int credit) { - kib_conn_t *conn = rx->rx_conn; - kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data; - struct ib_recv_wr *bad_wrq = NULL; - struct ib_mr *mr; - int rc; + kib_conn_t *conn = rx->rx_conn; + kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data; + struct ib_recv_wr *bad_wrq = NULL; + struct ib_mr *mr; + int rc; LASSERT(net != NULL); LASSERT(!in_interrupt()); @@ -164,10 +164,10 @@ kiblnd_post_rx(kib_rx_t *rx, int credit) rx->rx_sge.addr = rx->rx_msgaddr; rx->rx_sge.length = IBLND_MSG_SIZE; - rx->rx_wrq.next = NULL; + rx->rx_wrq.next = NULL; rx->rx_wrq.sg_list = &rx->rx_sge; rx->rx_wrq.num_sge = 1; - rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX); + rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX); LASSERT(conn->ibc_state >= IBLND_CONN_INIT); LASSERT(rx->rx_nob >= 0); /* not posted */ @@ -212,7 +212,7 @@ kiblnd_post_rx(kib_rx_t *rx, int credit) static kib_tx_t * kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie) { - struct list_head *tmp; + struct list_head *tmp; list_for_each(tmp, &conn->ibc_active_txs) { kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list); @@ -237,9 +237,9 @@ kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie) static void kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) { - kib_tx_t *tx; - lnet_ni_t *ni = conn->ibc_peer->ibp_ni; - int idle; + kib_tx_t *tx; + lnet_ni_t *ni = conn->ibc_peer->ibp_ni; + int idle; spin_lock(&conn->ibc_lock); @@ -276,8 +276,8 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) static void kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie) { - lnet_ni_t *ni = conn->ibc_peer->ibp_ni; - kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); + lnet_ni_t *ni = conn->ibc_peer->ibp_ni; + kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); if (tx == NULL) { CERROR("Can't get tx for completion %x for %s\n", @@ -295,14 +295,14 @@ kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie) static void kiblnd_handle_rx(kib_rx_t *rx) { - kib_msg_t *msg = rx->rx_msg; - kib_conn_t *conn = rx->rx_conn; - lnet_ni_t *ni = 
conn->ibc_peer->ibp_ni; - int credits = msg->ibm_credits; - kib_tx_t *tx; - int rc = 0; - int rc2; - int post_credit; + kib_msg_t *msg = rx->rx_msg; + kib_conn_t *conn = rx->rx_conn; + lnet_ni_t *ni = conn->ibc_peer->ibp_ni; + int credits = msg->ibm_credits; + kib_tx_t *tx; + int rc = 0; + int rc2; + int post_credit; LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); @@ -456,12 +456,12 @@ kiblnd_handle_rx(kib_rx_t *rx) static void kiblnd_rx_complete(kib_rx_t *rx, int status, int nob) { - kib_msg_t *msg = rx->rx_msg; - kib_conn_t *conn = rx->rx_conn; - lnet_ni_t *ni = conn->ibc_peer->ibp_ni; - kib_net_t *net = ni->ni_data; - int rc; - int err = -EIO; + kib_msg_t *msg = rx->rx_msg; + kib_conn_t *conn = rx->rx_conn; + lnet_ni_t *ni = conn->ibc_peer->ibp_ni; + kib_net_t *net = ni->ni_data; + int rc; + int err = -EIO; LASSERT(net != NULL); LASSERT(rx->rx_nob < 0); /* was posted */ @@ -502,8 +502,8 @@ kiblnd_rx_complete(kib_rx_t *rx, int status, int nob) /* racing with connection establishment/teardown! */ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { - rwlock_t *g_lock = &kiblnd_data.kib_global_lock; - unsigned long flags; + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + unsigned long flags; write_lock_irqsave(g_lock, flags); /* must check holding global lock to eliminate race */ @@ -550,19 +550,19 @@ kiblnd_kvaddr_to_page(unsigned long vaddr) static int kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) { - kib_hca_dev_t *hdev; - __u64 *pages = tx->tx_pages; - kib_fmr_poolset_t *fps; - int npages; - int size; - int cpt; - int rc; - int i; + kib_hca_dev_t *hdev; + __u64 *pages = tx->tx_pages; + kib_fmr_poolset_t *fps; + int npages; + int size; + int cpt; + int rc; + int i; LASSERT(tx->tx_pool != NULL); LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL); - hdev = tx->tx_pool->tpo_hdev; + hdev = tx->tx_pool->tpo_hdev; for (i = 0, npages = 0; i < rd->rd_nfrags; i++) { for (size = 0; size < rd->rd_frags[i].rf_nob; @@ -586,7 +586,7 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) rd->rd_key = (rd != tx->tx_rd) ? 
tx->tx_u.fmr.fmr_pfmr->fmr->rkey : tx->tx_u.fmr.fmr_pfmr->fmr->lkey; rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask; - rd->rd_frags[0].rf_nob = nob; + rd->rd_frags[0].rf_nob = nob; rd->rd_nfrags = 1; return 0; @@ -595,11 +595,11 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) static int kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) { - kib_hca_dev_t *hdev; - kib_pmr_poolset_t *pps; - __u64 iova; - int cpt; - int rc; + kib_hca_dev_t *hdev; + kib_pmr_poolset_t *pps; + __u64 iova; + int cpt; + int rc; LASSERT(tx->tx_pool != NULL); LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL); @@ -623,7 +623,7 @@ kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) tx->tx_u.pmr->pmr_mr->lkey; rd->rd_nfrags = 1; rd->rd_frags[0].rf_addr = iova; - rd->rd_frags[0].rf_nob = nob; + rd->rd_frags[0].rf_nob = nob; return 0; } @@ -631,7 +631,7 @@ kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx) { - kib_net_t *net = ni->ni_data; + kib_net_t *net = ni->ni_data; LASSERT(net != NULL); @@ -655,20 +655,19 @@ int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nfrags) { - kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; - kib_net_t *net = ni->ni_data; - struct ib_mr *mr = NULL; - __u32 nob; - int i; + kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; + kib_net_t *net = ni->ni_data; + struct ib_mr *mr = NULL; + __u32 nob; + int i; /* If rd is not tx_rd, it's going to get sent to a peer and I'm the * RDMA sink */ tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; tx->tx_nfrags = nfrags; - rd->rd_nfrags = - kiblnd_dma_map_sg(hdev->ibh_ibdev, - tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir); + rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags, + tx->tx_nfrags, tx->tx_dmadir); for (i = 0, nob = 0; i < rd->rd_nfrags; i++) { rd->rd_frags[i].rf_nob = kiblnd_sg_dma_len( @@ -699,12 +698,12 @@ static int kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, unsigned int niov, struct kvec *iov, int offset, int nob) { - kib_net_t *net = ni->ni_data; - struct page *page; + kib_net_t *net = ni->ni_data; + struct page *page; struct scatterlist *sg; - unsigned long vaddr; - int fragnob; - int page_offset; + unsigned long vaddr; + int fragnob; + int page_offset; LASSERT(nob > 0); LASSERT(niov > 0); @@ -752,9 +751,9 @@ static int kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nkiov, lnet_kiov_t *kiov, int offset, int nob) { - kib_net_t *net = ni->ni_data; + kib_net_t *net = ni->ni_data; struct scatterlist *sg; - int fragnob; + int fragnob; CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob); @@ -793,11 +792,11 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) __releases(conn->ibc_lock) __acquires(conn->ibc_lock) { - kib_msg_t *msg = tx->tx_msg; - kib_peer_t *peer = conn->ibc_peer; - int ver = conn->ibc_version; - int rc; - int done; + kib_msg_t *msg = tx->tx_msg; + kib_peer_t *peer = conn->ibc_peer; + int ver = conn->ibc_version; + int rc; + int done; struct ib_send_wr *bad_wrq; LASSERT(tx->tx_queued); @@ -878,8 +877,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) /* close_conn will launch failover */ rc = -ENETDOWN; } else { - rc = ib_post_send(conn->ibc_cmid->qp, - tx->tx_wrq, &bad_wrq); + rc = ib_post_send(conn->ibc_cmid->qp, tx->tx_wrq, &bad_wrq); } conn->ibc_last_send = jiffies; @@ -925,9 +923,9 @@ kiblnd_post_tx_locked(kib_conn_t 
*conn, kib_tx_t *tx, int credit) void kiblnd_check_sends(kib_conn_t *conn) { - int ver = conn->ibc_version; + int ver = conn->ibc_version; lnet_ni_t *ni = conn->ibc_peer->ibp_ni; - kib_tx_t *tx; + kib_tx_t *tx; /* Don't send anything until after the connection is established */ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { @@ -997,9 +995,9 @@ kiblnd_check_sends(kib_conn_t *conn) static void kiblnd_tx_complete(kib_tx_t *tx, int status) { - int failed = (status != IB_WC_SUCCESS); - kib_conn_t *conn = tx->tx_conn; - int idle; + int failed = (status != IB_WC_SUCCESS); + kib_conn_t *conn = tx->tx_conn; + int idle; LASSERT(tx->tx_sending > 0); @@ -1051,11 +1049,11 @@ kiblnd_tx_complete(kib_tx_t *tx, int status) void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob) { - kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; - struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq]; + kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; + struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq]; struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq]; - int nob = offsetof(kib_msg_t, ibm_u) + body_nob; - struct ib_mr *mr; + int nob = offsetof(kib_msg_t, ibm_u) + body_nob; + struct ib_mr *mr; LASSERT(tx->tx_nwrq >= 0); LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1); @@ -1086,14 +1084,14 @@ int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie) { - kib_msg_t *ibmsg = tx->tx_msg; - kib_rdma_desc_t *srcrd = tx->tx_rd; - struct ib_sge *sge = &tx->tx_sge[0]; + kib_msg_t *ibmsg = tx->tx_msg; + kib_rdma_desc_t *srcrd = tx->tx_rd; + struct ib_sge *sge = &tx->tx_sge[0]; struct ib_send_wr *wrq = &tx->tx_wrq[0]; - int rc = resid; - int srcidx; - int dstidx; - int wrknob; + int rc = resid; + int srcidx; + int dstidx; + int wrknob; LASSERT(!in_interrupt()); LASSERT(tx->tx_nwrq == 0); @@ -1144,7 +1142,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, wrq->send_flags = 0; wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx); - wrq->wr.rdma.rkey = kiblnd_rd_frag_key(dstrd, dstidx); + wrq->wr.rdma.rkey = kiblnd_rd_frag_key(dstrd, dstidx); srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob); dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob); @@ -1170,7 +1168,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn) { - struct list_head *q; + struct list_head *q; LASSERT(tx->tx_nwrq > 0); /* work items set up */ LASSERT(!tx->tx_queued); /* not queued for sending already */ @@ -1271,11 +1269,11 @@ static void kiblnd_connect_peer(kib_peer_t *peer) { struct rdma_cm_id *cmid; - kib_dev_t *dev; - kib_net_t *net = peer->ibp_ni->ni_data; + kib_dev_t *dev; + kib_net_t *net = peer->ibp_ni->ni_data; struct sockaddr_in srcaddr; struct sockaddr_in dstaddr; - int rc; + int rc; LASSERT(net != NULL); LASSERT(peer->ibp_connecting > 0); @@ -1335,12 +1333,12 @@ kiblnd_connect_peer(kib_peer_t *peer) void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) { - kib_peer_t *peer; - kib_peer_t *peer2; - kib_conn_t *conn; - rwlock_t *g_lock = &kiblnd_data.kib_global_lock; - unsigned long flags; - int rc; + kib_peer_t *peer; + kib_peer_t *peer2; + kib_conn_t *conn; + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + unsigned long flags; + int rc; /* If I get here, I've committed to send, so I complete the tx with * failure on any problems */ @@ -1456,20 +1454,20 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) int kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) { - 
lnet_hdr_t *hdr = &lntmsg->msg_hdr; - int type = lntmsg->msg_type; + lnet_hdr_t *hdr = &lntmsg->msg_hdr; + int type = lntmsg->msg_type; lnet_process_id_t target = lntmsg->msg_target; - int target_is_router = lntmsg->msg_target_is_router; - int routing = lntmsg->msg_routing; - unsigned int payload_niov = lntmsg->msg_niov; - struct kvec *payload_iov = lntmsg->msg_iov; - lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; - unsigned int payload_offset = lntmsg->msg_offset; - unsigned int payload_nob = lntmsg->msg_len; - kib_msg_t *ibmsg; - kib_tx_t *tx; - int nob; - int rc; + int target_is_router = lntmsg->msg_target_is_router; + int routing = lntmsg->msg_routing; + unsigned int payload_niov = lntmsg->msg_niov; + struct kvec *payload_iov = lntmsg->msg_iov; + lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; + unsigned int payload_offset = lntmsg->msg_offset; + unsigned int payload_nob = lntmsg->msg_len; + kib_msg_t *ibmsg; + kib_tx_t *tx; + int nob; + int rc; /* NB 'private' is different depending on what we're sending.... */ @@ -1628,13 +1626,13 @@ static void kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg) { lnet_process_id_t target = lntmsg->msg_target; - unsigned int niov = lntmsg->msg_niov; - struct kvec *iov = lntmsg->msg_iov; - lnet_kiov_t *kiov = lntmsg->msg_kiov; - unsigned int offset = lntmsg->msg_offset; - unsigned int nob = lntmsg->msg_len; - kib_tx_t *tx; - int rc; + unsigned int niov = lntmsg->msg_niov; + struct kvec *iov = lntmsg->msg_iov; + lnet_kiov_t *kiov = lntmsg->msg_kiov; + unsigned int offset = lntmsg->msg_offset; + unsigned int nob = lntmsg->msg_len; + kib_tx_t *tx; + int rc; tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid); if (tx == NULL) { @@ -1691,14 +1689,14 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen) { - kib_rx_t *rx = private; - kib_msg_t *rxmsg = rx->rx_msg; - kib_conn_t *conn = rx->rx_conn; - kib_tx_t *tx; - kib_msg_t *txmsg; - int nob; - int post_credit = IBLND_POSTRX_PEER_CREDIT; - int rc = 0; + kib_rx_t *rx = private; + kib_msg_t *rxmsg = rx->rx_msg; + kib_conn_t *conn = rx->rx_conn; + kib_tx_t *tx; + kib_msg_t *txmsg; + int nob; + int post_credit = IBLND_POSTRX_PEER_CREDIT; + int rc = 0; LASSERT(mlen <= rlen); LASSERT(!in_interrupt()); @@ -1828,8 +1826,8 @@ kiblnd_peer_alive(kib_peer_t *peer) static void kiblnd_peer_notify(kib_peer_t *peer) { - int error = 0; - unsigned long last_alive = 0; + int error = 0; + unsigned long last_alive = 0; unsigned long flags; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -1860,9 +1858,9 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error) * connection to be finished off by the connd. Otherwise the connd is * already dealing with it (either to set it up or tear it down). 
* Caller holds kib_global_lock exclusively in irq context */ - kib_peer_t *peer = conn->ibc_peer; - kib_dev_t *dev; - unsigned long flags; + kib_peer_t *peer = conn->ibc_peer; + kib_dev_t *dev; + unsigned long flags; LASSERT(error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED); @@ -1934,8 +1932,8 @@ kiblnd_close_conn(kib_conn_t *conn, int error) static void kiblnd_handle_early_rxs(kib_conn_t *conn) { - unsigned long flags; - kib_rx_t *rx; + unsigned long flags; + kib_rx_t *rx; kib_rx_t *tmp; LASSERT(!in_interrupt()); @@ -1957,9 +1955,9 @@ static void kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs) { LIST_HEAD(zombies); - struct list_head *tmp; - struct list_head *nxt; - kib_tx_t *tx; + struct list_head *tmp; + struct list_head *nxt; + kib_tx_t *tx; spin_lock(&conn->ibc_lock); @@ -2018,7 +2016,7 @@ void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error) { LIST_HEAD(zombies); - unsigned long flags; + unsigned long flags; LASSERT(error != 0); LASSERT(!in_interrupt()); @@ -2071,12 +2069,12 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error) void kiblnd_connreq_done(kib_conn_t *conn, int status) { - kib_peer_t *peer = conn->ibc_peer; - kib_tx_t *tx; + kib_peer_t *peer = conn->ibc_peer; + kib_tx_t *tx; kib_tx_t *tmp; - struct list_head txs; - unsigned long flags; - int active; + struct list_head txs; + unsigned long flags; + int active; active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); @@ -2166,7 +2164,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) static void kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej) { - int rc; + int rc; rc = rdma_reject(cmid, rej, sizeof(*rej)); @@ -2177,22 +2175,22 @@ kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej) static int kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) { - rwlock_t *g_lock = &kiblnd_data.kib_global_lock; - kib_msg_t *reqmsg = priv; - kib_msg_t *ackmsg; - kib_dev_t *ibdev; - kib_peer_t *peer; - kib_peer_t *peer2; - kib_conn_t *conn; - lnet_ni_t *ni = NULL; - kib_net_t *net = NULL; - lnet_nid_t nid; + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + kib_msg_t *reqmsg = priv; + kib_msg_t *ackmsg; + kib_dev_t *ibdev; + kib_peer_t *peer; + kib_peer_t *peer2; + kib_conn_t *conn; + lnet_ni_t *ni = NULL; + kib_net_t *net = NULL; + lnet_nid_t nid; struct rdma_conn_param cp; - kib_rej_t rej; - int version = IBLND_MSG_VERSION; - unsigned long flags; - int rc; - struct sockaddr_in *peer_addr; + kib_rej_t rej; + int version = IBLND_MSG_VERSION; + unsigned long flags; + int rc; + struct sockaddr_in *peer_addr; LASSERT(!in_interrupt()); /* cmid inherits 'context' from the corresponding listener id */ @@ -2200,8 +2198,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) LASSERT(ibdev != NULL); memset(&rej, 0, sizeof(rej)); - rej.ibr_magic = IBLND_MSG_MAGIC; - rej.ibr_why = IBLND_REJECT_FATAL; + rej.ibr_magic = IBLND_MSG_MAGIC; + rej.ibr_why = IBLND_REJECT_FATAL; rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE; peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr); @@ -2243,7 +2241,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) } nid = reqmsg->ibm_srcnid; - ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid)); + ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid)); if (ni != NULL) { net = (kib_net_t *)ni->ni_data; @@ -2394,7 +2392,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) * CM callback doesn't destroy cmid. 
*/
 conn->ibc_incarnation = reqmsg->ibm_srcstamp;
- conn->ibc_credits = IBLND_MSG_QUEUE_SIZE(version);
+ conn->ibc_credits = IBLND_MSG_QUEUE_SIZE(version);
 conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
 LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
 IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(version));
@@ -2412,12 +2410,12 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 memset(&cp, 0, sizeof(cp));
 cp.private_data = ackmsg;
- cp.private_data_len = ackmsg->ibm_nob;
+ cp.private_data_len = ackmsg->ibm_nob;
 cp.responder_resources = 0; /* No atomic ops or RDMA reads */
- cp.initiator_depth = 0;
+ cp.initiator_depth = 0;
 cp.flow_control = 1;
- cp.retry_count = *kiblnd_tunables.kib_retry_count;
- cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count;
+ cp.retry_count = *kiblnd_tunables.kib_retry_count;
+ cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count;
 CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
@@ -2439,7 +2437,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 if (ni != NULL)
 lnet_ni_decref(ni);
- rej.ibr_version = version;
+ rej.ibr_version = version;
 rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
 rej.ibr_cp.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
 kiblnd_reject(cmid, &rej);
@@ -2451,10 +2449,10 @@ static void
 kiblnd_reconnect(kib_conn_t *conn, int version,
 __u64 incarnation, int why, kib_connparams_t *cp)
 {
- kib_peer_t *peer = conn->ibc_peer;
- char *reason;
- int retry = 0;
- unsigned long flags;
+ kib_peer_t *peer = conn->ibc_peer;
+ char *reason;
+ int retry = 0;
+ unsigned long flags;
 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
 LASSERT(peer->ibp_connecting > 0); /* 'conn' at least */
@@ -2513,7 +2511,7 @@ kiblnd_reconnect(kib_conn_t *conn, int version,
 static void
 kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
 {
- kib_peer_t *peer = conn->ibc_peer;
+ kib_peer_t *peer = conn->ibc_peer;
 LASSERT(!in_interrupt());
 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
@@ -2532,10 +2530,10 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
 case IB_CM_REJ_CONSUMER_DEFINED:
 if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
- kib_rej_t *rej = priv;
- kib_connparams_t *cp = NULL;
- int flip = 0;
- __u64 incarnation = -1;
+ kib_rej_t *rej = priv;
+ kib_connparams_t *cp = NULL;
+ int flip = 0;
+ __u64 incarnation = -1;
 /* NB. default incarnation is -1 because:
 * a) V1 will ignore dst incarnation in connreq.
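The hunks above realign the same rdma_conn_param setup that both the accept and the connect paths use. Condensed into one place, the recipe looks roughly like this (hypothetical helper name; the struct fields are the stock rdma_cm API and the values mirror the code above):

	/* Sketch only: the conn-param recipe shared by
	 * kiblnd_passive_connect() and kiblnd_active_connect(). */
	static void
	kiblnd_conn_param_sketch(struct rdma_conn_param *cp, kib_msg_t *msg)
	{
		memset(cp, 0, sizeof(*cp));
		cp->private_data	= msg;		/* REQ/ACK rides in CM private data */
		cp->private_data_len	= msg->ibm_nob;
		cp->responder_resources	= 0;		/* no atomic ops or RDMA reads */
		cp->initiator_depth	= 0;
		cp->flow_control	= 1;
		cp->retry_count		= *kiblnd_tunables.kib_retry_count;
		cp->rnr_retry_count	= *kiblnd_tunables.kib_rnr_retry_count;
	}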
@@ -2652,13 +2650,13 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob) static void kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) { - kib_peer_t *peer = conn->ibc_peer; - lnet_ni_t *ni = peer->ibp_ni; - kib_net_t *net = ni->ni_data; - kib_msg_t *msg = priv; - int ver = conn->ibc_version; - int rc = kiblnd_unpack_msg(msg, priv_nob); - unsigned long flags; + kib_peer_t *peer = conn->ibc_peer; + lnet_ni_t *ni = peer->ibp_ni; + kib_net_t *net = ni->ni_data; + kib_msg_t *msg = priv; + int ver = conn->ibc_version; + int rc = kiblnd_unpack_msg(msg, priv_nob); + unsigned long flags; LASSERT(net != NULL); @@ -2726,8 +2724,8 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) goto failed; } - conn->ibc_incarnation = msg->ibm_srcstamp; - conn->ibc_credits = + conn->ibc_incarnation = msg->ibm_srcstamp; + conn->ibc_credits = conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver); LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(ver)); @@ -2749,20 +2747,20 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) static int kiblnd_active_connect(struct rdma_cm_id *cmid) { - kib_peer_t *peer = (kib_peer_t *)cmid->context; - kib_conn_t *conn; - kib_msg_t *msg; - struct rdma_conn_param cp; - int version; - __u64 incarnation; - unsigned long flags; - int rc; + kib_peer_t *peer = (kib_peer_t *)cmid->context; + kib_conn_t *conn; + kib_msg_t *msg; + struct rdma_conn_param cp; + int version; + __u64 incarnation; + unsigned long flags; + int rc; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); incarnation = peer->ibp_incarnation; - version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : - peer->ibp_version; + version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : + peer->ibp_version; read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); @@ -2793,8 +2791,8 @@ kiblnd_active_connect(struct rdma_cm_id *cmid) cp.private_data_len = msg->ibm_nob; cp.responder_resources = 0; /* No atomic ops or RDMA reads */ cp.initiator_depth = 0; - cp.flow_control = 1; - cp.retry_count = *kiblnd_tunables.kib_retry_count; + cp.flow_control = 1; + cp.retry_count = *kiblnd_tunables.kib_retry_count; cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count; LASSERT(cmid->context == (void *)conn); @@ -2814,9 +2812,9 @@ kiblnd_active_connect(struct rdma_cm_id *cmid) int kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) { - kib_peer_t *peer; - kib_conn_t *conn; - int rc; + kib_peer_t *peer; + kib_conn_t *conn; + int rc; switch (event->event) { default: @@ -2983,8 +2981,8 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) static int kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs) { - kib_tx_t *tx; - struct list_head *ttmp; + kib_tx_t *tx; + struct list_head *ttmp; list_for_each(ttmp, txs) { tx = list_entry(ttmp, kib_tx_t, tx_list); @@ -3022,13 +3020,13 @@ kiblnd_check_conns(int idx) { LIST_HEAD(closes); LIST_HEAD(checksends); - struct list_head *peers = &kiblnd_data.kib_peers[idx]; - struct list_head *ptmp; - kib_peer_t *peer; - kib_conn_t *conn; + struct list_head *peers = &kiblnd_data.kib_peers[idx]; + struct list_head *ptmp; + kib_peer_t *peer; + kib_conn_t *conn; kib_conn_t *tmp; - struct list_head *ctmp; - unsigned long flags; + struct list_head *ctmp; + unsigned long flags; /* NB. 
We expect to have a look at all the peers and not find any * RDMAs to time out, so we just use a shared lock while we @@ -3114,14 +3112,14 @@ kiblnd_disconnect_conn(kib_conn_t *conn) int kiblnd_connd(void *arg) { - wait_queue_t wait; - unsigned long flags; - kib_conn_t *conn; - int timeout; - int i; - int dropped_lock; - int peer_index = 0; - unsigned long deadline = jiffies; + wait_queue_t wait; + unsigned long flags; + kib_conn_t *conn; + int timeout; + int i; + int dropped_lock; + int peer_index = 0; + unsigned long deadline = jiffies; cfs_block_allsigs(); @@ -3169,7 +3167,7 @@ kiblnd_connd(void *arg) if (timeout <= 0) { const int n = 4; const int p = 1; - int chunk = kiblnd_data.kib_peer_hash_size; + int chunk = kiblnd_data.kib_peer_hash_size; spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); dropped_lock = 1; @@ -3273,9 +3271,9 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg) * consuming my CQ I could be called after all completions have * occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0 * and this CQ is about to be destroyed so I NOOP. */ - kib_conn_t *conn = (kib_conn_t *)arg; - struct kib_sched_info *sched = conn->ibc_sched; - unsigned long flags; + kib_conn_t *conn = (kib_conn_t *)arg; + struct kib_sched_info *sched = conn->ibc_sched; + unsigned long flags; LASSERT(cq == conn->ibc_cq); @@ -3309,15 +3307,15 @@ kiblnd_cq_event(struct ib_event *event, void *arg) int kiblnd_scheduler(void *arg) { - long id = (long)arg; - struct kib_sched_info *sched; - kib_conn_t *conn; - wait_queue_t wait; - unsigned long flags; - struct ib_wc wc; - int did_something; - int busy_loops = 0; - int rc; + long id = (long)arg; + struct kib_sched_info *sched; + kib_conn_t *conn; + wait_queue_t wait; + unsigned long flags; + struct ib_wc wc; + int did_something; + int busy_loops = 0; + int rc; cfs_block_allsigs(); @@ -3432,11 +3430,11 @@ kiblnd_scheduler(void *arg) int kiblnd_failover_thread(void *arg) { - rwlock_t *glock = &kiblnd_data.kib_global_lock; - kib_dev_t *dev; - wait_queue_t wait; - unsigned long flags; - int rc; + rwlock_t *glock = &kiblnd_data.kib_global_lock; + kib_dev_t *dev; + wait_queue_t wait; + unsigned long flags; + int rc; LASSERT(*kiblnd_tunables.kib_dev_failover != 0); @@ -3446,8 +3444,8 @@ kiblnd_failover_thread(void *arg) write_lock_irqsave(glock, flags); while (!kiblnd_data.kib_shutdown) { - int do_failover = 0; - int long_sleep; + int do_failover = 0; + int long_sleep; list_for_each_entry(dev, &kiblnd_data.kib_failed_devs, ibd_fail_list) { diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c index eedf01afd..b0e00361c 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c @@ -150,30 +150,30 @@ module_param(use_privileged_port, int, 0644); MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection"); kib_tunables_t kiblnd_tunables = { - .kib_dev_failover = &dev_failover, - .kib_service = &service, - .kib_cksum = &cksum, - .kib_timeout = &timeout, - .kib_keepalive = &keepalive, - .kib_ntx = &ntx, - .kib_credits = &credits, - .kib_peertxcredits = &peer_credits, - .kib_peercredits_hiw = &peer_credits_hiw, - .kib_peerrtrcredits = &peer_buffer_credits, - .kib_peertimeout = &peer_timeout, - .kib_default_ipif = &ipif_name, - .kib_retry_count = &retry_count, - .kib_rnr_retry_count = &rnr_retry_count, - .kib_concurrent_sends = &concurrent_sends, - .kib_ib_mtu = 
&ib_mtu, - .kib_map_on_demand = &map_on_demand, - .kib_fmr_pool_size = &fmr_pool_size, - .kib_fmr_flush_trigger = &fmr_flush_trigger, - .kib_fmr_cache = &fmr_cache, - .kib_pmr_pool_size = &pmr_pool_size, - .kib_require_priv_port = &require_privileged_port, - .kib_use_priv_port = &use_privileged_port, - .kib_nscheds = &nscheds + .kib_dev_failover = &dev_failover, + .kib_service = &service, + .kib_cksum = &cksum, + .kib_timeout = &timeout, + .kib_keepalive = &keepalive, + .kib_ntx = &ntx, + .kib_credits = &credits, + .kib_peertxcredits = &peer_credits, + .kib_peercredits_hiw = &peer_credits_hiw, + .kib_peerrtrcredits = &peer_buffer_credits, + .kib_peertimeout = &peer_timeout, + .kib_default_ipif = &ipif_name, + .kib_retry_count = &retry_count, + .kib_rnr_retry_count = &rnr_retry_count, + .kib_concurrent_sends = &concurrent_sends, + .kib_ib_mtu = &ib_mtu, + .kib_map_on_demand = &map_on_demand, + .kib_fmr_pool_size = &fmr_pool_size, + .kib_fmr_flush_trigger = &fmr_flush_trigger, + .kib_fmr_cache = &fmr_cache, + .kib_pmr_pool_size = &pmr_pool_size, + .kib_require_priv_port = &require_privileged_port, + .kib_use_priv_port = &use_privileged_port, + .kib_nscheds = &nscheds }; int diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/Makefile b/drivers/staging/lustre/lnet/klnds/socklnd/Makefile index f3fb8778c..c011581d3 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/Makefile +++ b/drivers/staging/lustre/lnet/klnds/socklnd/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_LNET) += ksocklnd.o -ksocklnd-y := socklnd.o socklnd_cb.o socklnd_proto.o socklnd_modparams.o socklnd_lib-linux.o +ksocklnd-y := socklnd.o socklnd_cb.o socklnd_proto.o socklnd_modparams.o socklnd_lib.o diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c index 7586b7e40..4128a9221 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c @@ -49,8 +49,8 @@ ksock_nal_data_t ksocknal_data; static ksock_interface_t * ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip) { - ksock_net_t *net = ni->ni_data; - int i; + ksock_net_t *net = ni->ni_data; + int i; ksock_interface_t *iface; for (i = 0; i < net->ksnn_ninterfaces; i++) { @@ -102,8 +102,8 @@ ksocknal_destroy_route(ksock_route_t *route) static int ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id) { - ksock_net_t *net = ni->ni_data; - ksock_peer_t *peer; + ksock_net_t *net = ni->ni_data; + ksock_peer_t *peer; LASSERT(id.nid != LNET_NID_ANY); LASSERT(id.pid != LNET_PID_ANY); @@ -149,7 +149,7 @@ ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id) void ksocknal_destroy_peer(ksock_peer_t *peer) { - ksock_net_t *net = peer->ksnp_ni->ni_data; + ksock_net_t *net = peer->ksnp_ni->ni_data; CDEBUG(D_NET, "peer %s %p deleted\n", libcfs_id2str(peer->ksnp_id), peer); @@ -175,9 +175,9 @@ ksocknal_destroy_peer(ksock_peer_t *peer) ksock_peer_t * ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id) { - struct list_head *peer_list = ksocknal_nid2peerlist(id.nid); - struct list_head *tmp; - ksock_peer_t *peer; + struct list_head *peer_list = ksocknal_nid2peerlist(id.nid); + struct list_head *tmp; + ksock_peer_t *peer; list_for_each(tmp, peer_list) { @@ -203,7 +203,7 @@ ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id) ksock_peer_t * ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id) { - ksock_peer_t *peer; + ksock_peer_t *peer; read_lock(&ksocknal_data.ksnd_global_lock); peer = 
ksocknal_find_peer_locked(ni, id); @@ -217,8 +217,8 @@ ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id) static void ksocknal_unlink_peer_locked(ksock_peer_t *peer) { - int i; - __u32 ip; + int i; + __u32 ip; ksock_interface_t *iface; for (i = 0; i < peer->ksnp_n_passive_ips; i++) { @@ -249,13 +249,13 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index, lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip, int *port, int *conn_count, int *share_count) { - ksock_peer_t *peer; - struct list_head *ptmp; - ksock_route_t *route; - struct list_head *rtmp; - int i; - int j; - int rc = -ENOENT; + ksock_peer_t *peer; + struct list_head *ptmp; + ksock_route_t *route; + struct list_head *rtmp; + int i; + int j; + int rc = -ENOENT; read_lock(&ksocknal_data.ksnd_global_lock); @@ -322,8 +322,8 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index, static void ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn) { - ksock_peer_t *peer = route->ksnr_peer; - int type = conn->ksnc_type; + ksock_peer_t *peer = route->ksnr_peer; + int type = conn->ksnc_type; ksock_interface_t *iface; conn->ksnc_route = route; @@ -366,9 +366,9 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn) static void ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route) { - struct list_head *tmp; - ksock_conn_t *conn; - ksock_route_t *route2; + struct list_head *tmp; + ksock_conn_t *conn; + ksock_route_t *route2; LASSERT(!peer->ksnp_closing); LASSERT(route->ksnr_peer == NULL); @@ -407,11 +407,11 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route) static void ksocknal_del_route_locked(ksock_route_t *route) { - ksock_peer_t *peer = route->ksnr_peer; + ksock_peer_t *peer = route->ksnr_peer; ksock_interface_t *iface; - ksock_conn_t *conn; - struct list_head *ctmp; - struct list_head *cnxt; + ksock_conn_t *conn; + struct list_head *ctmp; + struct list_head *cnxt; LASSERT(!route->ksnr_deleted); @@ -447,12 +447,12 @@ ksocknal_del_route_locked(ksock_route_t *route) int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) { - struct list_head *tmp; - ksock_peer_t *peer; - ksock_peer_t *peer2; - ksock_route_t *route; - ksock_route_t *route2; - int rc; + struct list_head *tmp; + ksock_peer_t *peer; + ksock_peer_t *peer2; + ksock_route_t *route; + ksock_route_t *route2; + int rc; if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY) @@ -509,11 +509,11 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) static void ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip) { - ksock_conn_t *conn; - ksock_route_t *route; - struct list_head *tmp; - struct list_head *nxt; - int nshared; + ksock_conn_t *conn; + ksock_route_t *route; + struct list_head *tmp; + struct list_head *nxt; + int nshared; LASSERT(!peer->ksnp_closing); @@ -565,13 +565,13 @@ static int ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) { LIST_HEAD(zombies); - struct list_head *ptmp; - struct list_head *pnxt; - ksock_peer_t *peer; - int lo; - int hi; - int i; - int rc = -ENOENT; + struct list_head *ptmp; + struct list_head *pnxt; + ksock_peer_t *peer; + int lo; + int hi; + int i; + int rc = -ENOENT; write_lock_bh(&ksocknal_data.ksnd_global_lock); @@ -623,11 +623,11 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) static ksock_conn_t * ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index) { - ksock_peer_t *peer; - struct list_head *ptmp; - ksock_conn_t *conn; - struct list_head *ctmp; - int i; + ksock_peer_t *peer; 
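ksocknal_find_peer_locked() above is the usual bucket walk over the ksnd_peers hash. A compact sketch of the pattern (hypothetical helper name; ksocknal_nid2peerlist() and the ksnp_* fields are the ones declared in socklnd.h):

	/* Sketch only: hash the NID to a bucket and scan it under
	 * ksnd_global_lock; the caller takes its own peer ref. */
	static ksock_peer_t *
	ksocknal_peer_lookup_sketch(lnet_process_id_t id)
	{
		struct list_head *bucket = ksocknal_nid2peerlist(id.nid);
		ksock_peer_t *peer;

		list_for_each_entry(peer, bucket, ksnp_list) {
			if (peer->ksnp_id.nid == id.nid &&
			    peer->ksnp_id.pid == id.pid)
				return peer;
		}
		return NULL;
	}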
+ struct list_head *ptmp; + ksock_conn_t *conn; + struct list_head *ctmp; + int i; read_lock(&ksocknal_data.ksnd_global_lock); @@ -661,8 +661,8 @@ static ksock_sched_t * ksocknal_choose_scheduler_locked(unsigned int cpt) { struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt]; - ksock_sched_t *sched; - int i; + ksock_sched_t *sched; + int i; LASSERT(info->ksi_nthreads > 0); @@ -683,9 +683,9 @@ ksocknal_choose_scheduler_locked(unsigned int cpt) static int ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs) { - ksock_net_t *net = ni->ni_data; - int i; - int nip; + ksock_net_t *net = ni->ni_data; + int i; + int nip; read_lock(&ksocknal_data.ksnd_global_lock); @@ -711,12 +711,12 @@ ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs) static int ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips) { - int best_netmatch = 0; - int best_xor = 0; - int best = -1; - int this_xor; - int this_netmatch; - int i; + int best_netmatch = 0; + int best_xor = 0; + int best = -1; + int this_xor; + int this_netmatch; + int i; for (i = 0; i < nips; i++) { if (ips[i] == 0) @@ -743,19 +743,19 @@ ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips) static int ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) { - rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; - ksock_net_t *net = peer->ksnp_ni->ni_data; - ksock_interface_t *iface; - ksock_interface_t *best_iface; - int n_ips; - int i; - int j; - int k; - __u32 ip; - __u32 xor; - int this_netmatch; - int best_netmatch; - int best_npeers; + rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; + ksock_net_t *net = peer->ksnp_ni->ni_data; + ksock_interface_t *iface; + ksock_interface_t *best_iface; + int n_ips; + int i; + int j; + int k; + __u32 ip; + __u32 xor; + int this_netmatch; + int best_netmatch; + int best_npeers; /* CAVEAT EMPTOR: We do all our interface matching with an * exclusive hold of global lock at IRQ priority. We're only @@ -846,19 +846,19 @@ static void ksocknal_create_routes(ksock_peer_t *peer, int port, __u32 *peer_ipaddrs, int npeer_ipaddrs) { - ksock_route_t *newroute = NULL; - rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; - lnet_ni_t *ni = peer->ksnp_ni; - ksock_net_t *net = ni->ni_data; - struct list_head *rtmp; - ksock_route_t *route; - ksock_interface_t *iface; - ksock_interface_t *best_iface; - int best_netmatch; - int this_netmatch; - int best_nroutes; - int i; - int j; + ksock_route_t *newroute = NULL; + rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; + lnet_ni_t *ni = peer->ksnp_ni; + ksock_net_t *net = ni->ni_data; + struct list_head *rtmp; + ksock_route_t *route; + ksock_interface_t *iface; + ksock_interface_t *best_iface; + int best_netmatch; + int this_netmatch; + int best_nroutes; + int i; + int j; /* CAVEAT EMPTOR: We do all our interface matching with an * exclusive hold of global lock at IRQ priority. 
We're only @@ -963,12 +963,12 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, int ksocknal_accept(lnet_ni_t *ni, struct socket *sock) { - ksock_connreq_t *cr; - int rc; - __u32 peer_ip; - int peer_port; + ksock_connreq_t *cr; + int rc; + __u32 peer_ip; + int peer_port; - rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port); + rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port); LASSERT(rc == 0); /* we succeeded before */ LIBCFS_ALLOC(cr, sizeof(*cr)); @@ -994,7 +994,7 @@ ksocknal_accept(lnet_ni_t *ni, struct socket *sock) static int ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr) { - ksock_route_t *route; + ksock_route_t *route; list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) { @@ -1008,23 +1008,23 @@ int ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, struct socket *sock, int type) { - rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; + rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; LIST_HEAD(zombies); - lnet_process_id_t peerid; - struct list_head *tmp; - __u64 incarnation; - ksock_conn_t *conn; - ksock_conn_t *conn2; - ksock_peer_t *peer = NULL; - ksock_peer_t *peer2; - ksock_sched_t *sched; + lnet_process_id_t peerid; + struct list_head *tmp; + __u64 incarnation; + ksock_conn_t *conn; + ksock_conn_t *conn2; + ksock_peer_t *peer = NULL; + ksock_peer_t *peer2; + ksock_sched_t *sched; ksock_hello_msg_t *hello; - int cpt; - ksock_tx_t *tx; - ksock_tx_t *txtmp; - int rc; - int active; - char *warn = NULL; + int cpt; + ksock_tx_t *tx; + ksock_tx_t *txtmp; + int rc; + int active; + char *warn = NULL; active = (route != NULL); @@ -1378,15 +1378,15 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, ksocknal_txlist_done(ni, &zombies, 1); ksocknal_peer_decref(peer); - failed_1: +failed_1: if (hello != NULL) LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t, kshm_ips[LNET_MAX_INTERFACES])); LIBCFS_FREE(conn, sizeof(*conn)); - failed_0: - libcfs_sock_release(sock); +failed_0: + sock_release(sock); return rc; } @@ -1396,10 +1396,10 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error) /* This just does the immmediate housekeeping, and queues the * connection for the reaper to terminate. * Caller holds ksnd_global_lock exclusively in irq context */ - ksock_peer_t *peer = conn->ksnc_peer; - ksock_route_t *route; - ksock_conn_t *conn2; - struct list_head *tmp; + ksock_peer_t *peer = conn->ksnc_peer; + ksock_route_t *route; + ksock_conn_t *conn2; + struct list_head *tmp; LASSERT(peer->ksnp_error == 0); LASSERT(!conn->ksnc_closing); @@ -1479,7 +1479,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error) void ksocknal_peer_failed(ksock_peer_t *peer) { - int notify = 0; + int notify = 0; unsigned long last_alive = 0; /* There has been a connection failure or comms error; but I'll only @@ -1506,9 +1506,9 @@ ksocknal_peer_failed(ksock_peer_t *peer) void ksocknal_finalize_zcreq(ksock_conn_t *conn) { - ksock_peer_t *peer = conn->ksnc_peer; - ksock_tx_t *tx; - ksock_tx_t *tmp; + ksock_peer_t *peer = conn->ksnc_peer; + ksock_tx_t *tx; + ksock_tx_t *tmp; LIST_HEAD(zlist); /* NB safe to finalize TXs because closing of socket will @@ -1546,9 +1546,9 @@ ksocknal_terminate_conn(ksock_conn_t *conn) * disengage the socket from its callbacks and close it. * ksnc_refcount will eventually hit zero, and then the reaper will * destroy it. 
*/ - ksock_peer_t *peer = conn->ksnc_peer; - ksock_sched_t *sched = conn->ksnc_scheduler; - int failed = 0; + ksock_peer_t *peer = conn->ksnc_peer; + ksock_sched_t *sched = conn->ksnc_scheduler; + int failed = 0; LASSERT(conn->ksnc_closing); @@ -1617,7 +1617,7 @@ ksocknal_queue_zombie_conn(ksock_conn_t *conn) void ksocknal_destroy_conn(ksock_conn_t *conn) { - unsigned long last_rcv; + unsigned long last_rcv; /* Final coup-de-grace of the reaper */ CDEBUG(D_NET, "connection %p\n", conn); @@ -1677,10 +1677,10 @@ ksocknal_destroy_conn(ksock_conn_t *conn) int ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why) { - ksock_conn_t *conn; - struct list_head *ctmp; - struct list_head *cnxt; - int count = 0; + ksock_conn_t *conn; + struct list_head *ctmp; + struct list_head *cnxt; + int count = 0; list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) { conn = list_entry(ctmp, ksock_conn_t, ksnc_list); @@ -1698,9 +1698,9 @@ ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why) int ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why) { - ksock_peer_t *peer = conn->ksnc_peer; - __u32 ipaddr = conn->ksnc_ipaddr; - int count; + ksock_peer_t *peer = conn->ksnc_peer; + __u32 ipaddr = conn->ksnc_ipaddr; + int count; write_lock_bh(&ksocknal_data.ksnd_global_lock); @@ -1714,13 +1714,13 @@ ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why) int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr) { - ksock_peer_t *peer; - struct list_head *ptmp; - struct list_head *pnxt; - int lo; - int hi; - int i; - int count = 0; + ksock_peer_t *peer; + struct list_head *ptmp; + struct list_head *pnxt; + int lo; + int hi; + int i; + int count = 0; write_lock_bh(&ksocknal_data.ksnd_global_lock); @@ -1762,7 +1762,7 @@ ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive) { /* The router is telling me she's been notified of a change in * gateway state.... 
*/ - lnet_process_id_t id = {0}; + lnet_process_id_t id = {0}; id.nid = gw_nid; id.pid = LNET_PID_ANY; @@ -1783,20 +1783,20 @@ ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive) void ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) { - int connect = 1; - unsigned long last_alive = 0; - unsigned long now = cfs_time_current(); - ksock_peer_t *peer = NULL; - rwlock_t *glock = &ksocknal_data.ksnd_global_lock; - lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID}; + int connect = 1; + unsigned long last_alive = 0; + unsigned long now = cfs_time_current(); + ksock_peer_t *peer = NULL; + rwlock_t *glock = &ksocknal_data.ksnd_global_lock; + lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID}; read_lock(glock); peer = ksocknal_find_peer_locked(ni, id); if (peer != NULL) { - struct list_head *tmp; - ksock_conn_t *conn; - int bufnob; + struct list_head *tmp; + ksock_conn_t *conn; + int bufnob; list_for_each(tmp, &peer->ksnp_conns) { conn = list_entry(tmp, ksock_conn_t, ksnc_list); @@ -1844,10 +1844,10 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) static void ksocknal_push_peer(ksock_peer_t *peer) { - int index; - int i; - struct list_head *tmp; - ksock_conn_t *conn; + int index; + int i; + struct list_head *tmp; + ksock_conn_t *conn; for (index = 0; ; index++) { read_lock(&ksocknal_data.ksnd_global_lock); @@ -1877,12 +1877,12 @@ ksocknal_push_peer(ksock_peer_t *peer) static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id) { - ksock_peer_t *peer; - struct list_head *tmp; - int index; - int i; - int j; - int rc = -ENOENT; + ksock_peer_t *peer; + struct list_head *tmp; + int index; + int i; + int j; + int rc = -ENOENT; for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { for (j = 0; ; j++) { @@ -1926,15 +1926,15 @@ ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id) static int ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask) { - ksock_net_t *net = ni->ni_data; + ksock_net_t *net = ni->ni_data; ksock_interface_t *iface; - int rc; - int i; - int j; - struct list_head *ptmp; - ksock_peer_t *peer; - struct list_head *rtmp; - ksock_route_t *route; + int rc; + int i; + int j; + struct list_head *ptmp; + ksock_peer_t *peer; + struct list_head *rtmp; + ksock_route_t *route; if (ipaddress == 0 || netmask == 0) @@ -1988,12 +1988,12 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask) static void ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr) { - struct list_head *tmp; - struct list_head *nxt; - ksock_route_t *route; - ksock_conn_t *conn; - int i; - int j; + struct list_head *tmp; + struct list_head *nxt; + ksock_route_t *route; + ksock_conn_t *conn; + int i; + int j; for (i = 0; i < peer->ksnp_n_passive_ips; i++) if (peer->ksnp_passive_ips[i] == ipaddr) { @@ -2029,14 +2029,14 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr) static int ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress) { - ksock_net_t *net = ni->ni_data; - int rc = -ENOENT; - struct list_head *tmp; - struct list_head *nxt; - ksock_peer_t *peer; - __u32 this_ip; - int i; - int j; + ksock_net_t *net = ni->ni_data; + int rc = -ENOENT; + struct list_head *tmp; + struct list_head *nxt; + ksock_peer_t *peer; + __u32 this_ip; + int i; + int j; write_lock_bh(&ksocknal_data.ksnd_global_lock); @@ -2114,11 +2114,11 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) data->ioc_u32[0]); /* IP address */ case IOC_LIBCFS_GET_PEER: { - __u32 myip = 0; - __u32 ip = 0; - int port = 0; 
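The IOC_LIBCFS_GET_PEER case being realigned here hands its out-parameters straight back through the ioctl data block. A sketch of the packing (hypothetical helper; treat the exact ioc_u32[] slot layout as illustrative):

	/* Sketch only: returning ksocknal_get_peer_info() results
	 * through struct libcfs_ioctl_data. */
	static void
	ksocknal_pack_peer_reply_sketch(struct libcfs_ioctl_data *data,
					lnet_process_id_t id, __u32 myip,
					__u32 ip, int port, int conn_count,
					int share_count)
	{
		data->ioc_nid    = id.nid;	/* peer NID */
		data->ioc_count  = share_count;	/* # times peer was added */
		data->ioc_u32[0] = ip;		/* peer IP */
		data->ioc_u32[1] = port;	/* peer port */
		data->ioc_u32[2] = myip;	/* local IP */
		data->ioc_u32[3] = conn_count;	/* # active conns */
		data->ioc_u32[4] = id.pid;	/* peer PID */
	}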
- int conn_count = 0; - int share_count = 0; + __u32 myip = 0; + __u32 ip = 0; + int port = 0; + int conn_count = 0; + int share_count = 0; rc = ksocknal_get_peer_info(ni, data->ioc_count, &id, &myip, &ip, &port, @@ -2150,9 +2150,9 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) data->ioc_u32[0]); /* IP */ case IOC_LIBCFS_GET_CONN: { - int txmem; - int rxmem; - int nagle; + int txmem; + int rxmem; + int nagle; ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count); if (conn == NULL) @@ -2207,8 +2207,8 @@ ksocknal_free_buffers(void) LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0); if (ksocknal_data.ksnd_sched_info != NULL) { - struct ksock_sched_info *info; - int i; + struct ksock_sched_info *info; + int i; cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) { if (info->ksi_scheds != NULL) { @@ -2227,8 +2227,8 @@ ksocknal_free_buffers(void) spin_lock(&ksocknal_data.ksnd_tx_lock); if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) { - struct list_head zlist; - ksock_tx_t *tx; + struct list_head zlist; + ksock_tx_t *tx; list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs); list_del_init(&ksocknal_data.ksnd_idle_noop_txs); @@ -2248,9 +2248,9 @@ static void ksocknal_base_shutdown(void) { struct ksock_sched_info *info; - ksock_sched_t *sched; - int i; - int j; + ksock_sched_t *sched; + int i; + int j; CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n", atomic_read(&libcfs_kmemory)); @@ -2351,8 +2351,8 @@ static int ksocknal_base_startup(void) { struct ksock_sched_info *info; - int rc; - int i; + int rc; + int i; LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING); LASSERT(ksocknal_data.ksnd_nnets == 0); @@ -2398,8 +2398,8 @@ ksocknal_base_startup(void) goto failed; cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) { - ksock_sched_t *sched; - int nthrs; + ksock_sched_t *sched; + int nthrs; nthrs = cfs_cpt_weight(lnet_cpt_table(), i); if (*ksocknal_tunables.ksnd_nscheds > 0) { @@ -2430,9 +2430,9 @@ ksocknal_base_startup(void) } } - ksocknal_data.ksnd_connd_starting = 0; - ksocknal_data.ksnd_connd_failed_stamp = 0; - ksocknal_data.ksnd_connd_starting_stamp = get_seconds(); + ksocknal_data.ksnd_connd_starting = 0; + ksocknal_data.ksnd_connd_failed_stamp = 0; + ksocknal_data.ksnd_connd_starting_stamp = get_seconds(); /* must have at least 2 connds to remain responsive to accepts while * connecting */ if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1) @@ -2482,9 +2482,9 @@ ksocknal_base_startup(void) static void ksocknal_debug_peerhash(lnet_ni_t *ni) { - ksock_peer_t *peer = NULL; - struct list_head *tmp; - int i; + ksock_peer_t *peer = NULL; + struct list_head *tmp; + int i; read_lock(&ksocknal_data.ksnd_global_lock); @@ -2536,12 +2536,12 @@ ksocknal_debug_peerhash(lnet_ni_t *ni) void ksocknal_shutdown(lnet_ni_t *ni) { - ksock_net_t *net = ni->ni_data; - int i; + ksock_net_t *net = ni->ni_data; + int i; lnet_process_id_t anyid = {0}; - anyid.nid = LNET_NID_ANY; - anyid.pid = LNET_PID_ANY; + anyid.nid = LNET_NID_ANY; + anyid.pid = LNET_PID_ANY; LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL); LASSERT(ksocknal_data.ksnd_nnets > 0); @@ -2588,27 +2588,27 @@ ksocknal_shutdown(lnet_ni_t *ni) static int ksocknal_enumerate_interfaces(ksock_net_t *net) { - char **names; - int i; - int j; - int rc; - int n; + char **names; + int i; + int j; + int rc; + int n; - n = libcfs_ipif_enumerate(&names); + n = lnet_ipif_enumerate(&names); if (n <= 0) { CERROR("Can't enumerate interfaces: %d\n", n); return n; } for (i = j = 0; i < n; i++) { - int 
up; - __u32 ip; - __u32 mask; + int up; + __u32 ip; + __u32 mask; if (!strcmp(names[i], "lo")) /* skip the loopback IF */ continue; - rc = libcfs_ipif_query(names[i], &up, &ip, &mask); + rc = lnet_ipif_query(names[i], &up, &ip, &mask); if (rc != 0) { CWARN("Can't get interface %s info: %d\n", names[i], rc); @@ -2634,7 +2634,7 @@ ksocknal_enumerate_interfaces(ksock_net_t *net) j++; } - libcfs_ipif_free_enumeration(names, n); + lnet_ipif_free_enumeration(names, n); if (j == 0) CERROR("Can't find any usable interfaces\n"); @@ -2645,15 +2645,15 @@ ksocknal_enumerate_interfaces(ksock_net_t *net) static int ksocknal_search_new_ipif(ksock_net_t *net) { - int new_ipif = 0; - int i; + int new_ipif = 0; + int i; for (i = 0; i < net->ksnn_ninterfaces; i++) { - char *ifnam = &net->ksnn_interfaces[i].ksni_name[0]; - char *colon = strchr(ifnam, ':'); - int found = 0; - ksock_net_t *tmp; - int j; + char *ifnam = &net->ksnn_interfaces[i].ksni_name[0]; + char *colon = strchr(ifnam, ':'); + int found = 0; + ksock_net_t *tmp; + int j; if (colon != NULL) /* ignore alias device */ *colon = 0; @@ -2687,9 +2687,9 @@ ksocknal_search_new_ipif(ksock_net_t *net) static int ksocknal_start_schedulers(struct ksock_sched_info *info) { - int nthrs; - int rc = 0; - int i; + int nthrs; + int rc = 0; + int i; if (info->ksi_nthreads == 0) { if (*ksocknal_tunables.ksnd_nscheds > 0) { @@ -2708,9 +2708,9 @@ ksocknal_start_schedulers(struct ksock_sched_info *info) } for (i = 0; i < nthrs; i++) { - long id; - char name[20]; - ksock_sched_t *sched; + long id; + char name[20]; + ksock_sched_t *sched; id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i); sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)]; snprintf(name, sizeof(name), "socknal_sd%02d_%02d", @@ -2733,14 +2733,14 @@ ksocknal_start_schedulers(struct ksock_sched_info *info) static int ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts) { - int newif = ksocknal_search_new_ipif(net); - int rc; - int i; + int newif = ksocknal_search_new_ipif(net); + int rc; + int i; LASSERT(ncpts > 0 && ncpts <= cfs_cpt_number(lnet_cpt_table())); for (i = 0; i < ncpts; i++) { - struct ksock_sched_info *info; + struct ksock_sched_info *info; int cpt = (cpts == NULL) ? 
i : cpts[i]; LASSERT(cpt < cfs_cpt_number(lnet_cpt_table())); @@ -2759,9 +2759,9 @@ ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts) int ksocknal_startup(lnet_ni_t *ni) { - ksock_net_t *net; - int rc; - int i; + ksock_net_t *net; + int rc; + int i; LASSERT(ni->ni_lnd == &the_ksocklnd); @@ -2791,13 +2791,12 @@ ksocknal_startup(lnet_ni_t *ni) net->ksnn_ninterfaces = 1; } else { for (i = 0; i < LNET_MAX_INTERFACES; i++) { - int up; + int up; if (ni->ni_interfaces[i] == NULL) break; - rc = libcfs_ipif_query( - ni->ni_interfaces[i], &up, + rc = lnet_ipif_query(ni->ni_interfaces[i], &up, &net->ksnn_interfaces[i].ksni_ipaddr, &net->ksnn_interfaces[i].ksni_netmask); @@ -2851,7 +2850,7 @@ ksocknal_module_fini(void) static int __init ksocknal_module_init(void) { - int rc; + int rc; /* check ksnr_connected/connecting field large enough */ CLASSERT(SOCKLND_CONN_NTYPES <= 4); diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h index c54c99551..8a9d4a0de 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h @@ -25,26 +25,48 @@ * */ +#ifndef _SOCKLND_SOCKLND_H_ +#define _SOCKLND_SOCKLND_H_ + #define DEBUG_PORTAL_ALLOC #define DEBUG_SUBSYSTEM S_LND -#include "socklnd_lib-linux.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include "../../../include/linux/libcfs/libcfs.h" #include "../../../include/linux/lnet/lnet.h" #include "../../../include/linux/lnet/lib-lnet.h" #include "../../../include/linux/lnet/socklnd.h" -#include "../../../include/linux/lnet/lnet-sysctl.h" -#define SOCKNAL_PEER_HASH_SIZE 101 /* # peer lists */ -#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */ -#define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */ -#define SOCKNAL_ENOMEM_RETRY CFS_TICK /* jiffies between retries */ +/* assume one thread for each connection type */ +#define SOCKNAL_NSCHEDS 3 +#define SOCKNAL_NSCHEDS_HIGH (SOCKNAL_NSCHEDS << 1) + +#define SOCKNAL_PEER_HASH_SIZE 101 /* # peer lists */ +#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */ +#define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */ +#define SOCKNAL_ENOMEM_RETRY CFS_TICK /* jiffies between retries */ -#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */ -#define SOCKNAL_SINGLE_FRAG_RX 0 /* disable multi-fragment receives */ +#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */ +#define SOCKNAL_SINGLE_FRAG_RX 0 /* disable multi-fragment receives */ -#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */ +#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */ /* risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled). * no risk if we're not running on a CONFIG_HIGHMEM platform. 
*/ @@ -58,33 +80,31 @@ struct ksock_sched_info; typedef struct /* per scheduler state */ { - spinlock_t kss_lock; /* serialise */ - struct list_head kss_rx_conns; /* conn waiting to be read */ - /* conn waiting to be written */ - struct list_head kss_tx_conns; - /* zombie noop tx list */ - struct list_head kss_zombie_noop_txs; - wait_queue_head_t kss_waitq; /* where scheduler sleeps */ - /* # connections assigned to this scheduler */ - int kss_nconns; - struct ksock_sched_info *kss_info; /* owner of it */ - struct page *kss_rx_scratch_pgs[LNET_MAX_IOV]; - struct kvec kss_scratch_iov[LNET_MAX_IOV]; + spinlock_t kss_lock; /* serialise */ + struct list_head kss_rx_conns; /* conn waiting to be read */ + struct list_head kss_tx_conns; /* conn waiting to be written */ + struct list_head kss_zombie_noop_txs; /* zombie noop tx list */ + wait_queue_head_t kss_waitq; /* where scheduler sleeps */ + int kss_nconns; /* # connections assigned to + * this scheduler */ + struct ksock_sched_info *kss_info; /* owner of it */ + struct page *kss_rx_scratch_pgs[LNET_MAX_IOV]; + struct kvec kss_scratch_iov[LNET_MAX_IOV]; } ksock_sched_t; struct ksock_sched_info { - int ksi_nthreads_max; /* max allowed threads */ - int ksi_nthreads; /* number of threads */ - int ksi_cpt; /* CPT id */ - ksock_sched_t *ksi_scheds; /* array of schedulers */ + int ksi_nthreads_max; /* max allowed threads */ + int ksi_nthreads; /* number of threads */ + int ksi_cpt; /* CPT id */ + ksock_sched_t *ksi_scheds; /* array of schedulers */ }; -#define KSOCK_CPT_SHIFT 16 -#define KSOCK_THREAD_ID(cpt, sid) (((cpt) << KSOCK_CPT_SHIFT) | (sid)) -#define KSOCK_THREAD_CPT(id) ((id) >> KSOCK_CPT_SHIFT) -#define KSOCK_THREAD_SID(id) ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1)) +#define KSOCK_CPT_SHIFT 16 +#define KSOCK_THREAD_ID(cpt, sid) (((cpt) << KSOCK_CPT_SHIFT) | (sid)) +#define KSOCK_THREAD_CPT(id) ((id) >> KSOCK_CPT_SHIFT) +#define KSOCK_THREAD_SID(id) ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1)) -typedef struct /* in-use interface */ +typedef struct /* in-use interface */ { __u32 ksni_ipaddr; /* interface's IP address */ __u32 ksni_netmask; /* interface's network mask */ @@ -94,35 +114,48 @@ typedef struct /* in-use interface */ } ksock_interface_t; typedef struct { - /* "stuck" socket timeout (seconds) */ - int *ksnd_timeout; - /* # scheduler threads in each pool while starting */ - int *ksnd_nscheds; - int *ksnd_nconnds; /* # connection daemons */ - int *ksnd_nconnds_max; /* max # connection daemons */ - int *ksnd_min_reconnectms; /* first connection retry after (ms)... */ - int *ksnd_max_reconnectms; /* ...exponentially increasing to this */ - int *ksnd_eager_ack; /* make TCP ack eagerly? */ - int *ksnd_typed_conns; /* drive sockets by type? */ - int *ksnd_min_bulk; /* smallest "large" message */ - int *ksnd_tx_buffer_size; /* socket tx buffer size */ - int *ksnd_rx_buffer_size; /* socket rx buffer size */ - int *ksnd_nagle; /* enable NAGLE? 
*/ - int *ksnd_round_robin; /* round robin for multiple interfaces */ - int *ksnd_keepalive; /* # secs for sending keepalive NOOP */ - int *ksnd_keepalive_idle; /* # idle secs before 1st probe */ - int *ksnd_keepalive_count; /* # probes */ - int *ksnd_keepalive_intvl; /* time between probes */ - int *ksnd_credits; /* # concurrent sends */ - int *ksnd_peertxcredits; /* # concurrent sends to 1 peer */ - int *ksnd_peerrtrcredits; /* # per-peer router buffer credits */ - int *ksnd_peertimeout; /* seconds to consider peer dead */ - int *ksnd_enable_csum; /* enable check sum */ - int *ksnd_inject_csum_error; /* set non-zero to inject checksum error */ - int *ksnd_nonblk_zcack; /* always send zc-ack on non-blocking connection */ - unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload size */ - int *ksnd_zc_recv; /* enable ZC receive (for Chelsio TOE) */ - int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to enable ZC receive */ + int *ksnd_timeout; /* "stuck" socket timeout + * (seconds) */ + int *ksnd_nscheds; /* # scheduler threads in each + * pool while starting */ + int *ksnd_nconnds; /* # connection daemons */ + int *ksnd_nconnds_max; /* max # connection daemons */ + int *ksnd_min_reconnectms; /* first connection retry after + * (ms)... */ + int *ksnd_max_reconnectms; /* ...exponentially increasing to + * this */ + int *ksnd_eager_ack; /* make TCP ack eagerly? */ + int *ksnd_typed_conns; /* drive sockets by type? */ + int *ksnd_min_bulk; /* smallest "large" message */ + int *ksnd_tx_buffer_size; /* socket tx buffer size */ + int *ksnd_rx_buffer_size; /* socket rx buffer size */ + int *ksnd_nagle; /* enable NAGLE? */ + int *ksnd_round_robin; /* round robin for multiple + * interfaces */ + int *ksnd_keepalive; /* # secs for sending keepalive + * NOOP */ + int *ksnd_keepalive_idle; /* # idle secs before 1st probe + */ + int *ksnd_keepalive_count; /* # probes */ + int *ksnd_keepalive_intvl; /* time between probes */ + int *ksnd_credits; /* # concurrent sends */ + int *ksnd_peertxcredits; /* # concurrent sends to 1 peer + */ + int *ksnd_peerrtrcredits; /* # per-peer router buffer + * credits */ + int *ksnd_peertimeout; /* seconds to consider peer dead + */ + int *ksnd_enable_csum; /* enable check sum */ + int *ksnd_inject_csum_error; /* set non-zero to inject + * checksum error */ + int *ksnd_nonblk_zcack; /* always send zc-ack on + * non-blocking connection */ + unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload + * size */ + int *ksnd_zc_recv; /* enable ZC receive (for + * Chelsio TOE) */ + int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to + * enable ZC receive */ } ksock_tunables_t; typedef struct { @@ -141,55 +174,67 @@ typedef struct { #define SOCKNAL_CONND_RESV 1 typedef struct { - int ksnd_init; /* initialisation state */ - int ksnd_nnets; /* # networks set up */ - struct list_head ksnd_nets; /* list of nets */ - /* stabilize peer/conn ops */ - rwlock_t ksnd_global_lock; - /* hash table of all my known peers */ - struct list_head *ksnd_peers; - int ksnd_peer_hash_size; /* size of ksnd_peers */ - - int ksnd_nthreads; /* # live threads */ - int ksnd_shuttingdown; /* tell threads to exit */ - /* schedulers information */ - struct ksock_sched_info **ksnd_sched_info; - - atomic_t ksnd_nactive_txs; /* #active txs */ - - struct list_head ksnd_deathrow_conns; /* conns to close: reaper_lock*/ - struct list_head ksnd_zombie_conns; /* conns to free: reaper_lock */ - struct list_head ksnd_enomem_conns; /* conns to retry: reaper_lock*/ - wait_queue_head_t 
ksnd_reaper_waitq; /* reaper sleeps here */ - unsigned long ksnd_reaper_waketime;/* when reaper will wake */ - spinlock_t ksnd_reaper_lock; /* serialise */ - - int ksnd_enomem_tx; /* test ENOMEM sender */ - int ksnd_stall_tx; /* test sluggish sender */ - int ksnd_stall_rx; /* test sluggish receiver */ - - struct list_head ksnd_connd_connreqs; /* incoming connection requests */ - struct list_head ksnd_connd_routes; /* routes waiting to be connected */ - wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */ - int ksnd_connd_connecting;/* # connds connecting */ - /** time stamp of the last failed connecting attempt */ - long ksnd_connd_failed_stamp; - /** # starting connd */ - unsigned ksnd_connd_starting; - /** time stamp of the last starting connd */ - long ksnd_connd_starting_stamp; - /** # running connd */ - unsigned ksnd_connd_running; - spinlock_t ksnd_connd_lock; /* serialise */ - - struct list_head ksnd_idle_noop_txs; /* list head for freed noop tx */ - spinlock_t ksnd_tx_lock; /* serialise, g_lock unsafe */ + int ksnd_init; /* initialisation state + */ + int ksnd_nnets; /* # networks set up */ + struct list_head ksnd_nets; /* list of nets */ + rwlock_t ksnd_global_lock; /* stabilize peer/conn + * ops */ + struct list_head *ksnd_peers; /* hash table of all my + * known peers */ + int ksnd_peer_hash_size; /* size of ksnd_peers */ + + int ksnd_nthreads; /* # live threads */ + int ksnd_shuttingdown; /* tell threads to exit + */ + struct ksock_sched_info **ksnd_sched_info; /* schedulers info */ + + atomic_t ksnd_nactive_txs; /* #active txs */ + + struct list_head ksnd_deathrow_conns; /* conns to close: + * reaper_lock*/ + struct list_head ksnd_zombie_conns; /* conns to free: + * reaper_lock */ + struct list_head ksnd_enomem_conns; /* conns to retry: + * reaper_lock*/ + wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */ + unsigned long ksnd_reaper_waketime; /* when reaper will wake + */ + spinlock_t ksnd_reaper_lock; /* serialise */ + + int ksnd_enomem_tx; /* test ENOMEM sender */ + int ksnd_stall_tx; /* test sluggish sender + */ + int ksnd_stall_rx; /* test sluggish + * receiver */ + + struct list_head ksnd_connd_connreqs; /* incoming connection + * requests */ + struct list_head ksnd_connd_routes; /* routes waiting to be + * connected */ + wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */ + int ksnd_connd_connecting; /* # connds connecting + */ + long ksnd_connd_failed_stamp;/* time stamp of the + * last failed + * connecting attempt */ + unsigned ksnd_connd_starting; /* # starting connd */ + long ksnd_connd_starting_stamp;/* time stamp of the + * last starting connd + */ + unsigned ksnd_connd_running; /* # running connd */ + spinlock_t ksnd_connd_lock; /* serialise */ + + struct list_head ksnd_idle_noop_txs; /* list head for freed + * noop tx */ + spinlock_t ksnd_tx_lock; /* serialise, g_lock + * unsafe */ } ksock_nal_data_t; -#define SOCKNAL_INIT_NOTHING 0 -#define SOCKNAL_INIT_DATA 1 -#define SOCKNAL_INIT_ALL 2 +#define SOCKNAL_INIT_NOTHING 0 +#define SOCKNAL_INIT_DATA 1 +#define SOCKNAL_INIT_ALL 2 /* A packet just assembled for transmission is represented by 1 or more * struct iovec fragments (the first frag contains the portals header), @@ -200,43 +245,45 @@ typedef struct { * received into either struct iovec or lnet_kiov_t fragments, depending on * what the header matched or whether the message needs forwarding. 
*/ -struct ksock_conn; /* forward ref */ -struct ksock_peer; /* forward ref */ -struct ksock_route; /* forward ref */ -struct ksock_proto; /* forward ref */ +struct ksock_conn; /* forward ref */ +struct ksock_peer; /* forward ref */ +struct ksock_route; /* forward ref */ +struct ksock_proto; /* forward ref */ -typedef struct /* transmit packet */ +typedef struct /* transmit packet */ { - struct list_head tx_list; /* queue on conn for transmission etc */ - struct list_head tx_zc_list; /* queue on peer for ZC request */ - atomic_t tx_refcount; /* tx reference count */ - int tx_nob; /* # packet bytes */ - int tx_resid; /* residual bytes */ - int tx_niov; /* # packet iovec frags */ - struct kvec *tx_iov; /* packet iovec frags */ - int tx_nkiov; /* # packet page frags */ - unsigned short tx_zc_aborted; /* aborted ZC request */ - unsigned short tx_zc_capable:1; /* payload is large enough for ZC */ - unsigned short tx_zc_checked:1; /* Have I checked if I should ZC? */ - unsigned short tx_nonblk:1; /* it's a non-blocking ACK */ - lnet_kiov_t *tx_kiov; /* packet page frags */ - struct ksock_conn *tx_conn; /* owning conn */ - lnet_msg_t *tx_lnetmsg; /* lnet message for lnet_finalize() */ - unsigned long tx_deadline; /* when (in jiffies) tx times out */ - ksock_msg_t tx_msg; /* socklnd message buffer */ - int tx_desc_size; /* size of this descriptor */ + struct list_head tx_list; /* queue on conn for transmission etc + */ + struct list_head tx_zc_list; /* queue on peer for ZC request */ + atomic_t tx_refcount; /* tx reference count */ + int tx_nob; /* # packet bytes */ + int tx_resid; /* residual bytes */ + int tx_niov; /* # packet iovec frags */ + struct kvec *tx_iov; /* packet iovec frags */ + int tx_nkiov; /* # packet page frags */ + unsigned short tx_zc_aborted; /* aborted ZC request */ + unsigned short tx_zc_capable:1; /* payload is large enough for ZC */ + unsigned short tx_zc_checked:1; /* Have I checked if I should ZC? 
*/ + unsigned short tx_nonblk:1; /* it's a non-blocking ACK */ + lnet_kiov_t *tx_kiov; /* packet page frags */ + struct ksock_conn *tx_conn; /* owning conn */ + lnet_msg_t *tx_lnetmsg; /* lnet message for lnet_finalize() + */ + unsigned long tx_deadline; /* when (in jiffies) tx times out */ + ksock_msg_t tx_msg; /* socklnd message buffer */ + int tx_desc_size; /* size of this descriptor */ union { struct { - struct kvec iov; /* virt hdr */ - lnet_kiov_t kiov[0]; /* paged payload */ - } paged; + struct kvec iov; /* virt hdr */ + lnet_kiov_t kiov[0]; /* paged payload */ + } paged; struct { - struct kvec iov[1]; /* virt hdr + payload */ - } virt; - } tx_frags; + struct kvec iov[1]; /* virt hdr + payload */ + } virt; + } tx_frags; } ksock_tx_t; -#define KSOCK_NOOP_TX_SIZE ((int)offsetof(ksock_tx_t, tx_frags.paged.kiov[0])) +#define KSOCK_NOOP_TX_SIZE ((int)offsetof(ksock_tx_t, tx_frags.paged.kiov[0])) /* network zero copy callback descriptor embedded in ksock_tx_t */ @@ -247,153 +294,205 @@ typedef union { lnet_kiov_t kiov[LNET_MAX_IOV]; } ksock_rxiovspace_t; -#define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */ -#define SOCKNAL_RX_LNET_HEADER 2 /* reading lnet message header */ -#define SOCKNAL_RX_PARSE 3 /* Calling lnet_parse() */ -#define SOCKNAL_RX_PARSE_WAIT 4 /* waiting to be told to read the body */ -#define SOCKNAL_RX_LNET_PAYLOAD 5 /* reading lnet payload (to deliver here) */ -#define SOCKNAL_RX_SLOP 6 /* skipping body */ +#define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */ +#define SOCKNAL_RX_LNET_HEADER 2 /* reading lnet message header */ +#define SOCKNAL_RX_PARSE 3 /* Calling lnet_parse() */ +#define SOCKNAL_RX_PARSE_WAIT 4 /* waiting to be told to read the body */ +#define SOCKNAL_RX_LNET_PAYLOAD 5 /* reading lnet payload (to deliver here) */ +#define SOCKNAL_RX_SLOP 6 /* skipping body */ typedef struct ksock_conn { - struct ksock_peer *ksnc_peer; /* owning peer */ - struct ksock_route *ksnc_route; /* owning route */ - struct list_head ksnc_list; /* stash on peer's conn list */ - struct socket *ksnc_sock; /* actual socket */ - void *ksnc_saved_data_ready; /* socket's original data_ready() callback */ - void *ksnc_saved_write_space; /* socket's original write_space() callback */ - atomic_t ksnc_conn_refcount; /* conn refcount */ - atomic_t ksnc_sock_refcount; /* sock refcount */ - ksock_sched_t *ksnc_scheduler; /* who schedules this connection */ - __u32 ksnc_myipaddr; /* my IP */ - __u32 ksnc_ipaddr; /* peer's IP */ - int ksnc_port; /* peer's port */ - signed int ksnc_type:3; /* type of connection, - * should be signed value */ - unsigned int ksnc_closing:1; /* being shut down */ - unsigned int ksnc_flip:1; /* flip or not, only for V2.x */ - unsigned int ksnc_zc_capable:1; /* enable to ZC */ - struct ksock_proto *ksnc_proto; /* protocol for the connection */ + struct ksock_peer *ksnc_peer; /* owning peer */ + struct ksock_route *ksnc_route; /* owning route */ + struct list_head ksnc_list; /* stash on peer's conn list */ + struct socket *ksnc_sock; /* actual socket */ + void *ksnc_saved_data_ready; /* socket's original + * data_ready() callback */ + void *ksnc_saved_write_space; /* socket's original + * write_space() callback */ + atomic_t ksnc_conn_refcount;/* conn refcount */ + atomic_t ksnc_sock_refcount;/* sock refcount */ + ksock_sched_t *ksnc_scheduler; /* who schedules this connection + */ + __u32 ksnc_myipaddr; /* my IP */ + __u32 ksnc_ipaddr; /* peer's IP */ + int ksnc_port; /* peer's port */ + signed int ksnc_type:3; /* type of connection, 
should be + * signed value */ + unsigned int ksnc_closing:1; /* being shut down */ + unsigned int ksnc_flip:1; /* flip or not, only for V2.x */ + unsigned int ksnc_zc_capable:1; /* enable to ZC */ + struct ksock_proto *ksnc_proto; /* protocol for the connection */ /* reader */ - struct list_head ksnc_rx_list; /* where I enq waiting input or a forwarding descriptor */ - unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times out */ - __u8 ksnc_rx_started; /* started receiving a message */ - __u8 ksnc_rx_ready; /* data ready to read */ - __u8 ksnc_rx_scheduled;/* being progressed */ - __u8 ksnc_rx_state; /* what is being read */ - int ksnc_rx_nob_left; /* # bytes to next hdr/body */ - int ksnc_rx_nob_wanted; /* bytes actually wanted */ - int ksnc_rx_niov; /* # iovec frags */ - struct kvec *ksnc_rx_iov; /* the iovec frags */ - int ksnc_rx_nkiov; /* # page frags */ - lnet_kiov_t *ksnc_rx_kiov; /* the page frags */ - ksock_rxiovspace_t ksnc_rx_iov_space;/* space for frag descriptors */ - __u32 ksnc_rx_csum; /* partial checksum for incoming data */ - void *ksnc_cookie; /* rx lnet_finalize passthru arg */ - ksock_msg_t ksnc_msg; /* incoming message buffer: - * V2.x message takes the - * whole struct - * V1.x message is a bare - * lnet_hdr_t, it's stored in - * ksnc_msg.ksm_u.lnetmsg */ + struct list_head ksnc_rx_list; /* where I enq waiting input or a + * forwarding descriptor */ + unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times + * out */ + __u8 ksnc_rx_started; /* started receiving a message */ + __u8 ksnc_rx_ready; /* data ready to read */ + __u8 ksnc_rx_scheduled; /* being progressed */ + __u8 ksnc_rx_state; /* what is being read */ + int ksnc_rx_nob_left; /* # bytes to next hdr/body */ + int ksnc_rx_nob_wanted;/* bytes actually wanted */ + int ksnc_rx_niov; /* # iovec frags */ + struct kvec *ksnc_rx_iov; /* the iovec frags */ + int ksnc_rx_nkiov; /* # page frags */ + lnet_kiov_t *ksnc_rx_kiov; /* the page frags */ + ksock_rxiovspace_t ksnc_rx_iov_space; /* space for frag descriptors */ + __u32 ksnc_rx_csum; /* partial checksum for incoming + * data */ + void *ksnc_cookie; /* rx lnet_finalize passthru arg + */ + ksock_msg_t ksnc_msg; /* incoming message buffer: + * V2.x message takes the + * whole struct + * V1.x message is a bare + * lnet_hdr_t, it's stored in + * ksnc_msg.ksm_u.lnetmsg */ /* WRITER */ - struct list_head ksnc_tx_list; /* where I enq waiting for output space */ - struct list_head ksnc_tx_queue; /* packets waiting to be sent */ - ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet message or ZC-ACK */ - unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out */ - int ksnc_tx_bufnob; /* send buffer marker */ - atomic_t ksnc_tx_nob; /* # bytes queued */ - int ksnc_tx_ready; /* write space */ - int ksnc_tx_scheduled; /* being progressed */ - unsigned long ksnc_tx_last_post; /* time stamp of the last posted TX */ + struct list_head ksnc_tx_list; /* where I enq waiting for output + * space */ + struct list_head ksnc_tx_queue; /* packets waiting to be sent */ + ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet + * message or ZC-ACK */ + unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out + */ + int ksnc_tx_bufnob; /* send buffer marker */ + atomic_t ksnc_tx_nob; /* # bytes queued */ + int ksnc_tx_ready; /* write space */ + int ksnc_tx_scheduled; /* being progressed */ + unsigned long ksnc_tx_last_post; /* time stamp of the last posted + * TX */ } ksock_conn_t; typedef struct ksock_route { - struct list_head 
ksnr_list; /* chain on peer route list */ - struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */ - struct ksock_peer *ksnr_peer; /* owning peer */ - atomic_t ksnr_refcount; /* # users */ - unsigned long ksnr_timeout; /* when (in jiffies) reconnection can happen next */ - long ksnr_retry_interval; /* how long between retries */ - __u32 ksnr_myipaddr; /* my IP */ - __u32 ksnr_ipaddr; /* IP address to connect to */ - int ksnr_port; /* port to connect to */ - unsigned int ksnr_scheduled:1; /* scheduled for attention */ - unsigned int ksnr_connecting:1;/* connection establishment in progress */ - unsigned int ksnr_connected:4; /* connections established by type */ - unsigned int ksnr_deleted:1; /* been removed from peer? */ - unsigned int ksnr_share_count; /* created explicitly? */ - int ksnr_conn_count; /* # conns established by this route */ + struct list_head ksnr_list; /* chain on peer route list */ + struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */ + struct ksock_peer *ksnr_peer; /* owning peer */ + atomic_t ksnr_refcount; /* # users */ + unsigned long ksnr_timeout; /* when (in jiffies) reconnection + * can happen next */ + long ksnr_retry_interval; /* how long between retries */ + __u32 ksnr_myipaddr; /* my IP */ + __u32 ksnr_ipaddr; /* IP address to connect to */ + int ksnr_port; /* port to connect to */ + unsigned int ksnr_scheduled:1; /* scheduled for attention */ + unsigned int ksnr_connecting:1; /* connection establishment in + * progress */ + unsigned int ksnr_connected:4; /* connections established by + * type */ + unsigned int ksnr_deleted:1; /* been removed from peer? */ + unsigned int ksnr_share_count; /* created explicitly? */ + int ksnr_conn_count; /* # conns established by this + * route */ } ksock_route_t; -#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */ +#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */ typedef struct ksock_peer { - struct list_head ksnp_list; /* stash on global peer list */ - unsigned long ksnp_last_alive; /* when (in jiffies) I was last alive */ - lnet_process_id_t ksnp_id; /* who's on the other end(s) */ - atomic_t ksnp_refcount; /* # users */ - int ksnp_sharecount; /* lconf usage counter */ - int ksnp_closing; /* being closed */ - int ksnp_accepting;/* # passive connections pending */ - int ksnp_error; /* errno on closing last conn */ - __u64 ksnp_zc_next_cookie;/* ZC completion cookie */ - __u64 ksnp_incarnation; /* latest known peer incarnation */ - struct ksock_proto *ksnp_proto; /* latest known peer protocol */ - struct list_head ksnp_conns; /* all active connections */ - struct list_head ksnp_routes; /* routes */ - struct list_head ksnp_tx_queue; /* waiting packets */ - spinlock_t ksnp_lock; /* serialize, g_lock unsafe */ - struct list_head ksnp_zc_req_list; /* zero copy requests wait for ACK */ - unsigned long ksnp_send_keepalive; /* time to send keepalive */ - lnet_ni_t *ksnp_ni; /* which network */ - int ksnp_n_passive_ips; /* # of... 
*/ - __u32 ksnp_passive_ips[LNET_MAX_INTERFACES]; /* preferred local interfaces */ + struct list_head ksnp_list; /* stash on global peer list */ + unsigned long ksnp_last_alive; /* when (in jiffies) I was last + * alive */ + lnet_process_id_t ksnp_id; /* who's on the other end(s) */ + atomic_t ksnp_refcount; /* # users */ + int ksnp_sharecount; /* lconf usage counter */ + int ksnp_closing; /* being closed */ + int ksnp_accepting; /* # passive connections pending + */ + int ksnp_error; /* errno on closing last conn */ + __u64 ksnp_zc_next_cookie; /* ZC completion cookie */ + __u64 ksnp_incarnation; /* latest known peer incarnation + */ + struct ksock_proto *ksnp_proto; /* latest known peer protocol */ + struct list_head ksnp_conns; /* all active connections */ + struct list_head ksnp_routes; /* routes */ + struct list_head ksnp_tx_queue; /* waiting packets */ + spinlock_t ksnp_lock; /* serialize, g_lock unsafe */ + struct list_head ksnp_zc_req_list; /* zero copy requests wait for + * ACK */ + unsigned long ksnp_send_keepalive; /* time to send keepalive */ + lnet_ni_t *ksnp_ni; /* which network */ + int ksnp_n_passive_ips; /* # of... */ + + /* preferred local interfaces */ + __u32 ksnp_passive_ips[LNET_MAX_INTERFACES]; } ksock_peer_t; typedef struct ksock_connreq { - struct list_head ksncr_list; /* stash on ksnd_connd_connreqs */ - lnet_ni_t *ksncr_ni; /* chosen NI */ - struct socket *ksncr_sock; /* accepted socket */ + struct list_head ksncr_list; /* stash on ksnd_connd_connreqs */ + lnet_ni_t *ksncr_ni; /* chosen NI */ + struct socket *ksncr_sock; /* accepted socket */ } ksock_connreq_t; extern ksock_nal_data_t ksocknal_data; extern ksock_tunables_t ksocknal_tunables; -#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */ -#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */ -#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not preferred */ +#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */ +#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */ +#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not + * preferred */ typedef struct ksock_proto { - int pro_version; /* version number of protocol */ - int (*pro_send_hello)(ksock_conn_t *, ksock_hello_msg_t *); /* handshake function */ - int (*pro_recv_hello)(ksock_conn_t *, ksock_hello_msg_t *, int);/* handshake function */ - void (*pro_pack)(ksock_tx_t *); /* message pack */ - void (*pro_unpack)(ksock_msg_t *); /* message unpack */ - ksock_tx_t *(*pro_queue_tx_msg)(ksock_conn_t *, ksock_tx_t *); /* queue tx on the connection */ - int (*pro_queue_tx_zcack)(ksock_conn_t *, ksock_tx_t *, __u64); /* queue ZC ack on the connection */ - int (*pro_handle_zcreq)(ksock_conn_t *, __u64, int); /* handle ZC request */ - int (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64); /* handle ZC ACK */ - int (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int); /* msg type matches the connection type: - * return value: - * return MATCH_NO : no - * return MATCH_YES : matching type - * return MATCH_MAY : can be backup */ + /* version number of protocol */ + int pro_version; + + /* handshake function */ + int (*pro_send_hello)(ksock_conn_t *, ksock_hello_msg_t *); + + /* handshake function */ + int (*pro_recv_hello)(ksock_conn_t *, ksock_hello_msg_t *, int); + + /* message pack */ + void (*pro_pack)(ksock_tx_t *); + + /* message unpack */ + void (*pro_unpack)(ksock_msg_t *); + + /* queue tx on the connection */ + ksock_tx_t *(*pro_queue_tx_msg)(ksock_conn_t *, ksock_tx_t *); + + 
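/* Editorial sketch, not part of this patch: how callers are expected to
+  * dispatch through this table, using only names visible in this header
+  * and in socklnd_cb.c below (ksocknal_find_conn_locked picks a connection
+  * via pro_match_tx; ksocknal_queue_tx_locked queues via pro_queue_tx_msg):
+  *
+  *	rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);
+  *	// rc is SOCKNAL_MATCH_YES (use this typed conn), SOCKNAL_MATCH_MAY
+  *	// (keep as fallback) or SOCKNAL_MATCH_NO (skip this conn)
+  *	ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
+  *	// ztx, if non-NULL, is a noop ZC-ACK made redundant by piggybacking
+  *
+  * so protocol versions V1.x/V2.x/V3.x differ only in the hooks set here.
+  */
+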
/* queue ZC ack on the connection */ + int (*pro_queue_tx_zcack)(ksock_conn_t *, ksock_tx_t *, __u64); + + /* handle ZC request */ + int (*pro_handle_zcreq)(ksock_conn_t *, __u64, int); + + /* handle ZC ACK */ + int (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64); + + /* msg type matches the connection type: + * return value: + * return MATCH_NO : no + * return MATCH_YES : matching type + * return MATCH_MAY : can be backup */ + int (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int); } ksock_proto_t; extern ksock_proto_t ksocknal_protocol_v1x; extern ksock_proto_t ksocknal_protocol_v2x; extern ksock_proto_t ksocknal_protocol_v3x; -#define KSOCK_PROTO_V1_MAJOR LNET_PROTO_TCP_VERSION_MAJOR -#define KSOCK_PROTO_V1_MINOR LNET_PROTO_TCP_VERSION_MINOR -#define KSOCK_PROTO_V1 KSOCK_PROTO_V1_MAJOR +#define KSOCK_PROTO_V1_MAJOR LNET_PROTO_TCP_VERSION_MAJOR +#define KSOCK_PROTO_V1_MINOR LNET_PROTO_TCP_VERSION_MINOR +#define KSOCK_PROTO_V1 KSOCK_PROTO_V1_MAJOR #ifndef CPU_MASK_NONE #define CPU_MASK_NONE 0UL #endif +static inline __u32 ksocknal_csum(__u32 crc, unsigned char const *p, size_t len) +{ +#if 1 + return crc32_le(crc, p, len); +#else + while (len-- > 0) + crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff) ; + return crc; +#endif +} + static inline int ksocknal_route_mask(void) { @@ -434,7 +533,7 @@ ksocknal_conn_decref(ksock_conn_t *conn) static inline int ksocknal_connsock_addref(ksock_conn_t *conn) { - int rc = -ESHUTDOWN; + int rc = -ESHUTDOWN; read_lock(&ksocknal_data.ksnd_global_lock); if (!conn->ksnc_closing) { @@ -453,7 +552,7 @@ ksocknal_connsock_decref(ksock_conn_t *conn) LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0); if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) { LASSERT(conn->ksnc_closing); - libcfs_sock_release(conn->ksnc_sock); + sock_release(conn->ksnc_sock); conn->ksnc_sock = NULL; ksocknal_finalize_zcreq(conn); } @@ -586,3 +685,5 @@ extern void ksocknal_lib_csum_tx(ksock_tx_t *tx); extern int ksocknal_lib_memory_pressure(ksock_conn_t *conn); extern int ksocknal_lib_bind_thread_to_cpu(int id); + +#endif /* _SOCKLND_SOCKLND_H_ */ diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c index fa7ad883b..fe2a83a54 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c @@ -75,13 +75,13 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk) return NULL; } - tx->tx_conn = NULL; - tx->tx_lnetmsg = NULL; - tx->tx_kiov = NULL; - tx->tx_nkiov = 0; - tx->tx_iov = tx->tx_frags.virt.iov; - tx->tx_niov = 1; - tx->tx_nonblk = nonblk; + tx->tx_conn = NULL; + tx->tx_lnetmsg = NULL; + tx->tx_kiov = NULL; + tx->tx_nkiov = 0; + tx->tx_iov = tx->tx_frags.virt.iov; + tx->tx_niov = 1; + tx->tx_nonblk = nonblk; socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP); tx->tx_msg.ksm_zc_cookies[1] = cookie; @@ -110,11 +110,11 @@ ksocknal_free_tx (ksock_tx_t *tx) static int ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) { - struct kvec *iov = tx->tx_iov; - int nob; - int rc; + struct kvec *iov = tx->tx_iov; + int nob; + int rc; - LASSERT (tx->tx_niov > 0); + LASSERT(tx->tx_niov > 0); /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */ rc = ksocknal_lib_send_iov(conn, tx); @@ -128,7 +128,7 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) /* "consume" iov */ do { - LASSERT (tx->tx_niov > 0); + LASSERT(tx->tx_niov > 0); if (nob < (int) iov->iov_len) { iov->iov_base = (void *)((char *)iov->iov_base + nob); @@ -147,12 +147,12 @@ 
ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) static int ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) { - lnet_kiov_t *kiov = tx->tx_kiov; - int nob; - int rc; + lnet_kiov_t *kiov = tx->tx_kiov; + int nob; + int rc; - LASSERT (tx->tx_niov == 0); - LASSERT (tx->tx_nkiov > 0); + LASSERT(tx->tx_niov == 0); + LASSERT(tx->tx_nkiov > 0); /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */ rc = ksocknal_lib_send_kiov(conn, tx); @@ -185,15 +185,15 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) static int ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) { - int rc; - int bufnob; + int rc; + int bufnob; if (ksocknal_data.ksnd_stall_tx != 0) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx)); } - LASSERT (tx->tx_resid != 0); + LASSERT(tx->tx_resid != 0); rc = ksocknal_connsock_addref(conn); if (rc != 0) { @@ -252,10 +252,10 @@ static int ksocknal_recv_iov (ksock_conn_t *conn) { struct kvec *iov = conn->ksnc_rx_iov; - int nob; - int rc; + int nob; + int rc; - LASSERT (conn->ksnc_rx_niov > 0); + LASSERT(conn->ksnc_rx_niov > 0); /* Never touch conn->ksnc_rx_iov or change connection * status inside ksocknal_lib_recv_iov */ @@ -277,7 +277,7 @@ ksocknal_recv_iov (ksock_conn_t *conn) conn->ksnc_rx_nob_left -= nob; do { - LASSERT (conn->ksnc_rx_niov > 0); + LASSERT(conn->ksnc_rx_niov > 0); if (nob < (int)iov->iov_len) { iov->iov_len -= nob; @@ -296,10 +296,10 @@ ksocknal_recv_iov (ksock_conn_t *conn) static int ksocknal_recv_kiov (ksock_conn_t *conn) { - lnet_kiov_t *kiov = conn->ksnc_rx_kiov; - int nob; - int rc; - LASSERT (conn->ksnc_rx_nkiov > 0); + lnet_kiov_t *kiov = conn->ksnc_rx_kiov; + int nob; + int rc; + LASSERT(conn->ksnc_rx_nkiov > 0); /* Never touch conn->ksnc_rx_kiov or change connection * status inside ksocknal_lib_recv_iov */ @@ -321,7 +321,7 @@ ksocknal_recv_kiov (ksock_conn_t *conn) conn->ksnc_rx_nob_left -= nob; do { - LASSERT (conn->ksnc_rx_nkiov > 0); + LASSERT(conn->ksnc_rx_nkiov > 0); if (nob < (int) kiov->kiov_len) { kiov->kiov_offset += nob; @@ -343,7 +343,7 @@ ksocknal_receive (ksock_conn_t *conn) /* Return 1 on success, 0 on EOF, < 0 on error. * Caller checks ksnc_rx_nob_wanted to determine * progress/completion. */ - int rc; + int rc; if (ksocknal_data.ksnd_stall_rx != 0) { set_current_state(TASK_UNINTERRUPTIBLE); @@ -388,8 +388,8 @@ ksocknal_receive (ksock_conn_t *conn) void ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx) { - lnet_msg_t *lnetmsg = tx->tx_lnetmsg; - int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO; + lnet_msg_t *lnetmsg = tx->tx_lnetmsg; + int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 
0 : -EIO; LASSERT(ni != NULL || tx->tx_conn != NULL); @@ -410,7 +410,7 @@ ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error) ksock_tx_t *tx; while (!list_empty (txlist)) { - tx = list_entry (txlist->next, ksock_tx_t, tx_list); + tx = list_entry(txlist->next, ksock_tx_t, tx_list); if (error && tx->tx_lnetmsg != NULL) { CNETERR("Deleting packet type %d len %d %s->%s\n", @@ -422,18 +422,18 @@ ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error) CNETERR("Deleting noop packet\n"); } - list_del (&tx->tx_list); + list_del(&tx->tx_list); - LASSERT (atomic_read(&tx->tx_refcount) == 1); - ksocknal_tx_done (ni, tx); + LASSERT(atomic_read(&tx->tx_refcount) == 1); + ksocknal_tx_done(ni, tx); } } static void ksocknal_check_zc_req(ksock_tx_t *tx) { - ksock_conn_t *conn = tx->tx_conn; - ksock_peer_t *peer = conn->ksnc_peer; + ksock_conn_t *conn = tx->tx_conn; + ksock_peer_t *peer = conn->ksnc_peer; /* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx * to ksnp_zc_req_list if some fragment of this message should be sent @@ -441,8 +441,8 @@ ksocknal_check_zc_req(ksock_tx_t *tx) * she has received this message to tell us we can signal completion. * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on * ksnp_zc_req_list. */ - LASSERT (tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); - LASSERT (tx->tx_zc_capable); + LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); + LASSERT(tx->tx_zc_capable); tx->tx_zc_checked = 1; @@ -461,7 +461,7 @@ ksocknal_check_zc_req(ksock_tx_t *tx) tx->tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); - LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0); + LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0); tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++; @@ -476,7 +476,7 @@ ksocknal_check_zc_req(ksock_tx_t *tx) static void ksocknal_uncheck_zc_req(ksock_tx_t *tx) { - ksock_peer_t *peer = tx->tx_conn->ksnc_peer; + ksock_peer_t *peer = tx->tx_conn->ksnc_peer; LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); LASSERT(tx->tx_zc_capable); @@ -502,14 +502,14 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx) static int ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx) { - int rc; + int rc; if (tx->tx_zc_capable && !tx->tx_zc_checked) ksocknal_check_zc_req(tx); rc = ksocknal_transmit (conn, tx); - CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc); + CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc); if (tx->tx_resid == 0) { /* Sent everything OK */ @@ -546,7 +546,7 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx) } /* Actual error */ - LASSERT (rc < 0); + LASSERT(rc < 0); if (!conn->ksnc_closing) { switch (rc) { @@ -582,9 +582,9 @@ ksocknal_launch_connection_locked (ksock_route_t *route) /* called holding write lock on ksnd_global_lock */ - LASSERT (!route->ksnr_scheduled); - LASSERT (!route->ksnr_connecting); - LASSERT ((ksocknal_route_mask() & ~route->ksnr_connected) != 0); + LASSERT(!route->ksnr_scheduled); + LASSERT(!route->ksnr_connecting); + LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0); route->ksnr_scheduled = 1; /* scheduling conn for connd */ ksocknal_route_addref(route); /* extra ref for connd */ @@ -617,22 +617,22 @@ ksocknal_launch_all_connections_locked (ksock_peer_t *peer) ksock_conn_t * ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) { - struct list_head *tmp; - ksock_conn_t *conn; - ksock_conn_t *typed = NULL; - ksock_conn_t *fallback = NULL; - int tnob = 0; - int fnob = 0; + struct list_head *tmp; + ksock_conn_t *conn; + ksock_conn_t *typed = NULL; + 
ksock_conn_t *fallback = NULL; + int tnob = 0; + int fnob = 0; list_for_each (tmp, &peer->ksnp_conns) { ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list); - int nob = atomic_read(&c->ksnc_tx_nob) + - c->ksnc_sock->sk->sk_wmem_queued; - int rc; + int nob = atomic_read(&c->ksnc_tx_nob) + + c->ksnc_sock->sk->sk_wmem_queued; + int rc; - LASSERT (!c->ksnc_closing); - LASSERT (c->ksnc_proto != NULL && - c->ksnc_proto->pro_match_tx != NULL); + LASSERT(!c->ksnc_closing); + LASSERT(c->ksnc_proto != NULL && + c->ksnc_proto->pro_match_tx != NULL); rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk); @@ -656,7 +656,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) (fnob == nob && *ksocknal_tunables.ksnd_round_robin && cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) { fallback = c; - fnob = nob; + fnob = nob; } break; } @@ -685,9 +685,9 @@ void ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) { ksock_sched_t *sched = conn->ksnc_scheduler; - ksock_msg_t *msg = &tx->tx_msg; - ksock_tx_t *ztx = NULL; - int bufnob = 0; + ksock_msg_t *msg = &tx->tx_msg; + ksock_tx_t *ztx = NULL; + int bufnob = 0; /* called holding global lock (read or irq-write) and caller may * not have dropped this lock between finding conn and calling me, @@ -708,11 +708,11 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) * * We always expect at least 1 mapped fragment containing the * complete ksocknal message header. */ - LASSERT (lnet_iov_nob (tx->tx_niov, tx->tx_iov) + - lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) == - (unsigned int)tx->tx_nob); - LASSERT (tx->tx_niov >= 1); - LASSERT (tx->tx_resid == tx->tx_nob); + LASSERT(lnet_iov_nob (tx->tx_niov, tx->tx_iov) + + lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) == + (unsigned int)tx->tx_nob); + LASSERT(tx->tx_niov >= 1); + LASSERT(tx->tx_resid == tx->tx_nob); CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n", tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type: @@ -739,8 +739,8 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) if (msg->ksm_type == KSOCK_MSG_NOOP) { /* The packet is noop ZC ACK, try to piggyback the ack_cookie * on a normal packet so I don't need to send it */ - LASSERT (msg->ksm_zc_cookies[1] != 0); - LASSERT (conn->ksnc_proto->pro_queue_tx_zcack != NULL); + LASSERT(msg->ksm_zc_cookies[1] != 0); + LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL); if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0)) ztx = tx; /* ZC ACK piggybacked on ztx release tx later */ } else { /* It's a normal packet - can it piggyback a noop zc-ack that * has been queued already? 
*/ - LASSERT (msg->ksm_zc_cookies[1] == 0); - LASSERT (conn->ksnc_proto->pro_queue_tx_msg != NULL); + LASSERT(msg->ksm_zc_cookies[1] == 0); + LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL); ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx); /* ztx will be released later */ @@ -777,14 +777,14 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) ksock_route_t * ksocknal_find_connectable_route_locked (ksock_peer_t *peer) { - unsigned long now = cfs_time_current(); - struct list_head *tmp; + unsigned long now = cfs_time_current(); + struct list_head *tmp; ksock_route_t *route; list_for_each (tmp, &peer->ksnp_routes) { route = list_entry (tmp, ksock_route_t, ksnr_list); - LASSERT (!route->ksnr_connecting || route->ksnr_scheduled); + LASSERT(!route->ksnr_connecting || route->ksnr_scheduled); if (route->ksnr_scheduled) /* connections being established */ continue; @@ -813,13 +813,13 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer) ksock_route_t * ksocknal_find_connecting_route_locked (ksock_peer_t *peer) { - struct list_head *tmp; - ksock_route_t *route; + struct list_head *tmp; + ksock_route_t *route; list_for_each (tmp, &peer->ksnp_routes) { route = list_entry (tmp, ksock_route_t, ksnr_list); - LASSERT (!route->ksnr_connecting || route->ksnr_scheduled); + LASSERT(!route->ksnr_connecting || route->ksnr_scheduled); if (route->ksnr_scheduled) return route; @@ -831,13 +831,13 @@ ksocknal_find_connecting_route_locked (ksock_peer_t *peer) int ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) { - ksock_peer_t *peer; - ksock_conn_t *conn; - rwlock_t *g_lock; - int retry; - int rc; + ksock_peer_t *peer; + ksock_conn_t *conn; + rwlock_t *g_lock; + int retry; + int rc; - LASSERT (tx->tx_conn == NULL); + LASSERT(tx->tx_conn == NULL); g_lock = &ksocknal_data.ksnd_global_lock; @@ -922,17 +922,17 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) int ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) { - int mpflag = 1; - int type = lntmsg->msg_type; + int mpflag = 1; + int type = lntmsg->msg_type; lnet_process_id_t target = lntmsg->msg_target; - unsigned int payload_niov = lntmsg->msg_niov; - struct kvec *payload_iov = lntmsg->msg_iov; - lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; - unsigned int payload_offset = lntmsg->msg_offset; - unsigned int payload_nob = lntmsg->msg_len; - ksock_tx_t *tx; - int desc_size; - int rc; + unsigned int payload_niov = lntmsg->msg_niov; + struct kvec *payload_iov = lntmsg->msg_iov; + lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; + unsigned int payload_offset = lntmsg->msg_offset; + unsigned int payload_nob = lntmsg->msg_len; + ksock_tx_t *tx; + int desc_size; + int rc; /* NB 'private' is different depending on what we're sending. * Just ignore it... 
*/ @@ -940,8 +940,8 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n", payload_nob, payload_niov, libcfs_id2str(target)); - LASSERT (payload_nob == 0 || payload_niov > 0); - LASSERT (payload_niov <= LNET_MAX_IOV); + LASSERT(payload_nob == 0 || payload_niov > 0); + LASSERT(payload_niov <= LNET_MAX_IOV); /* payload is either all vaddrs or all pages */ LASSERT (!(payload_kiov != NULL && payload_iov != NULL)); LASSERT (!in_interrupt ()); @@ -1028,9 +1028,9 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) { static char ksocknal_slop_buffer[4096]; - int nob; - unsigned int niov; - int skipped; + int nob; + unsigned int niov; + int skipped; LASSERT(conn->ksnc_proto != NULL); @@ -1063,7 +1063,7 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space; conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg; - conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t); + conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t); break; default: @@ -1108,18 +1108,18 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) static int ksocknal_process_receive (ksock_conn_t *conn) { - lnet_hdr_t *lhdr; + lnet_hdr_t *lhdr; lnet_process_id_t *id; - int rc; + int rc; LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0); /* NB: sched lock NOT held */ /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */ - LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER || - conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD || - conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER || - conn->ksnc_rx_state == SOCKNAL_RX_SLOP); + LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER || + conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD || + conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER || + conn->ksnc_rx_state == SOCKNAL_RX_SLOP); again: if (conn->ksnc_rx_nob_wanted != 0) { rc = ksocknal_receive(conn); @@ -1229,7 +1229,7 @@ ksocknal_process_receive (ksock_conn_t *conn) if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) { /* Userspace peer */ lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr; - id = &conn->ksnc_peer->ksnp_id; + id = &conn->ksnc_peer->ksnp_id; /* Substitute process ID assigned at connection time */ lhdr->src_pid = cpu_to_le32(id->pid); @@ -1277,7 +1277,7 @@ ksocknal_process_receive (ksock_conn_t *conn) LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x); lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr; - id = &conn->ksnc_peer->ksnp_id; + id = &conn->ksnc_peer->ksnp_id; rc = conn->ksnc_proto->pro_handle_zcreq(conn, conn->ksnc_msg.ksm_zc_cookies[0], @@ -1305,7 +1305,7 @@ ksocknal_process_receive (ksock_conn_t *conn) } /* Not Reached */ - LBUG (); + LBUG(); return -EINVAL; /* keep gcc happy */ } @@ -1314,15 +1314,15 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen) { - ksock_conn_t *conn = (ksock_conn_t *)private; + ksock_conn_t *conn = (ksock_conn_t *)private; ksock_sched_t *sched = conn->ksnc_scheduler; - LASSERT (mlen <= rlen); - LASSERT (niov <= LNET_MAX_IOV); + LASSERT(mlen <= rlen); + LASSERT(niov <= LNET_MAX_IOV); conn->ksnc_cookie = msg; conn->ksnc_rx_nob_wanted = mlen; - conn->ksnc_rx_nob_left = rlen; + conn->ksnc_rx_nob_left = rlen; if (mlen == 0 || iov != NULL) { conn->ksnc_rx_nkiov = 0; @@ -1333,18 +1333,18 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, niov, iov, offset, mlen); } else 
{ conn->ksnc_rx_niov = 0; - conn->ksnc_rx_iov = NULL; + conn->ksnc_rx_iov = NULL; conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov; conn->ksnc_rx_nkiov = lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov, niov, kiov, offset, mlen); } - LASSERT (mlen == - lnet_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) + - lnet_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov)); + LASSERT(mlen == + lnet_iov_nob(conn->ksnc_rx_niov, conn->ksnc_rx_iov) + + lnet_kiov_nob(conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov)); - LASSERT (conn->ksnc_rx_scheduled); + LASSERT(conn->ksnc_rx_scheduled); spin_lock_bh(&sched->kss_lock); @@ -1370,7 +1370,7 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, static inline int ksocknal_sched_cansleep(ksock_sched_t *sched) { - int rc; + int rc; spin_lock_bh(&sched->kss_lock); @@ -1384,13 +1384,13 @@ ksocknal_sched_cansleep(ksock_sched_t *sched) int ksocknal_scheduler(void *arg) { - struct ksock_sched_info *info; - ksock_sched_t *sched; - ksock_conn_t *conn; - ksock_tx_t *tx; - int rc; - int nloops = 0; - long id = (long)arg; + struct ksock_sched_info *info; + ksock_sched_t *sched; + ksock_conn_t *conn; + ksock_tx_t *tx; + int rc; + int nloops = 0; + long id = (long)arg; info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)]; sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)]; @@ -1455,7 +1455,7 @@ int ksocknal_scheduler(void *arg) } if (!list_empty (&sched->kss_tx_conns)) { - LIST_HEAD (zlist); + LIST_HEAD(zlist); if (!list_empty(&sched->kss_zombie_noop_txs)) { list_add(&zlist, @@ -1513,9 +1513,9 @@ int ksocknal_scheduler(void *arg) /* Do nothing; after a short timeout, this * conn will be reposted on kss_tx_conns. */ } else if (conn->ksnc_tx_ready && - !list_empty (&conn->ksnc_tx_queue)) { + !list_empty(&conn->ksnc_tx_queue)) { /* reschedule for tx */ - list_add_tail (&conn->ksnc_tx_list, + list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns); } else { conn->ksnc_tx_scheduled = 0; @@ -1606,7 +1606,7 @@ void ksocknal_write_callback (ksock_conn_t *conn) static ksock_proto_t * ksocknal_parse_proto_version (ksock_hello_msg_t *hello) { - __u32 version = 0; + __u32 version = 0; if (hello->kshm_magic == LNET_PROTO_MAGIC) version = hello->kshm_version; @@ -1634,8 +1634,8 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello) if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) { lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello; - CLASSERT (sizeof (lnet_magicversion_t) == - offsetof (ksock_hello_msg_t, kshm_src_nid)); + CLASSERT(sizeof (lnet_magicversion_t) == + offsetof (ksock_hello_msg_t, kshm_src_nid)); if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) && hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR)) @@ -1650,19 +1650,19 @@ ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn, lnet_nid_t peer_nid, ksock_hello_msg_t *hello) { /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */ - ksock_net_t *net = (ksock_net_t *)ni->ni_data; + ksock_net_t *net = (ksock_net_t *)ni->ni_data; - LASSERT (hello->kshm_nips <= LNET_MAX_INTERFACES); + LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES); /* rely on caller to hold a ref on socket so it wouldn't disappear */ - LASSERT (conn->ksnc_proto != NULL); + LASSERT(conn->ksnc_proto != NULL); - hello->kshm_src_nid = ni->ni_nid; - hello->kshm_dst_nid = peer_nid; - hello->kshm_src_pid = the_lnet.ln_pid; + hello->kshm_src_nid = ni->ni_nid; + hello->kshm_dst_nid = peer_nid; + hello->kshm_src_pid = the_lnet.ln_pid; hello->kshm_src_incarnation = net->ksnn_incarnation; - hello->kshm_ctype = 
conn->ksnc_type; + hello->kshm_ctype = conn->ksnc_type; return conn->ksnc_proto->pro_send_hello(conn, hello); } @@ -1693,21 +1693,21 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, * EALREADY lost connection race * EPROTO protocol version mismatch */ - struct socket *sock = conn->ksnc_sock; - int active = (conn->ksnc_proto != NULL); - int timeout; - int proto_match; - int rc; - ksock_proto_t *proto; - lnet_process_id_t recv_id; + struct socket *sock = conn->ksnc_sock; + int active = (conn->ksnc_proto != NULL); + int timeout; + int proto_match; + int rc; + ksock_proto_t *proto; + lnet_process_id_t recv_id; /* socket type set on active connections - not set on passive */ - LASSERT (!active == !(conn->ksnc_type != SOCKLND_CONN_NONE)); + LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE)); timeout = active ? *ksocknal_tunables.ksnd_timeout : lnet_acceptor_timeout(); - rc = libcfs_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout); + rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout); if (rc != 0) { CERROR("Error %d reading HELLO from %pI4h\n", rc, &conn->ksnc_ipaddr); @@ -1726,12 +1726,12 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, return -EPROTO; } - rc = libcfs_sock_read(sock, &hello->kshm_version, - sizeof(hello->kshm_version), timeout); + rc = lnet_sock_read(sock, &hello->kshm_version, + sizeof(hello->kshm_version), timeout); if (rc != 0) { CERROR("Error %d reading HELLO from %pI4h\n", rc, &conn->ksnc_ipaddr); - LASSERT (rc < 0); + LASSERT(rc < 0); return rc; } @@ -1765,7 +1765,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, if (rc != 0) { CERROR("Error %d reading or checking hello from %pI4h\n", rc, &conn->ksnc_ipaddr); - LASSERT (rc < 0); + LASSERT(rc < 0); return rc; } @@ -1830,22 +1830,22 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, static int ksocknal_connect (ksock_route_t *route) { - LIST_HEAD (zombies); - ksock_peer_t *peer = route->ksnr_peer; - int type; - int wanted; - struct socket *sock; - unsigned long deadline; - int retry_later = 0; - int rc = 0; + LIST_HEAD(zombies); + ksock_peer_t *peer = route->ksnr_peer; + int type; + int wanted; + struct socket *sock; + unsigned long deadline; + int retry_later = 0; + int rc = 0; deadline = cfs_time_add(cfs_time_current(), cfs_time_seconds(*ksocknal_tunables.ksnd_timeout)); write_lock_bh(&ksocknal_data.ksnd_global_lock); - LASSERT (route->ksnr_scheduled); - LASSERT (!route->ksnr_connecting); + LASSERT(route->ksnr_scheduled); + LASSERT(!route->ksnr_connecting); route->ksnr_connecting = 1; @@ -2101,7 +2101,7 @@ static ksock_route_t * ksocknal_connd_get_route_locked(signed long *timeout_p) { ksock_route_t *route; - unsigned long now; + unsigned long now; now = cfs_time_current(); @@ -2124,13 +2124,13 @@ ksocknal_connd_get_route_locked(signed long *timeout_p) int ksocknal_connd (void *arg) { - spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock; - ksock_connreq_t *cr; - wait_queue_t wait; - int nloops = 0; - int cons_retry = 0; + spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock; + ksock_connreq_t *cr; + wait_queue_t wait; + int nloops = 0; + int cons_retry = 0; - cfs_block_allsigs (); + cfs_block_allsigs(); init_waitqueue_entry(&wait, current); @@ -2144,7 +2144,7 @@ ksocknal_connd (void *arg) ksock_route_t *route = NULL; long sec = get_seconds(); long timeout = MAX_SCHEDULE_TIMEOUT; - int dropped_lock = 0; + int dropped_lock = 0; if (ksocknal_connd_check_stop(sec, &timeout)) { /* wakeup another one to check stop 
*/ @@ -2236,15 +2236,15 @@ static ksock_conn_t * ksocknal_find_timed_out_conn (ksock_peer_t *peer) { /* We're called with a shared lock on ksnd_global_lock */ - ksock_conn_t *conn; - struct list_head *ctmp; + ksock_conn_t *conn; + struct list_head *ctmp; list_for_each (ctmp, &peer->ksnp_conns) { - int error; + int error; conn = list_entry (ctmp, ksock_conn_t, ksnc_list); /* Don't need the {get,put}connsock dance to deref ksnc_sock */ - LASSERT (!conn->ksnc_closing); + LASSERT(!conn->ksnc_closing); /* SOCK_ERROR will reset error code of socket in * some platform (like Darwin8.x) */ @@ -2313,8 +2313,8 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer) static inline void ksocknal_flush_stale_txs(ksock_peer_t *peer) { - ksock_tx_t *tx; - LIST_HEAD (stale_txs); + ksock_tx_t *tx; + LIST_HEAD(stale_txs); write_lock_bh(&ksocknal_data.ksnd_global_lock); @@ -2338,9 +2338,9 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer) static int ksocknal_send_keepalive_locked(ksock_peer_t *peer) { - ksock_sched_t *sched; - ksock_conn_t *conn; - ksock_tx_t *tx; + ksock_sched_t *sched; + ksock_conn_t *conn; + ksock_tx_t *tx; if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */ return 0; @@ -2399,10 +2399,10 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer) static void ksocknal_check_peer_timeouts (int idx) { - struct list_head *peers = &ksocknal_data.ksnd_peers[idx]; - ksock_peer_t *peer; - ksock_conn_t *conn; - ksock_tx_t *tx; + struct list_head *peers = &ksocknal_data.ksnd_peers[idx]; + ksock_peer_t *peer; + ksock_conn_t *conn; + ksock_tx_t *tx; again: /* NB. We expect to have a look at all the peers and not find any @@ -2411,9 +2411,9 @@ ksocknal_check_peer_timeouts (int idx) read_lock(&ksocknal_data.ksnd_global_lock); list_for_each_entry(peer, peers, ksnp_list) { - unsigned long deadline = 0; - int resid = 0; - int n = 0; + unsigned long deadline = 0; + int resid = 0; + int n = 0; if (ksocknal_send_keepalive_locked(peer) != 0) { read_unlock(&ksocknal_data.ksnd_global_lock); @@ -2476,8 +2476,8 @@ ksocknal_check_peer_timeouts (int idx) tx = list_entry(peer->ksnp_zc_req_list.next, ksock_tx_t, tx_zc_list); deadline = tx->tx_deadline; - resid = tx->tx_resid; - conn = tx->tx_conn; + resid = tx->tx_resid; + conn = tx->tx_conn; ksocknal_conn_addref(conn); spin_unlock(&peer->ksnp_lock); @@ -2499,17 +2499,17 @@ ksocknal_check_peer_timeouts (int idx) int ksocknal_reaper (void *arg) { - wait_queue_t wait; - ksock_conn_t *conn; - ksock_sched_t *sched; - struct list_head enomem_conns; - int nenomem_conns; - long timeout; - int i; - int peer_index = 0; - unsigned long deadline = cfs_time_current(); - - cfs_block_allsigs (); + wait_queue_t wait; + ksock_conn_t *conn; + ksock_sched_t *sched; + struct list_head enomem_conns; + int nenomem_conns; + long timeout; + int i; + int peer_index = 0; + unsigned long deadline = cfs_time_current(); + + cfs_block_allsigs(); INIT_LIST_HEAD(&enomem_conns); init_waitqueue_entry(&wait, current); @@ -2580,7 +2580,7 @@ ksocknal_reaper (void *arg) cfs_time_current())) <= 0) { const int n = 4; const int p = 1; - int chunk = ksocknal_data.ksnd_peer_hash_size; + int chunk = ksocknal_data.ksnd_peer_hash_size; /* Time to check for timeouts on a few more peers: I do * checks every 'p' seconds on a proportion of the peer diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c deleted file mode 100644 index f5e8ab060..000000000 --- 
a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c +++ /dev/null @@ -1,714 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2011, 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#include "socklnd.h" - -int -ksocknal_lib_get_conn_addrs(ksock_conn_t *conn) -{ - int rc = libcfs_sock_getaddr(conn->ksnc_sock, 1, - &conn->ksnc_ipaddr, - &conn->ksnc_port); - - /* Didn't need the {get,put}connsock dance to deref ksnc_sock... */ - LASSERT(!conn->ksnc_closing); - - if (rc != 0) { - CERROR("Error %d getting sock peer IP\n", rc); - return rc; - } - - rc = libcfs_sock_getaddr(conn->ksnc_sock, 0, - &conn->ksnc_myipaddr, NULL); - if (rc != 0) { - CERROR("Error %d getting sock local IP\n", rc); - return rc; - } - - return 0; -} - -int -ksocknal_lib_zc_capable(ksock_conn_t *conn) -{ - int caps = conn->ksnc_sock->sk->sk_route_caps; - - if (conn->ksnc_proto == &ksocknal_protocol_v1x) - return 0; - - /* ZC if the socket supports scatter/gather and doesn't need software - * checksums */ - return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_ALL_CSUM) != 0); -} - -int -ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx) -{ - struct socket *sock = conn->ksnc_sock; - int nob; - int rc; - - if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */ - conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */ - tx->tx_nob == tx->tx_resid && /* first sending */ - tx->tx_msg.ksm_csum == 0) /* not checksummed */ - ksocknal_lib_csum_tx(tx); - - /* NB we can't trust socket ops to either consume our iovs - * or leave them alone. 
*/ - - { -#if SOCKNAL_SINGLE_FRAG_TX - struct kvec scratch; - struct kvec *scratchiov = &scratch; - unsigned int niov = 1; -#else - struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov; - unsigned int niov = tx->tx_niov; -#endif - struct msghdr msg = {.msg_flags = MSG_DONTWAIT}; - int i; - - for (nob = i = 0; i < niov; i++) { - scratchiov[i] = tx->tx_iov[i]; - nob += scratchiov[i].iov_len; - } - - if (!list_empty(&conn->ksnc_tx_queue) || - nob < tx->tx_resid) - msg.msg_flags |= MSG_MORE; - - rc = kernel_sendmsg(sock, &msg, scratchiov, niov, nob); - } - return rc; -} - -int -ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) -{ - struct socket *sock = conn->ksnc_sock; - lnet_kiov_t *kiov = tx->tx_kiov; - int rc; - int nob; - - /* Not NOOP message */ - LASSERT(tx->tx_lnetmsg != NULL); - - /* NB we can't trust socket ops to either consume our iovs - * or leave them alone. */ - if (tx->tx_msg.ksm_zc_cookies[0] != 0) { - /* Zero copy is enabled */ - struct sock *sk = sock->sk; - struct page *page = kiov->kiov_page; - int offset = kiov->kiov_offset; - int fragsize = kiov->kiov_len; - int msgflg = MSG_DONTWAIT; - - CDEBUG(D_NET, "page %p + offset %x for %d\n", - page, offset, kiov->kiov_len); - - if (!list_empty(&conn->ksnc_tx_queue) || - fragsize < tx->tx_resid) - msgflg |= MSG_MORE; - - if (sk->sk_prot->sendpage != NULL) { - rc = sk->sk_prot->sendpage(sk, page, - offset, fragsize, msgflg); - } else { - rc = cfs_tcp_sendpage(sk, page, offset, fragsize, - msgflg); - } - } else { -#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK - struct kvec scratch; - struct kvec *scratchiov = &scratch; - unsigned int niov = 1; -#else -#ifdef CONFIG_HIGHMEM -#warning "XXX risk of kmap deadlock on multiple frags..." -#endif - struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov; - unsigned int niov = tx->tx_nkiov; -#endif - struct msghdr msg = {.msg_flags = MSG_DONTWAIT}; - int i; - - for (nob = i = 0; i < niov; i++) { - scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + - kiov[i].kiov_offset; - nob += scratchiov[i].iov_len = kiov[i].kiov_len; - } - - if (!list_empty(&conn->ksnc_tx_queue) || - nob < tx->tx_resid) - msg.msg_flags |= MSG_MORE; - - rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob); - - for (i = 0; i < niov; i++) - kunmap(kiov[i].kiov_page); - } - return rc; -} - -void -ksocknal_lib_eager_ack(ksock_conn_t *conn) -{ - int opt = 1; - struct socket *sock = conn->ksnc_sock; - - /* Remind the socket to ACK eagerly. If I don't, the socket might - * think I'm about to send something it could piggy-back the ACK - * on, introducing delay in completing zero-copy sends in my - * peer. */ - - kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK, - (char *)&opt, sizeof(opt)); -} - -int -ksocknal_lib_recv_iov(ksock_conn_t *conn) -{ -#if SOCKNAL_SINGLE_FRAG_RX - struct kvec scratch; - struct kvec *scratchiov = &scratch; - unsigned int niov = 1; -#else - struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov; - unsigned int niov = conn->ksnc_rx_niov; -#endif - struct kvec *iov = conn->ksnc_rx_iov; - struct msghdr msg = { - .msg_flags = 0 - }; - int nob; - int i; - int rc; - int fragnob; - int sum; - __u32 saved_csum; - - /* NB we can't trust socket ops to either consume our iovs - * or leave them alone. 
*/ - LASSERT(niov > 0); - - for (nob = i = 0; i < niov; i++) { - scratchiov[i] = iov[i]; - nob += scratchiov[i].iov_len; - } - LASSERT(nob <= conn->ksnc_rx_nob_wanted); - - rc = kernel_recvmsg(conn->ksnc_sock, &msg, - scratchiov, niov, nob, MSG_DONTWAIT); - - saved_csum = 0; - if (conn->ksnc_proto == &ksocknal_protocol_v2x) { - saved_csum = conn->ksnc_msg.ksm_csum; - conn->ksnc_msg.ksm_csum = 0; - } - - if (saved_csum != 0) { - /* accumulate checksum */ - for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) { - LASSERT(i < niov); - - fragnob = iov[i].iov_len; - if (fragnob > sum) - fragnob = sum; - - conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum, - iov[i].iov_base, fragnob); - } - conn->ksnc_msg.ksm_csum = saved_csum; - } - - return rc; -} - -static void -ksocknal_lib_kiov_vunmap(void *addr) -{ - if (addr == NULL) - return; - - vunmap(addr); -} - -static void * -ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov, - struct kvec *iov, struct page **pages) -{ - void *addr; - int nob; - int i; - - if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL) - return NULL; - - LASSERT(niov <= LNET_MAX_IOV); - - if (niov < 2 || - niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags) - return NULL; - - for (nob = i = 0; i < niov; i++) { - if ((kiov[i].kiov_offset != 0 && i > 0) || - (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1)) - return NULL; - - pages[i] = kiov[i].kiov_page; - nob += kiov[i].kiov_len; - } - - addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL); - if (addr == NULL) - return NULL; - - iov->iov_base = addr + kiov[0].kiov_offset; - iov->iov_len = nob; - - return addr; -} - -int -ksocknal_lib_recv_kiov(ksock_conn_t *conn) -{ -#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK - struct kvec scratch; - struct kvec *scratchiov = &scratch; - struct page **pages = NULL; - unsigned int niov = 1; -#else -#ifdef CONFIG_HIGHMEM -#warning "XXX risk of kmap deadlock on multiple frags..." -#endif - struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov; - struct page **pages = conn->ksnc_scheduler->kss_rx_scratch_pgs; - unsigned int niov = conn->ksnc_rx_nkiov; -#endif - lnet_kiov_t *kiov = conn->ksnc_rx_kiov; - struct msghdr msg = { - .msg_flags = 0 - }; - int nob; - int i; - int rc; - void *base; - void *addr; - int sum; - int fragnob; - int n; - - /* NB we can't trust socket ops to either consume our iovs - * or leave them alone. */ - addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages); - if (addr != NULL) { - nob = scratchiov[0].iov_len; - n = 1; - - } else { - for (nob = i = 0; i < niov; i++) { - nob += scratchiov[i].iov_len = kiov[i].kiov_len; - scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + - kiov[i].kiov_offset; - } - n = niov; - } - - LASSERT(nob <= conn->ksnc_rx_nob_wanted); - - rc = kernel_recvmsg(conn->ksnc_sock, &msg, - (struct kvec *)scratchiov, n, nob, MSG_DONTWAIT); - - if (conn->ksnc_msg.ksm_csum != 0) { - for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) { - LASSERT(i < niov); - - /* Dang! have to kmap again because I have nowhere to stash the - * mapped address. But by doing it while the page is still - * mapped, the kernel just bumps the map count and returns me - * the address it stashed. 
*/ - base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset; - fragnob = kiov[i].kiov_len; - if (fragnob > sum) - fragnob = sum; - - conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum, - base, fragnob); - - kunmap(kiov[i].kiov_page); - } - } - - if (addr != NULL) { - ksocknal_lib_kiov_vunmap(addr); - } else { - for (i = 0; i < niov; i++) - kunmap(kiov[i].kiov_page); - } - - return rc; -} - -void -ksocknal_lib_csum_tx(ksock_tx_t *tx) -{ - int i; - __u32 csum; - void *base; - - LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg); - LASSERT(tx->tx_conn != NULL); - LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x); - - tx->tx_msg.ksm_csum = 0; - - csum = ksocknal_csum(~0, tx->tx_iov[0].iov_base, - tx->tx_iov[0].iov_len); - - if (tx->tx_kiov != NULL) { - for (i = 0; i < tx->tx_nkiov; i++) { - base = kmap(tx->tx_kiov[i].kiov_page) + - tx->tx_kiov[i].kiov_offset; - - csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len); - - kunmap(tx->tx_kiov[i].kiov_page); - } - } else { - for (i = 1; i < tx->tx_niov; i++) - csum = ksocknal_csum(csum, tx->tx_iov[i].iov_base, - tx->tx_iov[i].iov_len); - } - - if (*ksocknal_tunables.ksnd_inject_csum_error) { - csum++; - *ksocknal_tunables.ksnd_inject_csum_error = 0; - } - - tx->tx_msg.ksm_csum = csum; -} - -int -ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle) -{ - struct socket *sock = conn->ksnc_sock; - int len; - int rc; - - rc = ksocknal_connsock_addref(conn); - if (rc != 0) { - LASSERT(conn->ksnc_closing); - *txmem = *rxmem = *nagle = 0; - return -ESHUTDOWN; - } - - rc = libcfs_sock_getbuf(sock, txmem, rxmem); - if (rc == 0) { - len = sizeof(*nagle); - rc = kernel_getsockopt(sock, SOL_TCP, TCP_NODELAY, - (char *)nagle, &len); - } - - ksocknal_connsock_decref(conn); - - if (rc == 0) - *nagle = !*nagle; - else - *txmem = *rxmem = *nagle = 0; - - return rc; -} - -int -ksocknal_lib_setup_sock(struct socket *sock) -{ - int rc; - int option; - int keep_idle; - int keep_intvl; - int keep_count; - int do_keepalive; - struct linger linger; - - sock->sk->sk_allocation = GFP_NOFS; - - /* Ensure this socket aborts active sends immediately when we close - * it. */ - - linger.l_onoff = 0; - linger.l_linger = 0; - - rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, - (char *)&linger, sizeof(linger)); - if (rc != 0) { - CERROR("Can't set SO_LINGER: %d\n", rc); - return rc; - } - - option = -1; - rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2, - (char *)&option, sizeof(option)); - if (rc != 0) { - CERROR("Can't set SO_LINGER2: %d\n", rc); - return rc; - } - - if (!*ksocknal_tunables.ksnd_nagle) { - option = 1; - - rc = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, - (char *)&option, sizeof(option)); - if (rc != 0) { - CERROR("Can't disable nagle: %d\n", rc); - return rc; - } - } - - rc = libcfs_sock_setbuf(sock, - *ksocknal_tunables.ksnd_tx_buffer_size, - *ksocknal_tunables.ksnd_rx_buffer_size); - if (rc != 0) { - CERROR("Can't set buffer tx %d, rx %d buffers: %d\n", - *ksocknal_tunables.ksnd_tx_buffer_size, - *ksocknal_tunables.ksnd_rx_buffer_size, rc); - return rc; - } - -/* TCP_BACKOFF_* sockopt tunables unsupported in stock kernels */ - - /* snapshot tunables */ - keep_idle = *ksocknal_tunables.ksnd_keepalive_idle; - keep_count = *ksocknal_tunables.ksnd_keepalive_count; - keep_intvl = *ksocknal_tunables.ksnd_keepalive_intvl; - - do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0); - - option = (do_keepalive ? 
1 : 0); - rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, - (char *)&option, sizeof(option)); - if (rc != 0) { - CERROR("Can't set SO_KEEPALIVE: %d\n", rc); - return rc; - } - - if (!do_keepalive) - return 0; - - rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, - (char *)&keep_idle, sizeof(keep_idle)); - if (rc != 0) { - CERROR("Can't set TCP_KEEPIDLE: %d\n", rc); - return rc; - } - - rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL, - (char *)&keep_intvl, sizeof(keep_intvl)); - if (rc != 0) { - CERROR("Can't set TCP_KEEPINTVL: %d\n", rc); - return rc; - } - - rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, - (char *)&keep_count, sizeof(keep_count)); - if (rc != 0) { - CERROR("Can't set TCP_KEEPCNT: %d\n", rc); - return rc; - } - - return 0; -} - -void -ksocknal_lib_push_conn(ksock_conn_t *conn) -{ - struct sock *sk; - struct tcp_sock *tp; - int nonagle; - int val = 1; - int rc; - - rc = ksocknal_connsock_addref(conn); - if (rc != 0) /* being shut down */ - return; - - sk = conn->ksnc_sock->sk; - tp = tcp_sk(sk); - - lock_sock(sk); - nonagle = tp->nonagle; - tp->nonagle = 1; - release_sock(sk); - - rc = kernel_setsockopt(conn->ksnc_sock, SOL_TCP, TCP_NODELAY, - (char *)&val, sizeof(val)); - LASSERT(rc == 0); - - lock_sock(sk); - tp->nonagle = nonagle; - release_sock(sk); - - ksocknal_connsock_decref(conn); -} - -extern void ksocknal_read_callback(ksock_conn_t *conn); -extern void ksocknal_write_callback(ksock_conn_t *conn); -/* - * socket call back in Linux - */ -static void -ksocknal_data_ready(struct sock *sk) -{ - ksock_conn_t *conn; - - /* interleave correctly with closing sockets... */ - LASSERT(!in_irq()); - read_lock(&ksocknal_data.ksnd_global_lock); - - conn = sk->sk_user_data; - if (conn == NULL) { /* raced with ksocknal_terminate_conn */ - LASSERT(sk->sk_data_ready != &ksocknal_data_ready); - sk->sk_data_ready(sk); - } else - ksocknal_read_callback(conn); - - read_unlock(&ksocknal_data.ksnd_global_lock); -} - -static void -ksocknal_write_space(struct sock *sk) -{ - ksock_conn_t *conn; - int wspace; - int min_wpace; - - /* interleave correctly with closing sockets... */ - LASSERT(!in_irq()); - read_lock(&ksocknal_data.ksnd_global_lock); - - conn = sk->sk_user_data; - wspace = SOCKNAL_WSPACE(sk); - min_wpace = SOCKNAL_MIN_WSPACE(sk); - - CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n", - sk, wspace, min_wpace, conn, - (conn == NULL) ? "" : (conn->ksnc_tx_ready ? - " ready" : " blocked"), - (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ? - " scheduled" : " idle"), - (conn == NULL) ? "" : (list_empty(&conn->ksnc_tx_queue) ? - " empty" : " queued")); - - if (conn == NULL) { /* raced with ksocknal_terminate_conn */ - LASSERT(sk->sk_write_space != &ksocknal_write_space); - sk->sk_write_space(sk); - - read_unlock(&ksocknal_data.ksnd_global_lock); - return; - } - - if (wspace >= min_wpace) { /* got enough space */ - ksocknal_write_callback(conn); - - /* Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the - * ENOMEM check in ksocknal_transmit is race-free (think about - * it). 
*/ - - clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); - } - - read_unlock(&ksocknal_data.ksnd_global_lock); -} - -void -ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn) -{ - conn->ksnc_saved_data_ready = sock->sk->sk_data_ready; - conn->ksnc_saved_write_space = sock->sk->sk_write_space; -} - -void -ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn) -{ - sock->sk->sk_user_data = conn; - sock->sk->sk_data_ready = ksocknal_data_ready; - sock->sk->sk_write_space = ksocknal_write_space; - return; -} - -void -ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn) -{ - /* Remove conn's network callbacks. - * NB I _have_ to restore the callback, rather than storing a noop, - * since the socket could survive past this module being unloaded!! */ - sock->sk->sk_data_ready = conn->ksnc_saved_data_ready; - sock->sk->sk_write_space = conn->ksnc_saved_write_space; - - /* A callback could be in progress already; they hold a read lock - * on ksnd_global_lock (to serialise with me) and NOOP if - * sk_user_data is NULL. */ - sock->sk->sk_user_data = NULL; - - return ; -} - -int -ksocknal_lib_memory_pressure(ksock_conn_t *conn) -{ - int rc = 0; - ksock_sched_t *sched; - - sched = conn->ksnc_scheduler; - spin_lock_bh(&sched->kss_lock); - - if (!test_bit(SOCK_NOSPACE, &conn->ksnc_sock->flags) && - !conn->ksnc_tx_ready) { - /* SOCK_NOSPACE is set when the socket fills - * and cleared in the write_space callback - * (which also sets ksnc_tx_ready). If - * SOCK_NOSPACE and ksnc_tx_ready are BOTH - * zero, I didn't fill the socket and - * write_space won't reschedule me, so I - * return -ENOMEM to get my caller to retry - * after a timeout */ - rc = -ENOMEM; - } - - spin_unlock_bh(&sched->kss_lock); - - return rc; -} diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h deleted file mode 100644 index f5563881b..000000000 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h +++ /dev/null @@ -1,86 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. 
- */ - -#define DEBUG_PORTAL_ALLOC - -#ifndef __LINUX_SOCKNAL_LIB_H__ -#define __LINUX_SOCKNAL_LIB_H__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include -#include - -#include "../../../include/linux/libcfs/libcfs.h" - -#include -static inline __u32 ksocknal_csum(__u32 crc, unsigned char const *p, size_t len) -{ -#if 1 - return crc32_le(crc, p, len); -#else - while (len-- > 0) - crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff) ; - return crc; -#endif -} - -#define SOCKNAL_WSPACE(sk) sk_stream_wspace(sk) -#define SOCKNAL_MIN_WSPACE(sk) sk_stream_min_wspace(sk) - -/* assume one thread for each connection type */ -#define SOCKNAL_NSCHEDS 3 -#define SOCKNAL_NSCHEDS_HIGH (SOCKNAL_NSCHEDS << 1) - -#endif diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c new file mode 100644 index 000000000..340706110 --- /dev/null +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c @@ -0,0 +1,710 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2011, 2012, Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. + */ + +#include "socklnd.h" + +int +ksocknal_lib_get_conn_addrs(ksock_conn_t *conn) +{ + int rc = lnet_sock_getaddr(conn->ksnc_sock, 1, &conn->ksnc_ipaddr, + &conn->ksnc_port); + + /* Didn't need the {get,put}connsock dance to deref ksnc_sock... 
*/ + LASSERT(!conn->ksnc_closing); + + if (rc != 0) { + CERROR("Error %d getting sock peer IP\n", rc); + return rc; + } + + rc = lnet_sock_getaddr(conn->ksnc_sock, 0, &conn->ksnc_myipaddr, NULL); + if (rc != 0) { + CERROR("Error %d getting sock local IP\n", rc); + return rc; + } + + return 0; +} + +int +ksocknal_lib_zc_capable(ksock_conn_t *conn) +{ + int caps = conn->ksnc_sock->sk->sk_route_caps; + + if (conn->ksnc_proto == &ksocknal_protocol_v1x) + return 0; + + /* ZC if the socket supports scatter/gather and doesn't need software + * checksums */ + return ((caps & NETIF_F_SG) != 0 && (caps & NETIF_F_ALL_CSUM) != 0); +} + +int +ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx) +{ + struct socket *sock = conn->ksnc_sock; + int nob; + int rc; + + if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */ + conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */ + tx->tx_nob == tx->tx_resid && /* first sending */ + tx->tx_msg.ksm_csum == 0) /* not checksummed */ + ksocknal_lib_csum_tx(tx); + + /* NB we can't trust socket ops to either consume our iovs + * or leave them alone. */ + + { +#if SOCKNAL_SINGLE_FRAG_TX + struct kvec scratch; + struct kvec *scratchiov = &scratch; + unsigned int niov = 1; +#else + struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov; + unsigned int niov = tx->tx_niov; +#endif + struct msghdr msg = {.msg_flags = MSG_DONTWAIT}; + int i; + + for (nob = i = 0; i < niov; i++) { + scratchiov[i] = tx->tx_iov[i]; + nob += scratchiov[i].iov_len; + } + + if (!list_empty(&conn->ksnc_tx_queue) || + nob < tx->tx_resid) + msg.msg_flags |= MSG_MORE; + + rc = kernel_sendmsg(sock, &msg, scratchiov, niov, nob); + } + return rc; +} + +int +ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) +{ + struct socket *sock = conn->ksnc_sock; + lnet_kiov_t *kiov = tx->tx_kiov; + int rc; + int nob; + + /* Not NOOP message */ + LASSERT(tx->tx_lnetmsg != NULL); + + /* NB we can't trust socket ops to either consume our iovs + * or leave them alone. */ + if (tx->tx_msg.ksm_zc_cookies[0] != 0) { + /* Zero copy is enabled */ + struct sock *sk = sock->sk; + struct page *page = kiov->kiov_page; + int offset = kiov->kiov_offset; + int fragsize = kiov->kiov_len; + int msgflg = MSG_DONTWAIT; + + CDEBUG(D_NET, "page %p + offset %x for %d\n", + page, offset, kiov->kiov_len); + + if (!list_empty(&conn->ksnc_tx_queue) || + fragsize < tx->tx_resid) + msgflg |= MSG_MORE; + + if (sk->sk_prot->sendpage != NULL) { + rc = sk->sk_prot->sendpage(sk, page, + offset, fragsize, msgflg); + } else { + rc = tcp_sendpage(sk, page, offset, fragsize, msgflg); + } + } else { +#if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK + struct kvec scratch; + struct kvec *scratchiov = &scratch; + unsigned int niov = 1; +#else +#ifdef CONFIG_HIGHMEM +#warning "XXX risk of kmap deadlock on multiple frags..." 
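/*
 * Aside: every transmit path in this file applies the same batching rule
 * before handing data to the socket -- request MSG_MORE whenever other
 * txs are queued on the connection or this call does not cover the bytes
 * still owed on the wire.  A minimal sketch of that rule as a helper
 * (hypothetical name; conn/tx fields as used in the functions above and
 * below):
 */
static inline int
ksocknal_tx_more_coming(ksock_conn_t *conn, ksock_tx_t *tx, int nob)
{
	/* other sends queued, or this batch falls short of the residue */
	return !list_empty(&conn->ksnc_tx_queue) || nob < tx->tx_resid;
}
/* usage: if (ksocknal_tx_more_coming(conn, tx, nob)) msg.msg_flags |= MSG_MORE; */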
+#endif + struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov; + unsigned int niov = tx->tx_nkiov; +#endif + struct msghdr msg = {.msg_flags = MSG_DONTWAIT}; + int i; + + for (nob = i = 0; i < niov; i++) { + scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + + kiov[i].kiov_offset; + nob += scratchiov[i].iov_len = kiov[i].kiov_len; + } + + if (!list_empty(&conn->ksnc_tx_queue) || + nob < tx->tx_resid) + msg.msg_flags |= MSG_MORE; + + rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob); + + for (i = 0; i < niov; i++) + kunmap(kiov[i].kiov_page); + } + return rc; +} + +void +ksocknal_lib_eager_ack(ksock_conn_t *conn) +{ + int opt = 1; + struct socket *sock = conn->ksnc_sock; + + /* Remind the socket to ACK eagerly. If I don't, the socket might + * think I'm about to send something it could piggy-back the ACK + * on, introducing delay in completing zero-copy sends in my + * peer. */ + + kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK, + (char *)&opt, sizeof(opt)); +} + +int +ksocknal_lib_recv_iov(ksock_conn_t *conn) +{ +#if SOCKNAL_SINGLE_FRAG_RX + struct kvec scratch; + struct kvec *scratchiov = &scratch; + unsigned int niov = 1; +#else + struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov; + unsigned int niov = conn->ksnc_rx_niov; +#endif + struct kvec *iov = conn->ksnc_rx_iov; + struct msghdr msg = { + .msg_flags = 0 + }; + int nob; + int i; + int rc; + int fragnob; + int sum; + __u32 saved_csum; + + /* NB we can't trust socket ops to either consume our iovs + * or leave them alone. */ + LASSERT(niov > 0); + + for (nob = i = 0; i < niov; i++) { + scratchiov[i] = iov[i]; + nob += scratchiov[i].iov_len; + } + LASSERT(nob <= conn->ksnc_rx_nob_wanted); + + rc = kernel_recvmsg(conn->ksnc_sock, &msg, + scratchiov, niov, nob, MSG_DONTWAIT); + + saved_csum = 0; + if (conn->ksnc_proto == &ksocknal_protocol_v2x) { + saved_csum = conn->ksnc_msg.ksm_csum; + conn->ksnc_msg.ksm_csum = 0; + } + + if (saved_csum != 0) { + /* accumulate checksum */ + for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) { + LASSERT(i < niov); + + fragnob = iov[i].iov_len; + if (fragnob > sum) + fragnob = sum; + + conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum, + iov[i].iov_base, fragnob); + } + conn->ksnc_msg.ksm_csum = saved_csum; + } + + return rc; +} + +static void +ksocknal_lib_kiov_vunmap(void *addr) +{ + if (addr == NULL) + return; + + vunmap(addr); +} + +static void * +ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov, + struct kvec *iov, struct page **pages) +{ + void *addr; + int nob; + int i; + + if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL) + return NULL; + + LASSERT(niov <= LNET_MAX_IOV); + + if (niov < 2 || + niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags) + return NULL; + + for (nob = i = 0; i < niov; i++) { + if ((kiov[i].kiov_offset != 0 && i > 0) || + (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1)) + return NULL; + + pages[i] = kiov[i].kiov_page; + nob += kiov[i].kiov_len; + } + + addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL); + if (addr == NULL) + return NULL; + + iov->iov_base = addr + kiov[0].kiov_offset; + iov->iov_len = nob; + + return addr; +} + +int +ksocknal_lib_recv_kiov(ksock_conn_t *conn) +{ +#if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK + struct kvec scratch; + struct kvec *scratchiov = &scratch; + struct page **pages = NULL; + unsigned int niov = 1; +#else +#ifdef CONFIG_HIGHMEM +#warning "XXX risk of kmap deadlock on multiple frags..." 
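/*
 * Aside: both receive paths fold the checksum over only the bytes that
 * kernel_recvmsg() actually delivered -- 'sum' is seeded from rc and the
 * final fragment is clamped, so a short read never checksums stale data.
 * The loop shape, as a sketch with hypothetical frag_base()/frag_len()
 * accessors standing in for the kvec/kiov variants used above and below:
 */
	for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
		fragnob = frag_len(i);
		if (fragnob > sum)	/* clamp to what actually arrived */
			fragnob = sum;
		conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
						   frag_base(i), fragnob);
	}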
+#endif + struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov; + struct page **pages = conn->ksnc_scheduler->kss_rx_scratch_pgs; + unsigned int niov = conn->ksnc_rx_nkiov; +#endif + lnet_kiov_t *kiov = conn->ksnc_rx_kiov; + struct msghdr msg = { + .msg_flags = 0 + }; + int nob; + int i; + int rc; + void *base; + void *addr; + int sum; + int fragnob; + int n; + + /* NB we can't trust socket ops to either consume our iovs + * or leave them alone. */ + addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages); + if (addr != NULL) { + nob = scratchiov[0].iov_len; + n = 1; + + } else { + for (nob = i = 0; i < niov; i++) { + nob += scratchiov[i].iov_len = kiov[i].kiov_len; + scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + + kiov[i].kiov_offset; + } + n = niov; + } + + LASSERT(nob <= conn->ksnc_rx_nob_wanted); + + rc = kernel_recvmsg(conn->ksnc_sock, &msg, + (struct kvec *)scratchiov, n, nob, MSG_DONTWAIT); + + if (conn->ksnc_msg.ksm_csum != 0) { + for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) { + LASSERT(i < niov); + + /* Dang! have to kmap again because I have nowhere to + * stash the mapped address. But by doing it while the + * page is still mapped, the kernel just bumps the map + * count and returns me the address it stashed. */ + base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset; + fragnob = kiov[i].kiov_len; + if (fragnob > sum) + fragnob = sum; + + conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum, + base, fragnob); + + kunmap(kiov[i].kiov_page); + } + } + + if (addr != NULL) { + ksocknal_lib_kiov_vunmap(addr); + } else { + for (i = 0; i < niov; i++) + kunmap(kiov[i].kiov_page); + } + + return rc; +} + +void +ksocknal_lib_csum_tx(ksock_tx_t *tx) +{ + int i; + __u32 csum; + void *base; + + LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg); + LASSERT(tx->tx_conn != NULL); + LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x); + + tx->tx_msg.ksm_csum = 0; + + csum = ksocknal_csum(~0, tx->tx_iov[0].iov_base, + tx->tx_iov[0].iov_len); + + if (tx->tx_kiov != NULL) { + for (i = 0; i < tx->tx_nkiov; i++) { + base = kmap(tx->tx_kiov[i].kiov_page) + + tx->tx_kiov[i].kiov_offset; + + csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len); + + kunmap(tx->tx_kiov[i].kiov_page); + } + } else { + for (i = 1; i < tx->tx_niov; i++) + csum = ksocknal_csum(csum, tx->tx_iov[i].iov_base, + tx->tx_iov[i].iov_len); + } + + if (*ksocknal_tunables.ksnd_inject_csum_error) { + csum++; + *ksocknal_tunables.ksnd_inject_csum_error = 0; + } + + tx->tx_msg.ksm_csum = csum; +} + +int +ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle) +{ + struct socket *sock = conn->ksnc_sock; + int len; + int rc; + + rc = ksocknal_connsock_addref(conn); + if (rc != 0) { + LASSERT(conn->ksnc_closing); + *txmem = *rxmem = *nagle = 0; + return -ESHUTDOWN; + } + + rc = lnet_sock_getbuf(sock, txmem, rxmem); + if (rc == 0) { + len = sizeof(*nagle); + rc = kernel_getsockopt(sock, SOL_TCP, TCP_NODELAY, + (char *)nagle, &len); + } + + ksocknal_connsock_decref(conn); + + if (rc == 0) + *nagle = !*nagle; + else + *txmem = *rxmem = *nagle = 0; + + return rc; +} + +int +ksocknal_lib_setup_sock(struct socket *sock) +{ + int rc; + int option; + int keep_idle; + int keep_intvl; + int keep_count; + int do_keepalive; + struct linger linger; + + sock->sk->sk_allocation = GFP_NOFS; + + /* Ensure this socket aborts active sends immediately when we close + * it. 
*/ + + linger.l_onoff = 0; + linger.l_linger = 0; + + rc = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, + (char *)&linger, sizeof(linger)); + if (rc != 0) { + CERROR("Can't set SO_LINGER: %d\n", rc); + return rc; + } + + option = -1; + rc = kernel_setsockopt(sock, SOL_TCP, TCP_LINGER2, + (char *)&option, sizeof(option)); + if (rc != 0) { + CERROR("Can't set SO_LINGER2: %d\n", rc); + return rc; + } + + if (!*ksocknal_tunables.ksnd_nagle) { + option = 1; + + rc = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, + (char *)&option, sizeof(option)); + if (rc != 0) { + CERROR("Can't disable nagle: %d\n", rc); + return rc; + } + } + + rc = lnet_sock_setbuf(sock, *ksocknal_tunables.ksnd_tx_buffer_size, + *ksocknal_tunables.ksnd_rx_buffer_size); + if (rc != 0) { + CERROR("Can't set buffer tx %d, rx %d buffers: %d\n", + *ksocknal_tunables.ksnd_tx_buffer_size, + *ksocknal_tunables.ksnd_rx_buffer_size, rc); + return rc; + } + +/* TCP_BACKOFF_* sockopt tunables unsupported in stock kernels */ + + /* snapshot tunables */ + keep_idle = *ksocknal_tunables.ksnd_keepalive_idle; + keep_count = *ksocknal_tunables.ksnd_keepalive_count; + keep_intvl = *ksocknal_tunables.ksnd_keepalive_intvl; + + do_keepalive = (keep_idle > 0 && keep_count > 0 && keep_intvl > 0); + + option = (do_keepalive ? 1 : 0); + rc = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, + (char *)&option, sizeof(option)); + if (rc != 0) { + CERROR("Can't set SO_KEEPALIVE: %d\n", rc); + return rc; + } + + if (!do_keepalive) + return 0; + + rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE, + (char *)&keep_idle, sizeof(keep_idle)); + if (rc != 0) { + CERROR("Can't set TCP_KEEPIDLE: %d\n", rc); + return rc; + } + + rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL, + (char *)&keep_intvl, sizeof(keep_intvl)); + if (rc != 0) { + CERROR("Can't set TCP_KEEPINTVL: %d\n", rc); + return rc; + } + + rc = kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, + (char *)&keep_count, sizeof(keep_count)); + if (rc != 0) { + CERROR("Can't set TCP_KEEPCNT: %d\n", rc); + return rc; + } + + return 0; +} + +void +ksocknal_lib_push_conn(ksock_conn_t *conn) +{ + struct sock *sk; + struct tcp_sock *tp; + int nonagle; + int val = 1; + int rc; + + rc = ksocknal_connsock_addref(conn); + if (rc != 0) /* being shut down */ + return; + + sk = conn->ksnc_sock->sk; + tp = tcp_sk(sk); + + lock_sock(sk); + nonagle = tp->nonagle; + tp->nonagle = 1; + release_sock(sk); + + rc = kernel_setsockopt(conn->ksnc_sock, SOL_TCP, TCP_NODELAY, + (char *)&val, sizeof(val)); + LASSERT(rc == 0); + + lock_sock(sk); + tp->nonagle = nonagle; + release_sock(sk); + + ksocknal_connsock_decref(conn); +} + +extern void ksocknal_read_callback(ksock_conn_t *conn); +extern void ksocknal_write_callback(ksock_conn_t *conn); +/* + * socket callback in Linux + */ +static void +ksocknal_data_ready(struct sock *sk) +{ + ksock_conn_t *conn; + + /* interleave correctly with closing sockets... */ + LASSERT(!in_irq()); + read_lock(&ksocknal_data.ksnd_global_lock); + + conn = sk->sk_user_data; + if (conn == NULL) { /* raced with ksocknal_terminate_conn */ + LASSERT(sk->sk_data_ready != &ksocknal_data_ready); + sk->sk_data_ready(sk); + } else + ksocknal_read_callback(conn); + + read_unlock(&ksocknal_data.ksnd_global_lock); +} + +static void +ksocknal_write_space(struct sock *sk) +{ + ksock_conn_t *conn; + int wspace; + int min_wpace; + + /* interleave correctly with closing sockets... 
*/ + LASSERT(!in_irq()); + read_lock(&ksocknal_data.ksnd_global_lock); + + conn = sk->sk_user_data; + wspace = sk_stream_wspace(sk); + min_wpace = sk_stream_min_wspace(sk); + + CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n", + sk, wspace, min_wpace, conn, + (conn == NULL) ? "" : (conn->ksnc_tx_ready ? + " ready" : " blocked"), + (conn == NULL) ? "" : (conn->ksnc_tx_scheduled ? + " scheduled" : " idle"), + (conn == NULL) ? "" : (list_empty(&conn->ksnc_tx_queue) ? + " empty" : " queued")); + + if (conn == NULL) { /* raced with ksocknal_terminate_conn */ + LASSERT(sk->sk_write_space != &ksocknal_write_space); + sk->sk_write_space(sk); + + read_unlock(&ksocknal_data.ksnd_global_lock); + return; + } + + if (wspace >= min_wpace) { /* got enough space */ + ksocknal_write_callback(conn); + + /* Clear SOCK_NOSPACE _after_ ksocknal_write_callback so the + * ENOMEM check in ksocknal_transmit is race-free (think about + * it). */ + + clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + } + + read_unlock(&ksocknal_data.ksnd_global_lock); +} + +void +ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn) +{ + conn->ksnc_saved_data_ready = sock->sk->sk_data_ready; + conn->ksnc_saved_write_space = sock->sk->sk_write_space; +} + +void +ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn) +{ + sock->sk->sk_user_data = conn; + sock->sk->sk_data_ready = ksocknal_data_ready; + sock->sk->sk_write_space = ksocknal_write_space; + return; +} + +void +ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn) +{ + /* Remove conn's network callbacks. + * NB I _have_ to restore the callback, rather than storing a noop, + * since the socket could survive past this module being unloaded!! */ + sock->sk->sk_data_ready = conn->ksnc_saved_data_ready; + sock->sk->sk_write_space = conn->ksnc_saved_write_space; + + /* A callback could be in progress already; they hold a read lock + * on ksnd_global_lock (to serialise with me) and NOOP if + * sk_user_data is NULL. */ + sock->sk->sk_user_data = NULL; + + return ; +} + +int +ksocknal_lib_memory_pressure(ksock_conn_t *conn) +{ + int rc = 0; + ksock_sched_t *sched; + + sched = conn->ksnc_scheduler; + spin_lock_bh(&sched->kss_lock); + + if (!test_bit(SOCK_NOSPACE, &conn->ksnc_sock->flags) && + !conn->ksnc_tx_ready) { + /* SOCK_NOSPACE is set when the socket fills + * and cleared in the write_space callback + * (which also sets ksnc_tx_ready). 
If + * SOCK_NOSPACE and ksnc_tx_ready are BOTH + * zero, I didn't fill the socket and + * write_space won't reschedule me, so I + * return -ENOMEM to get my caller to retry + * after a timeout */ + rc = -ENOMEM; + } + + spin_unlock_bh(&sched->kss_lock); + + return rc; +} diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c index 86b88db1c..c3ac67698 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c @@ -145,40 +145,37 @@ ksock_tunables_t ksocknal_tunables; int ksocknal_tunables_init(void) { - /* initialize ksocknal_tunables structure */ - ksocknal_tunables.ksnd_timeout = &sock_timeout; - ksocknal_tunables.ksnd_nscheds = &nscheds; - ksocknal_tunables.ksnd_nconnds = &nconnds; - ksocknal_tunables.ksnd_nconnds_max = &nconnds_max; + ksocknal_tunables.ksnd_timeout = &sock_timeout; + ksocknal_tunables.ksnd_nscheds = &nscheds; + ksocknal_tunables.ksnd_nconnds = &nconnds; + ksocknal_tunables.ksnd_nconnds_max = &nconnds_max; ksocknal_tunables.ksnd_min_reconnectms = &min_reconnectms; ksocknal_tunables.ksnd_max_reconnectms = &max_reconnectms; - ksocknal_tunables.ksnd_eager_ack = &eager_ack; - ksocknal_tunables.ksnd_typed_conns = &typed_conns; - ksocknal_tunables.ksnd_min_bulk = &min_bulk; + ksocknal_tunables.ksnd_eager_ack = &eager_ack; + ksocknal_tunables.ksnd_typed_conns = &typed_conns; + ksocknal_tunables.ksnd_min_bulk = &min_bulk; ksocknal_tunables.ksnd_tx_buffer_size = &tx_buffer_size; ksocknal_tunables.ksnd_rx_buffer_size = &rx_buffer_size; - ksocknal_tunables.ksnd_nagle = &nagle; - ksocknal_tunables.ksnd_round_robin = &round_robin; - ksocknal_tunables.ksnd_keepalive = &keepalive; + ksocknal_tunables.ksnd_nagle = &nagle; + ksocknal_tunables.ksnd_round_robin = &round_robin; + ksocknal_tunables.ksnd_keepalive = &keepalive; ksocknal_tunables.ksnd_keepalive_idle = &keepalive_idle; ksocknal_tunables.ksnd_keepalive_count = &keepalive_count; ksocknal_tunables.ksnd_keepalive_intvl = &keepalive_intvl; - ksocknal_tunables.ksnd_credits = &credits; + ksocknal_tunables.ksnd_credits = &credits; ksocknal_tunables.ksnd_peertxcredits = &peer_credits; ksocknal_tunables.ksnd_peerrtrcredits = &peer_buffer_credits; - ksocknal_tunables.ksnd_peertimeout = &peer_timeout; - ksocknal_tunables.ksnd_enable_csum = &enable_csum; + ksocknal_tunables.ksnd_peertimeout = &peer_timeout; + ksocknal_tunables.ksnd_enable_csum = &enable_csum; ksocknal_tunables.ksnd_inject_csum_error = &inject_csum_error; ksocknal_tunables.ksnd_nonblk_zcack = &nonblk_zcack; ksocknal_tunables.ksnd_zc_min_payload = &zc_min_payload; - ksocknal_tunables.ksnd_zc_recv = &zc_recv; + ksocknal_tunables.ksnd_zc_recv = &zc_recv; ksocknal_tunables.ksnd_zc_recv_min_nfrags = &zc_recv_min_nfrags; - - #if SOCKNAL_VERSION_DEBUG - ksocknal_tunables.ksnd_protocol = &protocol; + ksocknal_tunables.ksnd_protocol = &protocol; #endif if (*ksocknal_tunables.ksnd_zc_min_payload < (2 << 10)) diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c index 8596581f5..986bce4c9 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c @@ -52,7 +52,7 @@ ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg) void ksocknal_next_tx_carrier(ksock_conn_t *conn) { - ksock_tx_t *tx = conn->ksnc_tx_carrier; + ksock_tx_t *tx = conn->ksnc_tx_carrier; /* Called 
holding BH lock: conn->ksnc_scheduler->kss_lock */ LASSERT(!list_empty(&conn->ksnc_tx_queue)); @@ -119,7 +119,7 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn, static ksock_tx_t * ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg) { - ksock_tx_t *tx = conn->ksnc_tx_carrier; + ksock_tx_t *tx = conn->ksnc_tx_carrier; /* * Enqueue tx_msg: @@ -361,10 +361,10 @@ ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk) static int ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote) { - ksock_peer_t *peer = c->ksnc_peer; - ksock_conn_t *conn; - ksock_tx_t *tx; - int rc; + ksock_peer_t *peer = c->ksnc_peer; + ksock_conn_t *conn; + ksock_tx_t *tx; + int rc; read_lock(&ksocknal_data.ksnd_global_lock); @@ -405,11 +405,11 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote) static int ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) { - ksock_peer_t *peer = conn->ksnc_peer; - ksock_tx_t *tx; - ksock_tx_t *tmp; + ksock_peer_t *peer = conn->ksnc_peer; + ksock_tx_t *tx; + ksock_tx_t *tmp; LIST_HEAD(zlist); - int count; + int count; if (cookie1 == 0) cookie1 = cookie2; @@ -452,11 +452,11 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) static int ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello) { - struct socket *sock = conn->ksnc_sock; - lnet_hdr_t *hdr; + struct socket *sock = conn->ksnc_sock; + lnet_hdr_t *hdr; lnet_magicversion_t *hmv; - int rc; - int i; + int rc; + int i; CLASSERT(sizeof(lnet_magicversion_t) == offsetof(lnet_hdr_t, src_nid)); @@ -470,7 +470,7 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello) /* Re-organize V2.x message header to V1.x (lnet_hdr_t) * header and send out */ - hmv->magic = cpu_to_le32 (LNET_PROTO_TCP_MAGIC); + hmv->magic = cpu_to_le32 (LNET_PROTO_TCP_MAGIC); hmv->version_major = cpu_to_le16 (KSOCK_PROTO_V1_MAJOR); hmv->version_minor = cpu_to_le16 (KSOCK_PROTO_V1_MINOR); @@ -488,16 +488,14 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello) LNET_UNLOCK(); } - hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid); - hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid); - hdr->type = cpu_to_le32 (LNET_MSG_HELLO); + hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid); + hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid); + hdr->type = cpu_to_le32 (LNET_MSG_HELLO); hdr->payload_length = cpu_to_le32 (hello->kshm_nips * sizeof(__u32)); hdr->msg.hello.type = cpu_to_le32 (hello->kshm_ctype); hdr->msg.hello.incarnation = cpu_to_le64 (hello->kshm_src_incarnation); - rc = libcfs_sock_write(sock, hdr, sizeof(*hdr), - lnet_acceptor_timeout()); - + rc = lnet_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout()); if (rc != 0) { CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n", rc, &conn->ksnc_ipaddr, conn->ksnc_port); @@ -511,9 +509,9 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello) hello->kshm_ips[i] = __cpu_to_le32 (hello->kshm_ips[i]); } - rc = libcfs_sock_write(sock, hello->kshm_ips, - hello->kshm_nips * sizeof(__u32), - lnet_acceptor_timeout()); + rc = lnet_sock_write(sock, hello->kshm_ips, + hello->kshm_nips * sizeof(__u32), + lnet_acceptor_timeout()); if (rc != 0) { CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n", rc, hello->kshm_nips, @@ -529,7 +527,7 @@ static int ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello) { struct socket *sock = conn->ksnc_sock; - int rc; + int rc; hello->kshm_magic = LNET_PROTO_MAGIC; hello->kshm_version = conn->ksnc_proto->pro_version; @@ 
-544,9 +542,8 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello) LNET_UNLOCK(); } - rc = libcfs_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips), - lnet_acceptor_timeout()); - + rc = lnet_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips), + lnet_acceptor_timeout()); if (rc != 0) { CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n", rc, &conn->ksnc_ipaddr, conn->ksnc_port); @@ -556,9 +553,9 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello) if (hello->kshm_nips == 0) return 0; - rc = libcfs_sock_write(sock, hello->kshm_ips, - hello->kshm_nips * sizeof(__u32), - lnet_acceptor_timeout()); + rc = lnet_sock_write(sock, hello->kshm_ips, + hello->kshm_nips * sizeof(__u32), + lnet_acceptor_timeout()); if (rc != 0) { CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n", rc, hello->kshm_nips, @@ -572,10 +569,10 @@ static int ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout) { - struct socket *sock = conn->ksnc_sock; - lnet_hdr_t *hdr; - int rc; - int i; + struct socket *sock = conn->ksnc_sock; + lnet_hdr_t *hdr; + int rc; + int i; LIBCFS_ALLOC(hdr, sizeof(*hdr)); if (hdr == NULL) { @@ -583,9 +580,9 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, return -ENOMEM; } - rc = libcfs_sock_read(sock, &hdr->src_nid, - sizeof(*hdr) - offsetof(lnet_hdr_t, src_nid), - timeout); + rc = lnet_sock_read(sock, &hdr->src_nid, + sizeof(*hdr) - offsetof(lnet_hdr_t, src_nid), + timeout); if (rc != 0) { CERROR("Error %d reading rest of HELLO hdr from %pI4h\n", rc, &conn->ksnc_ipaddr); @@ -602,12 +599,12 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, goto out; } - hello->kshm_src_nid = le64_to_cpu(hdr->src_nid); - hello->kshm_src_pid = le32_to_cpu(hdr->src_pid); + hello->kshm_src_nid = le64_to_cpu(hdr->src_nid); + hello->kshm_src_pid = le32_to_cpu(hdr->src_pid); hello->kshm_src_incarnation = le64_to_cpu(hdr->msg.hello.incarnation); - hello->kshm_ctype = le32_to_cpu(hdr->msg.hello.type); - hello->kshm_nips = le32_to_cpu(hdr->payload_length) / - sizeof(__u32); + hello->kshm_ctype = le32_to_cpu(hdr->msg.hello.type); + hello->kshm_nips = le32_to_cpu(hdr->payload_length) / + sizeof(__u32); if (hello->kshm_nips > LNET_MAX_INTERFACES) { CERROR("Bad nips %d from ip %pI4h\n", @@ -619,8 +616,8 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, if (hello->kshm_nips == 0) goto out; - rc = libcfs_sock_read(sock, hello->kshm_ips, - hello->kshm_nips * sizeof(__u32), timeout); + rc = lnet_sock_read(sock, hello->kshm_ips, + hello->kshm_nips * sizeof(__u32), timeout); if (rc != 0) { CERROR("Error %d reading IPs from ip %pI4h\n", rc, &conn->ksnc_ipaddr); @@ -647,19 +644,19 @@ out: static int ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout) { - struct socket *sock = conn->ksnc_sock; - int rc; - int i; + struct socket *sock = conn->ksnc_sock; + int rc; + int i; if (hello->kshm_magic == LNET_PROTO_MAGIC) conn->ksnc_flip = 0; else conn->ksnc_flip = 1; - rc = libcfs_sock_read(sock, &hello->kshm_src_nid, - offsetof(ksock_hello_msg_t, kshm_ips) - - offsetof(ksock_hello_msg_t, kshm_src_nid), - timeout); + rc = lnet_sock_read(sock, &hello->kshm_src_nid, + offsetof(ksock_hello_msg_t, kshm_ips) - + offsetof(ksock_hello_msg_t, kshm_src_nid), + timeout); if (rc != 0) { CERROR("Error %d reading HELLO from %pI4h\n", rc, &conn->ksnc_ipaddr); @@ -687,8 +684,8 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout if 
(hello->kshm_nips == 0) return 0; - rc = libcfs_sock_read(sock, hello->kshm_ips, - hello->kshm_nips * sizeof(__u32), timeout); + rc = lnet_sock_read(sock, hello->kshm_ips, + hello->kshm_nips * sizeof(__u32), timeout); if (rc != 0) { CERROR("Error %d reading IPs from ip %pI4h\n", rc, &conn->ksnc_ipaddr); @@ -746,9 +743,9 @@ ksocknal_pack_msg_v2(ksock_tx_t *tx) static void ksocknal_unpack_msg_v1(ksock_msg_t *msg) { - msg->ksm_csum = 0; - msg->ksm_type = KSOCK_MSG_LNET; - msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0; + msg->ksm_csum = 0; + msg->ksm_type = KSOCK_MSG_LNET; + msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0; } static void @@ -758,40 +755,40 @@ ksocknal_unpack_msg_v2(ksock_msg_t *msg) } ksock_proto_t ksocknal_protocol_v1x = { - .pro_version = KSOCK_PROTO_V1, - .pro_send_hello = ksocknal_send_hello_v1, - .pro_recv_hello = ksocknal_recv_hello_v1, - .pro_pack = ksocknal_pack_msg_v1, - .pro_unpack = ksocknal_unpack_msg_v1, - .pro_queue_tx_msg = ksocknal_queue_tx_msg_v1, - .pro_handle_zcreq = NULL, - .pro_handle_zcack = NULL, - .pro_queue_tx_zcack = NULL, - .pro_match_tx = ksocknal_match_tx + .pro_version = KSOCK_PROTO_V1, + .pro_send_hello = ksocknal_send_hello_v1, + .pro_recv_hello = ksocknal_recv_hello_v1, + .pro_pack = ksocknal_pack_msg_v1, + .pro_unpack = ksocknal_unpack_msg_v1, + .pro_queue_tx_msg = ksocknal_queue_tx_msg_v1, + .pro_handle_zcreq = NULL, + .pro_handle_zcack = NULL, + .pro_queue_tx_zcack = NULL, + .pro_match_tx = ksocknal_match_tx }; ksock_proto_t ksocknal_protocol_v2x = { - .pro_version = KSOCK_PROTO_V2, - .pro_send_hello = ksocknal_send_hello_v2, - .pro_recv_hello = ksocknal_recv_hello_v2, - .pro_pack = ksocknal_pack_msg_v2, - .pro_unpack = ksocknal_unpack_msg_v2, - .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2, - .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v2, - .pro_handle_zcreq = ksocknal_handle_zcreq, - .pro_handle_zcack = ksocknal_handle_zcack, - .pro_match_tx = ksocknal_match_tx + .pro_version = KSOCK_PROTO_V2, + .pro_send_hello = ksocknal_send_hello_v2, + .pro_recv_hello = ksocknal_recv_hello_v2, + .pro_pack = ksocknal_pack_msg_v2, + .pro_unpack = ksocknal_unpack_msg_v2, + .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2, + .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v2, + .pro_handle_zcreq = ksocknal_handle_zcreq, + .pro_handle_zcack = ksocknal_handle_zcack, + .pro_match_tx = ksocknal_match_tx }; ksock_proto_t ksocknal_protocol_v3x = { - .pro_version = KSOCK_PROTO_V3, - .pro_send_hello = ksocknal_send_hello_v2, - .pro_recv_hello = ksocknal_recv_hello_v2, - .pro_pack = ksocknal_pack_msg_v2, - .pro_unpack = ksocknal_unpack_msg_v2, - .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2, - .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v3, - .pro_handle_zcreq = ksocknal_handle_zcreq, - .pro_handle_zcack = ksocknal_handle_zcack, - .pro_match_tx = ksocknal_match_tx_v3 + .pro_version = KSOCK_PROTO_V3, + .pro_send_hello = ksocknal_send_hello_v2, + .pro_recv_hello = ksocknal_recv_hello_v2, + .pro_pack = ksocknal_pack_msg_v2, + .pro_unpack = ksocknal_unpack_msg_v2, + .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2, + .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v3, + .pro_handle_zcreq = ksocknal_handle_zcreq, + .pro_handle_zcack = ksocknal_handle_zcack, + .pro_match_tx = ksocknal_match_tx_v3 }; diff --git a/drivers/staging/lustre/lnet/lnet/Makefile b/drivers/staging/lustre/lnet/lnet/Makefile index 336b8ea4f..52492fb10 100644 --- a/drivers/staging/lustre/lnet/lnet/Makefile +++ b/drivers/staging/lustre/lnet/lnet/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_LNET) += lnet.o 
-lnet-y := api-ni.o config.o lib-me.o lib-msg.o lib-eq.o \ - lib-md.o lib-ptl.o lib-move.o module.o lo.o router.o \ - router_proc.o acceptor.o peer.o +lnet-y := api-ni.o config.o \ + lib-me.o lib-msg.o lib-eq.o lib-md.o lib-ptl.o \ + lib-socket.o lib-move.o module.o lo.o \ + router.o router_proc.o acceptor.o peer.o diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c index 72fd1bf70..99f8396f3 100644 --- a/drivers/staging/lustre/lnet/lnet/acceptor.c +++ b/drivers/staging/lustre/lnet/lnet/acceptor.c @@ -35,9 +35,9 @@ */ #define DEBUG_SUBSYSTEM S_LNET +#include #include "../../include/linux/lnet/lib-lnet.h" - static int accept_port = 988; static int accept_backlog = 127; static int accept_timeout = 5; @@ -143,10 +143,10 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, __u32 local_ip, __u32 peer_ip, int peer_port) { lnet_acceptor_connreq_t cr; - struct socket *sock; - int rc; - int port; - int fatal; + struct socket *sock; + int rc; + int port; + int fatal; CLASSERT(sizeof(cr) <= 16); /* not too big to be on the stack */ @@ -155,9 +155,8 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, --port) { /* Iterate through reserved ports. */ - rc = libcfs_sock_connect(&sock, &fatal, - local_ip, port, - peer_ip, peer_port); + rc = lnet_sock_connect(&sock, &fatal, local_ip, port, peer_ip, + peer_port); if (rc != 0) { if (fatal) goto failed; @@ -184,8 +183,7 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, lnet_net_unlock(LNET_LOCK_EX); } - rc = libcfs_sock_write(sock, &cr, sizeof(cr), - accept_timeout); + rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout); if (rc != 0) goto failed_sock; @@ -197,7 +195,7 @@ lnet_connect(struct socket **sockp, lnet_nid_t peer_nid, goto failed; failed_sock: - libcfs_sock_release(sock); + sock_release(sock); failed: lnet_connect_console_error(rc, peer_nid, peer_ip, peer_port); return rc; @@ -211,16 +209,16 @@ static int lnet_accept(struct socket *sock, __u32 magic) { lnet_acceptor_connreq_t cr; - __u32 peer_ip; - int peer_port; - int rc; - int flip; - lnet_ni_t *ni; - char *str; + __u32 peer_ip; + int peer_port; + int rc; + int flip; + lnet_ni_t *ni; + char *str; LASSERT(sizeof(cr) <= 16); /* not too big for the stack */ - rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port); + rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port); LASSERT(rc == 0); /* we succeeded before */ if (!lnet_accept_magic(magic, LNET_PROTO_ACCEPTOR_MAGIC)) { @@ -234,8 +232,8 @@ lnet_accept(struct socket *sock, __u32 magic) memset(&cr, 0, sizeof(cr)); cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC; cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION; - rc = libcfs_sock_write(sock, &cr, sizeof(cr), - accept_timeout); + rc = lnet_sock_write(sock, &cr, sizeof(cr), + accept_timeout); if (rc != 0) CERROR("Error sending magic+version in response to LNET magic from %pI4h: %d\n", @@ -245,8 +243,6 @@ lnet_accept(struct socket *sock, __u32 magic) if (magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) str = "'old' socknal/tcpnal"; - else if (lnet_accept_magic(magic, LNET_PROTO_RA_MAGIC)) - str = "'old' ranal"; else str = "unrecognised"; @@ -257,9 +253,8 @@ lnet_accept(struct socket *sock, __u32 magic) flip = (magic != LNET_PROTO_ACCEPTOR_MAGIC); - rc = libcfs_sock_read(sock, &cr.acr_version, - sizeof(cr.acr_version), - accept_timeout); + rc = lnet_sock_read(sock, &cr.acr_version, sizeof(cr.acr_version), + accept_timeout); if (rc != 0) { CERROR("Error %d reading connection request version from %pI4h\n", rc, &peer_ip); @@ -280,19 +275,17 
@@ lnet_accept(struct socket *sock, __u32 magic) cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC; cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION; - rc = libcfs_sock_write(sock, &cr, sizeof(cr), - accept_timeout); - + rc = lnet_sock_write(sock, &cr, sizeof(cr), accept_timeout); if (rc != 0) CERROR("Error sending magic+version in response to version %d from %pI4h: %d\n", peer_version, &peer_ip, rc); return -EPROTO; } - rc = libcfs_sock_read(sock, &cr.acr_nid, - sizeof(cr) - - offsetof(lnet_acceptor_connreq_t, acr_nid), - accept_timeout); + rc = lnet_sock_read(sock, &cr.acr_nid, + sizeof(cr) - + offsetof(lnet_acceptor_connreq_t, acr_nid), + accept_timeout); if (rc != 0) { CERROR("Error %d reading connection request from %pI4h\n", rc, &peer_ip); @@ -333,18 +326,18 @@ static int lnet_acceptor(void *arg) { struct socket *newsock; - int rc; - __u32 magic; - __u32 peer_ip; - int peer_port; - int secure = (int)((long_ptr_t)arg); + int rc; + __u32 magic; + __u32 peer_ip; + int peer_port; + int secure = (int)((long_ptr_t)arg); LASSERT(lnet_acceptor_state.pta_sock == NULL); cfs_block_allsigs(); - rc = libcfs_sock_listen(&lnet_acceptor_state.pta_sock, - 0, accept_port, accept_backlog); + rc = lnet_sock_listen(&lnet_acceptor_state.pta_sock, 0, accept_port, + accept_backlog); if (rc != 0) { if (rc == -EADDRINUSE) LCONSOLE_ERROR_MSG(0x122, "Can't start acceptor on port %d: port already in use\n", @@ -367,7 +360,7 @@ lnet_acceptor(void *arg) while (!lnet_acceptor_state.pta_shutdown) { - rc = libcfs_sock_accept(&newsock, lnet_acceptor_state.pta_sock); + rc = lnet_sock_accept(&newsock, lnet_acceptor_state.pta_sock); if (rc != 0) { if (rc != -EAGAIN) { CWARN("Accept error %d: pausing...\n", rc); @@ -377,13 +370,13 @@ lnet_acceptor(void *arg) continue; } - /* maybe we're waken up with libcfs_sock_abort_accept() */ + /* maybe the LNet acceptor thread has been woken */ if (lnet_acceptor_state.pta_shutdown) { - libcfs_sock_release(newsock); + sock_release(newsock); break; } - rc = libcfs_sock_getaddr(newsock, 1, &peer_ip, &peer_port); + rc = lnet_sock_getaddr(newsock, 1, &peer_ip, &peer_port); if (rc != 0) { CERROR("Can't determine new connection's address\n"); goto failed; @@ -395,8 +388,8 @@ lnet_acceptor(void *arg) goto failed; } - rc = libcfs_sock_read(newsock, &magic, sizeof(magic), - accept_timeout); + rc = lnet_sock_read(newsock, &magic, sizeof(magic), + accept_timeout); if (rc != 0) { CERROR("Error %d reading connection request from %pI4h\n", rc, &peer_ip); @@ -410,10 +403,10 @@ lnet_acceptor(void *arg) continue; failed: - libcfs_sock_release(newsock); + sock_release(newsock); } - libcfs_sock_release(lnet_acceptor_state.pta_sock); + sock_release(lnet_acceptor_state.pta_sock); lnet_acceptor_state.pta_sock = NULL; CDEBUG(D_NET, "Acceptor stopping\n"); @@ -444,7 +437,7 @@ accept2secure(const char *acc, long *sec) int lnet_acceptor_start(void) { - int rc; + int rc; long rc2; long secure; @@ -493,7 +486,7 @@ lnet_acceptor_stop(void) return; lnet_acceptor_state.pta_shutdown = 1; - libcfs_sock_abort_accept(lnet_acceptor_state.pta_sock); + wake_up_all(sk_sleep(lnet_acceptor_state.pta_sock->sk)); /* block until acceptor signals exit */ wait_for_completion(&lnet_acceptor_state.pta_signal); diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c index 4a14e5109..d14fe70a5 100644 --- a/drivers/staging/lustre/lnet/lnet/api-ni.c +++ b/drivers/staging/lustre/lnet/lnet/api-ni.c @@ -35,13 +35,14 @@ */ #define DEBUG_SUBSYSTEM S_LNET -#include "../../include/linux/lnet/lib-lnet.h" #include 
#include +#include "../../include/linux/lnet/lib-lnet.h" + #define D_LNI D_CONSOLE -lnet_t the_lnet; /* THE state of the network */ +lnet_t the_lnet; /* THE state of the network */ EXPORT_SYMBOL(the_lnet); @@ -70,8 +71,8 @@ lnet_get_routes(void) static char * lnet_get_networks(void) { - char *nets; - int rc; + char *nets; + int rc; if (*networks != 0 && *ip2nets != 0) { LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or 'ip2nets' but not both at once\n"); @@ -98,17 +99,11 @@ lnet_init_locks(void) mutex_init(&the_lnet.ln_api_mutex); } -static void -lnet_fini_locks(void) -{ -} - - static int lnet_create_remote_nets_table(void) { - int i; - struct list_head *hash; + int i; + struct list_head *hash; LASSERT(the_lnet.ln_remote_nets_hash == NULL); LASSERT(the_lnet.ln_remote_nets_hbits > 0); @@ -153,8 +148,6 @@ lnet_destroy_locks(void) cfs_percpt_lock_free(the_lnet.ln_net_lock); the_lnet.ln_net_lock = NULL; } - - lnet_fini_locks(); } static int @@ -273,8 +266,8 @@ static void lnet_assert_wire_constants(void) static lnd_t * lnet_find_lnd_by_type(int type) { - lnd_t *lnd; - struct list_head *tmp; + lnd_t *lnd; + struct list_head *tmp; /* holding lnd mutex */ list_for_each(tmp, &the_lnet.ln_lnds) { @@ -290,7 +283,7 @@ lnet_find_lnd_by_type(int type) void lnet_register_lnd(lnd_t *lnd) { - LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex); + mutex_lock(&the_lnet.ln_lnd_mutex); LASSERT(the_lnet.ln_init); LASSERT(libcfs_isknown_lnd(lnd->lnd_type)); @@ -301,14 +294,14 @@ lnet_register_lnd(lnd_t *lnd) CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type)); - LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex); + mutex_unlock(&the_lnet.ln_lnd_mutex); } EXPORT_SYMBOL(lnet_register_lnd); void lnet_unregister_lnd(lnd_t *lnd) { - LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex); + mutex_lock(&the_lnet.ln_lnd_mutex); LASSERT(the_lnet.ln_init); LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd); @@ -317,7 +310,7 @@ lnet_unregister_lnd(lnd_t *lnd) list_del(&lnd->lnd_list); CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type)); - LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex); + mutex_unlock(&the_lnet.ln_lnd_mutex); } EXPORT_SYMBOL(lnet_unregister_lnd); @@ -325,7 +318,7 @@ void lnet_counters_get(lnet_counters_t *counters) { lnet_counters_t *ctr; - int i; + int i; memset(counters, 0, sizeof(*counters)); @@ -353,7 +346,7 @@ void lnet_counters_reset(void) { lnet_counters_t *counters; - int i; + int i; lnet_net_lock(LNET_LOCK_EX); @@ -364,56 +357,6 @@ lnet_counters_reset(void) } EXPORT_SYMBOL(lnet_counters_reset); -#ifdef LNET_USE_LIB_FREELIST - -int -lnet_freelist_init(lnet_freelist_t *fl, int n, int size) -{ - char *space; - - LASSERT(n > 0); - - size += offsetof(lnet_freeobj_t, fo_contents); - - LIBCFS_ALLOC(space, n * size); - if (space == NULL) - return -ENOMEM; - - INIT_LIST_HEAD(&fl->fl_list); - fl->fl_objs = space; - fl->fl_nobjs = n; - fl->fl_objsize = size; - - do { - memset(space, 0, size); - list_add((struct list_head *)space, &fl->fl_list); - space += size; - } while (--n != 0); - - return 0; -} - -void -lnet_freelist_fini(lnet_freelist_t *fl) -{ - struct list_head *el; - int count; - - if (fl->fl_nobjs == 0) - return; - - count = 0; - for (el = fl->fl_list.next; el != &fl->fl_list; el = el->next) - count++; - - LASSERT(count == fl->fl_nobjs); - - LIBCFS_FREE(fl->fl_objs, fl->fl_nobjs * fl->fl_objsize); - memset(fl, 0, sizeof(*fl)); -} - -#endif /* LNET_USE_LIB_FREELIST */ - static __u64 lnet_create_interface_cookie(void) { @@ -441,7 +384,7 @@ lnet_res_type2str(int type) static void 
lnet_res_container_cleanup(struct lnet_res_container *rec) { - int count = 0; + int count = 0; if (rec->rec_type == 0) /* not set yet, it's uninitialized */ return; @@ -470,9 +413,6 @@ lnet_res_container_cleanup(struct lnet_res_container *rec) count, lnet_res_type2str(rec->rec_type)); } -#ifdef LNET_USE_LIB_FREELIST - lnet_freelist_fini(&rec->rec_freelist); -#endif if (rec->rec_lh_hash != NULL) { LIBCFS_FREE(rec->rec_lh_hash, LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0])); @@ -483,23 +423,15 @@ lnet_res_container_cleanup(struct lnet_res_container *rec) } static int -lnet_res_container_setup(struct lnet_res_container *rec, - int cpt, int type, int objnum, int objsz) +lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type) { - int rc = 0; - int i; + int rc = 0; + int i; LASSERT(rec->rec_type == 0); rec->rec_type = type; INIT_LIST_HEAD(&rec->rec_active); - -#ifdef LNET_USE_LIB_FREELIST - memset(&rec->rec_freelist, 0, sizeof(rec->rec_freelist)); - rc = lnet_freelist_init(&rec->rec_freelist, objnum, objsz); - if (rc != 0) - goto out; -#endif rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type; /* Arbitrary choice of hash table size */ @@ -525,8 +457,8 @@ out: static void lnet_res_containers_destroy(struct lnet_res_container **recs) { - struct lnet_res_container *rec; - int i; + struct lnet_res_container *rec; + int i; cfs_percpt_for_each(rec, i, recs) lnet_res_container_cleanup(rec); @@ -535,12 +467,12 @@ lnet_res_containers_destroy(struct lnet_res_container **recs) } static struct lnet_res_container ** -lnet_res_containers_create(int type, int objnum, int objsz) +lnet_res_containers_create(int type) { - struct lnet_res_container **recs; - struct lnet_res_container *rec; - int rc; - int i; + struct lnet_res_container **recs; + struct lnet_res_container *rec; + int rc; + int i; recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec)); if (recs == NULL) { @@ -550,7 +482,7 @@ lnet_res_containers_create(int type, int objnum, int objsz) } cfs_percpt_for_each(rec, i, recs) { - rc = lnet_res_container_setup(rec, i, type, objnum, objsz); + rc = lnet_res_container_setup(rec, i, type); if (rc != 0) { lnet_res_containers_destroy(recs); return NULL; @@ -564,9 +496,9 @@ lnet_libhandle_t * lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie) { /* ALWAYS called with lnet_res_lock held */ - struct list_head *head; - lnet_libhandle_t *lh; - unsigned int hash; + struct list_head *head; + lnet_libhandle_t *lh; + unsigned int hash; if ((cookie & LNET_COOKIE_MASK) != rec->rec_type) return NULL; @@ -586,8 +518,8 @@ void lnet_res_lh_initialize(struct lnet_res_container *rec, lnet_libhandle_t *lh) { /* ALWAYS called with lnet_res_lock held */ - unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS; - unsigned int hash; + unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS; + unsigned int hash; lh->lh_cookie = rec->rec_lh_cookie; rec->rec_lh_cookie += 1 << ibits; @@ -605,7 +537,7 @@ lnet_prepare(lnet_pid_t requested_pid) { /* Prepare to bring up the network */ struct lnet_res_container **recs; - int rc = 0; + int rc = 0; LASSERT(the_lnet.ln_refcount == 0); @@ -643,13 +575,11 @@ lnet_prepare(lnet_pid_t requested_pid) goto failed; rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0, - LNET_COOKIE_TYPE_EQ, LNET_FL_MAX_EQS, - sizeof(lnet_eq_t)); + LNET_COOKIE_TYPE_EQ); if (rc != 0) goto failed; - recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME, LNET_FL_MAX_MES, - sizeof(lnet_me_t)); + recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME); if (recs == NULL) { 
rc = -ENOMEM; goto failed; @@ -657,8 +587,7 @@ lnet_prepare(lnet_pid_t requested_pid) the_lnet.ln_me_containers = recs; - recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD, LNET_FL_MAX_MDS, - sizeof(lnet_libmd_t)); + recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD); if (recs == NULL) { rc = -ENOMEM; goto failed; @@ -725,8 +654,8 @@ lnet_unprepare(void) lnet_ni_t * lnet_net2ni_locked(__u32 net, int cpt) { - struct list_head *tmp; - lnet_ni_t *ni; + struct list_head *tmp; + lnet_ni_t *ni; LASSERT(cpt != LNET_LOCK_EX); @@ -758,8 +687,8 @@ EXPORT_SYMBOL(lnet_net2ni); static unsigned int lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number) { - __u64 key = nid; - unsigned int val; + __u64 key = nid; + unsigned int val; LASSERT(number >= 1 && number <= LNET_CPT_NUMBER); @@ -801,8 +730,8 @@ lnet_cpt_of_nid_locked(lnet_nid_t nid) int lnet_cpt_of_nid(lnet_nid_t nid) { - int cpt; - int cpt2; + int cpt; + int cpt2; if (LNET_CPT_NUMBER == 1) return 0; /* the only one */ @@ -821,8 +750,8 @@ EXPORT_SYMBOL(lnet_cpt_of_nid); int lnet_islocalnet(__u32 net) { - struct lnet_ni *ni; - int cpt; + struct lnet_ni *ni; + int cpt; cpt = lnet_net_lock_current(); @@ -838,8 +767,8 @@ lnet_islocalnet(__u32 net) lnet_ni_t * lnet_nid2ni_locked(lnet_nid_t nid, int cpt) { - struct lnet_ni *ni; - struct list_head *tmp; + struct lnet_ni *ni; + struct list_head *tmp; LASSERT(cpt != LNET_LOCK_EX); @@ -858,8 +787,8 @@ lnet_nid2ni_locked(lnet_nid_t nid, int cpt) int lnet_islocalnid(lnet_nid_t nid) { - struct lnet_ni *ni; - int cpt; + struct lnet_ni *ni; + int cpt; cpt = lnet_net_lock_current(); ni = lnet_nid2ni_locked(nid, cpt); @@ -874,10 +803,10 @@ int lnet_count_acceptor_nis(void) { /* Return the # of NIs that need the acceptor. */ - int count = 0; - struct list_head *tmp; - struct lnet_ni *ni; - int cpt; + int count = 0; + struct list_head *tmp; + struct lnet_ni *ni; + int cpt; cpt = lnet_net_lock_current(); list_for_each(tmp, &the_lnet.ln_nis) { @@ -895,7 +824,7 @@ lnet_count_acceptor_nis(void) static int lnet_ni_tq_credits(lnet_ni_t *ni) { - int credits; + int credits; LASSERT(ni->ni_ncpts >= 1); @@ -912,9 +841,9 @@ lnet_ni_tq_credits(lnet_ni_t *ni) static void lnet_shutdown_lndnis(void) { - int i; - int islo; - lnet_ni_t *ni; + int i; + int islo; + lnet_ni_t *ni; /* NB called holding the global mutex */ @@ -968,8 +897,8 @@ lnet_shutdown_lndnis(void) * and shut them down in guaranteed thread context */ i = 2; while (!list_empty(&the_lnet.ln_nis_zombie)) { - int *ref; - int j; + int *ref; + int j; ni = list_entry(the_lnet.ln_nis_zombie.next, lnet_ni_t, ni_list); @@ -1029,15 +958,15 @@ lnet_shutdown_lndnis(void) static int lnet_startup_lndnis(void) { - lnd_t *lnd; - struct lnet_ni *ni; - struct lnet_tx_queue *tq; - struct list_head nilist; - int i; - int rc = 0; - int lnd_type; - int nicount = 0; - char *nets = lnet_get_networks(); + lnd_t *lnd; + struct lnet_ni *ni; + struct lnet_tx_queue *tq; + struct list_head nilist; + int i; + int rc = 0; + int lnd_type; + int nicount = 0; + char *nets = lnet_get_networks(); INIT_LIST_HEAD(&nilist); @@ -1063,18 +992,18 @@ lnet_startup_lndnis(void) goto failed; } - LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex); + mutex_lock(&the_lnet.ln_lnd_mutex); lnd = lnet_find_lnd_by_type(lnd_type); if (lnd == NULL) { - LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex); + mutex_unlock(&the_lnet.ln_lnd_mutex); rc = request_module("%s", libcfs_lnd2modname(lnd_type)); - LNET_MUTEX_LOCK(&the_lnet.ln_lnd_mutex); + mutex_lock(&the_lnet.ln_lnd_mutex); lnd = lnet_find_lnd_by_type(lnd_type); if (lnd == NULL) { 
- LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex); + mutex_unlock(&the_lnet.ln_lnd_mutex); CERROR("Can't load LND %s, module %s, rc=%d\n", libcfs_lnd2str(lnd_type), libcfs_lnd2modname(lnd_type), rc); @@ -1090,7 +1019,7 @@ lnet_startup_lndnis(void) rc = (lnd->lnd_startup)(ni); - LNET_MUTEX_UNLOCK(&the_lnet.ln_lnd_mutex); + mutex_unlock(&the_lnet.ln_lnd_mutex); if (rc != 0) { LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n", @@ -1172,16 +1101,16 @@ lnet_startup_lndnis(void) * Initialize LNet library. * * Only userspace program needs to call this function - it's automatically - * called in the kernel at module loading time. Caller has to call LNetFini() - * after a call to LNetInit(), if and only if the latter returned 0. It must + * called in the kernel at module loading time. Caller has to call lnet_fini() + * after a call to lnet_init(), if and only if the latter returned 0. It must * be called exactly once. * * \return 0 on success, and -ve on failures. */ int -LNetInit(void) +lnet_init(void) { - int rc; + int rc; lnet_assert_wire_constants(); LASSERT(!the_lnet.ln_init); @@ -1232,7 +1161,7 @@ LNetInit(void) lnet_register_lnd(&the_lolnd); return 0; } -EXPORT_SYMBOL(LNetInit); +EXPORT_SYMBOL(lnet_init); /** * Finalize LNet library. @@ -1240,11 +1169,11 @@ EXPORT_SYMBOL(LNetInit); * Only userspace program needs to call this function. It can be called * at most once. * - * \pre LNetInit() called with success. + * \pre lnet_init() called with success. * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls. */ void -LNetFini(void) +lnet_fini(void) { LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount == 0); @@ -1256,12 +1185,12 @@ LNetFini(void) the_lnet.ln_init = 0; } -EXPORT_SYMBOL(LNetFini); +EXPORT_SYMBOL(lnet_fini); /** * Set LNet PID and start LNet interfaces, routing, and forwarding. * - * Userspace program should call this after a successful call to LNetInit(). + * Userspace program should call this after a successful call to lnet_init(). * Users must call this function at least once before any other functions. * For each successful call there must be a corresponding call to * LNetNIFini(). 
For subsequent calls to LNetNIInit(), \a requested_pid is @@ -1277,10 +1206,10 @@ EXPORT_SYMBOL(LNetFini); int LNetNIInit(lnet_pid_t requested_pid) { - int im_a_router = 0; - int rc; + int im_a_router = 0; + int rc; - LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex); + mutex_lock(&the_lnet.ln_api_mutex); LASSERT(the_lnet.ln_init); CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount); @@ -1290,8 +1219,6 @@ LNetNIInit(lnet_pid_t requested_pid) goto out; } - lnet_get_tunables(); - if (requested_pid == LNET_PID_ANY) { /* Don't instantiate LNET just for me */ rc = -ENETDOWN; @@ -1351,7 +1278,7 @@ LNetNIInit(lnet_pid_t requested_pid) failed0: LASSERT(rc < 0); out: - LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex); + mutex_unlock(&the_lnet.ln_api_mutex); return rc; } EXPORT_SYMBOL(LNetNIInit); @@ -1368,7 +1295,7 @@ EXPORT_SYMBOL(LNetNIInit); int LNetNIFini(void) { - LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex); + mutex_lock(&the_lnet.ln_api_mutex); LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); @@ -1391,7 +1318,7 @@ LNetNIFini(void) lnet_unprepare(); } - LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex); + mutex_unlock(&the_lnet.ln_api_mutex); return 0; } EXPORT_SYMBOL(LNetNIFini); @@ -1413,9 +1340,9 @@ int LNetCtl(unsigned int cmd, void *arg) { struct libcfs_ioctl_data *data = arg; - lnet_process_id_t id = {0}; - lnet_ni_t *ni; - int rc; + lnet_process_id_t id = {0}; + lnet_ni_t *ni; + int rc; LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); @@ -1531,10 +1458,10 @@ EXPORT_SYMBOL(LNetCtl); int LNetGetId(unsigned int index, lnet_process_id_t *id) { - struct lnet_ni *ni; - struct list_head *tmp; - int cpt; - int rc = -ENOENT; + struct lnet_ni *ni; + struct list_head *tmp; + int cpt; + int rc = -ENOENT; LASSERT(the_lnet.ln_init); @@ -1575,11 +1502,11 @@ EXPORT_SYMBOL(LNetSnprintHandle); static int lnet_create_ping_info(void) { - int i; - int n; - int rc; - unsigned int infosz; - lnet_ni_t *ni; + int i; + int n; + int rc; + unsigned int infosz; + lnet_ni_t *ni; lnet_process_id_t id; lnet_ping_info_t *pinfo; @@ -1633,7 +1560,7 @@ lnet_create_ping_info(void) static void lnet_destroy_ping_info(void) { - struct lnet_ni *ni; + struct lnet_ni *ni; lnet_net_lock(0); @@ -1654,12 +1581,12 @@ lnet_destroy_ping_info(void) int lnet_ping_target_init(void) { - lnet_md_t md = { NULL }; - lnet_handle_me_t meh; + lnet_md_t md = { NULL }; + lnet_handle_me_t meh; lnet_process_id_t id; - int rc; - int rc2; - int infosz; + int rc; + int rc2; + int infosz; rc = lnet_create_ping_info(); if (rc != 0) @@ -1722,11 +1649,11 @@ lnet_ping_target_init(void) void lnet_ping_target_fini(void) { - lnet_event_t event; - int rc; - int which; - int timeout_ms = 1000; - sigset_t blocked = cfs_block_allsigs(); + lnet_event_t event; + int rc; + int which; + int timeout_ms = 1000; + sigset_t blocked = cfs_block_allsigs(); LNetMDUnlink(the_lnet.ln_ping_target_md); /* NB md could be busy; this just starts the unlink */ @@ -1759,22 +1686,22 @@ lnet_ping_target_fini(void) int lnet_ping(lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_ids) { - lnet_handle_eq_t eqh; - lnet_handle_md_t mdh; - lnet_event_t event; - lnet_md_t md = { NULL }; - int which; - int unlinked = 0; - int replied = 0; - const int a_long_time = 60000; /* mS */ - int infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]); - lnet_ping_info_t *info; - lnet_process_id_t tmpid; - int i; - int nob; - int rc; - int rc2; - sigset_t blocked; + lnet_handle_eq_t eqh; + lnet_handle_md_t mdh; + lnet_event_t event; + lnet_md_t md = { NULL }; + int which; + int unlinked = 0; + 
int replied = 0; + const int a_long_time = 60000; /* mS */ + int infosz = offsetof(lnet_ping_info_t, pi_ni[n_ids]); + lnet_ping_info_t *info; + lnet_process_id_t tmpid; + int i; + int nob; + int rc; + int rc2; + sigset_t blocked; if (n_ids <= 0 || id.nid == LNET_NID_ANY || diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c index 2dc4c4a1a..9c576ce2f 100644 --- a/drivers/staging/lustre/lnet/lnet/config.c +++ b/drivers/staging/lustre/lnet/lnet/config.c @@ -38,9 +38,9 @@ #include "../../include/linux/lnet/lib-lnet.h" struct lnet_text_buf_t { /* tmp struct for parsing routes */ - struct list_head ltb_list; /* stash on lists */ - int ltb_size; /* allocated size */ - char ltb_text[0]; /* text buffer */ + struct list_head ltb_list; /* stash on lists */ + int ltb_size; /* allocated size */ + char ltb_text[0]; /* text buffer */ }; static int lnet_tbnob; /* track text buf allocation */ @@ -80,8 +80,8 @@ lnet_issep(char c) static int lnet_net_unique(__u32 net, struct list_head *nilist) { - struct list_head *tmp; - lnet_ni_t *ni; + struct list_head *tmp; + lnet_ni_t *ni; list_for_each(tmp, nilist) { ni = list_entry(tmp, lnet_ni_t, ni_list); @@ -111,10 +111,10 @@ lnet_ni_free(struct lnet_ni *ni) static lnet_ni_t * lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, struct list_head *nilist) { - struct lnet_tx_queue *tq; - struct lnet_ni *ni; - int rc; - int i; + struct lnet_tx_queue *tq; + struct lnet_ni *ni; + int rc; + int i; if (!lnet_net_unique(net, nilist)) { LCONSOLE_ERROR_MSG(0x111, "Duplicate network specified: %s\n", @@ -178,13 +178,13 @@ int lnet_parse_networks(struct list_head *nilist, char *networks) { struct cfs_expr_list *el = NULL; - int tokensize = strlen(networks) + 1; - char *tokens; - char *str; - char *tmp; - struct lnet_ni *ni; - __u32 net; - int nnets = 0; + int tokensize = strlen(networks) + 1; + char *tokens; + char *str; + char *tmp; + struct lnet_ni *ni; + __u32 net; + int nnets = 0; if (strlen(networks) > LNET_SINGLE_TEXTBUF_NOB) { /* _WAY_ conservative */ @@ -210,12 +210,12 @@ lnet_parse_networks(struct list_head *nilist, char *networks) goto failed; while (str != NULL && *str != 0) { - char *comma = strchr(str, ','); - char *bracket = strchr(str, '('); - char *square = strchr(str, '['); - char *iface; - int niface; - int rc; + char *comma = strchr(str, ','); + char *bracket = strchr(str, '('); + char *square = strchr(str, '['); + char *iface; + int niface; + int rc; /* NB we don't check interface conflicts here; it's the LNDs * responsibility (if it cares at all) */ @@ -369,7 +369,7 @@ static struct lnet_text_buf_t * lnet_new_text_buf(int str_len) { struct lnet_text_buf_t *ltb; - int nob; + int nob; /* NB allocate space for the terminating 0 */ nob = offsetof(struct lnet_text_buf_t, ltb_text[str_len + 1]); @@ -404,7 +404,7 @@ lnet_free_text_buf(struct lnet_text_buf_t *ltb) static void lnet_free_text_bufs(struct list_head *tbs) { - struct lnet_text_buf_t *ltb; + struct lnet_text_buf_t *ltb; while (!list_empty(tbs)) { ltb = list_entry(tbs->next, struct lnet_text_buf_t, ltb_list); @@ -417,11 +417,11 @@ lnet_free_text_bufs(struct list_head *tbs) static int lnet_str2tbs_sep(struct list_head *tbs, char *str) { - struct list_head pending; - char *sep; - int nob; - int i; - struct lnet_text_buf_t *ltb; + struct list_head pending; + char *sep; + int nob; + int i; + struct lnet_text_buf_t *ltb; INIT_LIST_HEAD(&pending); @@ -477,8 +477,8 @@ lnet_expand1tb(struct list_head *list, char *str, char *sep1, char *sep2, char *item, int itemlen) 
{ - int len1 = (int)(sep1 - str); - int len2 = strlen(sep2 + 1); + int len1 = (int)(sep1 - str); + int len2 = strlen(sep2 + 1); struct lnet_text_buf_t *ltb; LASSERT(*sep1 == '['); @@ -500,18 +500,18 @@ lnet_expand1tb(struct list_head *list, static int lnet_str2tbs_expand(struct list_head *tbs, char *str) { - char num[16]; - struct list_head pending; - char *sep; - char *sep2; - char *parsed; - char *enditem; - int lo; - int hi; - int stride; - int i; - int nob; - int scanned; + char num[16]; + struct list_head pending; + char *sep; + char *sep2; + char *parsed; + char *enditem; + int lo; + int hi; + int stride; + int i; + int nob; + int scanned; INIT_LIST_HEAD(&pending); @@ -584,8 +584,8 @@ lnet_str2tbs_expand(struct list_head *tbs, char *str) static int lnet_parse_hops(char *str, unsigned int *hops) { - int len = strlen(str); - int nob = len; + int len = strlen(str); + int nob = len; return (sscanf(str, "%u%n", hops, &nob) >= 1 && nob == len && @@ -597,9 +597,9 @@ lnet_parse_hops(char *str, unsigned int *hops) static int lnet_parse_priority(char *str, unsigned int *priority, char **token) { - int nob; + int nob; char *sep; - int len; + int len; sep = strchr(str, LNET_PRIORITY_SEPARATOR); if (sep == NULL) { @@ -628,23 +628,23 @@ static int lnet_parse_route(char *str, int *im_a_router) { /* static scratch buffer OK (single threaded) */ - static char cmd[LNET_SINGLE_TEXTBUF_NOB]; - - struct list_head nets; - struct list_head gateways; - struct list_head *tmp1; - struct list_head *tmp2; - __u32 net; - lnet_nid_t nid; - struct lnet_text_buf_t *ltb; - int rc; - char *sep; - char *token = str; - int ntokens = 0; - int myrc = -1; - unsigned int hops; - int got_hops = 0; - unsigned int priority = 0; + static char cmd[LNET_SINGLE_TEXTBUF_NOB]; + + struct list_head nets; + struct list_head gateways; + struct list_head *tmp1; + struct list_head *tmp2; + __u32 net; + lnet_nid_t nid; + struct lnet_text_buf_t *ltb; + int rc; + char *sep; + char *token = str; + int ntokens = 0; + int myrc = -1; + unsigned int hops; + int got_hops = 0; + unsigned int priority = 0; INIT_LIST_HEAD(&gateways); INIT_LIST_HEAD(&nets); @@ -772,7 +772,7 @@ lnet_parse_route(char *str, int *im_a_router) static int lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router) { - struct lnet_text_buf_t *ltb; + struct lnet_text_buf_t *ltb; while (!list_empty(tbs)) { ltb = list_entry(tbs->next, struct lnet_text_buf_t, ltb_list); @@ -792,8 +792,8 @@ lnet_parse_route_tbs(struct list_head *tbs, int *im_a_router) int lnet_parse_routes(char *routes, int *im_a_router) { - struct list_head tbs; - int rc = 0; + struct list_head tbs; + int rc = 0; *im_a_router = 0; @@ -814,8 +814,8 @@ static int lnet_match_network_token(char *token, int len, __u32 *ipaddrs, int nip) { LIST_HEAD(list); - int rc; - int i; + int rc; + int i; rc = cfs_ip_addr_parse(token, len, &list); if (rc != 0) @@ -834,13 +834,13 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip) { static char tokens[LNET_SINGLE_TEXTBUF_NOB]; - int matched = 0; - int ntokens = 0; - int len; + int matched = 0; + int ntokens = 0; + int len; char *net = NULL; char *sep; char *token; - int rc; + int rc; LASSERT(strlen(net_entry) < sizeof(tokens)); @@ -889,8 +889,8 @@ lnet_match_network_tokens(char *net_entry, __u32 *ipaddrs, int nip) static __u32 lnet_netspec2net(char *netspec) { - char *bracket = strchr(netspec, '('); - __u32 net; + char *bracket = strchr(netspec, '('); + __u32 net; if (bracket != NULL) *bracket = 0; @@ -906,15 +906,15 @@ lnet_netspec2net(char *netspec) static 
int lnet_splitnets(char *source, struct list_head *nets) { - int offset = 0; - int offset2; - int len; - struct lnet_text_buf_t *tb; - struct lnet_text_buf_t *tb2; - struct list_head *t; - char *sep; - char *bracket; - __u32 net; + int offset = 0; + int offset2; + int len; + struct lnet_text_buf_t *tb; + struct lnet_text_buf_t *tb2; + struct list_head *t; + char *sep; + char *bracket; + __u32 net; LASSERT(!list_empty(nets)); LASSERT(nets->next == nets->prev); /* single entry */ @@ -986,22 +986,22 @@ lnet_splitnets(char *source, struct list_head *nets) static int lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip) { - static char networks[LNET_SINGLE_TEXTBUF_NOB]; - static char source[LNET_SINGLE_TEXTBUF_NOB]; - - struct list_head raw_entries; - struct list_head matched_nets; - struct list_head current_nets; - struct list_head *t; - struct list_head *t2; - struct lnet_text_buf_t *tb; - struct lnet_text_buf_t *tb2; - __u32 net1; - __u32 net2; - int len; - int count; - int dup; - int rc; + static char networks[LNET_SINGLE_TEXTBUF_NOB]; + static char source[LNET_SINGLE_TEXTBUF_NOB]; + + struct list_head raw_entries; + struct list_head matched_nets; + struct list_head current_nets; + struct list_head *t; + struct list_head *t2; + struct lnet_text_buf_t *tb; + struct lnet_text_buf_t *tb2; + __u32 net1; + __u32 net2; + int len; + int count; + int dup; + int rc; INIT_LIST_HEAD(&raw_entries); if (lnet_str2tbs_sep(&raw_entries, ip2nets) < 0) { @@ -1112,15 +1112,15 @@ lnet_ipaddr_free_enumeration(__u32 *ipaddrs, int nip) static int lnet_ipaddr_enumerate(__u32 **ipaddrsp) { - int up; - __u32 netmask; - __u32 *ipaddrs; - __u32 *ipaddrs2; - int nip; - char **ifnames; - int nif = libcfs_ipif_enumerate(&ifnames); - int i; - int rc; + int up; + __u32 netmask; + __u32 *ipaddrs; + __u32 *ipaddrs2; + int nip; + char **ifnames; + int nif = lnet_ipif_enumerate(&ifnames); + int i; + int rc; if (nif <= 0) return nif; @@ -1128,7 +1128,7 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp) LIBCFS_ALLOC(ipaddrs, nif * sizeof(*ipaddrs)); if (ipaddrs == NULL) { CERROR("Can't allocate ipaddrs[%d]\n", nif); - libcfs_ipif_free_enumeration(ifnames, nif); + lnet_ipif_free_enumeration(ifnames, nif); return -ENOMEM; } @@ -1136,8 +1136,7 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp) if (!strcmp(ifnames[i], "lo")) continue; - rc = libcfs_ipif_query(ifnames[i], &up, - &ipaddrs[nip], &netmask); + rc = lnet_ipif_query(ifnames[i], &up, &ipaddrs[nip], &netmask); if (rc != 0) { CWARN("Can't query interface %s: %d\n", ifnames[i], rc); @@ -1153,7 +1152,7 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp) nip++; } - libcfs_ipif_free_enumeration(ifnames, nif); + lnet_ipif_free_enumeration(ifnames, nif); if (nip == nif) { *ipaddrsp = ipaddrs; @@ -1178,9 +1177,9 @@ lnet_ipaddr_enumerate(__u32 **ipaddrsp) int lnet_parse_ip2nets(char **networksp, char *ip2nets) { - __u32 *ipaddrs = NULL; - int nip = lnet_ipaddr_enumerate(&ipaddrs); - int rc; + __u32 *ipaddrs = NULL; + int nip = lnet_ipaddr_enumerate(&ipaddrs); + int rc; if (nip < 0) { LCONSOLE_ERROR_MSG(0x117, @@ -1211,82 +1210,3 @@ lnet_parse_ip2nets(char **networksp, char *ip2nets) return 0; } - -int -lnet_set_ip_niaddr(lnet_ni_t *ni) -{ - __u32 net = LNET_NIDNET(ni->ni_nid); - char **names; - int n; - __u32 ip; - __u32 netmask; - int up; - int i; - int rc; - - /* Convenience for LNDs that use the IP address of a local interface as - * the local address part of their NID */ - - if (ni->ni_interfaces[0] != NULL) { - - CLASSERT(LNET_MAX_INTERFACES > 1); - - if (ni->ni_interfaces[1] != 
NULL) { - CERROR("Net %s doesn't support multiple interfaces\n", - libcfs_net2str(net)); - return -EPERM; - } - - rc = libcfs_ipif_query(ni->ni_interfaces[0], - &up, &ip, &netmask); - if (rc != 0) { - CERROR("Net %s can't query interface %s: %d\n", - libcfs_net2str(net), ni->ni_interfaces[0], rc); - return -EPERM; - } - - if (!up) { - CERROR("Net %s can't use interface %s: it's down\n", - libcfs_net2str(net), ni->ni_interfaces[0]); - return -ENETDOWN; - } - - ni->ni_nid = LNET_MKNID(net, ip); - return 0; - } - - n = libcfs_ipif_enumerate(&names); - if (n <= 0) { - CERROR("Net %s can't enumerate interfaces: %d\n", - libcfs_net2str(net), n); - return 0; - } - - for (i = 0; i < n; i++) { - if (!strcmp(names[i], "lo")) /* skip the loopback IF */ - continue; - - rc = libcfs_ipif_query(names[i], &up, &ip, &netmask); - - if (rc != 0) { - CWARN("Net %s can't query interface %s: %d\n", - libcfs_net2str(net), names[i], rc); - continue; - } - - if (!up) { - CWARN("Net %s ignoring interface %s (down)\n", - libcfs_net2str(net), names[i]); - continue; - } - - libcfs_ipif_free_enumeration(names, n); - ni->ni_nid = LNET_MKNID(net, ip); - return 0; - } - - CERROR("Net %s can't find any interfaces\n", libcfs_net2str(net)); - libcfs_ipif_free_enumeration(names, n); - return -ENOENT; -} -EXPORT_SYMBOL(lnet_set_ip_niaddr); diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c index 5470148f5..f19ce9ae6 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-eq.c +++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c @@ -70,7 +70,7 @@ int LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback, lnet_handle_eq_t *handle) { - lnet_eq_t *eq; + lnet_eq_t *eq; LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); @@ -79,7 +79,7 @@ LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback, * overflow, they don't skip entries, so the queue has the same * apparent capacity at all times */ - count = cfs_power2_roundup(count); + count = roundup_pow_of_two(count); if (callback != LNET_EQ_HANDLER_NONE && count != 0) CWARN("EQ callback is guaranteed to get every event, do you still want to set eqcount %d for polling event which will have locking overhead? 
Please contact with developer to confirm\n", count); @@ -151,13 +151,13 @@ EXPORT_SYMBOL(LNetEQAlloc); int LNetEQFree(lnet_handle_eq_t eqh) { - struct lnet_eq *eq; - lnet_event_t *events = NULL; - int **refs = NULL; - int *ref; - int rc = 0; - int size = 0; - int i; + struct lnet_eq *eq; + lnet_event_t *events = NULL; + int **refs = NULL; + int *ref; + int rc = 0; + int size = 0; + int i; LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); @@ -185,13 +185,13 @@ LNetEQFree(lnet_handle_eq_t eqh) } /* stash for free after lock dropped */ - events = eq->eq_events; - size = eq->eq_size; - refs = eq->eq_refs; + events = eq->eq_events; + size = eq->eq_size; + refs = eq->eq_refs; lnet_res_lh_invalidate(&eq->eq_lh); list_del(&eq->eq_list); - lnet_eq_free_locked(eq); + lnet_eq_free(eq); out: lnet_eq_wait_unlock(); lnet_res_unlock(LNET_LOCK_EX); @@ -237,9 +237,9 @@ lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev) static int lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev) { - int new_index = eq->eq_deq_seq & (eq->eq_size - 1); - lnet_event_t *new_event = &eq->eq_events[new_index]; - int rc; + int new_index = eq->eq_deq_seq & (eq->eq_size - 1); + lnet_event_t *new_event = &eq->eq_events[new_index]; + int rc; /* must called with lnet_eq_wait_lock hold */ if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence)) @@ -323,10 +323,10 @@ static int lnet_eq_wait_locked(int *timeout_ms) __must_hold(&the_lnet.ln_eq_wait_lock) { - int tms = *timeout_ms; - int wait; - wait_queue_t wl; - unsigned long now; + int tms = *timeout_ms; + int wait; + wait_queue_t wl; + unsigned long now; if (tms == 0) return -1; /* don't want to wait and no new event */ @@ -392,9 +392,9 @@ int LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms, lnet_event_t *event, int *which) { - int wait = 1; - int rc; - int i; + int wait = 1; + int rc; + int i; LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c index 89d660fef..758f5bede 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-md.c +++ b/drivers/staging/lustre/lnet/lnet/lib-md.c @@ -74,7 +74,7 @@ lnet_md_unlink(lnet_libmd_t *md) CDEBUG(D_NET, "Unlinking md %p\n", md); if (md->md_eq != NULL) { - int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie); + int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie); LASSERT(*md->md_eq->eq_refs[cpt] > 0); (*md->md_eq->eq_refs[cpt])--; @@ -82,15 +82,15 @@ lnet_md_unlink(lnet_libmd_t *md) LASSERT(!list_empty(&md->md_list)); list_del_init(&md->md_list); - lnet_md_free_locked(md); + lnet_md_free(md); } static int lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink) { - int i; + int i; unsigned int niov; - int total_length = 0; + int total_length = 0; lmd->md_me = NULL; lmd->md_start = umd->start; @@ -268,10 +268,10 @@ LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd, { LIST_HEAD(matches); LIST_HEAD(drops); - struct lnet_me *me; - struct lnet_libmd *md; - int cpt; - int rc; + struct lnet_me *me; + struct lnet_libmd *md; + int cpt; + int rc; LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); @@ -320,7 +320,7 @@ LNetMDAttach(lnet_handle_me_t meh, lnet_md_t umd, return 0; failed: - lnet_md_free_locked(md); + lnet_md_free(md); lnet_res_unlock(cpt); return rc; @@ -346,9 +346,9 @@ EXPORT_SYMBOL(LNetMDAttach); int LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle) { - lnet_libmd_t *md; - int cpt; - int rc; + lnet_libmd_t *md; + int cpt; + int rc; LASSERT(the_lnet.ln_init); 
LASSERT(the_lnet.ln_refcount > 0); @@ -381,7 +381,7 @@ LNetMDBind(lnet_md_t umd, lnet_unlink_t unlink, lnet_handle_md_t *handle) return 0; failed: - lnet_md_free_locked(md); + lnet_md_free(md); lnet_res_unlock(cpt); return rc; @@ -421,9 +421,9 @@ EXPORT_SYMBOL(LNetMDBind); int LNetMDUnlink(lnet_handle_md_t mdh) { - lnet_event_t ev; - lnet_libmd_t *md; - int cpt; + lnet_event_t ev; + lnet_libmd_t *md; + int cpt; LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); diff --git a/drivers/staging/lustre/lnet/lnet/lib-me.c b/drivers/staging/lustre/lnet/lnet/lib-me.c index a3f929244..42fc99ef9 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-me.c +++ b/drivers/staging/lustre/lnet/lnet/lib-me.c @@ -80,8 +80,8 @@ LNetMEAttach(unsigned int portal, lnet_handle_me_t *handle) { struct lnet_match_table *mtable; - struct lnet_me *me; - struct list_head *head; + struct lnet_me *me; + struct list_head *head; LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); @@ -151,10 +151,10 @@ LNetMEInsert(lnet_handle_me_t current_meh, lnet_unlink_t unlink, lnet_ins_pos_t pos, lnet_handle_me_t *handle) { - struct lnet_me *current_me; - struct lnet_me *new_me; - struct lnet_portal *ptl; - int cpt; + struct lnet_me *current_me; + struct lnet_me *new_me; + struct lnet_portal *ptl; + int cpt; LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); @@ -172,7 +172,7 @@ LNetMEInsert(lnet_handle_me_t current_meh, current_me = lnet_handle2me(¤t_meh); if (current_me == NULL) { - lnet_me_free_locked(new_me); + lnet_me_free(new_me); lnet_res_unlock(cpt); return -ENOENT; @@ -183,7 +183,7 @@ LNetMEInsert(lnet_handle_me_t current_meh, ptl = the_lnet.ln_portals[current_me->me_portal]; if (lnet_ptl_is_unique(ptl)) { /* nosense to insertion on unique portal */ - lnet_me_free_locked(new_me); + lnet_me_free(new_me); lnet_res_unlock(cpt); return -EPERM; } @@ -228,10 +228,10 @@ EXPORT_SYMBOL(LNetMEInsert); int LNetMEUnlink(lnet_handle_me_t meh) { - lnet_me_t *me; - lnet_libmd_t *md; - lnet_event_t ev; - int cpt; + lnet_me_t *me; + lnet_libmd_t *md; + lnet_event_t ev; + int cpt; LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); @@ -276,7 +276,7 @@ lnet_me_unlink(lnet_me_t *me) } lnet_res_lh_invalidate(&me->me_lh); - lnet_me_free_locked(me); + lnet_me_free(me); } #if 0 diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c index c2fb70e5f..433faae9a 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-move.c +++ b/drivers/staging/lustre/lnet/lnet/lib-move.c @@ -49,10 +49,10 @@ MODULE_PARM_DESC(local_nid_dist_zero, "Reserved"); int lnet_fail_nid(lnet_nid_t nid, unsigned int threshold) { - lnet_test_peer_t *tp; - struct list_head *el; - struct list_head *next; - struct list_head cull; + lnet_test_peer_t *tp; + struct list_head *el; + struct list_head *next; + struct list_head cull; LASSERT(the_lnet.ln_init); @@ -103,10 +103,10 @@ static int fail_peer(lnet_nid_t nid, int outgoing) { lnet_test_peer_t *tp; - struct list_head *el; - struct list_head *next; - struct list_head cull; - int fail = 0; + struct list_head *el; + struct list_head *next; + struct list_head cull; + int fail = 0; INIT_LIST_HEAD(&cull); @@ -175,7 +175,7 @@ lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset, unsigned int nob) { /* NB diov, siov are READ-ONLY */ - unsigned int this_nob; + unsigned int this_nob; if (nob == 0) return; @@ -236,8 +236,8 @@ lnet_extract_iov(int dst_niov, struct kvec *dst, /* Initialise 'dst' to the subset of 'src' starting at 'offset', * for 
exactly 'len' bytes, and return the number of entries. * NB not destructive to 'src' */ - unsigned int frag_len; - unsigned int niov; + unsigned int frag_len; + unsigned int niov; if (len == 0) /* no data => */ return 0; /* no frags */ @@ -279,7 +279,7 @@ EXPORT_SYMBOL(lnet_extract_iov); unsigned int lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov) { - unsigned int nob = 0; + unsigned int nob = 0; while (niov-- > 0) nob += (kiov++)->kiov_len; @@ -294,9 +294,9 @@ lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset, unsigned int nob) { /* NB diov, siov are READ-ONLY */ - unsigned int this_nob; - char *daddr = NULL; - char *saddr = NULL; + unsigned int this_nob; + char *daddr = NULL; + char *saddr = NULL; if (nob == 0) return; @@ -376,8 +376,8 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset, unsigned int kiovoffset, unsigned int nob) { /* NB iov, kiov are READ-ONLY */ - unsigned int this_nob; - char *addr = NULL; + unsigned int this_nob; + char *addr = NULL; if (nob == 0) return; @@ -447,8 +447,8 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int nob) { /* NB kiov, iov are READ-ONLY */ - unsigned int this_nob; - char *addr = NULL; + unsigned int this_nob; + char *addr = NULL; if (nob == 0) return; @@ -518,8 +518,8 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst, /* Initialise 'dst' to the subset of 'src' starting at 'offset', * for exactly 'len' bytes, and return the number of entries. * NB not destructive to 'src' */ - unsigned int frag_len; - unsigned int niov; + unsigned int frag_len; + unsigned int niov; if (len == 0) /* no data => */ return 0; /* no frags */ @@ -565,10 +565,10 @@ static void lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, unsigned int offset, unsigned int mlen, unsigned int rlen) { - unsigned int niov = 0; + unsigned int niov = 0; struct kvec *iov = NULL; - lnet_kiov_t *kiov = NULL; - int rc; + lnet_kiov_t *kiov = NULL; + int rc; LASSERT(!in_interrupt()); LASSERT(mlen == 0 || msg != NULL); @@ -642,8 +642,8 @@ lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target, static void lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg) { - void *priv = msg->msg_private; - int rc; + void *priv = msg->msg_private; + int rc; LASSERT(!in_interrupt()); LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND || @@ -657,7 +657,7 @@ lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg) static int lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg) { - int rc; + int rc; LASSERT(!msg->msg_sending); LASSERT(msg->msg_receiving); @@ -700,7 +700,7 @@ lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp) static inline int lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now) { - int alive; + int alive; unsigned long deadline; LASSERT(lnet_peer_aliveness_enabled(lp)); @@ -785,10 +785,10 @@ lnet_peer_alive_locked(lnet_peer_t *lp) static int lnet_post_send_locked(lnet_msg_t *msg, int do_send) { - lnet_peer_t *lp = msg->msg_txpeer; - lnet_ni_t *ni = lp->lp_ni; - int cpt = msg->msg_tx_cpt; - struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt]; + lnet_peer_t *lp = msg->msg_txpeer; + lnet_ni_t *ni = lp->lp_ni; + int cpt = msg->msg_tx_cpt; + struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt]; /* non-lnet_send() callers have checked before */ LASSERT(!do_send || msg->msg_tx_delayed); @@ -871,8 +871,8 @@ lnet_post_send_locked(lnet_msg_t *msg, int do_send) static lnet_rtrbufpool_t * lnet_msg2bufpool(lnet_msg_t *msg) { - lnet_rtrbufpool_t *rbp; - int cpt; + lnet_rtrbufpool_t *rbp; + int cpt; 
LASSERT(msg->msg_rx_committed); @@ -894,9 +894,9 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv) /* lnet_parse is going to lnet_net_unlock immediately after this, so it * sets do_recv FALSE and I don't do the unlock/send/lock bit. I * return EAGAIN if msg blocked and 0 if received or OK to receive */ - lnet_peer_t *lp = msg->msg_rxpeer; - lnet_rtrbufpool_t *rbp; - lnet_rtrbuf_t *rb; + lnet_peer_t *lp = msg->msg_rxpeer; + lnet_rtrbufpool_t *rbp; + lnet_rtrbuf_t *rb; LASSERT(msg->msg_iov == NULL); LASSERT(msg->msg_kiov == NULL); @@ -967,11 +967,11 @@ lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv) void lnet_return_tx_credits_locked(lnet_msg_t *msg) { - lnet_peer_t *txpeer = msg->msg_txpeer; - lnet_msg_t *msg2; + lnet_peer_t *txpeer = msg->msg_txpeer; + lnet_msg_t *msg2; if (msg->msg_txcredit) { - struct lnet_ni *ni = txpeer->lp_ni; + struct lnet_ni *ni = txpeer->lp_ni; struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt]; /* give back NI txcredits */ @@ -1025,12 +1025,12 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg) void lnet_return_rx_credits_locked(lnet_msg_t *msg) { - lnet_peer_t *rxpeer = msg->msg_rxpeer; - lnet_msg_t *msg2; + lnet_peer_t *rxpeer = msg->msg_rxpeer; + lnet_msg_t *msg2; if (msg->msg_rtrcredit) { /* give back global router credits */ - lnet_rtrbuf_t *rb; + lnet_rtrbuf_t *rb; lnet_rtrbufpool_t *rbp; /* NB If a msg ever blocks for a buffer in rbp_msgs, it stays @@ -1122,13 +1122,13 @@ lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2) static lnet_peer_t * lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid) { - lnet_remotenet_t *rnet; - lnet_route_t *rtr; - lnet_route_t *rtr_best; - lnet_route_t *rtr_last; - struct lnet_peer *lp_best; - struct lnet_peer *lp; - int rc; + lnet_remotenet_t *rnet; + lnet_route_t *rtr; + lnet_route_t *rtr_best; + lnet_route_t *rtr_last; + struct lnet_peer *lp_best; + struct lnet_peer *lp; + int rc; /* If @rtr_nid is not LNET_NID_ANY, return the gateway with * rtr_nid nid, otherwise find the best gateway I can use */ @@ -1182,13 +1182,13 @@ lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid) int lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid) { - lnet_nid_t dst_nid = msg->msg_target.nid; - struct lnet_ni *src_ni; - struct lnet_ni *local_ni; - struct lnet_peer *lp; - int cpt; - int cpt2; - int rc; + lnet_nid_t dst_nid = msg->msg_target.nid; + struct lnet_ni *src_ni; + struct lnet_ni *local_ni; + struct lnet_peer *lp; + int cpt; + int cpt2; + int rc; /* NB: rtr_nid is set to LNET_NID_ANY for all current use-cases, * but we might want to use pre-determined router for ACK/REPLY @@ -1364,7 +1364,7 @@ lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob) static void lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg) { - lnet_hdr_t *hdr = &msg->msg_hdr; + lnet_hdr_t *hdr = &msg->msg_hdr; if (msg->msg_wanted != 0) lnet_setpayloadbuffer(msg); @@ -1383,9 +1383,9 @@ lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg) static int lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg) { - lnet_hdr_t *hdr = &msg->msg_hdr; - struct lnet_match_info info; - int rc; + lnet_hdr_t *hdr = &msg->msg_hdr; + struct lnet_match_info info; + int rc; /* Convert put fields to host byte order */ hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits); @@ -1433,24 +1433,24 @@ lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg) static int lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get) { - struct lnet_match_info info; - lnet_hdr_t *hdr = &msg->msg_hdr; - 
lnet_handle_wire_t reply_wmd; - int rc; + struct lnet_match_info info; + lnet_hdr_t *hdr = &msg->msg_hdr; + lnet_handle_wire_t reply_wmd; + int rc; /* Convert get fields to host byte order */ - hdr->msg.get.match_bits = le64_to_cpu(hdr->msg.get.match_bits); - hdr->msg.get.ptl_index = le32_to_cpu(hdr->msg.get.ptl_index); - hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length); - hdr->msg.get.src_offset = le32_to_cpu(hdr->msg.get.src_offset); - - info.mi_id.nid = hdr->src_nid; - info.mi_id.pid = hdr->src_pid; - info.mi_opc = LNET_MD_OP_GET; - info.mi_portal = hdr->msg.get.ptl_index; - info.mi_rlength = hdr->msg.get.sink_length; - info.mi_roffset = hdr->msg.get.src_offset; - info.mi_mbits = hdr->msg.get.match_bits; + hdr->msg.get.match_bits = le64_to_cpu(hdr->msg.get.match_bits); + hdr->msg.get.ptl_index = le32_to_cpu(hdr->msg.get.ptl_index); + hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length); + hdr->msg.get.src_offset = le32_to_cpu(hdr->msg.get.src_offset); + + info.mi_id.nid = hdr->src_nid; + info.mi_id.pid = hdr->src_pid; + info.mi_opc = LNET_MD_OP_GET; + info.mi_portal = hdr->msg.get.ptl_index; + info.mi_rlength = hdr->msg.get.sink_length; + info.mi_roffset = hdr->msg.get.src_offset; + info.mi_mbits = hdr->msg.get.match_bits; rc = lnet_ptl_match_md(&info, msg); if (rc == LNET_MATCHMD_DROP) { @@ -1497,13 +1497,13 @@ lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get) static int lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg) { - void *private = msg->msg_private; - lnet_hdr_t *hdr = &msg->msg_hdr; + void *private = msg->msg_private; + lnet_hdr_t *hdr = &msg->msg_hdr; lnet_process_id_t src = {0}; - lnet_libmd_t *md; - int rlength; - int mlength; - int cpt; + lnet_libmd_t *md; + int rlength; + int mlength; + int cpt; cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie); lnet_res_lock(cpt); @@ -1562,10 +1562,10 @@ lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg) static int lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg) { - lnet_hdr_t *hdr = &msg->msg_hdr; + lnet_hdr_t *hdr = &msg->msg_hdr; lnet_process_id_t src = {0}; - lnet_libmd_t *md; - int cpt; + lnet_libmd_t *md; + int cpt; src.nid = hdr->src_nid; src.pid = hdr->src_pid; @@ -1612,7 +1612,7 @@ lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg) static int lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg) { - int rc = 0; + int rc = 0; if (msg->msg_rxpeer->lp_rtrcredits <= 0 || lnet_msg2bufpool(msg)->rbp_credits <= 0) { @@ -1713,15 +1713,15 @@ int lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid, void *private, int rdma_req) { - int rc = 0; - int cpt; - int for_me; - struct lnet_msg *msg; - lnet_pid_t dest_pid; - lnet_nid_t dest_nid; - lnet_nid_t src_nid; - __u32 payload_length; - __u32 type; + int rc = 0; + int cpt; + int for_me; + struct lnet_msg *msg; + lnet_pid_t dest_pid; + lnet_nid_t dest_nid; + lnet_nid_t src_nid; + __u32 payload_length; + __u32 type; LASSERT(!in_interrupt()); @@ -1945,8 +1945,8 @@ void lnet_drop_delayed_msg_list(struct list_head *head, char *reason) { while (!list_empty(head)) { - lnet_process_id_t id = {0}; - lnet_msg_t *msg; + lnet_process_id_t id = {0}; + lnet_msg_t *msg; msg = list_entry(head->next, lnet_msg_t, msg_list); list_del(&msg->msg_list); @@ -1986,8 +1986,8 @@ void lnet_recv_delayed_msg_list(struct list_head *head) { while (!list_empty(head)) { - lnet_msg_t *msg; - lnet_process_id_t id; + lnet_msg_t *msg; + lnet_process_id_t id; msg = list_entry(head->next, lnet_msg_t, msg_list); list_del(&msg->msg_list); @@ -2063,10 +2063,10 @@ 
LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack, __u64 match_bits, unsigned int offset, __u64 hdr_data) { - struct lnet_msg *msg; - struct lnet_libmd *md; - int cpt; - int rc; + struct lnet_msg *msg; + struct lnet_libmd *md; + int cpt; + int rc; LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); @@ -2153,10 +2153,10 @@ lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg) * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when * lnet_finalize() is called on it, so the LND must call this first */ - struct lnet_msg *msg = lnet_msg_alloc(); - struct lnet_libmd *getmd = getmsg->msg_md; - lnet_process_id_t peer_id = getmsg->msg_target; - int cpt; + struct lnet_msg *msg = lnet_msg_alloc(); + struct lnet_libmd *getmd = getmsg->msg_md; + lnet_process_id_t peer_id = getmsg->msg_target; + int cpt; LASSERT(!getmsg->msg_target_is_router); LASSERT(!getmsg->msg_routing); @@ -2263,10 +2263,10 @@ LNetGet(lnet_nid_t self, lnet_handle_md_t mdh, lnet_process_id_t target, unsigned int portal, __u64 match_bits, unsigned int offset) { - struct lnet_msg *msg; - struct lnet_libmd *md; - int cpt; - int rc; + struct lnet_msg *msg; + struct lnet_libmd *md; + int cpt; + int rc; LASSERT(the_lnet.ln_init); LASSERT(the_lnet.ln_refcount > 0); @@ -2353,14 +2353,14 @@ EXPORT_SYMBOL(LNetGet); int LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp) { - struct list_head *e; - struct lnet_ni *ni; - lnet_remotenet_t *rnet; - __u32 dstnet = LNET_NIDNET(dstnid); - int hops; - int cpt; - __u32 order = 2; - struct list_head *rn_list; + struct list_head *e; + struct lnet_ni *ni; + lnet_remotenet_t *rnet; + __u32 dstnet = LNET_NIDNET(dstnid); + int hops; + int cpt; + __u32 order = 2; + struct list_head *rn_list; /* if !local_nid_dist_zero, I don't return a distance of 0 ever * (when lustre sees a distance of 0, it substitutes 0@lo), so I @@ -2434,27 +2434,3 @@ LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp) return -EHOSTUNREACH; } EXPORT_SYMBOL(LNetDist); - -/** - * Set the number of asynchronous messages expected from a target process. - * - * This function is only meaningful for userspace callers. It's a no-op when - * called from kernel. - * - * Asynchronous messages are those that can come from a target when the - * userspace process is not waiting for IO to complete; e.g., AST callbacks - * from Lustre servers. Specifying the expected number of such messages - * allows them to be eagerly received when user process is not running in - * LNet; otherwise network errors may occur. - * - * \param id Process ID of the target process. - * \param nasync Number of asynchronous messages expected from the target. - * - * \return 0 on success, and an error code otherwise. 
- */ -int -LNetSetAsync(lnet_process_id_t id, int nasync) -{ - return 0; -} -EXPORT_SYMBOL(LNetSetAsync); diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c index a46ccbf66..43977e8df 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-msg.c +++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c @@ -60,8 +60,8 @@ lnet_build_unlink_event(lnet_libmd_t *md, lnet_event_t *ev) void lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type) { - lnet_hdr_t *hdr = &msg->msg_hdr; - lnet_event_t *ev = &msg->msg_ev; + lnet_hdr_t *hdr = &msg->msg_hdr; + lnet_event_t *ev = &msg->msg_ev; LASSERT(!msg->msg_routing); @@ -73,7 +73,7 @@ lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type) ev->target.pid = le32_to_cpu(hdr->dest_pid); ev->initiator.nid = LNET_NID_ANY; ev->initiator.pid = the_lnet.ln_pid; - ev->sender = LNET_NID_ANY; + ev->sender = LNET_NID_ANY; } else { /* event for passive message */ @@ -82,9 +82,9 @@ lnet_build_msg_event(lnet_msg_t *msg, lnet_event_kind_t ev_type) ev->initiator.pid = hdr->src_pid; ev->initiator.nid = hdr->src_nid; ev->rlength = hdr->payload_length; - ev->sender = msg->msg_from; - ev->mlength = msg->msg_wanted; - ev->offset = msg->msg_offset; + ev->sender = msg->msg_from; + ev->mlength = msg->msg_wanted; + ev->offset = msg->msg_offset; } switch (ev_type) { @@ -137,7 +137,7 @@ void lnet_msg_commit(lnet_msg_t *msg, int cpt) { struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt]; - lnet_counters_t *counters = the_lnet.ln_counters[cpt]; + lnet_counters_t *counters = the_lnet.ln_counters[cpt]; /* routed message can be committed for both receiving and sending */ LASSERT(!msg->msg_tx_committed); @@ -170,7 +170,7 @@ static void lnet_msg_decommit_tx(lnet_msg_t *msg, int status) { lnet_counters_t *counters; - lnet_event_t *ev = &msg->msg_ev; + lnet_event_t *ev = &msg->msg_ev; LASSERT(msg->msg_tx_committed); if (status != 0) @@ -219,8 +219,8 @@ lnet_msg_decommit_tx(lnet_msg_t *msg, int status) static void lnet_msg_decommit_rx(lnet_msg_t *msg, int status) { - lnet_counters_t *counters; - lnet_event_t *ev = &msg->msg_ev; + lnet_counters_t *counters; + lnet_event_t *ev = &msg->msg_ev; LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */ LASSERT(msg->msg_rx_committed); @@ -273,7 +273,7 @@ lnet_msg_decommit_rx(lnet_msg_t *msg, int status) void lnet_msg_decommit(lnet_msg_t *msg, int cpt, int status) { - int cpt2 = cpt; + int cpt2 = cpt; LASSERT(msg->msg_tx_committed || msg->msg_rx_committed); LASSERT(msg->msg_onactivelist); @@ -335,8 +335,8 @@ lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md, void lnet_msg_detach_md(lnet_msg_t *msg, int status) { - lnet_libmd_t *md = msg->msg_md; - int unlink; + lnet_libmd_t *md = msg->msg_md; + int unlink; /* Now it's safe to drop my caller's ref */ md->md_refcount--; @@ -359,8 +359,8 @@ static int lnet_complete_msg_locked(lnet_msg_t *msg, int cpt) { lnet_handle_wire_t ack_wmd; - int rc; - int status = msg->msg_ev.status; + int rc; + int status = msg->msg_ev.status; LASSERT(msg->msg_onactivelist); @@ -427,18 +427,18 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt) } lnet_msg_decommit(msg, cpt, status); - lnet_msg_free_locked(msg); + lnet_msg_free(msg); return 0; } void lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status) { - struct lnet_msg_container *container; - int my_slot; - int cpt; - int rc; - int i; + struct lnet_msg_container *container; + int my_slot; + int cpt; + int rc; + int i; LASSERT(!in_interrupt()); @@ -534,7 +534,7 @@ 
EXPORT_SYMBOL(lnet_finalize); void lnet_msg_container_cleanup(struct lnet_msg_container *container) { - int count = 0; + int count = 0; if (container->msc_init == 0) return; @@ -568,7 +568,7 @@ lnet_msg_container_cleanup(struct lnet_msg_container *container) int lnet_msg_container_setup(struct lnet_msg_container *container, int cpt) { - int rc; + int rc; container->msc_init = 1; @@ -608,7 +608,7 @@ void lnet_msg_containers_destroy(void) { struct lnet_msg_container *container; - int i; + int i; if (the_lnet.ln_msg_containers == NULL) return; @@ -624,8 +624,8 @@ int lnet_msg_containers_create(void) { struct lnet_msg_container *container; - int rc; - int i; + int rc; + int i; the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*container)); diff --git a/drivers/staging/lustre/lnet/lnet/lib-ptl.c b/drivers/staging/lustre/lnet/lnet/lib-ptl.c index 3ba0da919..84707c5cb 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-ptl.c +++ b/drivers/staging/lustre/lnet/lnet/lib-ptl.c @@ -39,7 +39,7 @@ #include "../../include/linux/lnet/lib-lnet.h" /* NB: add /proc interfaces in upcoming patches */ -int portal_rotor = LNET_PTL_ROTOR_HASH_RT; +int portal_rotor = LNET_PTL_ROTOR_HASH_RT; module_param(portal_rotor, int, 0644); MODULE_PARM_DESC(portal_rotor, "redirect PUTs to different cpu-partitions"); @@ -47,8 +47,8 @@ static int lnet_ptl_match_type(unsigned int index, lnet_process_id_t match_id, __u64 mbits, __u64 ignore_bits) { - struct lnet_portal *ptl = the_lnet.ln_portals[index]; - int unique; + struct lnet_portal *ptl = the_lnet.ln_portals[index]; + int unique; unique = ignore_bits == 0 && match_id.nid != LNET_NID_ANY && @@ -89,7 +89,7 @@ static void lnet_ptl_enable_mt(struct lnet_portal *ptl, int cpt) { struct lnet_match_table *mtable = ptl->ptl_mtables[cpt]; - int i; + int i; /* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */ LASSERT(lnet_ptl_is_wildcard(ptl)); @@ -114,7 +114,7 @@ static void lnet_ptl_disable_mt(struct lnet_portal *ptl, int cpt) { struct lnet_match_table *mtable = ptl->ptl_mtables[cpt]; - int i; + int i; /* with hold of both lnet_res_lock(cpt) and lnet_ptl_lock */ LASSERT(lnet_ptl_is_wildcard(ptl)); @@ -141,9 +141,9 @@ lnet_try_match_md(lnet_libmd_t *md, { /* ALWAYS called holding the lnet_res_lock, and can't lnet_res_unlock; * lnet_match_blocked_msg() relies on this to avoid races */ - unsigned int offset; - unsigned int mlength; - lnet_me_t *me = md->md_me; + unsigned int offset; + unsigned int mlength; + lnet_me_t *me = md->md_me; /* MD exhausted */ if (lnet_md_exhausted(md)) @@ -227,7 +227,7 @@ struct lnet_match_table * lnet_mt_of_attach(unsigned int index, lnet_process_id_t id, __u64 mbits, __u64 ignore_bits, lnet_ins_pos_t pos) { - struct lnet_portal *ptl; + struct lnet_portal *ptl; struct lnet_match_table *mtable; /* NB: called w/o lock */ @@ -261,11 +261,11 @@ static struct lnet_match_table * lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg) { struct lnet_match_table *mtable; - struct lnet_portal *ptl; - unsigned int nmaps; - unsigned int rotor; - unsigned int cpt; - bool routed; + struct lnet_portal *ptl; + unsigned int nmaps; + unsigned int rotor; + unsigned int cpt; + bool routed; /* NB: called w/o lock */ LASSERT(info->mi_portal < the_lnet.ln_nportals); @@ -312,8 +312,8 @@ lnet_mt_of_match(struct lnet_match_info *info, struct lnet_msg *msg) static int lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos) { - __u64 *bmap; - int i; + __u64 *bmap; + int i; if 
(!lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal])) return 0; @@ -337,7 +337,7 @@ lnet_mt_test_exhausted(struct lnet_match_table *mtable, int pos) static void lnet_mt_set_exhausted(struct lnet_match_table *mtable, int pos, int exhausted) { - __u64 *bmap; + __u64 *bmap; LASSERT(lnet_ptl_is_wildcard(the_lnet.ln_portals[mtable->mt_portal])); LASSERT(pos <= LNET_MT_HASH_IGNORE); @@ -373,11 +373,11 @@ int lnet_mt_match_md(struct lnet_match_table *mtable, struct lnet_match_info *info, struct lnet_msg *msg) { - struct list_head *head; - lnet_me_t *me; - lnet_me_t *tmp; - int exhausted = 0; - int rc; + struct list_head *head; + lnet_me_t *me; + lnet_me_t *tmp; + int exhausted = 0; + int rc; /* any ME with ignore bits? */ if (!list_empty(&mtable->mt_mhash[LNET_MT_HASH_IGNORE])) @@ -428,7 +428,7 @@ lnet_mt_match_md(struct lnet_match_table *mtable, static int lnet_ptl_match_early(struct lnet_portal *ptl, struct lnet_msg *msg) { - int rc; + int rc; /* message arrived before any buffer posting on this portal, * simply delay or drop this message */ @@ -461,9 +461,9 @@ static int lnet_ptl_match_delay(struct lnet_portal *ptl, struct lnet_match_info *info, struct lnet_msg *msg) { - int first = ptl->ptl_mt_maps[0]; /* read w/o lock */ - int rc = 0; - int i; + int first = ptl->ptl_mt_maps[0]; /* read w/o lock */ + int rc = 0; + int i; /* steal buffer from other CPTs, and delay it if nothing to steal, * this function is more expensive than a regular match, but we @@ -472,7 +472,7 @@ lnet_ptl_match_delay(struct lnet_portal *ptl, for (i = 0; i < LNET_CPT_NUMBER; i++) { struct lnet_match_table *mtable; - int cpt; + int cpt; cpt = (first + i) % LNET_CPT_NUMBER; mtable = ptl->ptl_mtables[cpt]; @@ -536,8 +536,8 @@ int lnet_ptl_match_md(struct lnet_match_info *info, struct lnet_msg *msg) { struct lnet_match_table *mtable; - struct lnet_portal *ptl; - int rc; + struct lnet_portal *ptl; + int rc; CDEBUG(D_NET, "Request from %s of length %d into portal %d MB=%#llx\n", libcfs_id2str(info->mi_id), info->mi_rlength, info->mi_portal, @@ -622,13 +622,13 @@ void lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md, struct list_head *matches, struct list_head *drops) { - struct lnet_portal *ptl = the_lnet.ln_portals[me->me_portal]; + struct lnet_portal *ptl = the_lnet.ln_portals[me->me_portal]; struct lnet_match_table *mtable; - struct list_head *head; - lnet_msg_t *tmp; - lnet_msg_t *msg; - int exhausted = 0; - int cpt; + struct list_head *head; + lnet_msg_t *tmp; + lnet_msg_t *msg; + int exhausted = 0; + int cpt; LASSERT(md->md_refcount == 0); /* a brand new MD */ @@ -647,20 +647,20 @@ lnet_ptl_attach_md(lnet_me_t *me, lnet_libmd_t *md, head = &ptl->ptl_msg_stealing; again: list_for_each_entry_safe(msg, tmp, head, msg_list) { - struct lnet_match_info info; - lnet_hdr_t *hdr; - int rc; + struct lnet_match_info info; + lnet_hdr_t *hdr; + int rc; LASSERT(msg->msg_rx_delayed || head == &ptl->ptl_msg_stealing); - hdr = &msg->msg_hdr; - info.mi_id.nid = hdr->src_nid; - info.mi_id.pid = hdr->src_pid; - info.mi_opc = LNET_MD_OP_PUT; - info.mi_portal = hdr->msg.put.ptl_index; - info.mi_rlength = hdr->payload_length; - info.mi_roffset = hdr->msg.put.offset; - info.mi_mbits = hdr->msg.put.match_bits; + hdr = &msg->msg_hdr; + info.mi_id.nid = hdr->src_nid; + info.mi_id.pid = hdr->src_pid; + info.mi_opc = LNET_MD_OP_PUT; + info.mi_portal = hdr->msg.put.ptl_index; + info.mi_rlength = hdr->payload_length; + info.mi_roffset = hdr->msg.put.offset; + info.mi_mbits = hdr->msg.put.match_bits; rc = lnet_try_match_md(md, &info, msg); 
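The lib-ptl.c hunks above are LNet's portal matching engine: an incoming PUT/GET header is folded into a struct lnet_match_info, walked against the posted MEs and MDs, and lnet_ptl_attach_md() replays that match for any delayed messages as soon as a new MD arrives. A minimal sketch of the consumer-side sequence that arms this machinery follows; the portal index, match bits, and single-use threshold are invented example values, not taken from this patch.

/* Hedged sketch: posting a receive buffer so incoming PUTs can match it.
 * EXAMPLE_PORTAL and the 0x11 match bits are hypothetical. */
static int example_post_buffer(void *buf, unsigned int len,
			       lnet_handle_eq_t eqh, lnet_handle_md_t *mdh)
{
	lnet_process_id_t any = { .nid = LNET_NID_ANY, .pid = LNET_PID_ANY };
	lnet_handle_me_t meh;
	lnet_md_t md = { NULL };
	const int EXAMPLE_PORTAL = 9;
	int rc;

	/* ME: accept a PUT from any peer carrying match bits 0x11 */
	rc = LNetMEAttach(EXAMPLE_PORTAL, any, 0x11, 0,
			  LNET_UNLINK, LNET_INS_AFTER, &meh);
	if (rc != 0)
		return rc;

	/* MD: the memory a matched PUT should land in */
	md.start = buf;
	md.length = len;
	md.threshold = 1;		/* single use, then auto-unlink */
	md.options = LNET_MD_OP_PUT;
	md.eq_handle = eqh;		/* completion delivered via this EQ */

	/* This is the step where lnet_ptl_attach_md() above retries any
	 * matching delayed messages queued on the portal. */
	rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh);
	if (rc != 0)
		LNetMEUnlink(meh);
	return rc;
}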
@@ -715,7 +715,7 @@ static void lnet_ptl_cleanup(struct lnet_portal *ptl) { struct lnet_match_table *mtable; - int i; + int i; if (ptl->ptl_mtables == NULL) /* uninitialized portal */ return; @@ -723,9 +723,9 @@ lnet_ptl_cleanup(struct lnet_portal *ptl) LASSERT(list_empty(&ptl->ptl_msg_delayed)); LASSERT(list_empty(&ptl->ptl_msg_stealing)); cfs_percpt_for_each(mtable, i, ptl->ptl_mtables) { - struct list_head *mhash; - lnet_me_t *me; - int j; + struct list_head *mhash; + lnet_me_t *me; + int j; if (mtable->mt_mhash == NULL) /* uninitialized match-table */ continue; @@ -753,9 +753,9 @@ static int lnet_ptl_setup(struct lnet_portal *ptl, int index) { struct lnet_match_table *mtable; - struct list_head *mhash; - int i; - int j; + struct list_head *mhash; + int i; + int j; ptl->ptl_mtables = cfs_percpt_alloc(lnet_cpt_table(), sizeof(struct lnet_match_table)); @@ -798,7 +798,7 @@ lnet_ptl_setup(struct lnet_portal *ptl, int index) void lnet_portals_destroy(void) { - int i; + int i; if (the_lnet.ln_portals == NULL) return; @@ -813,8 +813,8 @@ lnet_portals_destroy(void) int lnet_portals_create(void) { - int size; - int i; + int size; + int i; size = offsetof(struct lnet_portal, ptl_mt_maps[LNET_CPT_NUMBER]); @@ -898,8 +898,8 @@ EXPORT_SYMBOL(LNetSetLazyPortal); int LNetClearLazyPortal(int portal) { - struct lnet_portal *ptl; - LIST_HEAD (zombies); + struct lnet_portal *ptl; + LIST_HEAD(zombies); if (portal < 0 || portal >= the_lnet.ln_nportals) return -EINVAL; diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c new file mode 100644 index 000000000..6f7ef4c73 --- /dev/null +++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c @@ -0,0 +1,594 @@ +/* + * GPL HEADER START + * + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). + * + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.gnu.org/licenses/gpl-2.0.html + * + * GPL HEADER END + */ +/* + * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved. + * Use is subject to license terms. + * + * Copyright (c) 2012, 2015 Intel Corporation. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Seagate, Inc. 
+ */
+#define DEBUG_SUBSYSTEM S_LNET
+
+#include <linux/if.h>
+#include <linux/in.h>
+#include <linux/net.h>
+#include <linux/file.h>
+#include <linux/pagemap.h>
+/* For sys_open & sys_close */
+#include <linux/syscalls.h>
+#include <net/sock.h>
+
+#include "../../include/linux/libcfs/libcfs.h"
+#include "../../include/linux/lnet/lib-lnet.h"
+
+static int
+kernel_sock_unlocked_ioctl(struct file *filp, int cmd, unsigned long arg)
+{
+	mm_segment_t oldfs = get_fs();
+	int err;
+
+	set_fs(KERNEL_DS);
+	err = filp->f_op->unlocked_ioctl(filp, cmd, arg);
+	set_fs(oldfs);
+
+	return err;
+}
+
+static int
+lnet_sock_ioctl(int cmd, unsigned long arg)
+{
+	struct file *sock_filp;
+	struct socket *sock;
+	int rc;
+
+	rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock);
+	if (rc != 0) {
+		CERROR("Can't create socket: %d\n", rc);
+		return rc;
+	}
+
+	sock_filp = sock_alloc_file(sock, 0, NULL);
+	if (IS_ERR(sock_filp)) {
+		sock_release(sock);
+		rc = PTR_ERR(sock_filp);
+		goto out;
+	}
+
+	rc = kernel_sock_unlocked_ioctl(sock_filp, cmd, arg);
+
+	fput(sock_filp);
+out:
+	return rc;
+}
+
+int
+lnet_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask)
+{
+	struct ifreq ifr;
+	int nob;
+	int rc;
+	__u32 val;
+
+	nob = strnlen(name, IFNAMSIZ);
+	if (nob == IFNAMSIZ) {
+		CERROR("Interface name %s too long\n", name);
+		return -EINVAL;
+	}
+
+	CLASSERT(sizeof(ifr.ifr_name) >= IFNAMSIZ);
+
+	strcpy(ifr.ifr_name, name);
+	rc = lnet_sock_ioctl(SIOCGIFFLAGS, (unsigned long)&ifr);
+	if (rc != 0) {
+		CERROR("Can't get flags for interface %s\n", name);
+		return rc;
+	}
+
+	if ((ifr.ifr_flags & IFF_UP) == 0) {
+		CDEBUG(D_NET, "Interface %s down\n", name);
+		*up = 0;
+		*ip = *mask = 0;
+		return 0;
+	}
+	*up = 1;
+
+	strcpy(ifr.ifr_name, name);
+	ifr.ifr_addr.sa_family = AF_INET;
+	rc = lnet_sock_ioctl(SIOCGIFADDR, (unsigned long)&ifr);
+	if (rc != 0) {
+		CERROR("Can't get IP address for interface %s\n", name);
+		return rc;
+	}
+
+	val = ((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr.s_addr;
+	*ip = ntohl(val);
+
+	strcpy(ifr.ifr_name, name);
+	ifr.ifr_addr.sa_family = AF_INET;
+	rc = lnet_sock_ioctl(SIOCGIFNETMASK, (unsigned long)&ifr);
+	if (rc != 0) {
+		CERROR("Can't get netmask for interface %s\n", name);
+		return rc;
+	}
+
+	val = ((struct sockaddr_in *)&ifr.ifr_netmask)->sin_addr.s_addr;
+	*mask = ntohl(val);
+
+	return 0;
+}
+EXPORT_SYMBOL(lnet_ipif_query);
+
+int
+lnet_ipif_enumerate(char ***namesp)
+{
+	/* Allocate and fill in 'names', returning # interfaces/error */
+	char **names;
+	int toobig;
+	int nalloc;
+	int nfound;
+	struct ifreq *ifr;
+	struct ifconf ifc;
+	int rc;
+	int nob;
+	int i;
+
+	nalloc = 16;	/* first guess at max interfaces */
+	toobig = 0;
+	for (;;) {
+		if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
+			toobig = 1;
+			nalloc = PAGE_CACHE_SIZE/sizeof(*ifr);
+			CWARN("Too many interfaces: only enumerating first %d\n",
+			      nalloc);
+		}
+
+		LIBCFS_ALLOC(ifr, nalloc * sizeof(*ifr));
+		if (ifr == NULL) {
+			CERROR("ENOMEM enumerating up to %d interfaces\n",
+			       nalloc);
+			rc = -ENOMEM;
+			goto out0;
+		}
+
+		ifc.ifc_buf = (char *)ifr;
+		ifc.ifc_len = nalloc * sizeof(*ifr);
+
+		rc = lnet_sock_ioctl(SIOCGIFCONF, (unsigned long)&ifc);
+		if (rc < 0) {
+			CERROR("Error %d enumerating interfaces\n", rc);
+			goto out1;
+		}
+
+		LASSERT(rc == 0);
+
+		nfound = ifc.ifc_len/sizeof(*ifr);
+		LASSERT(nfound <= nalloc);
+
+		if (nfound < nalloc || toobig)
+			break;
+
+		LIBCFS_FREE(ifr, nalloc * sizeof(*ifr));
+		nalloc *= 2;
+	}
+
+	if (nfound == 0)
+		goto out1;
+
+	LIBCFS_ALLOC(names, nfound * sizeof(*names));
+	if (names == NULL) {
+		rc = -ENOMEM;
+		goto out1;
+	}
+
+	for (i = 0; i < nfound; i++) {
+		nob =
strnlen(ifr[i].ifr_name, IFNAMSIZ); + if (nob == IFNAMSIZ) { + /* no space for terminating NULL */ + CERROR("interface name %.*s too long (%d max)\n", + nob, ifr[i].ifr_name, IFNAMSIZ); + rc = -ENAMETOOLONG; + goto out2; + } + + LIBCFS_ALLOC(names[i], IFNAMSIZ); + if (names[i] == NULL) { + rc = -ENOMEM; + goto out2; + } + + memcpy(names[i], ifr[i].ifr_name, nob); + names[i][nob] = 0; + } + + *namesp = names; + rc = nfound; + +out2: + if (rc < 0) + lnet_ipif_free_enumeration(names, nfound); +out1: + LIBCFS_FREE(ifr, nalloc * sizeof(*ifr)); +out0: + return rc; +} +EXPORT_SYMBOL(lnet_ipif_enumerate); + +void +lnet_ipif_free_enumeration(char **names, int n) +{ + int i; + + LASSERT(n > 0); + + for (i = 0; i < n && names[i] != NULL; i++) + LIBCFS_FREE(names[i], IFNAMSIZ); + + LIBCFS_FREE(names, n * sizeof(*names)); +} +EXPORT_SYMBOL(lnet_ipif_free_enumeration); + +int +lnet_sock_write(struct socket *sock, void *buffer, int nob, int timeout) +{ + int rc; + long ticks = timeout * HZ; + unsigned long then; + struct timeval tv; + + LASSERT(nob > 0); + /* Caller may pass a zero timeout if she thinks the socket buffer is + * empty enough to take the whole message immediately */ + + for (;;) { + struct kvec iov = { + .iov_base = buffer, + .iov_len = nob + }; + struct msghdr msg = { + .msg_flags = (timeout == 0) ? MSG_DONTWAIT : 0 + }; + + if (timeout != 0) { + /* Set send timeout to remaining time */ + tv = (struct timeval) { + .tv_sec = ticks / HZ, + .tv_usec = ((ticks % HZ) * 1000000) / HZ + }; + rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, + (char *)&tv, sizeof(tv)); + if (rc != 0) { + CERROR("Can't set socket send timeout %ld.%06d: %d\n", + (long)tv.tv_sec, (int)tv.tv_usec, rc); + return rc; + } + } + + then = jiffies; + rc = kernel_sendmsg(sock, &msg, &iov, 1, nob); + ticks -= jiffies - then; + + if (rc == nob) + return 0; + + if (rc < 0) + return rc; + + if (rc == 0) { + CERROR("Unexpected zero rc\n"); + return -ECONNABORTED; + } + + if (ticks <= 0) + return -EAGAIN; + + buffer = ((char *)buffer) + rc; + nob -= rc; + } + return 0; +} +EXPORT_SYMBOL(lnet_sock_write); + +int +lnet_sock_read(struct socket *sock, void *buffer, int nob, int timeout) +{ + int rc; + long ticks = timeout * HZ; + unsigned long then; + struct timeval tv; + + LASSERT(nob > 0); + LASSERT(ticks > 0); + + for (;;) { + struct kvec iov = { + .iov_base = buffer, + .iov_len = nob + }; + struct msghdr msg = { + .msg_flags = 0 + }; + + /* Set receive timeout to remaining time */ + tv = (struct timeval) { + .tv_sec = ticks / HZ, + .tv_usec = ((ticks % HZ) * 1000000) / HZ + }; + rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, + (char *)&tv, sizeof(tv)); + if (rc != 0) { + CERROR("Can't set socket recv timeout %ld.%06d: %d\n", + (long)tv.tv_sec, (int)tv.tv_usec, rc); + return rc; + } + + then = jiffies; + rc = kernel_recvmsg(sock, &msg, &iov, 1, nob, 0); + ticks -= jiffies - then; + + if (rc < 0) + return rc; + + if (rc == 0) + return -ECONNRESET; + + buffer = ((char *)buffer) + rc; + nob -= rc; + + if (nob == 0) + return 0; + + if (ticks <= 0) + return -ETIMEDOUT; + } +} +EXPORT_SYMBOL(lnet_sock_read); + +static int +lnet_sock_create(struct socket **sockp, int *fatal, __u32 local_ip, + int local_port) +{ + struct sockaddr_in locaddr; + struct socket *sock; + int rc; + int option; + + /* All errors are fatal except bind failure if the port is in use */ + *fatal = 1; + + rc = sock_create(PF_INET, SOCK_STREAM, 0, &sock); + *sockp = sock; + if (rc != 0) { + CERROR("Can't create socket: %d\n", rc); + return rc; + } + + 
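/* SO_REUSEADDR (set just below) lets a restarted daemon rebind its
 * well-known port while the old socket may still sit in TIME_WAIT;
 * note that only -EADDRINUSE from the explicit bind further down is
 * reported as non-fatal (*fatal = 0), so callers can probe a range
 * of local ports looking for a free one. */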
option = 1; + rc = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, + (char *)&option, sizeof(option)); + if (rc != 0) { + CERROR("Can't set SO_REUSEADDR for socket: %d\n", rc); + goto failed; + } + + if (local_ip != 0 || local_port != 0) { + memset(&locaddr, 0, sizeof(locaddr)); + locaddr.sin_family = AF_INET; + locaddr.sin_port = htons(local_port); + locaddr.sin_addr.s_addr = (local_ip == 0) ? + INADDR_ANY : htonl(local_ip); + + rc = kernel_bind(sock, (struct sockaddr *)&locaddr, + sizeof(locaddr)); + if (rc == -EADDRINUSE) { + CDEBUG(D_NET, "Port %d already in use\n", local_port); + *fatal = 0; + goto failed; + } + if (rc != 0) { + CERROR("Error trying to bind to port %d: %d\n", + local_port, rc); + goto failed; + } + } + return 0; + +failed: + sock_release(sock); + return rc; +} + +int +lnet_sock_setbuf(struct socket *sock, int txbufsize, int rxbufsize) +{ + int option; + int rc; + + if (txbufsize != 0) { + option = txbufsize; + rc = kernel_setsockopt(sock, SOL_SOCKET, SO_SNDBUF, + (char *)&option, sizeof(option)); + if (rc != 0) { + CERROR("Can't set send buffer %d: %d\n", + option, rc); + return rc; + } + } + + if (rxbufsize != 0) { + option = rxbufsize; + rc = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUF, + (char *)&option, sizeof(option)); + if (rc != 0) { + CERROR("Can't set receive buffer %d: %d\n", + option, rc); + return rc; + } + } + return 0; +} +EXPORT_SYMBOL(lnet_sock_setbuf); + +int +lnet_sock_getaddr(struct socket *sock, bool remote, __u32 *ip, int *port) +{ + struct sockaddr_in sin; + int len = sizeof(sin); + int rc; + + if (remote) + rc = kernel_getpeername(sock, (struct sockaddr *)&sin, &len); + else + rc = kernel_getsockname(sock, (struct sockaddr *)&sin, &len); + if (rc != 0) { + CERROR("Error %d getting sock %s IP/port\n", + rc, remote ? 
"peer" : "local"); + return rc; + } + + if (ip != NULL) + *ip = ntohl(sin.sin_addr.s_addr); + + if (port != NULL) + *port = ntohs(sin.sin_port); + + return 0; +} +EXPORT_SYMBOL(lnet_sock_getaddr); + +int +lnet_sock_getbuf(struct socket *sock, int *txbufsize, int *rxbufsize) +{ + if (txbufsize != NULL) + *txbufsize = sock->sk->sk_sndbuf; + + if (rxbufsize != NULL) + *rxbufsize = sock->sk->sk_rcvbuf; + + return 0; +} +EXPORT_SYMBOL(lnet_sock_getbuf); + +int +lnet_sock_listen(struct socket **sockp, __u32 local_ip, int local_port, + int backlog) +{ + int fatal; + int rc; + + rc = lnet_sock_create(sockp, &fatal, local_ip, local_port); + if (rc != 0) { + if (!fatal) + CERROR("Can't create socket: port %d already in use\n", + local_port); + return rc; + } + + rc = kernel_listen(*sockp, backlog); + if (rc == 0) + return 0; + + CERROR("Can't set listen backlog %d: %d\n", backlog, rc); + sock_release(*sockp); + return rc; +} +EXPORT_SYMBOL(lnet_sock_listen); + +int +lnet_sock_accept(struct socket **newsockp, struct socket *sock) +{ + wait_queue_t wait; + struct socket *newsock; + int rc; + + init_waitqueue_entry(&wait, current); + + /* XXX this should add a ref to sock->ops->owner, if + * TCP could be a module */ + rc = sock_create_lite(PF_PACKET, sock->type, IPPROTO_TCP, &newsock); + if (rc) { + CERROR("Can't allocate socket\n"); + return rc; + } + + newsock->ops = sock->ops; + + rc = sock->ops->accept(sock, newsock, O_NONBLOCK); + if (rc == -EAGAIN) { + /* Nothing ready, so wait for activity */ + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(sk_sleep(sock->sk), &wait); + schedule(); + remove_wait_queue(sk_sleep(sock->sk), &wait); + set_current_state(TASK_RUNNING); + rc = sock->ops->accept(sock, newsock, O_NONBLOCK); + } + + if (rc != 0) + goto failed; + + *newsockp = newsock; + return 0; + +failed: + sock_release(newsock); + return rc; +} +EXPORT_SYMBOL(lnet_sock_accept); + +int +lnet_sock_connect(struct socket **sockp, int *fatal, __u32 local_ip, + int local_port, __u32 peer_ip, int peer_port) +{ + struct sockaddr_in srvaddr; + int rc; + + rc = lnet_sock_create(sockp, fatal, local_ip, local_port); + if (rc != 0) + return rc; + + memset(&srvaddr, 0, sizeof(srvaddr)); + srvaddr.sin_family = AF_INET; + srvaddr.sin_port = htons(peer_port); + srvaddr.sin_addr.s_addr = htonl(peer_ip); + + rc = kernel_connect(*sockp, (struct sockaddr *)&srvaddr, + sizeof(srvaddr), 0); + if (rc == 0) + return 0; + + /* EADDRNOTAVAIL probably means we're already connected to the same + * peer/port on the same local port on a differently typed + * connection. Let our caller retry with a different local + * port... */ + *fatal = !(rc == -EADDRNOTAVAIL); + + CDEBUG_LIMIT(*fatal ? 
D_NETERROR : D_NET, + "Error %d connecting %pI4h/%d -> %pI4h/%d\n", rc, + &local_ip, local_port, &peer_ip, peer_port); + + sock_release(*sockp); + return rc; +} +EXPORT_SYMBOL(lnet_sock_connect); diff --git a/drivers/staging/lustre/lnet/lnet/lo.c b/drivers/staging/lustre/lnet/lnet/lo.c index f708c2e64..2a137f468 100644 --- a/drivers/staging/lustre/lnet/lnet/lo.c +++ b/drivers/staging/lustre/lnet/lnet/lo.c @@ -111,7 +111,7 @@ lnd_t the_lolnd = { /* .lnd_type = */ LOLND, /* .lnd_startup = */ lolnd_startup, /* .lnd_shutdown = */ lolnd_shutdown, - /* .lnt_ctl = */ NULL, + /* .lnt_ctl = */ NULL, /* .lnd_send = */ lolnd_send, /* .lnd_recv = */ lolnd_recv, /* .lnd_eager_recv = */ NULL, diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c index 72b7fbc83..576201a83 100644 --- a/drivers/staging/lustre/lnet/lnet/module.c +++ b/drivers/staging/lustre/lnet/lnet/module.c @@ -47,9 +47,9 @@ static int lnet_configure(void *arg) { /* 'arg' only there so I can be passed to cfs_create_thread() */ - int rc = 0; + int rc = 0; - LNET_MUTEX_LOCK(&lnet_config_mutex); + mutex_lock(&lnet_config_mutex); if (!the_lnet.ln_niinit_self) { rc = LNetNIInit(LUSTRE_SRV_LNET_PID); @@ -59,34 +59,34 @@ lnet_configure(void *arg) } } - LNET_MUTEX_UNLOCK(&lnet_config_mutex); + mutex_unlock(&lnet_config_mutex); return rc; } static int lnet_unconfigure(void) { - int refcount; + int refcount; - LNET_MUTEX_LOCK(&lnet_config_mutex); + mutex_lock(&lnet_config_mutex); if (the_lnet.ln_niinit_self) { the_lnet.ln_niinit_self = 0; LNetNIFini(); } - LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex); + mutex_lock(&the_lnet.ln_api_mutex); refcount = the_lnet.ln_refcount; - LNET_MUTEX_UNLOCK(&the_lnet.ln_api_mutex); + mutex_unlock(&the_lnet.ln_api_mutex); - LNET_MUTEX_UNLOCK(&lnet_config_mutex); + mutex_unlock(&lnet_config_mutex); return (refcount == 0) ? 0 : -EBUSY; } static int lnet_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data) { - int rc; + int rc; switch (cmd) { case IOC_LIBCFS_CONFIGURE: @@ -113,13 +113,13 @@ static DECLARE_IOCTL_HANDLER(lnet_ioctl_handler, lnet_ioctl); static int __init init_lnet(void) { - int rc; + int rc; mutex_init(&lnet_config_mutex); - rc = LNetInit(); + rc = lnet_init(); if (rc != 0) { - CERROR("LNetInit: error %d\n", rc); + CERROR("lnet_init: error %d\n", rc); return rc; } @@ -143,11 +143,11 @@ fini_lnet(void) rc = libcfs_deregister_ioctl(&lnet_ioctl_handler); LASSERT(rc == 0); - LNetFini(); + lnet_fini(); } MODULE_AUTHOR("Peter J. 
Braam "); -MODULE_DESCRIPTION("Portals v3.1"); +MODULE_DESCRIPTION("LNet v3.1"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0.0"); diff --git a/drivers/staging/lustre/lnet/lnet/peer.c b/drivers/staging/lustre/lnet/lnet/peer.c index 45b5742f1..1fceed3c8 100644 --- a/drivers/staging/lustre/lnet/lnet/peer.c +++ b/drivers/staging/lustre/lnet/lnet/peer.c @@ -43,10 +43,10 @@ int lnet_peer_tables_create(void) { - struct lnet_peer_table *ptable; - struct list_head *hash; - int i; - int j; + struct lnet_peer_table *ptable; + struct list_head *hash; + int i; + int j; the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*ptable)); @@ -77,10 +77,10 @@ lnet_peer_tables_create(void) void lnet_peer_tables_destroy(void) { - struct lnet_peer_table *ptable; - struct list_head *hash; - int i; - int j; + struct lnet_peer_table *ptable; + struct list_head *hash; + int i; + int j; if (the_lnet.ln_peer_tables == NULL) return; @@ -106,9 +106,9 @@ lnet_peer_tables_destroy(void) void lnet_peer_tables_cleanup(void) { - struct lnet_peer_table *ptable; - int i; - int j; + struct lnet_peer_table *ptable; + int i; + int j; LASSERT(the_lnet.ln_shutdown); /* i.e. no new peers */ @@ -133,7 +133,7 @@ lnet_peer_tables_cleanup(void) cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) { LIST_HEAD(deathrow); - lnet_peer_t *lp; + lnet_peer_t *lp; lnet_net_lock(i); @@ -186,8 +186,8 @@ lnet_destroy_peer_locked(lnet_peer_t *lp) lnet_peer_t * lnet_find_peer_locked(struct lnet_peer_table *ptable, lnet_nid_t nid) { - struct list_head *peers; - lnet_peer_t *lp; + struct list_head *peers; + lnet_peer_t *lp; LASSERT(!the_lnet.ln_shutdown); @@ -205,11 +205,11 @@ lnet_find_peer_locked(struct lnet_peer_table *ptable, lnet_nid_t nid) int lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt) { - struct lnet_peer_table *ptable; - lnet_peer_t *lp = NULL; - lnet_peer_t *lp2; - int cpt2; - int rc = 0; + struct lnet_peer_table *ptable; + lnet_peer_t *lp = NULL; + lnet_peer_t *lp2; + int cpt2; + int rc = 0; *lpp = NULL; if (the_lnet.ln_shutdown) /* it's shutting down */ @@ -287,8 +287,8 @@ lnet_nid2peer_locked(lnet_peer_t **lpp, lnet_nid_t nid, int cpt) goto out; } - lp->lp_txcredits = - lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits; + lp->lp_txcredits = + lp->lp_mintxcredits = lp->lp_ni->ni_peertxcredits; lp->lp_rtrcredits = lp->lp_minrtrcredits = lnet_peer_buffer_credits(lp->lp_ni); @@ -308,10 +308,10 @@ out: void lnet_debug_peer(lnet_nid_t nid) { - char *aliveness = "NA"; - lnet_peer_t *lp; - int rc; - int cpt; + char *aliveness = "NA"; + lnet_peer_t *lp; + int rc; + int cpt; cpt = lnet_cpt_of_nid(nid); lnet_net_lock(cpt); diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c index 8510bae48..4fbae5ef4 100644 --- a/drivers/staging/lustre/lnet/lnet/router.c +++ b/drivers/staging/lustre/lnet/lnet/router.c @@ -24,8 +24,6 @@ #define DEBUG_SUBSYSTEM S_LNET #include "../../include/linux/lnet/lib-lnet.h" -#if defined(LNET_ROUTER) - #define LNET_NRB_TINY_MIN 512 /* min value for each CPT */ #define LNET_NRB_TINY (LNET_NRB_TINY_MIN * 4) #define LNET_NRB_SMALL_MIN 4096 /* min value for each CPT */ @@ -70,15 +68,6 @@ lnet_peer_buffer_credits(lnet_ni_t *ni) /* forward ref's */ static int lnet_router_checker(void *); -#else - -int -lnet_peer_buffer_credits(lnet_ni_t *ni) -{ - return 0; -} - -#endif static int check_routers_before_use; module_param(check_routers_before_use, int, 0444); @@ -139,8 +128,8 @@ lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, static void 
lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp) { - int alive; - int notifylnd; + int alive; + int notifylnd; /* Notify only in 1 thread at any time to ensure ordered notification. * NB individual events can be missed; the only guarantee is that you @@ -152,7 +141,7 @@ lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp) lp->lp_notifying = 1; while (lp->lp_notify) { - alive = lp->lp_alive; + alive = lp->lp_alive; notifylnd = lp->lp_notifylnd; lp->lp_notifylnd = 0; @@ -228,9 +217,9 @@ lnet_rtr_decref_locked(lnet_peer_t *lp) lnet_remotenet_t * lnet_find_net_locked(__u32 net) { - lnet_remotenet_t *rnet; - struct list_head *tmp; - struct list_head *rn_list; + lnet_remotenet_t *rnet; + struct list_head *tmp; + struct list_head *rn_list; LASSERT(!the_lnet.ln_shutdown); @@ -276,9 +265,9 @@ static void lnet_shuffle_seed(void) static void lnet_add_route_to_rnet(lnet_remotenet_t *rnet, lnet_route_t *route) { - unsigned int len = 0; - unsigned int offset = 0; - struct list_head *e; + unsigned int len = 0; + unsigned int offset = 0; + struct list_head *e; lnet_shuffle_seed(); @@ -304,13 +293,13 @@ int lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway, unsigned int priority) { - struct list_head *e; - lnet_remotenet_t *rnet; - lnet_remotenet_t *rnet2; - lnet_route_t *route; - lnet_ni_t *ni; - int add_route; - int rc; + struct list_head *e; + lnet_remotenet_t *rnet; + lnet_remotenet_t *rnet2; + lnet_route_t *route; + lnet_ni_t *ni; + int add_route; + int rc; CDEBUG(D_NET, "Add route: net %s hops %u priority %u gw %s\n", libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway)); @@ -416,14 +405,14 @@ lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway, int lnet_check_routes(void) { - lnet_remotenet_t *rnet; - lnet_route_t *route; - lnet_route_t *route2; - struct list_head *e1; - struct list_head *e2; - int cpt; - struct list_head *rn_list; - int i; + lnet_remotenet_t *rnet; + lnet_route_t *route; + lnet_route_t *route2; + struct list_head *e1; + struct list_head *e2; + int cpt; + struct list_head *rn_list; + int i; cpt = lnet_net_lock_current(); @@ -434,9 +423,9 @@ lnet_check_routes(void) route2 = NULL; list_for_each(e2, &rnet->lrn_routes) { - lnet_nid_t nid1; - lnet_nid_t nid2; - int net; + lnet_nid_t nid1; + lnet_nid_t nid2; + int net; route = list_entry(e2, lnet_route_t, lr_list); @@ -472,14 +461,14 @@ lnet_check_routes(void) int lnet_del_route(__u32 net, lnet_nid_t gw_nid) { - struct lnet_peer *gateway; - lnet_remotenet_t *rnet; - lnet_route_t *route; - struct list_head *e1; - struct list_head *e2; - int rc = -ENOENT; - struct list_head *rn_list; - int idx = 0; + struct lnet_peer *gateway; + lnet_remotenet_t *rnet; + lnet_route_t *route; + struct list_head *e1; + struct list_head *e2; + int rc = -ENOENT; + struct list_head *rn_list; + int idx = 0; CDEBUG(D_NET, "Del route: net %s : gw %s\n", libcfs_net2str(net), libcfs_nid2str(gw_nid)); @@ -554,13 +543,13 @@ int lnet_get_route(int idx, __u32 *net, __u32 *hops, lnet_nid_t *gateway, __u32 *alive, __u32 *priority) { - struct list_head *e1; - struct list_head *e2; - lnet_remotenet_t *rnet; - lnet_route_t *route; - int cpt; - int i; - struct list_head *rn_list; + struct list_head *e1; + struct list_head *e2; + lnet_remotenet_t *rnet; + lnet_route_t *route; + int cpt; + int i; + struct list_head *rn_list; cpt = lnet_net_lock_current(); @@ -574,11 +563,11 @@ lnet_get_route(int idx, __u32 *net, __u32 *hops, lr_list); if (idx-- == 0) { - *net = rnet->lrn_net; - *hops = route->lr_hops; + *net = rnet->lrn_net; + *hops = 
route->lr_hops; *priority = route->lr_priority; *gateway = route->lr_gateway->lp_nid; - *alive = route->lr_gateway->lp_alive; + *alive = route->lr_gateway->lp_alive; lnet_net_unlock(cpt); return 0; } @@ -593,7 +582,7 @@ lnet_get_route(int idx, __u32 *net, __u32 *hops, void lnet_swap_pinginfo(lnet_ping_info_t *info) { - int i; + int i; lnet_ni_status_t *stat; __swab32s(&info->pi_magic); @@ -614,9 +603,9 @@ lnet_swap_pinginfo(lnet_ping_info_t *info) static void lnet_parse_rc_info(lnet_rc_data_t *rcd) { - lnet_ping_info_t *info = rcd->rcd_pinginfo; - struct lnet_peer *gw = rcd->rcd_gateway; - lnet_route_t *rtr; + lnet_ping_info_t *info = rcd->rcd_pinginfo; + struct lnet_peer *gw = rcd->rcd_gateway; + lnet_route_t *rtr; if (!gw->lp_alive) return; @@ -643,14 +632,14 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd) return; /* can't carry NI status info */ list_for_each_entry(rtr, &gw->lp_routes, lr_gwlist) { - int ptl_status = LNET_NI_STATUS_INVALID; - int down = 0; - int up = 0; - int i; + int ptl_status = LNET_NI_STATUS_INVALID; + int down = 0; + int up = 0; + int i; for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) { lnet_ni_status_t *stat = &info->pi_ni[i]; - lnet_nid_t nid = stat->ns_nid; + lnet_nid_t nid = stat->ns_nid; if (nid == LNET_NID_ANY) { CDEBUG(D_NET, "%s: unexpected LNET_NID_ANY\n", @@ -699,8 +688,8 @@ lnet_parse_rc_info(lnet_rc_data_t *rcd) static void lnet_router_checker_event(lnet_event_t *event) { - lnet_rc_data_t *rcd = event->md.user_ptr; - struct lnet_peer *lp; + lnet_rc_data_t *rcd = event->md.user_ptr; + struct lnet_peer *lp; LASSERT(rcd != NULL); @@ -752,14 +741,14 @@ lnet_router_checker_event(lnet_event_t *event) static void lnet_wait_known_routerstate(void) { - lnet_peer_t *rtr; - struct list_head *entry; - int all_known; + lnet_peer_t *rtr; + struct list_head *entry; + int all_known; LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING); for (;;) { - int cpt = lnet_net_lock_current(); + int cpt = lnet_net_lock_current(); all_known = 1; list_for_each(entry, &the_lnet.ln_routers) { @@ -799,9 +788,9 @@ lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net) static void lnet_update_ni_status_locked(void) { - lnet_ni_t *ni; - long now; - int timeout; + lnet_ni_t *ni; + long now; + int timeout; LASSERT(the_lnet.ln_routing); @@ -860,10 +849,10 @@ lnet_destroy_rc_data(lnet_rc_data_t *rcd) static lnet_rc_data_t * lnet_create_rc_data_locked(lnet_peer_t *gateway) { - lnet_rc_data_t *rcd = NULL; - lnet_ping_info_t *pi; - int rc; - int i; + lnet_rc_data_t *rcd = NULL; + lnet_ping_info_t *pi; + int rc; + int i; lnet_net_unlock(gateway->lp_cpt); @@ -943,8 +932,8 @@ static void lnet_ping_router_locked(lnet_peer_t *rtr) { lnet_rc_data_t *rcd = NULL; - unsigned long now = cfs_time_current(); - int secs; + unsigned long now = cfs_time_current(); + int secs; lnet_peer_addref_locked(rtr); @@ -979,9 +968,9 @@ lnet_ping_router_locked(lnet_peer_t *rtr) if (secs != 0 && !rtr->lp_ping_notsent && cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp, cfs_time_seconds(secs)))) { - int rc; + int rc; lnet_process_id_t id; - lnet_handle_md_t mdh; + lnet_handle_md_t mdh; id.nid = rtr->lp_nid; id.pid = LUSTRE_SRV_LNET_PID; @@ -1013,8 +1002,8 @@ lnet_ping_router_locked(lnet_peer_t *rtr) int lnet_router_checker_start(void) { - int rc; - int eqsz; + int rc; + int eqsz; LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN); @@ -1085,11 +1074,11 @@ lnet_router_checker_stop(void) static void lnet_prune_rc_data(int wait_unlink) { - lnet_rc_data_t *rcd; - lnet_rc_data_t *tmp; - lnet_peer_t *lp; - 
struct list_head head; - int i = 2; + lnet_rc_data_t *rcd; + lnet_rc_data_t *tmp; + lnet_peer_t *lp; + struct list_head head; + int i = 2; if (likely(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING && list_empty(&the_lnet.ln_rcd_deathrow) && @@ -1163,23 +1152,20 @@ lnet_prune_rc_data(int wait_unlink) lnet_net_unlock(LNET_LOCK_EX); } - -#if defined(LNET_ROUTER) - static int lnet_router_checker(void *arg) { - lnet_peer_t *rtr; - struct list_head *entry; + lnet_peer_t *rtr; + struct list_head *entry; cfs_block_allsigs(); LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING); while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) { - __u64 version; - int cpt; - int cpt2; + __u64 version; + int cpt; + int cpt2; cpt = lnet_net_lock_current(); rescan: @@ -1245,11 +1231,11 @@ lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages) static lnet_rtrbuf_t * lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt) { - int npages = rbp->rbp_npages; - int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]); - struct page *page; + int npages = rbp->rbp_npages; + int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]); + struct page *page; lnet_rtrbuf_t *rb; - int i; + int i; LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz); if (rb == NULL) @@ -1280,9 +1266,9 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt) static void lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp) { - int npages = rbp->rbp_npages; - int nbuffers = 0; - lnet_rtrbuf_t *rb; + int npages = rbp->rbp_npages; + int nbuffers = 0; + lnet_rtrbuf_t *rb; if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */ return; @@ -1310,7 +1296,7 @@ static int lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt) { lnet_rtrbuf_t *rb; - int i; + int i; if (rbp->rbp_nbuffers != 0) { LASSERT(rbp->rbp_nbuffers == nbufs); @@ -1355,7 +1341,7 @@ void lnet_rtrpools_free(void) { lnet_rtrbufpool_t *rtrp; - int i; + int i; if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */ return; @@ -1373,7 +1359,7 @@ lnet_rtrpools_free(void) static int lnet_nrb_tiny_calculate(int npages) { - int nrbs = LNET_NRB_TINY; + int nrbs = LNET_NRB_TINY; if (tiny_router_buffers < 0) { LCONSOLE_ERROR_MSG(0x10c, @@ -1392,7 +1378,7 @@ lnet_nrb_tiny_calculate(int npages) static int lnet_nrb_small_calculate(int npages) { - int nrbs = LNET_NRB_SMALL; + int nrbs = LNET_NRB_SMALL; if (small_router_buffers < 0) { LCONSOLE_ERROR_MSG(0x10c, @@ -1411,7 +1397,7 @@ lnet_nrb_small_calculate(int npages) static int lnet_nrb_large_calculate(int npages) { - int nrbs = LNET_NRB_LARGE; + int nrbs = LNET_NRB_LARGE; if (large_router_buffers < 0) { LCONSOLE_ERROR_MSG(0x10c, @@ -1431,13 +1417,13 @@ int lnet_rtrpools_alloc(int im_a_router) { lnet_rtrbufpool_t *rtrp; - int large_pages; - int small_pages = 1; - int nrb_tiny; - int nrb_small; - int nrb_large; - int rc; - int i; + int large_pages; + int small_pages = 1; + int nrb_tiny; + int nrb_small; + int nrb_large; + int rc; + int i; large_pages = (LNET_MTU + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; @@ -1507,9 +1493,9 @@ lnet_rtrpools_alloc(int im_a_router) int lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when) { - struct lnet_peer *lp = NULL; - unsigned long now = cfs_time_current(); - int cpt = lnet_cpt_of_nid(nid); + struct lnet_peer *lp = NULL; + unsigned long now = cfs_time_current(); + int cpt = lnet_cpt_of_nid(nid); LASSERT(!in_interrupt ()); @@ -1573,134 +1559,3 @@ lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when) return 0; } EXPORT_SYMBOL(lnet_notify); - -void -lnet_get_tunables(void) -{ -} - -#else - 
-int -lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, unsigned long when) -{ - return -EOPNOTSUPP; -} - -void -lnet_router_checker(void) -{ - static time_t last; - static int running; - - time_t now = get_seconds(); - int interval = now - last; - int rc; - __u64 version; - lnet_peer_t *rtr; - - /* It's no use to call me again within a sec - all intervals and - * timeouts are measured in seconds */ - if (last != 0 && interval < 2) - return; - - if (last != 0 && - interval > max(live_router_check_interval, - dead_router_check_interval)) - CNETERR("Checker(%d/%d) not called for %d seconds\n", - live_router_check_interval, dead_router_check_interval, - interval); - - LASSERT(LNET_CPT_NUMBER == 1); - - lnet_net_lock(0); - LASSERT(!running); /* recursion check */ - running = 1; - lnet_net_unlock(0); - - last = now; - - if (the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING) - lnet_prune_rc_data(0); /* unlink all rcd and nowait */ - - /* consume all pending events */ - while (1) { - int i; - lnet_event_t ev; - - /* NB ln_rc_eqh must be the 1st in 'eventqs' otherwise the - * recursion breaker in LNetEQPoll would fail */ - rc = LNetEQPoll(&the_lnet.ln_rc_eqh, 1, 0, &ev, &i); - if (rc == 0) /* no event pending */ - break; - - /* NB a lost SENT prevents me from pinging a router again */ - if (rc == -EOVERFLOW) { - CERROR("Dropped an event!!!\n"); - abort(); - } - - LASSERT(rc == 1); - - lnet_router_checker_event(&ev); - } - - if (the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING) { - lnet_prune_rc_data(1); /* release rcd */ - the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN; - running = 0; - return; - } - - LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING); - - lnet_net_lock(0); - - version = the_lnet.ln_routers_version; - list_for_each_entry(rtr, &the_lnet.ln_routers, lp_rtr_list) { - lnet_ping_router_locked(rtr); - LASSERT(version == the_lnet.ln_routers_version); - } - - lnet_net_unlock(0); - - running = 0; /* lock only needed for the recursion check */ -} - -/* NB lnet_peers_start_down depends on me, - * so must be called before any peer creation */ -void -lnet_get_tunables(void) -{ - char *s; - - s = getenv("LNET_ROUTER_PING_TIMEOUT"); - if (s != NULL) - router_ping_timeout = atoi(s); - - s = getenv("LNET_LIVE_ROUTER_CHECK_INTERVAL"); - if (s != NULL) - live_router_check_interval = atoi(s); - - s = getenv("LNET_DEAD_ROUTER_CHECK_INTERVAL"); - if (s != NULL) - dead_router_check_interval = atoi(s); - - /* This replaces old lnd_notify mechanism */ - check_routers_before_use = 1; - if (dead_router_check_interval <= 0) - dead_router_check_interval = 30; -} - -void -lnet_rtrpools_free(void) -{ -} - -int -lnet_rtrpools_alloc(int im_a_arouter) -{ - return 0; -} - -#endif diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c index c055afc86..ee902dc43 100644 --- a/drivers/staging/lustre/lnet/lnet/router_proc.c +++ b/drivers/staging/lustre/lnet/lnet/router_proc.c @@ -112,11 +112,11 @@ static int proc_call_handler(void *data, int write, loff_t *ppos, static int __proc_lnet_stats(void *data, int write, loff_t pos, void __user *buffer, int nob) { - int rc; + int rc; lnet_counters_t *ctrs; - int len; - char *tmpstr; - const int tmpsiz = 256; /* 7 %u and 4 %llu */ + int len; + char *tmpstr; + const int tmpsiz = 256; /* 7 %u and 4 %llu */ if (write) { lnet_counters_reset(); @@ -167,13 +167,13 @@ static int proc_lnet_stats(struct ctl_table *table, int write, static int proc_lnet_routes(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, 
loff_t *ppos) { - const int tmpsiz = 256; - char *tmpstr; - char *s; - int rc = 0; - int len; - int ver; - int off; + const int tmpsiz = 256; + char *tmpstr; + char *s; + int rc = 0; + int len; + int ver; + int off; CLASSERT(sizeof(loff_t) >= 4); @@ -205,13 +205,13 @@ static int proc_lnet_routes(struct ctl_table *table, int write, lnet_net_unlock(0); *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off); } else { - struct list_head *n; - struct list_head *r; - lnet_route_t *route = NULL; - lnet_remotenet_t *rnet = NULL; - int skip = off - 1; - struct list_head *rn_list; - int i; + struct list_head *n; + struct list_head *r; + lnet_route_t *route = NULL; + lnet_remotenet_t *rnet = NULL; + int skip = off - 1; + struct list_head *rn_list; + int i; lnet_net_lock(0); @@ -251,11 +251,11 @@ static int proc_lnet_routes(struct ctl_table *table, int write, } if (route != NULL) { - __u32 net = rnet->lrn_net; - unsigned int hops = route->lr_hops; - unsigned int priority = route->lr_priority; - lnet_nid_t nid = route->lr_gateway->lp_nid; - int alive = route->lr_gateway->lp_alive; + __u32 net = rnet->lrn_net; + unsigned int hops = route->lr_hops; + unsigned int priority = route->lr_priority; + lnet_nid_t nid = route->lr_gateway->lp_nid; + int alive = route->lr_gateway->lp_alive; s += snprintf(s, tmpstr + tmpsiz - s, "%-8s %4u %8u %7s %s\n", @@ -293,13 +293,13 @@ static int proc_lnet_routes(struct ctl_table *table, int write, static int proc_lnet_routers(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - int rc = 0; - char *tmpstr; - char *s; - const int tmpsiz = 256; - int len; - int ver; - int off; + int rc = 0; + char *tmpstr; + char *s; + const int tmpsiz = 256; + int len; + int ver; + int off; off = LNET_PROC_HOFF_GET(*ppos); ver = LNET_PROC_VER_GET(*ppos); @@ -328,9 +328,9 @@ static int proc_lnet_routers(struct ctl_table *table, int write, lnet_net_unlock(0); *ppos = LNET_PROC_POS_MAKE(0, ver, 0, off); } else { - struct list_head *r; - struct lnet_peer *peer = NULL; - int skip = off - 1; + struct list_head *r; + struct lnet_peer *peer = NULL; + int skip = off - 1; lnet_net_lock(0); @@ -360,14 +360,14 @@ static int proc_lnet_routers(struct ctl_table *table, int write, lnet_nid_t nid = peer->lp_nid; unsigned long now = cfs_time_current(); unsigned long deadline = peer->lp_ping_deadline; - int nrefs = peer->lp_refcount; - int nrtrrefs = peer->lp_rtr_refcount; + int nrefs = peer->lp_refcount; + int nrtrrefs = peer->lp_rtr_refcount; int alive_cnt = peer->lp_alive_count; - int alive = peer->lp_alive; - int pingsent = !peer->lp_ping_notsent; + int alive = peer->lp_alive; + int pingsent = !peer->lp_ping_notsent; int last_ping = cfs_duration_sec(cfs_time_sub(now, peer->lp_ping_timestamp)); - int down_ni = 0; + int down_ni = 0; lnet_route_t *rtr; if ((peer->lp_ping_feats & @@ -428,16 +428,16 @@ static int proc_lnet_routers(struct ctl_table *table, int write, static int proc_lnet_peers(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - const int tmpsiz = 256; - struct lnet_peer_table *ptable; - char *tmpstr; - char *s; - int cpt = LNET_PROC_CPT_GET(*ppos); - int ver = LNET_PROC_VER_GET(*ppos); - int hash = LNET_PROC_HASH_GET(*ppos); - int hoff = LNET_PROC_HOFF_GET(*ppos); - int rc = 0; - int len; + const int tmpsiz = 256; + struct lnet_peer_table *ptable; + char *tmpstr; + char *s; + int cpt = LNET_PROC_CPT_GET(*ppos); + int ver = LNET_PROC_VER_GET(*ppos); + int hash = LNET_PROC_HASH_GET(*ppos); + int hoff = LNET_PROC_HOFF_GET(*ppos); + int rc = 
0; + int len; CLASSERT(LNET_PROC_HASH_BITS >= LNET_PEER_HASH_BITS); LASSERT(!write); @@ -465,9 +465,9 @@ static int proc_lnet_peers(struct ctl_table *table, int write, hoff++; } else { - struct lnet_peer *peer; - struct list_head *p; - int skip; + struct lnet_peer *peer; + struct list_head *p; + int skip; again: p = NULL; peer = NULL; @@ -521,23 +521,23 @@ static int proc_lnet_peers(struct ctl_table *table, int write, } if (peer != NULL) { - lnet_nid_t nid = peer->lp_nid; - int nrefs = peer->lp_refcount; - int lastalive = -1; - char *aliveness = "NA"; - int maxcr = peer->lp_ni->ni_peertxcredits; - int txcr = peer->lp_txcredits; - int mintxcr = peer->lp_mintxcredits; - int rtrcr = peer->lp_rtrcredits; - int minrtrcr = peer->lp_minrtrcredits; - int txqnob = peer->lp_txqnob; + lnet_nid_t nid = peer->lp_nid; + int nrefs = peer->lp_refcount; + int lastalive = -1; + char *aliveness = "NA"; + int maxcr = peer->lp_ni->ni_peertxcredits; + int txcr = peer->lp_txcredits; + int mintxcr = peer->lp_mintxcredits; + int rtrcr = peer->lp_rtrcredits; + int minrtrcr = peer->lp_minrtrcredits; + int txqnob = peer->lp_txqnob; if (lnet_isrouter(peer) || lnet_peer_aliveness_enabled(peer)) aliveness = peer->lp_alive ? "up" : "down"; if (lnet_peer_aliveness_enabled(peer)) { - unsigned long now = cfs_time_current(); + unsigned long now = cfs_time_current(); long delta; delta = cfs_time_sub(now, peer->lp_last_alive); @@ -595,13 +595,13 @@ static int proc_lnet_peers(struct ctl_table *table, int write, static int __proc_lnet_buffers(void *data, int write, loff_t pos, void __user *buffer, int nob) { - char *s; - char *tmpstr; - int tmpsiz; - int idx; - int len; - int rc; - int i; + char *s; + char *tmpstr; + int tmpsiz; + int idx; + int len; + int rc; + int i; LASSERT(!write); @@ -660,11 +660,11 @@ static int proc_lnet_buffers(struct ctl_table *table, int write, static int proc_lnet_nis(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { - int tmpsiz = 128 * LNET_CPT_NUMBER; - int rc = 0; - char *tmpstr; - char *s; - int len; + int tmpsiz = 128 * LNET_CPT_NUMBER; + int rc = 0; + char *tmpstr; + char *s; + int len; LASSERT(!write); @@ -684,9 +684,9 @@ static int proc_lnet_nis(struct ctl_table *table, int write, "rtr", "max", "tx", "min"); LASSERT(tmpstr + tmpsiz - s > 0); } else { - struct list_head *n; - lnet_ni_t *ni = NULL; - int skip = *ppos - 1; + struct list_head *n; + lnet_ni_t *ni = NULL; + int skip = *ppos - 1; lnet_net_lock(0); @@ -705,12 +705,12 @@ static int proc_lnet_nis(struct ctl_table *table, int write, } if (ni != NULL) { - struct lnet_tx_queue *tq; - char *stat; - long now = get_seconds(); - int last_alive = -1; - int i; - int j; + struct lnet_tx_queue *tq; + char *stat; + long now = get_seconds(); + int last_alive = -1; + int i; + int j; if (the_lnet.ln_routing) last_alive = now - ni->ni_last_alive; @@ -777,9 +777,9 @@ static int proc_lnet_nis(struct ctl_table *table, int write, } struct lnet_portal_rotors { - int pr_value; - const char *pr_name; - const char *pr_desc; + int pr_value; + const char *pr_name; + const char *pr_desc; }; static struct lnet_portal_rotors portal_rotors[] = { @@ -815,11 +815,11 @@ extern int portal_rotor; static int __proc_lnet_portal_rotor(void *data, int write, loff_t pos, void __user *buffer, int nob) { - const int buf_len = 128; - char *buf; - char *tmp; - int rc; - int i; + const int buf_len = 128; + char *buf; + char *tmp; + int rc; + int i; LIBCFS_ALLOC(buf, buf_len); if (buf == NULL) @@ -887,38 +887,38 @@ static struct ctl_table 
lnet_table[] = { * to go via /proc for portability. */ { - .procname = "stats", - .mode = 0644, + .procname = "stats", + .mode = 0644, .proc_handler = &proc_lnet_stats, }, { - .procname = "routes", - .mode = 0444, + .procname = "routes", + .mode = 0444, .proc_handler = &proc_lnet_routes, }, { - .procname = "routers", - .mode = 0444, + .procname = "routers", + .mode = 0444, .proc_handler = &proc_lnet_routers, }, { - .procname = "peers", - .mode = 0444, + .procname = "peers", + .mode = 0444, .proc_handler = &proc_lnet_peers, }, { - .procname = "buffers", - .mode = 0444, + .procname = "buffers", + .mode = 0444, .proc_handler = &proc_lnet_buffers, }, { - .procname = "nis", - .mode = 0444, + .procname = "nis", + .mode = 0444, .proc_handler = &proc_lnet_nis, }, { - .procname = "portal_rotor", - .mode = 0644, + .procname = "portal_rotor", + .mode = 0644, .proc_handler = &proc_lnet_portal_rotor, }, { diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c index 658f4584f..de11f1bc8 100644 --- a/drivers/staging/lustre/lnet/selftest/brw_test.c +++ b/drivers/staging/lustre/lnet/selftest/brw_test.c @@ -91,7 +91,7 @@ brw_client_init(sfw_test_instance_t *tsi) len = npg * PAGE_CACHE_SIZE; } else { - test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; + test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; /* I should never get this step if it's unknown feature * because make_session will reject unknown feature */ @@ -223,7 +223,7 @@ bad_data: static void brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic) { - int i; + int i; struct page *pg; for (i = 0; i < bk->bk_niov; i++) { @@ -235,7 +235,7 @@ brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic) static int brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic) { - int i; + int i; struct page *pg; for (i = 0; i < bk->bk_niov; i++) { @@ -254,16 +254,16 @@ static int brw_client_prep_rpc(sfw_test_unit_t *tsu, lnet_process_id_t dest, srpc_client_rpc_t **rpcpp) { - srpc_bulk_t *bulk = tsu->tsu_private; + srpc_bulk_t *bulk = tsu->tsu_private; sfw_test_instance_t *tsi = tsu->tsu_instance; - sfw_session_t *sn = tsi->tsi_batch->bat_session; - srpc_client_rpc_t *rpc; - srpc_brw_reqst_t *req; - int flags; - int npg; - int len; - int opc; - int rc; + sfw_session_t *sn = tsi->tsi_batch->bat_session; + srpc_client_rpc_t *rpc; + srpc_brw_reqst_t *req; + int flags; + int npg; + int len; + int opc; + int rc; LASSERT(sn != NULL); LASSERT(bulk != NULL); @@ -277,7 +277,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu, len = npg * PAGE_CACHE_SIZE; } else { - test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; + test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; /* I should never get this step if it's unknown feature * because make_session will reject unknown feature */ @@ -311,12 +311,12 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu, static void brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) { - __u64 magic = BRW_MAGIC; + __u64 magic = BRW_MAGIC; sfw_test_instance_t *tsi = tsu->tsu_instance; - sfw_session_t *sn = tsi->tsi_batch->bat_session; - srpc_msg_t *msg = &rpc->crpc_replymsg; - srpc_brw_reply_t *reply = &msg->msg_body.brw_reply; - srpc_brw_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst; + sfw_session_t *sn = tsi->tsi_batch->bat_session; + srpc_msg_t *msg = &rpc->crpc_replymsg; + srpc_brw_reply_t *reply = &msg->msg_body.brw_reply; + srpc_brw_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst; LASSERT(sn != NULL); @@ -380,10 +380,10 @@ brw_server_rpc_done(srpc_server_rpc_t *rpc) static int 
brw_bulk_ready(srpc_server_rpc_t *rpc, int status) { - __u64 magic = BRW_MAGIC; + __u64 magic = BRW_MAGIC; srpc_brw_reply_t *reply = &rpc->srpc_replymsg.msg_body.brw_reply; srpc_brw_reqst_t *reqst; - srpc_msg_t *reqstmsg; + srpc_msg_t *reqstmsg; LASSERT(rpc->srpc_bulk != NULL); LASSERT(rpc->srpc_reqstbuf != NULL); @@ -416,13 +416,13 @@ brw_bulk_ready(srpc_server_rpc_t *rpc, int status) static int brw_server_handle(struct srpc_server_rpc *rpc) { - struct srpc_service *sv = rpc->srpc_scd->scd_svc; - srpc_msg_t *replymsg = &rpc->srpc_replymsg; - srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg; + struct srpc_service *sv = rpc->srpc_scd->scd_svc; + srpc_msg_t *replymsg = &rpc->srpc_replymsg; + srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg; srpc_brw_reply_t *reply = &replymsg->msg_body.brw_reply; srpc_brw_reqst_t *reqst = &reqstmsg->msg_body.brw_reqst; - int npg; - int rc; + int npg; + int rc; LASSERT(sv->sv_id == SRPC_SERVICE_BRW); @@ -490,17 +490,17 @@ brw_server_handle(struct srpc_server_rpc *rpc) sfw_test_client_ops_t brw_test_client; void brw_init_test_client(void) { - brw_test_client.tso_init = brw_client_init; - brw_test_client.tso_fini = brw_client_fini; - brw_test_client.tso_prep_rpc = brw_client_prep_rpc; - brw_test_client.tso_done_rpc = brw_client_done_rpc; + brw_test_client.tso_init = brw_client_init; + brw_test_client.tso_fini = brw_client_fini; + brw_test_client.tso_prep_rpc = brw_client_prep_rpc; + brw_test_client.tso_done_rpc = brw_client_done_rpc; }; srpc_service_t brw_test_service; void brw_init_test_service(void) { - brw_test_service.sv_id = SRPC_SERVICE_BRW; + brw_test_service.sv_id = SRPC_SERVICE_BRW; brw_test_service.sv_name = "brw_test"; brw_test_service.sv_handler = brw_server_handle; brw_test_service.sv_bulk_ready = brw_bulk_ready; diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c index 045fe295a..1a7870e91 100644 --- a/drivers/staging/lustre/lnet/selftest/conctl.c +++ b/drivers/staging/lustre/lnet/selftest/conctl.c @@ -48,11 +48,11 @@ static int lst_session_new_ioctl(lstio_session_new_args_t *args) { - char *name; - int rc; + char *name; + int rc; if (args->lstio_ses_idp == NULL || /* address for output sid */ - args->lstio_ses_key == 0 || /* no key is specified */ + args->lstio_ses_key == 0 || /* no key is specified */ args->lstio_ses_namep == NULL || /* session name */ args->lstio_ses_nmlen <= 0 || args->lstio_ses_nmlen > LST_NAME_SIZE) @@ -96,12 +96,12 @@ lst_session_info_ioctl(lstio_session_info_args_t *args) { /* no checking of key */ - if (args->lstio_ses_idp == NULL || /* address for output sid */ - args->lstio_ses_keyp == NULL || /* address for output key */ + if (args->lstio_ses_idp == NULL || /* address for output sid */ + args->lstio_ses_keyp == NULL || /* address for output key */ args->lstio_ses_featp == NULL || /* address for output features */ args->lstio_ses_ndinfo == NULL || /* address for output ndinfo */ - args->lstio_ses_namep == NULL || /* address for output name */ - args->lstio_ses_nmlen <= 0 || + args->lstio_ses_namep == NULL || /* address for output name */ + args->lstio_ses_nmlen <= 0 || args->lstio_ses_nmlen > LST_NAME_SIZE) return -EINVAL; @@ -197,8 +197,8 @@ out: static int lst_group_add_ioctl(lstio_group_add_args_t *args) { - char *name; - int rc; + char *name; + int rc; if (args->lstio_grp_key != console_session.ses_key) return -EACCES; @@ -324,8 +324,8 @@ static int lst_nodes_add_ioctl(lstio_group_nodes_args_t *args) { unsigned feats; - int rc; - char *name; + int rc; + 
char *name; if (args->lstio_grp_key != console_session.ses_key) return -EACCES; @@ -385,10 +385,10 @@ lst_group_list_ioctl(lstio_group_list_args_t *args) static int lst_group_info_ioctl(lstio_group_info_args_t *args) { - char *name; - int ndent; - int index; - int rc; + char *name; + int ndent; + int index; + int rc; if (args->lstio_grp_key != console_session.ses_key) return -EACCES; @@ -449,8 +449,8 @@ lst_group_info_ioctl(lstio_group_info_args_t *args) static int lst_batch_add_ioctl(lstio_batch_add_args_t *args) { - int rc; - char *name; + int rc; + char *name; if (args->lstio_bat_key != console_session.ses_key) return -EACCES; @@ -483,8 +483,8 @@ lst_batch_add_ioctl(lstio_batch_add_args_t *args) static int lst_batch_run_ioctl(lstio_batch_run_args_t *args) { - int rc; - char *name; + int rc; + char *name; if (args->lstio_bat_key != console_session.ses_key) return -EACCES; @@ -518,8 +518,8 @@ lst_batch_run_ioctl(lstio_batch_run_args_t *args) static int lst_batch_stop_ioctl(lstio_batch_stop_args_t *args) { - int rc; - char *name; + int rc; + char *name; if (args->lstio_bat_key != console_session.ses_key) return -EACCES; @@ -613,10 +613,10 @@ lst_batch_list_ioctl(lstio_batch_list_args_t *args) static int lst_batch_info_ioctl(lstio_batch_info_args_t *args) { - char *name; - int rc; - int index; - int ndent; + char *name; + int rc; + int index; + int ndent; if (args->lstio_bat_key != console_session.ses_key) return -EACCES; @@ -678,8 +678,8 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args) static int lst_stat_query_ioctl(lstio_stat_args_t *args) { - int rc; - char *name; + int rc; + char *name; /* TODO: not finished */ if (args->lstio_sta_key != console_session.ses_key) diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c index 77f02b761..a1a4e08f7 100644 --- a/drivers/staging/lustre/lnet/selftest/conrpc.c +++ b/drivers/staging/lustre/lnet/selftest/conrpc.c @@ -117,8 +117,8 @@ static int lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats, int bulk_npg, int bulk_len, lstcon_rpc_t **crpcpp) { - lstcon_rpc_t *crpc = NULL; - int rc; + lstcon_rpc_t *crpc = NULL; + int rc; spin_lock(&console_session.ses_rpc_lock); @@ -151,7 +151,7 @@ void lstcon_rpc_put(lstcon_rpc_t *crpc) { srpc_bulk_t *bulk = &crpc->crp_rpc->crpc_bulk; - int i; + int i; LASSERT(list_empty(&crpc->crp_link)); @@ -336,8 +336,8 @@ lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans) int lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout) { - lstcon_rpc_t *crpc; - int rc; + lstcon_rpc_t *crpc; + int rc; if (list_empty(&trans->tas_rpcs_list)) return 0; @@ -386,8 +386,8 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout) static int lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp) { - lstcon_node_t *nd = crpc->crp_node; - srpc_client_rpc_t *rpc = crpc->crp_rpc; + lstcon_node_t *nd = crpc->crp_node; + srpc_client_rpc_t *rpc = crpc->crp_rpc; srpc_generic_reply_t *rep; LASSERT(nd != NULL && rpc != NULL); @@ -423,9 +423,9 @@ lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp) void lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat) { - lstcon_rpc_t *crpc; - srpc_msg_t *rep; - int error; + lstcon_rpc_t *crpc; + srpc_msg_t *rep; + int error; LASSERT(stat != NULL); @@ -470,16 +470,16 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans, struct list_head *head_up, lstcon_rpc_readent_func_t readent) { - struct list_head tmp; - struct list_head *next; - lstcon_rpc_ent_t *ent; + struct list_head tmp; + 
struct list_head *next; + lstcon_rpc_ent_t *ent; srpc_generic_reply_t *rep; - lstcon_rpc_t *crpc; - srpc_msg_t *msg; - lstcon_node_t *nd; - long dur; - struct timeval tv; - int error; + lstcon_rpc_t *crpc; + srpc_msg_t *msg; + lstcon_node_t *nd; + long dur; + struct timeval tv; + int error; LASSERT(head_up != NULL); @@ -544,9 +544,9 @@ void lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans) { srpc_client_rpc_t *rpc; - lstcon_rpc_t *crpc; - lstcon_rpc_t *tmp; - int count = 0; + lstcon_rpc_t *crpc; + lstcon_rpc_t *tmp; + int count = 0; list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list, crp_link) { @@ -601,7 +601,7 @@ lstcon_sesrpc_prep(lstcon_node_t *nd, int transop, { srpc_mksn_reqst_t *msrq; srpc_rmsn_reqst_t *rsrq; - int rc; + int rc; switch (transop) { case LST_TRANS_SESNEW: @@ -638,7 +638,7 @@ int lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc) { srpc_debug_reqst_t *drq; - int rc; + int rc; rc = lstcon_rpc_prep(nd, SRPC_SERVICE_DEBUG, feats, 0, 0, crpc); if (rc != 0) @@ -707,7 +707,7 @@ static lnet_process_id_packed_t * lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov) { lnet_process_id_packed_t *pid; - int i; + int i; i = idx / SFW_ID_PER_PAGE; @@ -723,11 +723,11 @@ lstcon_dstnodes_prep(lstcon_group_t *grp, int idx, int dist, int span, int nkiov, lnet_kiov_t *kiov) { lnet_process_id_packed_t *pid; - lstcon_ndlink_t *ndl; - lstcon_node_t *nd; - int start; - int end; - int i = 0; + lstcon_ndlink_t *ndl; + lstcon_node_t *nd; + int start; + int end; + int i = 0; LASSERT(dist >= 1); LASSERT(span >= 1); @@ -777,8 +777,8 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req) { test_ping_req_t *prq = &req->tsr_u.ping; - prq->png_size = param->png_size; - prq->png_flags = param->png_flags; + prq->png_size = param->png_size; + prq->png_flags = param->png_flags; /* TODO dest */ return 0; } @@ -788,9 +788,10 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req) { test_bulk_req_t *brq = &req->tsr_u.bulk_v0; - brq->blk_opc = param->blk_opc; - brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE; - brq->blk_flags = param->blk_flags; + brq->blk_opc = param->blk_opc; + brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) / + PAGE_CACHE_SIZE; + brq->blk_flags = param->blk_flags; return 0; } @@ -816,7 +817,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, lstcon_group_t *dgrp = test->tes_dst_grp; srpc_test_reqst_t *trq; srpc_bulk_t *bulk; - int i; + int i; int npg = 0; int nob = 0; int rc = 0; @@ -835,8 +836,10 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, trq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.tes_reqst; if (transop == LST_TRANS_TSBSRVADD) { - int ndist = (sgrp->grp_nnode + test->tes_dist - 1) / test->tes_dist; - int nspan = (dgrp->grp_nnode + test->tes_span - 1) / test->tes_span; + int ndist = (sgrp->grp_nnode + test->tes_dist - 1) / + test->tes_dist; + int nspan = (dgrp->grp_nnode + test->tes_span - 1) / + test->tes_span; int nmax = (ndist + nspan - 1) / nspan; trq->tsr_ndest = 0; @@ -851,7 +854,8 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, LASSERT(nob > 0); len = (feats & LST_FEAT_BULK_LEN) == 0 ? 
- PAGE_CACHE_SIZE : min_t(int, nob, PAGE_CACHE_SIZE); + PAGE_CACHE_SIZE : + min_t(int, nob, PAGE_CACHE_SIZE); nob -= len; bulk->bk_iovs[i].kiov_offset = 0; @@ -883,8 +887,8 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, trq->tsr_loop = test->tes_loop; } - trq->tsr_sid = console_session.ses_id; - trq->tsr_bid = test->tes_hdr.tsb_id; + trq->tsr_sid = console_session.ses_id; + trq->tsr_bid = test->tes_hdr.tsb_id; trq->tsr_concur = test->tes_concur; trq->tsr_is_client = (transop == LST_TRANS_TSBCLIADD) ? 1 : 0; trq->tsr_stop_onerr = !!test->tes_stop_onerr; @@ -966,7 +970,7 @@ lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg, srpc_batch_reply_t *bat_rep; srpc_test_reply_t *test_rep; srpc_stat_reply_t *stat_rep; - int rc = 0; + int rc = 0; switch (trans->tas_opc) { case LST_TRANS_SESNEW: @@ -1084,11 +1088,11 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist, lstcon_rpc_trans_t **transpp) { lstcon_rpc_trans_t *trans; - lstcon_ndlink_t *ndl; - lstcon_node_t *nd; - lstcon_rpc_t *rpc; - unsigned feats; - int rc; + lstcon_ndlink_t *ndl; + lstcon_node_t *nd; + lstcon_rpc_t *rpc; + unsigned feats; + int rc; /* Creating session RPG for list of nodes */ @@ -1165,16 +1169,16 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist, static void lstcon_rpc_pinger(void *arg) { - stt_timer_t *ptimer = (stt_timer_t *)arg; + stt_timer_t *ptimer = (stt_timer_t *)arg; lstcon_rpc_trans_t *trans; - lstcon_rpc_t *crpc; - srpc_msg_t *rep; + lstcon_rpc_t *crpc; + srpc_msg_t *rep; srpc_debug_reqst_t *drq; - lstcon_ndlink_t *ndl; - lstcon_node_t *nd; - time_t intv; - int count = 0; - int rc; + lstcon_ndlink_t *ndl; + lstcon_node_t *nd; + time_t intv; + int count = 0; + int rc; /* RPC pinger is a special case of transaction, * it's called by timer at 8 seconds interval. 
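The pinger keeps the 8-second cadence noted above by re-arming its own timer on every pass. A minimal sketch of that pattern, assuming the stt_timer_t/stt_add_timer API from selftest/timer.h; pinger_tick and the literal 8 are illustrative stand-ins for lstcon_rpc_pinger() and LST_PING_INTERVAL, not code from this patch:

static void
pinger_tick(void *arg)
{
	stt_timer_t *timer = arg;

	/* ... ping idle sessions and known routers here ... */

	/* re-arm so the callback fires again ~8 seconds from now */
	timer->stt_expires = (unsigned long)(get_seconds() + 8);
	stt_add_timer(timer);
}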
@@ -1283,8 +1287,8 @@ lstcon_rpc_pinger(void *arg) int lstcon_rpc_pinger_start(void) { - stt_timer_t *ptimer; - int rc; + stt_timer_t *ptimer; + int rc; LASSERT(list_empty(&console_session.ses_rpc_freelist)); LASSERT(atomic_read(&console_session.ses_rpc_counter) == 0); @@ -1324,9 +1328,9 @@ void lstcon_rpc_cleanup_wait(void) { lstcon_rpc_trans_t *trans; - lstcon_rpc_t *crpc; - struct list_head *pacer; - struct list_head zlist; + lstcon_rpc_t *crpc; + struct list_head *pacer; + struct list_head zlist; /* Called with hold of global mutex */ diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h index 2353889c6..7d33cf9e9 100644 --- a/drivers/staging/lustre/lnet/selftest/conrpc.h +++ b/drivers/staging/lustre/lnet/selftest/conrpc.h @@ -64,31 +64,29 @@ struct lstcon_test; struct lstcon_node; typedef struct lstcon_rpc { - struct list_head crp_link; /* chain on rpc transaction */ + struct list_head crp_link; /* chain on rpc transaction */ srpc_client_rpc_t *crp_rpc; /* client rpc */ - struct lstcon_node *crp_node; /* destination node */ + struct lstcon_node *crp_node; /* destination node */ struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */ - unsigned int crp_posted:1; /* rpc is posted */ - unsigned int crp_finished:1; /* rpc is finished */ - unsigned int crp_unpacked:1; /* reply is unpacked */ + unsigned int crp_posted:1; /* rpc is posted */ + unsigned int crp_finished:1; /* rpc is finished */ + unsigned int crp_unpacked:1; /* reply is unpacked */ /** RPC is embedded in other structure and can't free it */ - unsigned int crp_embedded:1; - int crp_status; /* console rpc errors */ - unsigned long crp_stamp; /* replied time stamp */ + unsigned int crp_embedded:1; + int crp_status; /* console rpc errors */ + unsigned long crp_stamp; /* replied time stamp */ } lstcon_rpc_t; typedef struct lstcon_rpc_trans { - struct list_head tas_olink; /* link chain on owner list */ - struct list_head tas_link; /* link chain on global list */ - int tas_opc; /* operation code of transaction */ - /* features mask is uptodate */ - unsigned tas_feats_updated; - /* test features mask */ - unsigned tas_features; - wait_queue_head_t tas_waitq; /* wait queue head */ - atomic_t tas_remaining; /* # of un-scheduled rpcs */ - struct list_head tas_rpcs_list; /* queued requests */ + struct list_head tas_olink; /* link chain on owner list */ + struct list_head tas_link; /* link chain on global list */ + int tas_opc; /* operation code of transaction */ + unsigned tas_feats_updated; /* features mask is uptodate */ + unsigned tas_features; /* test features mask */ + wait_queue_head_t tas_waitq; /* wait queue head */ + atomic_t tas_remaining; /* # of un-scheduled rpcs */ + struct list_head tas_rpcs_list; /* queued requests */ } lstcon_rpc_trans_t; #define LST_TRANS_PRIVATE 0x1000 diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c index 2b5f53c7a..f47c8f27f 100644 --- a/drivers/staging/lustre/lnet/selftest/console.c +++ b/drivers/staging/lustre/lnet/selftest/console.c @@ -59,7 +59,7 @@ do { \ (p)->nle_nnode++; \ } while (0) -lstcon_session_t console_session; +lstcon_session_t console_session; static void lstcon_node_get(lstcon_node_t *nd) @@ -73,7 +73,7 @@ static int lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create) { lstcon_ndlink_t *ndl; - unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE; + unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE; LASSERT(id.nid != 
LNET_NID_ANY); @@ -117,7 +117,7 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create) static void lstcon_node_put(lstcon_node_t *nd) { - lstcon_ndlink_t *ndl; + lstcon_ndlink_t *ndl; LASSERT(nd->nd_ref > 0); @@ -140,10 +140,10 @@ static int lstcon_ndlink_find(struct list_head *hash, lnet_process_id_t id, lstcon_ndlink_t **ndlpp, int create) { - unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE; + unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE; lstcon_ndlink_t *ndl; - lstcon_node_t *nd; - int rc; + lstcon_node_t *nd; + int rc; if (id.nid == LNET_NID_ANY) return -EINVAL; @@ -197,7 +197,7 @@ static int lstcon_group_alloc(char *name, lstcon_group_t **grpp) { lstcon_group_t *grp; - int i; + int i; LIBCFS_ALLOC(grp, offsetof(lstcon_group_t, grp_ndl_hash[LST_NODE_HASHSIZE])); @@ -243,7 +243,7 @@ lstcon_group_drain(lstcon_group_t *grp, int keep) static void lstcon_group_decref(lstcon_group_t *grp) { - int i; + int i; if (--grp->grp_ref > 0) return; @@ -264,7 +264,7 @@ lstcon_group_decref(lstcon_group_t *grp) static int lstcon_group_find(const char *name, lstcon_group_t **grpp) { - lstcon_group_t *grp; + lstcon_group_t *grp; list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) { if (strncmp(grp->grp_name, name, LST_NAME_SIZE) != 0) @@ -288,7 +288,7 @@ static int lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id, lstcon_ndlink_t **ndlpp, int create) { - int rc; + int rc; rc = lstcon_ndlink_find(&grp->grp_ndl_hash[0], id, ndlpp, create); if (rc != 0) @@ -404,12 +404,12 @@ lstcon_group_nodes_add(lstcon_group_t *grp, int count, lnet_process_id_t *ids_up, unsigned *featp, struct list_head *result_up) { - lstcon_rpc_trans_t *trans; - lstcon_ndlink_t *ndl; - lstcon_group_t *tmp; - lnet_process_id_t id; - int i; - int rc; + lstcon_rpc_trans_t *trans; + lstcon_ndlink_t *ndl; + lstcon_group_t *tmp; + lnet_process_id_t id; + int i; + int rc; rc = lstcon_group_alloc(NULL, &tmp); if (rc != 0) { @@ -471,12 +471,12 @@ lstcon_group_nodes_remove(lstcon_group_t *grp, int count, lnet_process_id_t *ids_up, struct list_head *result_up) { - lstcon_rpc_trans_t *trans; - lstcon_ndlink_t *ndl; - lstcon_group_t *tmp; - lnet_process_id_t id; - int rc; - int i; + lstcon_rpc_trans_t *trans; + lstcon_ndlink_t *ndl; + lstcon_group_t *tmp; + lnet_process_id_t id; + int rc; + int i; /* End session and remove node from the group */ @@ -525,7 +525,7 @@ int lstcon_group_add(char *name) { lstcon_group_t *grp; - int rc; + int rc; rc = (lstcon_group_find(name, &grp) == 0)? 
-EEXIST: 0; if (rc != 0) { @@ -549,8 +549,8 @@ int lstcon_nodes_add(char *name, int count, lnet_process_id_t *ids_up, unsigned *featp, struct list_head *result_up) { - lstcon_group_t *grp; - int rc; + lstcon_group_t *grp; + int rc; LASSERT(count > 0); LASSERT(ids_up != NULL); @@ -580,8 +580,8 @@ int lstcon_group_del(char *name) { lstcon_rpc_trans_t *trans; - lstcon_group_t *grp; - int rc; + lstcon_group_t *grp; + int rc; rc = lstcon_group_find(name, &grp); if (rc != 0) { @@ -621,7 +621,7 @@ int lstcon_group_clean(char *name, int args) { lstcon_group_t *grp = NULL; - int rc; + int rc; rc = lstcon_group_find(name, &grp); if (rc != 0) { @@ -654,7 +654,7 @@ lstcon_nodes_remove(char *name, int count, lnet_process_id_t *ids_up, struct list_head *result_up) { lstcon_group_t *grp = NULL; - int rc; + int rc; rc = lstcon_group_find(name, &grp); if (rc != 0) { @@ -682,9 +682,9 @@ lstcon_nodes_remove(char *name, int count, int lstcon_group_refresh(char *name, struct list_head *result_up) { - lstcon_rpc_trans_t *trans; - lstcon_group_t *grp; - int rc; + lstcon_rpc_trans_t *trans; + lstcon_group_t *grp; + int rc; rc = lstcon_group_find(name, &grp); if (rc != 0) { @@ -743,10 +743,10 @@ static int lstcon_nodes_getent(struct list_head *head, int *index_p, int *count_p, lstcon_node_ent_t *dents_up) { - lstcon_ndlink_t *ndl; - lstcon_node_t *nd; - int count = 0; - int index = 0; + lstcon_ndlink_t *ndl; + lstcon_node_t *nd; + int count = 0; + int index = 0; LASSERT(index_p != NULL && count_p != NULL); LASSERT(dents_up != NULL); @@ -784,9 +784,9 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p, int *index_p, int *count_p, lstcon_node_ent_t *dents_up) { lstcon_ndlist_ent_t *gentp; - lstcon_group_t *grp; - lstcon_ndlink_t *ndl; - int rc; + lstcon_group_t *grp; + lstcon_ndlink_t *ndl; + int rc; rc = lstcon_group_find(name, &grp); if (rc != 0) { @@ -828,7 +828,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p, static int lstcon_batch_find(const char *name, lstcon_batch_t **batpp) { - lstcon_batch_t *bat; + lstcon_batch_t *bat; list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) { if (strncmp(bat->bat_name, name, LST_NAME_SIZE) == 0) { @@ -843,9 +843,9 @@ lstcon_batch_find(const char *name, lstcon_batch_t **batpp) int lstcon_batch_add(char *name) { - lstcon_batch_t *bat; - int i; - int rc; + lstcon_batch_t *bat; + int i; + int rc; rc = (lstcon_batch_find(name, &bat) == 0)? 
-EEXIST: 0; if (rc != 0) { @@ -903,7 +903,7 @@ lstcon_batch_add(char *name) int lstcon_batch_list(int index, int len, char *name_up) { - lstcon_batch_t *bat; + lstcon_batch_t *bat; LASSERT(name_up != NULL); LASSERT(index >= 0); @@ -924,12 +924,12 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server, lstcon_node_ent_t *dents_up) { lstcon_test_batch_ent_t *entp; - struct list_head *clilst; - struct list_head *srvlst; - lstcon_test_t *test = NULL; - lstcon_batch_t *bat; - lstcon_ndlink_t *ndl; - int rc; + struct list_head *clilst; + struct list_head *srvlst; + lstcon_test_t *test = NULL; + lstcon_batch_t *bat; + lstcon_ndlink_t *ndl; + int rc; rc = lstcon_batch_find(name, &bat); if (rc != 0) { @@ -1018,7 +1018,7 @@ lstcon_batch_op(lstcon_batch_t *bat, int transop, struct list_head *result_up) { lstcon_rpc_trans_t *trans; - int rc; + int rc; rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list, &bat->bat_trans_list, transop, @@ -1041,7 +1041,7 @@ int lstcon_batch_run(char *name, int timeout, struct list_head *result_up) { lstcon_batch_t *bat; - int rc; + int rc; if (lstcon_batch_find(name, &bat) != 0) { CDEBUG(D_NET, "Can't find batch %s\n", name); @@ -1063,7 +1063,7 @@ int lstcon_batch_stop(char *name, int force, struct list_head *result_up) { lstcon_batch_t *bat; - int rc; + int rc; if (lstcon_batch_find(name, &bat) != 0) { CDEBUG(D_NET, "Can't find batch %s\n", name); @@ -1084,9 +1084,9 @@ lstcon_batch_stop(char *name, int force, struct list_head *result_up) static void lstcon_batch_destroy(lstcon_batch_t *bat) { - lstcon_ndlink_t *ndl; - lstcon_test_t *test; - int i; + lstcon_ndlink_t *ndl; + lstcon_test_t *test; + int i; list_del(&bat->bat_link); @@ -1137,11 +1137,11 @@ lstcon_batch_destroy(lstcon_batch_t *bat) static int lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg) { - lstcon_test_t *test; - lstcon_batch_t *batch; - lstcon_ndlink_t *ndl; - struct list_head *hash; - struct list_head *head; + lstcon_test_t *test; + lstcon_batch_t *batch; + lstcon_ndlink_t *ndl; + struct list_head *hash; + struct list_head *head; test = (lstcon_test_t *)arg; LASSERT(test != NULL); @@ -1181,10 +1181,10 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg) static int lstcon_test_nodes_add(lstcon_test_t *test, struct list_head *result_up) { - lstcon_rpc_trans_t *trans; - lstcon_group_t *grp; - int transop; - int rc; + lstcon_rpc_trans_t *trans; + lstcon_group_t *grp; + int transop; + int rc; LASSERT(test->tes_src_grp != NULL); LASSERT(test->tes_dst_grp != NULL); @@ -1251,8 +1251,8 @@ lstcon_verify_batch(const char *name, lstcon_batch_t **batch) static int lstcon_verify_group(const char *name, lstcon_group_t **grp) { - int rc; - lstcon_ndlink_t *ndl; + int rc; + lstcon_ndlink_t *ndl; rc = lstcon_group_find(name, grp); if (rc != 0) { @@ -1398,13 +1398,13 @@ lstcon_test_batch_query(char *name, int testidx, int client, int timeout, struct list_head *result_up) { lstcon_rpc_trans_t *trans; - struct list_head *translist; - struct list_head *ndlist; - lstcon_tsb_hdr_t *hdr; - lstcon_batch_t *batch; - lstcon_test_t *test = NULL; - int transop; - int rc; + struct list_head *translist; + struct list_head *ndlist; + lstcon_tsb_hdr_t *hdr; + lstcon_batch_t *batch; + lstcon_test_t *test = NULL; + int transop; + int rc; rc = lstcon_batch_find(name, &batch); if (rc != 0) { @@ -1460,9 +1460,9 @@ lstcon_statrpc_readent(int transop, srpc_msg_t *msg, lstcon_rpc_ent_t *ent_up) { srpc_stat_reply_t *rep = &msg->msg_body.stat_reply; - sfw_counters_t *sfwk_stat; - 
srpc_counters_t *srpc_stat; - lnet_counters_t *lnet_stat; + sfw_counters_t *sfwk_stat; + srpc_counters_t *srpc_stat; + lnet_counters_t *lnet_stat; if (rep->str_status != 0) return 0; @@ -1483,9 +1483,9 @@ static int lstcon_ndlist_stat(struct list_head *ndlist, int timeout, struct list_head *result_up) { - struct list_head head; + struct list_head head; lstcon_rpc_trans_t *trans; - int rc; + int rc; INIT_LIST_HEAD(&head); @@ -1508,8 +1508,8 @@ lstcon_ndlist_stat(struct list_head *ndlist, int lstcon_group_stat(char *grp_name, int timeout, struct list_head *result_up) { - lstcon_group_t *grp; - int rc; + lstcon_group_t *grp; + int rc; rc = lstcon_group_find(grp_name, &grp); if (rc != 0) { @@ -1528,11 +1528,11 @@ int lstcon_nodes_stat(int count, lnet_process_id_t *ids_up, int timeout, struct list_head *result_up) { - lstcon_ndlink_t *ndl; - lstcon_group_t *tmp; - lnet_process_id_t id; - int i; - int rc; + lstcon_ndlink_t *ndl; + lstcon_group_t *tmp; + lnet_process_id_t id; + int i; + int rc; rc = lstcon_group_alloc(NULL, &tmp); if (rc != 0) { @@ -1604,7 +1604,7 @@ lstcon_batch_debug(int timeout, char *name, int client, struct list_head *result_up) { lstcon_batch_t *bat; - int rc; + int rc; rc = lstcon_batch_find(name, &bat); if (rc != 0) @@ -1622,7 +1622,7 @@ lstcon_group_debug(int timeout, char *name, struct list_head *result_up) { lstcon_group_t *grp; - int rc; + int rc; rc = lstcon_group_find(name, &grp); if (rc != 0) @@ -1640,11 +1640,11 @@ lstcon_nodes_debug(int timeout, int count, lnet_process_id_t *ids_up, struct list_head *result_up) { - lnet_process_id_t id; - lstcon_ndlink_t *ndl; - lstcon_group_t *grp; - int i; - int rc; + lnet_process_id_t id; + lstcon_ndlink_t *ndl; + lstcon_group_t *grp; + int i; + int rc; rc = lstcon_group_alloc(NULL, &grp); if (rc != 0) { @@ -1689,7 +1689,7 @@ lstcon_session_match(lst_sid_t sid) static void lstcon_new_session_id(lst_sid_t *sid) { - lnet_process_id_t id; + lnet_process_id_t id; LASSERT(console_session.ses_state == LST_SESSION_NONE); @@ -1704,8 +1704,8 @@ int lstcon_session_new(char *name, int key, unsigned feats, int timeout, int force, lst_sid_t *sid_up) { - int rc = 0; - int i; + int rc = 0; + int i; if (console_session.ses_state != LST_SESSION_NONE) { /* session exists */ @@ -1733,9 +1733,9 @@ lstcon_session_new(char *name, int key, unsigned feats, lstcon_new_session_id(&console_session.ses_id); - console_session.ses_key = key; - console_session.ses_state = LST_SESSION_ACTIVE; - console_session.ses_force = !!force; + console_session.ses_key = key; + console_session.ses_state = LST_SESSION_ACTIVE; + console_session.ses_force = !!force; console_session.ses_features = feats; console_session.ses_feats_updated = 0; console_session.ses_timeout = (timeout <= 0) ? 
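Two idioms in the hunk above are worth spelling out: `!!force` collapses any non-zero int to exactly 1 before it is stored in the one-bit `ses_force` bitfield (a plain assignment would keep only the low bit), and the `(timeout <= 0) ? ... : timeout` ternary substitutes a default when the caller passes a non-positive value. A standalone illustration follows, using a made-up struct and default rather than the real lstcon_session_t layout or LST_CONSOLE_TIMEOUT:

#include <stdio.h>

#define DEFAULT_TIMEOUT	300		/* stand-in for the console default */

struct session_opts {
	unsigned	force:1;	/* one-bit flag: can only hold 0 or 1 */
	int		timeout;	/* seconds */
};

int main(void)
{
	struct session_opts opts;
	int force = 4;			/* caller passed a non-zero "true" */
	int timeout = 0;		/* caller left the timeout unset */

	/* Without !!, only the low bit of 4 (i.e. 0) would be stored. */
	opts.force = !!force;
	opts.timeout = (timeout <= 0) ? DEFAULT_TIMEOUT : timeout;

	printf("force=%u timeout=%d\n", opts.force, opts.timeout);
	/* prints: force=1 timeout=300 */
	return 0;
}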
@@ -1770,8 +1770,8 @@ lstcon_session_info(lst_sid_t *sid_up, int *key_up, unsigned *featp, lstcon_ndlist_ent_t *ndinfo_up, char *name_up, int len) { lstcon_ndlist_ent_t *entp; - lstcon_ndlink_t *ndl; - int rc = 0; + lstcon_ndlink_t *ndl; + int rc = 0; if (console_session.ses_state != LST_SESSION_ACTIVE) return -ESRCH; @@ -1802,9 +1802,9 @@ int lstcon_session_end(void) { lstcon_rpc_trans_t *trans; - lstcon_group_t *grp; - lstcon_batch_t *bat; - int rc = 0; + lstcon_group_t *grp; + lstcon_batch_t *bat; + int rc = 0; LASSERT(console_session.ses_state == LST_SESSION_ACTIVE); @@ -1894,13 +1894,13 @@ lstcon_session_feats_check(unsigned feats) static int lstcon_acceptor_handle(srpc_server_rpc_t *rpc) { - srpc_msg_t *rep = &rpc->srpc_replymsg; - srpc_msg_t *req = &rpc->srpc_reqstbuf->buf_msg; + srpc_msg_t *rep = &rpc->srpc_replymsg; + srpc_msg_t *req = &rpc->srpc_reqstbuf->buf_msg; srpc_join_reqst_t *jreq = &req->msg_body.join_reqst; srpc_join_reply_t *jrep = &rep->msg_body.join_reply; - lstcon_group_t *grp = NULL; - lstcon_ndlink_t *ndl; - int rc = 0; + lstcon_group_t *grp = NULL; + lstcon_ndlink_t *ndl; + int rc = 0; sfw_unpack_message(req); @@ -1978,9 +1978,9 @@ srpc_service_t lstcon_acceptor_service; static void lstcon_init_acceptor_service(void) { /* initialize selftest console acceptor service table */ - lstcon_acceptor_service.sv_name = "join session"; - lstcon_acceptor_service.sv_handler = lstcon_acceptor_handle; - lstcon_acceptor_service.sv_id = SRPC_SERVICE_JOIN; + lstcon_acceptor_service.sv_name = "join session"; + lstcon_acceptor_service.sv_handler = lstcon_acceptor_handle; + lstcon_acceptor_service.sv_id = SRPC_SERVICE_JOIN; lstcon_acceptor_service.sv_wi_total = SFW_FRWK_WI_MAX; } @@ -1992,19 +1992,19 @@ static DECLARE_IOCTL_HANDLER(lstcon_ioctl_handler, lstcon_ioctl_entry); int lstcon_console_init(void) { - int i; - int rc; + int i; + int rc; memset(&console_session, 0, sizeof(lstcon_session_t)); - console_session.ses_id = LST_INVALID_SID; - console_session.ses_state = LST_SESSION_NONE; - console_session.ses_timeout = 0; - console_session.ses_force = 0; - console_session.ses_expired = 0; - console_session.ses_feats_updated = 0; - console_session.ses_features = LST_FEATS_MASK; - console_session.ses_laststamp = get_seconds(); + console_session.ses_id = LST_INVALID_SID; + console_session.ses_state = LST_SESSION_NONE; + console_session.ses_timeout = 0; + console_session.ses_force = 0; + console_session.ses_expired = 0; + console_session.ses_feats_updated = 0; + console_session.ses_features = LST_FEATS_MASK; + console_session.ses_laststamp = get_seconds(); mutex_init(&console_session.ses_mutex); @@ -2062,7 +2062,7 @@ out: int lstcon_console_fini(void) { - int i; + int i; libcfs_deregister_ioctl(&lstcon_ioctl_handler); diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h index e41ca89f1..c4cf0aed8 100644 --- a/drivers/staging/lustre/lnet/selftest/console.h +++ b/drivers/staging/lustre/lnet/selftest/console.h @@ -52,119 +52,121 @@ #include "conrpc.h" typedef struct lstcon_node { - lnet_process_id_t nd_id; /* id of the node */ - int nd_ref; /* reference count */ - int nd_state; /* state of the node */ - int nd_timeout; /* session timeout */ - unsigned long nd_stamp; /* timestamp of last replied RPC */ - struct lstcon_rpc nd_ping; /* ping rpc */ -} lstcon_node_t; /*** node descriptor */ + lnet_process_id_t nd_id; /* id of the node */ + int nd_ref; /* reference count */ + int nd_state; /* state of the node */ + int nd_timeout; /* session 
timeout */ + unsigned long nd_stamp; /* timestamp of last replied RPC */ + struct lstcon_rpc nd_ping; /* ping rpc */ +} lstcon_node_t; /* node descriptor */ typedef struct { - struct list_head ndl_link; /* chain on list */ - struct list_head ndl_hlink; /* chain on hash */ - lstcon_node_t *ndl_node; /* pointer to node */ -} lstcon_ndlink_t; /*** node link descriptor */ + struct list_head ndl_link; /* chain on list */ + struct list_head ndl_hlink; /* chain on hash */ + lstcon_node_t *ndl_node; /* pointer to node */ +} lstcon_ndlink_t; /* node link descriptor */ typedef struct { - struct list_head grp_link; /* chain on global group list */ - int grp_ref; /* reference count */ - int grp_userland; /* has userland nodes */ - int grp_nnode; /* # of nodes */ - char grp_name[LST_NAME_SIZE]; /* group name */ + struct list_head grp_link; /* chain on global group list + */ + int grp_ref; /* reference count */ + int grp_userland; /* has userland nodes */ + int grp_nnode; /* # of nodes */ + char grp_name[LST_NAME_SIZE]; /* group name */ - struct list_head grp_trans_list; /* transaction list */ - struct list_head grp_ndl_list; /* nodes list */ - struct list_head grp_ndl_hash[0];/* hash table for nodes */ -} lstcon_group_t; /*** (alias of nodes) group descriptor */ + struct list_head grp_trans_list; /* transaction list */ + struct list_head grp_ndl_list; /* nodes list */ + struct list_head grp_ndl_hash[0]; /* hash table for nodes */ +} lstcon_group_t; /* (alias of nodes) group descriptor */ -#define LST_BATCH_IDLE 0xB0 /* idle batch */ -#define LST_BATCH_RUNNING 0xB1 /* running batch */ +#define LST_BATCH_IDLE 0xB0 /* idle batch */ +#define LST_BATCH_RUNNING 0xB1 /* running batch */ typedef struct lstcon_tsb_hdr { - lst_bid_t tsb_id; /* batch ID */ - int tsb_index; /* test index */ + lst_bid_t tsb_id; /* batch ID */ + int tsb_index; /* test index */ } lstcon_tsb_hdr_t; typedef struct { - lstcon_tsb_hdr_t bat_hdr; /* test_batch header */ - struct list_head bat_link; /* chain on session's batches list */ - int bat_ntest; /* # of test */ - int bat_state; /* state of the batch */ - int bat_arg; /* parameter for run|stop, timeout for run, force for stop */ - char bat_name[LST_NAME_SIZE]; /* name of batch */ - - struct list_head bat_test_list; /* list head of tests (lstcon_test_t) */ - struct list_head bat_trans_list; /* list head of transaction */ - struct list_head bat_cli_list; /* list head of client nodes (lstcon_node_t) */ - struct list_head *bat_cli_hash; /* hash table of client nodes */ - struct list_head bat_srv_list; /* list head of server nodes */ - struct list_head *bat_srv_hash; /* hash table of server nodes */ -} lstcon_batch_t; /*** (tests ) batch descriptor */ + lstcon_tsb_hdr_t bat_hdr; /* test_batch header */ + struct list_head bat_link; /* chain on session's batches list */ + int bat_ntest; /* # of test */ + int bat_state; /* state of the batch */ + int bat_arg; /* parameter for run|stop, timeout + * for run, force for stop */ + char bat_name[LST_NAME_SIZE];/* name of batch */ + + struct list_head bat_test_list; /* list head of tests (lstcon_test_t) + */ + struct list_head bat_trans_list; /* list head of transaction */ + struct list_head bat_cli_list; /* list head of client nodes + * (lstcon_node_t) */ + struct list_head *bat_cli_hash; /* hash table of client nodes */ + struct list_head bat_srv_list; /* list head of server nodes */ + struct list_head *bat_srv_hash; /* hash table of server nodes */ +} lstcon_batch_t; /* (tests ) batch descriptor */ typedef struct lstcon_test { - 
lstcon_tsb_hdr_t tes_hdr; /* test batch header */ - struct list_head tes_link; /* chain on batch's tests list */ - lstcon_batch_t *tes_batch; /* pointer to batch */ + lstcon_tsb_hdr_t tes_hdr; /* test batch header */ + struct list_head tes_link; /* chain on batch's tests list */ + lstcon_batch_t *tes_batch; /* pointer to batch */ - int tes_type; /* type of the test, i.e: bulk, ping */ - int tes_stop_onerr; /* stop on error */ - int tes_oneside; /* one-sided test */ - int tes_concur; /* concurrency */ - int tes_loop; /* loop count */ - int tes_dist; /* nodes distribution of target group */ - int tes_span; /* nodes span of target group */ - int tes_cliidx; /* client index, used for RPC creating */ + int tes_type; /* type of the test, i.e: bulk, ping */ + int tes_stop_onerr; /* stop on error */ + int tes_oneside; /* one-sided test */ + int tes_concur; /* concurrency */ + int tes_loop; /* loop count */ + int tes_dist; /* nodes distribution of target group */ + int tes_span; /* nodes span of target group */ + int tes_cliidx; /* client index, used for RPC creating */ - struct list_head tes_trans_list; /* transaction list */ - lstcon_group_t *tes_src_grp; /* group run the test */ - lstcon_group_t *tes_dst_grp; /* target group */ + struct list_head tes_trans_list; /* transaction list */ + lstcon_group_t *tes_src_grp; /* group run the test */ + lstcon_group_t *tes_dst_grp; /* target group */ - int tes_paramlen; /* test parameter length */ - char tes_param[0]; /* test parameter */ -} lstcon_test_t; /*** a single test descriptor */ + int tes_paramlen; /* test parameter length */ + char tes_param[0]; /* test parameter */ +} lstcon_test_t; /* a single test descriptor */ -#define LST_GLOBAL_HASHSIZE 503 /* global nodes hash table size */ -#define LST_NODE_HASHSIZE 239 /* node hash table (for batch or group) */ +#define LST_GLOBAL_HASHSIZE 503 /* global nodes hash table size */ +#define LST_NODE_HASHSIZE 239 /* node hash table (for batch or group) */ -#define LST_SESSION_NONE 0x0 /* no session */ -#define LST_SESSION_ACTIVE 0x1 /* working session */ +#define LST_SESSION_NONE 0x0 /* no session */ +#define LST_SESSION_ACTIVE 0x1 /* working session */ -#define LST_CONSOLE_TIMEOUT 300 /* default console timeout */ +#define LST_CONSOLE_TIMEOUT 300 /* default console timeout */ typedef struct { - struct mutex ses_mutex; /* only 1 thread in session */ - lst_sid_t ses_id; /* global session id */ - int ses_key; /* local session key */ - int ses_state; /* state of session */ - int ses_timeout; /* timeout in seconds */ - time_t ses_laststamp; /* last operation stamp (seconds) */ - /** tests features of the session */ - unsigned ses_features; - /** features are synced with remote test nodes */ - unsigned ses_feats_updated:1; - /** force creating */ - unsigned ses_force:1; - /** session is shutting down */ - unsigned ses_shutdown:1; - /** console is timedout */ - unsigned ses_expired:1; - __u64 ses_id_cookie; /* batch id cookie */ - char ses_name[LST_NAME_SIZE]; /* session name */ - lstcon_rpc_trans_t *ses_ping; /* session pinger */ - stt_timer_t ses_ping_timer; /* timer for pinger */ - lstcon_trans_stat_t ses_trans_stat; /* transaction stats */ - - struct list_head ses_trans_list; /* global list of transaction */ - struct list_head ses_grp_list; /* global list of groups */ - struct list_head ses_bat_list; /* global list of batches */ - struct list_head ses_ndl_list; /* global list of nodes */ - struct list_head *ses_ndl_hash; /* hash table of nodes */ - - spinlock_t ses_rpc_lock; /* serialize */ - atomic_t 
ses_rpc_counter;/* # of initialized RPCs */ - struct list_head ses_rpc_freelist; /* idle console rpc */ -} lstcon_session_t; /*** session descriptor */ + struct mutex ses_mutex; /* only 1 thread in session */ + lst_sid_t ses_id; /* global session id */ + int ses_key; /* local session key */ + int ses_state; /* state of session */ + int ses_timeout; /* timeout in seconds */ + time_t ses_laststamp; /* last operation stamp (seconds) + */ + unsigned ses_features; /* tests features of the session + */ + unsigned ses_feats_updated:1; /* features are synced with + * remote test nodes */ + unsigned ses_force:1; /* force creating */ + unsigned ses_shutdown:1; /* session is shutting down */ + unsigned ses_expired:1; /* console is timedout */ + __u64 ses_id_cookie; /* batch id cookie */ + char ses_name[LST_NAME_SIZE];/* session name */ + lstcon_rpc_trans_t *ses_ping; /* session pinger */ + stt_timer_t ses_ping_timer; /* timer for pinger */ + lstcon_trans_stat_t ses_trans_stat; /* transaction stats */ + + struct list_head ses_trans_list; /* global list of transaction */ + struct list_head ses_grp_list; /* global list of groups */ + struct list_head ses_bat_list; /* global list of batches */ + struct list_head ses_ndl_list; /* global list of nodes */ + struct list_head *ses_ndl_hash; /* hash table of nodes */ + + spinlock_t ses_rpc_lock; /* serialize */ + atomic_t ses_rpc_counter; /* # of initialized RPCs */ + struct list_head ses_rpc_freelist; /* idle console rpc */ +} lstcon_session_t; /* session descriptor */ extern lstcon_session_t console_session; diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c index a93a90de0..7c5185a2a 100644 --- a/drivers/staging/lustre/lnet/selftest/framework.c +++ b/drivers/staging/lustre/lnet/selftest/framework.c @@ -53,20 +53,20 @@ static int rpc_timeout = 64; module_param(rpc_timeout, int, 0644); MODULE_PARM_DESC(rpc_timeout, "rpc timeout in seconds (64 by default, 0 == never)"); -#define sfw_unpack_id(id) \ -do { \ +#define sfw_unpack_id(id) \ +do { \ __swab64s(&(id).nid); \ __swab32s(&(id).pid); \ } while (0) -#define sfw_unpack_sid(sid) \ -do { \ +#define sfw_unpack_sid(sid) \ +do { \ __swab64s(&(sid).ses_nid); \ __swab64s(&(sid).ses_stamp); \ } while (0) -#define sfw_unpack_fw_counters(fc) \ -do { \ +#define sfw_unpack_fw_counters(fc) \ +do { \ __swab32s(&(fc).running_ms); \ __swab32s(&(fc).active_batches); \ __swab32s(&(fc).zombie_sessions); \ @@ -75,7 +75,7 @@ do { \ } while (0) #define sfw_unpack_rpc_counters(rc) \ -do { \ +do { \ __swab32s(&(rc).errors); \ __swab32s(&(rc).rpcs_sent); \ __swab32s(&(rc).rpcs_rcvd); \ @@ -86,7 +86,7 @@ do { \ } while (0) #define sfw_unpack_lnet_counters(lc) \ -do { \ +do { \ __swab32s(&(lc).errors); \ __swab32s(&(lc).msgs_max); \ __swab32s(&(lc).msgs_alloc); \ @@ -104,14 +104,14 @@ do { \ #define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive) != 0) static struct smoketest_framework { - struct list_head fw_zombie_rpcs; /* RPCs to be recycled */ - struct list_head fw_zombie_sessions; /* stopping sessions */ - struct list_head fw_tests; /* registered test cases */ - atomic_t fw_nzombies; /* # zombie sessions */ - spinlock_t fw_lock; /* serialise */ - sfw_session_t *fw_session; /* _the_ session */ - int fw_shuttingdown; /* shutdown in progress */ - srpc_server_rpc_t *fw_active_srpc; /* running RPC */ + struct list_head fw_zombie_rpcs; /* RPCs to be recycled */ + struct list_head fw_zombie_sessions; /* stopping sessions */ + struct list_head fw_tests; /* registered test 
cases */ + atomic_t fw_nzombies; /* # zombie sessions */ + spinlock_t fw_lock; /* serialise */ + sfw_session_t *fw_session; /* _the_ session */ + int fw_shuttingdown; /* shutdown in progress */ + srpc_server_rpc_t *fw_active_srpc; /* running RPC */ } sfw_data; /* forward ref's */ @@ -160,7 +160,7 @@ static void sfw_add_session_timer(void) { sfw_session_t *sn = sfw_data.fw_session; - stt_timer_t *timer = &sn->sn_timer; + stt_timer_t *timer = &sn->sn_timer; LASSERT(!sfw_data.fw_shuttingdown); @@ -199,8 +199,8 @@ sfw_deactivate_session(void) __must_hold(&sfw_data.fw_lock) { sfw_session_t *sn = sfw_data.fw_session; - int nactive = 0; - sfw_batch_t *tsb; + int nactive = 0; + sfw_batch_t *tsb; sfw_test_case_t *tsc; if (sn == NULL) return; @@ -273,7 +273,7 @@ sfw_init_session(sfw_session_t *sn, lst_sid_t sid, strlcpy(&sn->sn_name[0], name, sizeof(sn->sn_name)); sn->sn_timer_active = 0; - sn->sn_id = sid; + sn->sn_id = sid; sn->sn_features = features; sn->sn_timeout = session_timeout; sn->sn_started = cfs_time_current(); @@ -287,8 +287,8 @@ sfw_init_session(sfw_session_t *sn, lst_sid_t sid, static void sfw_server_rpc_done(struct srpc_server_rpc *rpc) { - struct srpc_service *sv = rpc->srpc_scd->scd_svc; - int status = rpc->srpc_status; + struct srpc_service *sv = rpc->srpc_scd->scd_svc; + int status = rpc->srpc_status; CDEBUG(D_NET, "Incoming framework RPC done: service %s, peer %s, status %s:%d\n", @@ -327,7 +327,7 @@ static sfw_batch_t * sfw_find_batch(lst_bid_t bid) { sfw_session_t *sn = sfw_data.fw_session; - sfw_batch_t *bat; + sfw_batch_t *bat; LASSERT(sn != NULL); @@ -343,7 +343,7 @@ static sfw_batch_t * sfw_bid2batch(lst_bid_t bid) { sfw_session_t *sn = sfw_data.fw_session; - sfw_batch_t *bat; + sfw_batch_t *bat; LASSERT(sn != NULL); @@ -368,10 +368,10 @@ sfw_bid2batch(lst_bid_t bid) static int sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply) { - sfw_session_t *sn = sfw_data.fw_session; + sfw_session_t *sn = sfw_data.fw_session; sfw_counters_t *cnt = &reply->str_fw; - sfw_batch_t *bat; - struct timeval tv; + sfw_batch_t *bat; + struct timeval tv; reply->str_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id; @@ -412,9 +412,9 @@ int sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply) { sfw_session_t *sn = sfw_data.fw_session; - srpc_msg_t *msg = container_of(request, srpc_msg_t, + srpc_msg_t *msg = container_of(request, srpc_msg_t, msg_body.mksn_reqst); - int cplen = 0; + int cplen = 0; if (request->mksn_sid.ses_nid == LNET_NID_ANY) { reply->mksn_sid = (sn == NULL) ? 
LST_INVALID_SID : sn->sn_id; @@ -533,7 +533,7 @@ sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply) static void sfw_test_rpc_fini(srpc_client_rpc_t *rpc) { - sfw_test_unit_t *tsu = rpc->crpc_priv; + sfw_test_unit_t *tsu = rpc->crpc_priv; sfw_test_instance_t *tsi = tsu->tsu_instance; /* Called with hold of tsi->tsi_lock */ @@ -544,9 +544,9 @@ sfw_test_rpc_fini(srpc_client_rpc_t *rpc) static inline int sfw_test_buffers(sfw_test_instance_t *tsi) { - struct sfw_test_case *tsc = sfw_find_test_case(tsi->tsi_service); - struct srpc_service *svc = tsc->tsc_srv_service; - int nbuf; + struct sfw_test_case *tsc = sfw_find_test_case(tsi->tsi_service); + struct srpc_service *svc = tsc->tsc_srv_service; + int nbuf; nbuf = min(svc->sv_wi_total, tsi->tsi_loop) / svc->sv_ncpts; return max(SFW_TEST_WI_MIN, nbuf + SFW_TEST_WI_EXTRA); @@ -555,10 +555,10 @@ sfw_test_buffers(sfw_test_instance_t *tsi) static int sfw_load_test(struct sfw_test_instance *tsi) { - struct sfw_test_case *tsc; - struct srpc_service *svc; - int nbuf; - int rc; + struct sfw_test_case *tsc; + struct srpc_service *svc; + int nbuf; + int rc; LASSERT(tsi != NULL); tsc = sfw_find_test_case(tsi->tsi_service); @@ -611,7 +611,7 @@ static void sfw_destroy_test_instance(sfw_test_instance_t *tsi) { srpc_client_rpc_t *rpc; - sfw_test_unit_t *tsu; + sfw_test_unit_t *tsu; if (!tsi->tsi_is_client) goto clean; @@ -728,14 +728,14 @@ sfw_unpack_addtest_req(srpc_msg_t *msg) static int sfw_add_test_instance(sfw_batch_t *tsb, srpc_server_rpc_t *rpc) { - srpc_msg_t *msg = &rpc->srpc_reqstbuf->buf_msg; - srpc_test_reqst_t *req = &msg->msg_body.tes_reqst; - srpc_bulk_t *bk = rpc->srpc_bulk; - int ndest = req->tsr_ndest; - sfw_test_unit_t *tsu; + srpc_msg_t *msg = &rpc->srpc_reqstbuf->buf_msg; + srpc_test_reqst_t *req = &msg->msg_body.tes_reqst; + srpc_bulk_t *bk = rpc->srpc_bulk; + int ndest = req->tsr_ndest; + sfw_test_unit_t *tsu; sfw_test_instance_t *tsi; - int i; - int rc; + int i; + int rc; LIBCFS_ALLOC(tsi, sizeof(*tsi)); if (tsi == NULL) { @@ -751,9 +751,9 @@ sfw_add_test_instance(sfw_batch_t *tsb, srpc_server_rpc_t *rpc) INIT_LIST_HEAD(&tsi->tsi_active_rpcs); tsi->tsi_stopping = 0; - tsi->tsi_batch = tsb; - tsi->tsi_loop = req->tsr_loop; - tsi->tsi_concur = req->tsr_concur; + tsi->tsi_batch = tsb; + tsi->tsi_loop = req->tsr_loop; + tsi->tsi_concur = req->tsr_concur; tsi->tsi_service = req->tsr_service; tsi->tsi_is_client = !!(req->tsr_is_client); tsi->tsi_stoptsu_onerr = !!(req->tsr_stop_onerr); @@ -782,8 +782,8 @@ sfw_add_test_instance(sfw_batch_t *tsb, srpc_server_rpc_t *rpc) for (i = 0; i < ndest; i++) { lnet_process_id_packed_t *dests; - lnet_process_id_packed_t id; - int j; + lnet_process_id_packed_t id; + int j; dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page); LASSERT(dests != NULL); /* my pages are within KVM always */ @@ -824,8 +824,8 @@ static void sfw_test_unit_done(sfw_test_unit_t *tsu) { sfw_test_instance_t *tsi = tsu->tsu_instance; - sfw_batch_t *tsb = tsi->tsi_batch; - sfw_session_t *sn = tsb->bat_session; + sfw_batch_t *tsb = tsi->tsi_batch; + sfw_session_t *sn = tsb->bat_session; LASSERT(sfw_test_active(tsi)); @@ -866,9 +866,9 @@ sfw_test_unit_done(sfw_test_unit_t *tsu) static void sfw_test_rpc_done(srpc_client_rpc_t *rpc) { - sfw_test_unit_t *tsu = rpc->crpc_priv; + sfw_test_unit_t *tsu = rpc->crpc_priv; sfw_test_instance_t *tsi = tsu->tsu_instance; - int done = 0; + int done = 0; tsi->tsi_ops->tso_done_rpc(tsu, rpc); @@ -904,7 +904,7 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t 
peer, unsigned features, int nblk, int blklen, srpc_client_rpc_t **rpcpp) { - srpc_client_rpc_t *rpc = NULL; + srpc_client_rpc_t *rpc = NULL; sfw_test_instance_t *tsi = tsu->tsu_instance; spin_lock(&tsi->tsi_lock); @@ -945,9 +945,9 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer, static int sfw_run_test(swi_workitem_t *wi) { - sfw_test_unit_t *tsu = wi->swi_workitem.wi_data; + sfw_test_unit_t *tsu = wi->swi_workitem.wi_data; sfw_test_instance_t *tsi = tsu->tsu_instance; - srpc_client_rpc_t *rpc = NULL; + srpc_client_rpc_t *rpc = NULL; LASSERT(wi == &tsu->tsu_worker); @@ -995,8 +995,8 @@ test_done: static int sfw_run_batch(sfw_batch_t *tsb) { - swi_workitem_t *wi; - sfw_test_unit_t *tsu; + swi_workitem_t *wi; + sfw_test_unit_t *tsu; sfw_test_instance_t *tsi; if (sfw_batch_active(tsb)) { @@ -1032,7 +1032,7 @@ int sfw_stop_batch(sfw_batch_t *tsb, int force) { sfw_test_instance_t *tsi; - srpc_client_rpc_t *rpc; + srpc_client_rpc_t *rpc; if (!sfw_batch_active(tsb)) { CDEBUG(D_NET, "Batch %llu inactive\n", tsb->bat_id.bat_id); @@ -1118,11 +1118,11 @@ sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len, static int sfw_add_test(srpc_server_rpc_t *rpc) { - sfw_session_t *sn = sfw_data.fw_session; + sfw_session_t *sn = sfw_data.fw_session; srpc_test_reply_t *reply = &rpc->srpc_replymsg.msg_body.tes_reply; srpc_test_reqst_t *request; - int rc; - sfw_batch_t *bat; + int rc; + sfw_batch_t *bat; request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst; reply->tsr_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id; @@ -1160,8 +1160,8 @@ sfw_add_test(srpc_server_rpc_t *rpc) if (request->tsr_is_client && rpc->srpc_bulk == NULL) { /* rpc will be resumed later in sfw_bulk_ready */ - int npg = sfw_id_pages(request->tsr_ndest); - int len; + int npg = sfw_id_pages(request->tsr_ndest); + int len; if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) { len = npg * PAGE_CACHE_SIZE; @@ -1189,8 +1189,8 @@ static int sfw_control_batch(srpc_batch_reqst_t *request, srpc_batch_reply_t *reply) { sfw_session_t *sn = sfw_data.fw_session; - int rc = 0; - sfw_batch_t *bat; + int rc = 0; + sfw_batch_t *bat; reply->bar_sid = (sn == NULL) ? 
LST_INVALID_SID : sn->sn_id; @@ -1229,11 +1229,11 @@ sfw_control_batch(srpc_batch_reqst_t *request, srpc_batch_reply_t *reply) static int sfw_handle_server_rpc(struct srpc_server_rpc *rpc) { - struct srpc_service *sv = rpc->srpc_scd->scd_svc; - srpc_msg_t *reply = &rpc->srpc_replymsg; - srpc_msg_t *request = &rpc->srpc_reqstbuf->buf_msg; - unsigned features = LST_FEATS_MASK; - int rc = 0; + struct srpc_service *sv = rpc->srpc_scd->scd_svc; + srpc_msg_t *reply = &rpc->srpc_replymsg; + srpc_msg_t *request = &rpc->srpc_reqstbuf->buf_msg; + unsigned features = LST_FEATS_MASK; + int rc = 0; LASSERT(sfw_data.fw_active_srpc == NULL); LASSERT(sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID); @@ -1334,8 +1334,8 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc) static int sfw_bulk_ready(struct srpc_server_rpc *rpc, int status) { - struct srpc_service *sv = rpc->srpc_scd->scd_svc; - int rc; + struct srpc_service *sv = rpc->srpc_scd->scd_svc; + int rc; LASSERT(rpc->srpc_bulk != NULL); LASSERT(sv->sv_id == SRPC_SERVICE_TEST); @@ -1640,10 +1640,10 @@ extern void brw_init_test_service(void); int sfw_startup(void) { - int i; - int rc; - int error; - srpc_service_t *sv; + int i; + int rc; + int error; + srpc_service_t *sv; sfw_test_case_t *tsc; @@ -1735,9 +1735,9 @@ sfw_startup(void) void sfw_shutdown(void) { - srpc_service_t *sv; + srpc_service_t *sv; sfw_test_case_t *tsc; - int i; + int i; spin_lock(&sfw_data.fw_lock); diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c index 7ad62f167..09b8f4649 100644 --- a/drivers/staging/lustre/lnet/selftest/module.c +++ b/drivers/staging/lustre/lnet/selftest/module.c @@ -39,7 +39,7 @@ #include "selftest.h" enum { - LST_INIT_NONE = 0, + LST_INIT_NONE = 0, LST_INIT_WI_SERIAL, LST_INIT_WI_TEST, LST_INIT_RPC, @@ -58,7 +58,7 @@ struct cfs_wi_sched **lst_sched_test; static void lnet_selftest_fini(void) { - int i; + int i; switch (lst_init_step) { case LST_INIT_CONSOLE: @@ -92,9 +92,9 @@ lnet_selftest_fini(void) static int lnet_selftest_init(void) { - int nscheds; - int rc; - int i; + int nscheds; + int rc; + int i; rc = cfs_wi_sched_create("lst_s", lnet_cpt_table(), CFS_CPT_ANY, 1, &lst_sched_serial); diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c index 644069a9f..1dab9984c 100644 --- a/drivers/staging/lustre/lnet/selftest/ping_test.c +++ b/drivers/staging/lustre/lnet/selftest/ping_test.c @@ -73,7 +73,7 @@ static void ping_client_fini(sfw_test_instance_t *tsi) { sfw_session_t *sn = tsi->tsi_batch->bat_session; - int errors; + int errors; LASSERT(sn != NULL); LASSERT(tsi->tsi_is_client); @@ -89,11 +89,11 @@ static int ping_client_prep_rpc(sfw_test_unit_t *tsu, lnet_process_id_t dest, srpc_client_rpc_t **rpc) { - srpc_ping_reqst_t *req; + srpc_ping_reqst_t *req; sfw_test_instance_t *tsi = tsu->tsu_instance; - sfw_session_t *sn = tsi->tsi_batch->bat_session; - struct timeval tv; - int rc; + sfw_session_t *sn = tsi->tsi_batch->bat_session; + struct timeval tv; + int rc; LASSERT(sn != NULL); LASSERT((sn->sn_features & ~LST_FEATS_MASK) == 0); @@ -121,10 +121,10 @@ static void ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) { sfw_test_instance_t *tsi = tsu->tsu_instance; - sfw_session_t *sn = tsi->tsi_batch->bat_session; - srpc_ping_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst; - srpc_ping_reply_t *reply = &rpc->crpc_replymsg.msg_body.ping_reply; - struct timeval tv; + sfw_session_t *sn = tsi->tsi_batch->bat_session; + 
srpc_ping_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst; + srpc_ping_reply_t *reply = &rpc->crpc_replymsg.msg_body.ping_reply; + struct timeval tv; LASSERT(sn != NULL); @@ -171,9 +171,9 @@ ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc) static int ping_server_handle(struct srpc_server_rpc *rpc) { - struct srpc_service *sv = rpc->srpc_scd->scd_svc; - srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg; - srpc_msg_t *replymsg = &rpc->srpc_replymsg; + struct srpc_service *sv = rpc->srpc_scd->scd_svc; + srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg; + srpc_msg_t *replymsg = &rpc->srpc_replymsg; srpc_ping_reqst_t *req = &reqstmsg->msg_body.ping_reqst; srpc_ping_reply_t *rep = &rpc->srpc_replymsg.msg_body.ping_reply; diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c index 080788ab7..6ae133138 100644 --- a/drivers/staging/lustre/lnet/selftest/rpc.c +++ b/drivers/staging/lustre/lnet/selftest/rpc.c @@ -90,7 +90,7 @@ void srpc_set_counters(const srpc_counters_t *cnt) static int srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob) { - nob = min(nob, (int)PAGE_CACHE_SIZE); + nob = min_t(int, nob, PAGE_CACHE_SIZE); LASSERT(nob > 0); LASSERT(i >= 0 && i < bk->bk_niov); @@ -104,7 +104,7 @@ srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob) void srpc_free_bulk(srpc_bulk_t *bk) { - int i; + int i; struct page *pg; LASSERT(bk != NULL); @@ -124,8 +124,8 @@ srpc_free_bulk(srpc_bulk_t *bk) srpc_bulk_t * srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink) { - srpc_bulk_t *bk; - int i; + srpc_bulk_t *bk; + int i; LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV); @@ -143,7 +143,7 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink) for (i = 0; i < bulk_npg; i++) { struct page *pg; - int nob; + int nob; pg = alloc_pages_node(cfs_cpt_spread_node(lnet_cpt_table(), cpt), GFP_IOFS, 0); @@ -193,11 +193,11 @@ srpc_init_server_rpc(struct srpc_server_rpc *rpc, static void srpc_service_fini(struct srpc_service *svc) { - struct srpc_service_cd *scd; - struct srpc_server_rpc *rpc; - struct srpc_buffer *buf; - struct list_head *q; - int i; + struct srpc_service_cd *scd; + struct srpc_server_rpc *rpc; + struct srpc_buffer *buf; + struct list_head *q; + int i; if (svc->sv_cpt_data == NULL) return; @@ -249,11 +249,11 @@ int srpc_add_buffer(struct swi_workitem *wi); static int srpc_service_init(struct srpc_service *svc) { - struct srpc_service_cd *scd; - struct srpc_server_rpc *rpc; - int nrpcs; - int i; - int j; + struct srpc_service_cd *scd; + struct srpc_server_rpc *rpc; + int nrpcs; + int i; + int j; svc->sv_shuttingdown = 0; @@ -357,8 +357,8 @@ srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf, int len, int options, lnet_process_id_t peer, lnet_handle_md_t *mdh, srpc_event_t *ev) { - int rc; - lnet_md_t md; + int rc; + lnet_md_t md; lnet_handle_me_t meh; rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK, @@ -397,7 +397,7 @@ srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len, int options, lnet_process_id_t peer, lnet_nid_t self, lnet_handle_md_t *mdh, srpc_event_t *ev) { - int rc; + int rc; lnet_md_t md; md.user_ptr = ev; @@ -471,9 +471,9 @@ static int srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf) __must_hold(&scd->scd_lock) { - struct srpc_service *sv = scd->scd_svc; - struct srpc_msg *msg = &buf->buf_msg; - int rc; + struct srpc_service *sv = scd->scd_svc; + struct srpc_msg *msg 
= &buf->buf_msg; + int rc; LNetInvalidateHandle(&buf->buf_mdh); list_add(&buf->buf_list, &scd->scd_buf_posted); @@ -519,9 +519,9 @@ srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf) int srpc_add_buffer(struct swi_workitem *wi) { - struct srpc_service_cd *scd = wi->swi_workitem.wi_data; - struct srpc_buffer *buf; - int rc = 0; + struct srpc_service_cd *scd = wi->swi_workitem.wi_data; + struct srpc_buffer *buf; + int rc = 0; /* it's called by workitem scheduler threads, these threads * should have been set CPT affinity, so buffers will be posted @@ -579,9 +579,9 @@ srpc_add_buffer(struct swi_workitem *wi) int srpc_service_add_buffers(struct srpc_service *sv, int nbuffer) { - struct srpc_service_cd *scd; - int rc = 0; - int i; + struct srpc_service_cd *scd; + int rc = 0; + int i; LASSERTF(nbuffer > 0, "nbuffer must be positive: %d\n", nbuffer); @@ -633,9 +633,9 @@ srpc_service_add_buffers(struct srpc_service *sv, int nbuffer) void srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer) { - struct srpc_service_cd *scd; - int num; - int i; + struct srpc_service_cd *scd; + int num; + int i; LASSERT(!sv->sv_shuttingdown); @@ -653,9 +653,9 @@ srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer) int srpc_finish_service(struct srpc_service *sv) { - struct srpc_service_cd *scd; - struct srpc_server_rpc *rpc; - int i; + struct srpc_service_cd *scd; + struct srpc_server_rpc *rpc; + int i; LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */ @@ -731,9 +731,9 @@ srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf) void srpc_abort_service(struct srpc_service *sv) { - struct srpc_service_cd *scd; - struct srpc_server_rpc *rpc; - int i; + struct srpc_service_cd *scd; + struct srpc_server_rpc *rpc; + int i; CDEBUG(D_NET, "Aborting service: id %d, name %s\n", sv->sv_id, sv->sv_name); @@ -756,10 +756,10 @@ srpc_abort_service(struct srpc_service *sv) void srpc_shutdown_service(srpc_service_t *sv) { - struct srpc_service_cd *scd; - struct srpc_server_rpc *rpc; - srpc_buffer_t *buf; - int i; + struct srpc_service_cd *scd; + struct srpc_server_rpc *rpc; + srpc_buffer_t *buf; + int i; CDEBUG(D_NET, "Shutting down service: id %d, name %s\n", sv->sv_id, sv->sv_name); @@ -792,7 +792,7 @@ static int srpc_send_request(srpc_client_rpc_t *rpc) { srpc_event_t *ev = &rpc->crpc_reqstev; - int rc; + int rc; ev->ev_fired = 0; ev->ev_data = rpc; @@ -812,8 +812,8 @@ static int srpc_prepare_reply(srpc_client_rpc_t *rpc) { srpc_event_t *ev = &rpc->crpc_replyev; - __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid; - int rc; + __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid; + int rc; ev->ev_fired = 0; ev->ev_data = rpc; @@ -835,11 +835,11 @@ srpc_prepare_reply(srpc_client_rpc_t *rpc) static int srpc_prepare_bulk(srpc_client_rpc_t *rpc) { - srpc_bulk_t *bk = &rpc->crpc_bulk; + srpc_bulk_t *bk = &rpc->crpc_bulk; srpc_event_t *ev = &rpc->crpc_bulkev; __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid; - int rc; - int opt; + int rc; + int opt; LASSERT(bk->bk_niov <= LNET_MAX_IOV); @@ -868,11 +868,11 @@ srpc_prepare_bulk(srpc_client_rpc_t *rpc) static int srpc_do_bulk(srpc_server_rpc_t *rpc) { - srpc_event_t *ev = &rpc->srpc_ev; - srpc_bulk_t *bk = rpc->srpc_bulk; - __u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid; - int rc; - int opt; + srpc_event_t *ev = &rpc->srpc_ev; + srpc_bulk_t *bk = rpc->srpc_bulk; + __u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid; + int rc; + int opt; LASSERT(bk != NULL); @@ -896,9 +896,9 
@@ srpc_do_bulk(srpc_server_rpc_t *rpc) static void srpc_server_rpc_done(srpc_server_rpc_t *rpc, int status) { - struct srpc_service_cd *scd = rpc->srpc_scd; - struct srpc_service *sv = scd->scd_svc; - srpc_buffer_t *buffer; + struct srpc_service_cd *scd = rpc->srpc_scd; + struct srpc_service *sv = scd->scd_svc; + srpc_buffer_t *buffer; LASSERT(status != 0 || rpc->srpc_wi.swi_state == SWI_STATE_DONE); @@ -959,11 +959,11 @@ srpc_server_rpc_done(srpc_server_rpc_t *rpc, int status) int srpc_handle_rpc(swi_workitem_t *wi) { - struct srpc_server_rpc *rpc = wi->swi_workitem.wi_data; - struct srpc_service_cd *scd = rpc->srpc_scd; - struct srpc_service *sv = scd->scd_svc; - srpc_event_t *ev = &rpc->srpc_ev; - int rc = 0; + struct srpc_server_rpc *rpc = wi->swi_workitem.wi_data; + struct srpc_service_cd *scd = rpc->srpc_scd; + struct srpc_service *sv = scd->scd_svc; + srpc_event_t *ev = &rpc->srpc_ev; + int rc = 0; LASSERT(wi == &rpc->srpc_wi); @@ -989,7 +989,7 @@ srpc_handle_rpc(swi_workitem_t *wi) default: LBUG(); case SWI_STATE_NEWBORN: { - srpc_msg_t *msg; + srpc_msg_t *msg; srpc_generic_reply_t *reply; msg = &rpc->srpc_reqstbuf->buf_msg; @@ -1173,10 +1173,10 @@ srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status) int srpc_send_rpc(swi_workitem_t *wi) { - int rc = 0; + int rc = 0; srpc_client_rpc_t *rpc; - srpc_msg_t *reply; - int do_bulk; + srpc_msg_t *reply; + int do_bulk; LASSERT(wi != NULL); @@ -1359,13 +1359,13 @@ srpc_post_rpc(srpc_client_rpc_t *rpc) int srpc_send_reply(struct srpc_server_rpc *rpc) { - srpc_event_t *ev = &rpc->srpc_ev; - struct srpc_msg *msg = &rpc->srpc_replymsg; - struct srpc_buffer *buffer = rpc->srpc_reqstbuf; - struct srpc_service_cd *scd = rpc->srpc_scd; - struct srpc_service *sv = scd->scd_svc; - __u64 rpyid; - int rc; + srpc_event_t *ev = &rpc->srpc_ev; + struct srpc_msg *msg = &rpc->srpc_replymsg; + struct srpc_buffer *buffer = rpc->srpc_reqstbuf; + struct srpc_service_cd *scd = rpc->srpc_scd; + struct srpc_service *sv = scd->scd_svc; + __u64 rpyid; + int rc; LASSERT(buffer != NULL); rpyid = buffer->buf_msg.msg_body.reqst.rpyid; @@ -1403,14 +1403,14 @@ srpc_send_reply(struct srpc_server_rpc *rpc) static void srpc_lnet_ev_handler(lnet_event_t *ev) { - struct srpc_service_cd *scd; - srpc_event_t *rpcev = ev->md.user_ptr; + struct srpc_service_cd *scd; + srpc_event_t *rpcev = ev->md.user_ptr; srpc_client_rpc_t *crpc; srpc_server_rpc_t *srpc; - srpc_buffer_t *buffer; - srpc_service_t *sv; - srpc_msg_t *msg; - srpc_msg_type_t type; + srpc_buffer_t *buffer; + srpc_service_t *sv; + srpc_msg_t *msg; + srpc_msg_type_t type; LASSERT(!in_interrupt()); diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h index fbeb75fe5..b7b00c6b1 100644 --- a/drivers/staging/lustre/lnet/selftest/rpc.h +++ b/drivers/staging/lustre/lnet/selftest/rpc.h @@ -79,60 +79,61 @@ typedef struct { } WIRE_ATTR srpc_generic_reqst_t; typedef struct { - __u32 status; - lst_sid_t sid; + __u32 status; + lst_sid_t sid; } WIRE_ATTR srpc_generic_reply_t; /* FRAMEWORK RPCs */ typedef struct { - __u64 mksn_rpyid; /* reply buffer matchbits */ - lst_sid_t mksn_sid; /* session id */ - __u32 mksn_force; /* use brute force */ + __u64 mksn_rpyid; /* reply buffer matchbits */ + lst_sid_t mksn_sid; /* session id */ + __u32 mksn_force; /* use brute force */ char mksn_name[LST_NAME_SIZE]; -} WIRE_ATTR srpc_mksn_reqst_t; /* make session request */ +} WIRE_ATTR srpc_mksn_reqst_t; /* make session request */ typedef struct { - __u32 mksn_status; /* session status */ - 
lst_sid_t mksn_sid; /* session id */ - __u32 mksn_timeout; /* session timeout */ - char mksn_name[LST_NAME_SIZE]; + __u32 mksn_status; /* session status */ + lst_sid_t mksn_sid; /* session id */ + __u32 mksn_timeout; /* session timeout */ + char mksn_name[LST_NAME_SIZE]; } WIRE_ATTR srpc_mksn_reply_t; /* make session reply */ typedef struct { - __u64 rmsn_rpyid; /* reply buffer matchbits */ - lst_sid_t rmsn_sid; /* session id */ + __u64 rmsn_rpyid; /* reply buffer matchbits */ + lst_sid_t rmsn_sid; /* session id */ } WIRE_ATTR srpc_rmsn_reqst_t; /* remove session request */ typedef struct { - __u32 rmsn_status; - lst_sid_t rmsn_sid; /* session id */ + __u32 rmsn_status; + lst_sid_t rmsn_sid; /* session id */ } WIRE_ATTR srpc_rmsn_reply_t; /* remove session reply */ typedef struct { - __u64 join_rpyid; /* reply buffer matchbits */ - lst_sid_t join_sid; /* session id to join */ - char join_group[LST_NAME_SIZE]; /* group name */ + __u64 join_rpyid; /* reply buffer matchbits */ + lst_sid_t join_sid; /* session id to join */ + char join_group[LST_NAME_SIZE]; /* group name */ } WIRE_ATTR srpc_join_reqst_t; typedef struct { - __u32 join_status; /* returned status */ - lst_sid_t join_sid; /* session id */ - __u32 join_timeout; /* # seconds' inactivity to expire */ - char join_session[LST_NAME_SIZE]; /* session name */ + __u32 join_status; /* returned status */ + lst_sid_t join_sid; /* session id */ + __u32 join_timeout; /* # seconds' inactivity to + * expire */ + char join_session[LST_NAME_SIZE]; /* session name */ } WIRE_ATTR srpc_join_reply_t; typedef struct { - __u64 dbg_rpyid; /* reply buffer matchbits */ - lst_sid_t dbg_sid; /* session id */ - __u32 dbg_flags; /* bitmap of debug */ + __u64 dbg_rpyid; /* reply buffer matchbits */ + lst_sid_t dbg_sid; /* session id */ + __u32 dbg_flags; /* bitmap of debug */ } WIRE_ATTR srpc_debug_reqst_t; typedef struct { - __u32 dbg_status; /* returned code */ - lst_sid_t dbg_sid; /* session id */ - __u32 dbg_timeout; /* session timeout */ - __u32 dbg_nbatch; /* # of batches in the node */ - char dbg_name[LST_NAME_SIZE]; /* session name */ + __u32 dbg_status; /* returned code */ + lst_sid_t dbg_sid; /* session id */ + __u32 dbg_timeout; /* session timeout */ + __u32 dbg_nbatch; /* # of batches in the node */ + char dbg_name[LST_NAME_SIZE]; /* session name */ } WIRE_ATTR srpc_debug_reply_t; #define SRPC_BATCH_OPC_RUN 1 @@ -140,55 +141,51 @@ typedef struct { #define SRPC_BATCH_OPC_QUERY 3 typedef struct { - __u64 bar_rpyid; /* reply buffer matchbits */ - lst_sid_t bar_sid; /* session id */ - lst_bid_t bar_bid; /* batch id */ - __u32 bar_opc; /* create/start/stop batch */ - __u32 bar_testidx; /* index of test */ - __u32 bar_arg; /* parameters */ + __u64 bar_rpyid; /* reply buffer matchbits */ + lst_sid_t bar_sid; /* session id */ + lst_bid_t bar_bid; /* batch id */ + __u32 bar_opc; /* create/start/stop batch */ + __u32 bar_testidx; /* index of test */ + __u32 bar_arg; /* parameters */ } WIRE_ATTR srpc_batch_reqst_t; typedef struct { - __u32 bar_status; /* status of request */ - lst_sid_t bar_sid; /* session id */ - __u32 bar_active; /* # of active tests in batch/test */ - __u32 bar_time; /* remained time */ + __u32 bar_status; /* status of request */ + lst_sid_t bar_sid; /* session id */ + __u32 bar_active; /* # of active tests in batch/test */ + __u32 bar_time; /* remained time */ } WIRE_ATTR srpc_batch_reply_t; typedef struct { - __u64 str_rpyid; /* reply buffer matchbits */ - lst_sid_t str_sid; /* session id */ - __u32 str_type; /* type of stat */ + __u64 
str_rpyid; /* reply buffer matchbits */ + lst_sid_t str_sid; /* session id */ + __u32 str_type; /* type of stat */ } WIRE_ATTR srpc_stat_reqst_t; typedef struct { - __u32 str_status; - lst_sid_t str_sid; - sfw_counters_t str_fw; - srpc_counters_t str_rpc; - lnet_counters_t str_lnet; + __u32 str_status; + lst_sid_t str_sid; + sfw_counters_t str_fw; + srpc_counters_t str_rpc; + lnet_counters_t str_lnet; } WIRE_ATTR srpc_stat_reply_t; typedef struct { - __u32 blk_opc; /* bulk operation code */ - __u32 blk_npg; /* # of pages */ - __u32 blk_flags; /* reserved flags */ + __u32 blk_opc; /* bulk operation code */ + __u32 blk_npg; /* # of pages */ + __u32 blk_flags; /* reserved flags */ } WIRE_ATTR test_bulk_req_t; typedef struct { - /** bulk operation code */ - __u16 blk_opc; - /** data check flags */ - __u16 blk_flags; - /** data length */ - __u32 blk_len; - /** reserved: offset */ - __u32 blk_offset; + __u16 blk_opc; /* bulk operation code */ + __u16 blk_flags; /* data check flags */ + __u32 blk_len; /* data length */ + __u32 blk_offset; /* reserved: offset */ } WIRE_ATTR test_bulk_req_v1_t; typedef struct { - __u32 png_size; /* size of ping message */ - __u32 png_flags; /* reserved flags */ + __u32 png_size; /* size of ping message */ + __u32 png_flags; /* reserved flags */ } WIRE_ATTR test_ping_req_t; typedef struct { @@ -197,8 +194,8 @@ typedef struct { lst_sid_t tsr_sid; /* session id */ lst_bid_t tsr_bid; /* batch id */ __u32 tsr_service; /* test type: bulk|ping|... */ - /* test client loop count or # server buffers needed */ - __u32 tsr_loop; + __u32 tsr_loop; /* test client loop count or + * # server buffers needed */ __u32 tsr_concur; /* concurrency of test */ __u8 tsr_is_client; /* is test client or not */ __u8 tsr_stop_onerr; /* stop on error */ @@ -234,8 +231,8 @@ typedef struct { typedef struct { __u64 brw_rpyid; /* reply buffer matchbits */ __u64 brw_bulkid; /* bulk buffer matchbits */ - __u32 brw_rw; /* read or write */ - __u32 brw_len; /* bulk data len */ + __u32 brw_rw; /* read or write */ + __u32 brw_len; /* bulk data len */ __u32 brw_flags; /* bulk data patterns */ } WIRE_ATTR srpc_brw_reqst_t; /* bulk r/w request */ @@ -243,20 +240,16 @@ typedef struct { __u32 brw_status; } WIRE_ATTR srpc_brw_reply_t; /* bulk r/w reply */ -#define SRPC_MSG_MAGIC 0xeeb0f00d -#define SRPC_MSG_VERSION 1 +#define SRPC_MSG_MAGIC 0xeeb0f00d +#define SRPC_MSG_VERSION 1 typedef struct srpc_msg { - /** magic number */ - __u32 msg_magic; - /** message version number */ - __u32 msg_version; - /** type of message body: srpc_msg_type_t */ - __u32 msg_type; + __u32 msg_magic; /* magic number */ + __u32 msg_version; /* message version number */ + __u32 msg_type; /* type of message body: srpc_msg_type_t */ __u32 msg_reserved0; __u32 msg_reserved1; - /** test session features */ - __u32 msg_ses_feats; + __u32 msg_ses_feats; /* test session features */ union { srpc_generic_reqst_t reqst; srpc_generic_reply_t reply; diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h index d48701834..7939e4e04 100644 --- a/drivers/staging/lustre/lnet/selftest/selftest.h +++ b/drivers/staging/lustre/lnet/selftest/selftest.h @@ -57,14 +57,14 @@ #endif -#define SWI_STATE_NEWBORN 0 -#define SWI_STATE_REPLY_SUBMITTED 1 -#define SWI_STATE_REPLY_SENT 2 -#define SWI_STATE_REQUEST_SUBMITTED 3 -#define SWI_STATE_REQUEST_SENT 4 -#define SWI_STATE_REPLY_RECEIVED 5 -#define SWI_STATE_BULK_STARTED 6 -#define SWI_STATE_DONE 10 +#define SWI_STATE_NEWBORN 0 +#define 
SWI_STATE_REPLY_SUBMITTED 1 +#define SWI_STATE_REPLY_SENT 2 +#define SWI_STATE_REQUEST_SUBMITTED 3 +#define SWI_STATE_REQUEST_SENT 4 +#define SWI_STATE_REPLY_RECEIVED 5 +#define SWI_STATE_BULK_STARTED 6 +#define SWI_STATE_DONE 10 /* forward refs */ struct srpc_service; @@ -75,24 +75,24 @@ struct sfw_test_instance; /* services below SRPC_FRAMEWORK_SERVICE_MAX_ID are framework * services, e.g. create/modify session. */ -#define SRPC_SERVICE_DEBUG 0 -#define SRPC_SERVICE_MAKE_SESSION 1 -#define SRPC_SERVICE_REMOVE_SESSION 2 -#define SRPC_SERVICE_BATCH 3 -#define SRPC_SERVICE_TEST 4 -#define SRPC_SERVICE_QUERY_STAT 5 -#define SRPC_SERVICE_JOIN 6 -#define SRPC_FRAMEWORK_SERVICE_MAX_ID 10 +#define SRPC_SERVICE_DEBUG 0 +#define SRPC_SERVICE_MAKE_SESSION 1 +#define SRPC_SERVICE_REMOVE_SESSION 2 +#define SRPC_SERVICE_BATCH 3 +#define SRPC_SERVICE_TEST 4 +#define SRPC_SERVICE_QUERY_STAT 5 +#define SRPC_SERVICE_JOIN 6 +#define SRPC_FRAMEWORK_SERVICE_MAX_ID 10 /* other services start from SRPC_FRAMEWORK_SERVICE_MAX_ID+1 */ -#define SRPC_SERVICE_BRW 11 -#define SRPC_SERVICE_PING 12 -#define SRPC_SERVICE_MAX_ID 12 +#define SRPC_SERVICE_BRW 11 +#define SRPC_SERVICE_PING 12 +#define SRPC_SERVICE_MAX_ID 12 -#define SRPC_REQUEST_PORTAL 50 +#define SRPC_REQUEST_PORTAL 50 /* a lazy portal for framework RPC requests */ -#define SRPC_FRAMEWORK_REQUEST_PORTAL 51 +#define SRPC_FRAMEWORK_REQUEST_PORTAL 51 /* all reply/bulk RDMAs go to this portal */ -#define SRPC_RDMA_PORTAL 52 +#define SRPC_RDMA_PORTAL 52 static inline srpc_msg_type_t srpc_service2request (int service) @@ -136,7 +136,8 @@ srpc_service2reply (int service) } typedef enum { - SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source) received */ + SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source) + * received */ SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */ SRPC_BULK_GET_RPLD = 3, /* active bulk GET replied (sink) */ SRPC_REPLY_RCVD = 4, /* incoming reply received */ @@ -149,114 +150,114 @@ typedef enum { typedef struct { srpc_event_type_t ev_type; /* what's up */ lnet_event_kind_t ev_lnet; /* LNet event type */ - int ev_fired; /* LNet event fired? */ - int ev_status; /* LNet event status */ - void *ev_data; /* owning server/client RPC */ + int ev_fired; /* LNet event fired? 
*/ + int ev_status; /* LNet event status */ + void *ev_data; /* owning server/client RPC */ } srpc_event_t; typedef struct { - int bk_len; /* len of bulk data */ + int bk_len; /* len of bulk data */ lnet_handle_md_t bk_mdh; - int bk_sink; /* sink/source */ - int bk_niov; /* # iov in bk_iovs */ + int bk_sink; /* sink/source */ + int bk_niov; /* # iov in bk_iovs */ lnet_kiov_t bk_iovs[0]; } srpc_bulk_t; /* bulk descriptor */ /* message buffer descriptor */ typedef struct srpc_buffer { - struct list_head buf_list; /* chain on srpc_service::*_msgq */ - srpc_msg_t buf_msg; - lnet_handle_md_t buf_mdh; - lnet_nid_t buf_self; - lnet_process_id_t buf_peer; + struct list_head buf_list; /* chain on srpc_service::*_msgq */ + srpc_msg_t buf_msg; + lnet_handle_md_t buf_mdh; + lnet_nid_t buf_self; + lnet_process_id_t buf_peer; } srpc_buffer_t; struct swi_workitem; typedef int (*swi_action_t) (struct swi_workitem *); typedef struct swi_workitem { - struct cfs_wi_sched *swi_sched; - cfs_workitem_t swi_workitem; - swi_action_t swi_action; - int swi_state; + struct cfs_wi_sched *swi_sched; + cfs_workitem_t swi_workitem; + swi_action_t swi_action; + int swi_state; } swi_workitem_t; /* server-side state of a RPC */ typedef struct srpc_server_rpc { /* chain on srpc_service::*_rpcq */ - struct list_head srpc_list; + struct list_head srpc_list; struct srpc_service_cd *srpc_scd; - swi_workitem_t srpc_wi; - srpc_event_t srpc_ev; /* bulk/reply event */ - lnet_nid_t srpc_self; - lnet_process_id_t srpc_peer; - srpc_msg_t srpc_replymsg; - lnet_handle_md_t srpc_replymdh; - srpc_buffer_t *srpc_reqstbuf; - srpc_bulk_t *srpc_bulk; - - unsigned int srpc_aborted; /* being given up */ - int srpc_status; - void (*srpc_done)(struct srpc_server_rpc *); + swi_workitem_t srpc_wi; + srpc_event_t srpc_ev; /* bulk/reply event */ + lnet_nid_t srpc_self; + lnet_process_id_t srpc_peer; + srpc_msg_t srpc_replymsg; + lnet_handle_md_t srpc_replymdh; + srpc_buffer_t *srpc_reqstbuf; + srpc_bulk_t *srpc_bulk; + + unsigned int srpc_aborted; /* being given up */ + int srpc_status; + void (*srpc_done)(struct srpc_server_rpc *); } srpc_server_rpc_t; /* client-side state of a RPC */ typedef struct srpc_client_rpc { - struct list_head crpc_list; /* chain on user's lists */ - spinlock_t crpc_lock; /* serialize */ - int crpc_service; - atomic_t crpc_refcount; - int crpc_timeout; /* # seconds to wait for reply */ - stt_timer_t crpc_timer; - swi_workitem_t crpc_wi; - lnet_process_id_t crpc_dest; - - void (*crpc_done)(struct srpc_client_rpc *); - void (*crpc_fini)(struct srpc_client_rpc *); - int crpc_status; /* completion status */ - void *crpc_priv; /* caller data */ + struct list_head crpc_list; /* chain on user's lists */ + spinlock_t crpc_lock; /* serialize */ + int crpc_service; + atomic_t crpc_refcount; + int crpc_timeout; /* # seconds to wait for reply */ + stt_timer_t crpc_timer; + swi_workitem_t crpc_wi; + lnet_process_id_t crpc_dest; + + void (*crpc_done)(struct srpc_client_rpc *); + void (*crpc_fini)(struct srpc_client_rpc *); + int crpc_status; /* completion status */ + void *crpc_priv; /* caller data */ /* state flags */ - unsigned int crpc_aborted:1; /* being given up */ - unsigned int crpc_closed:1; /* completed */ + unsigned int crpc_aborted:1; /* being given up */ + unsigned int crpc_closed:1; /* completed */ /* RPC events */ - srpc_event_t crpc_bulkev; /* bulk event */ - srpc_event_t crpc_reqstev; /* request event */ - srpc_event_t crpc_replyev; /* reply event */ + srpc_event_t crpc_bulkev; /* bulk event */ + srpc_event_t 
crpc_reqstev; /* request event */ + srpc_event_t crpc_replyev; /* reply event */ /* bulk, request(reqst), and reply exchanged on wire */ - srpc_msg_t crpc_reqstmsg; - srpc_msg_t crpc_replymsg; - lnet_handle_md_t crpc_reqstmdh; - lnet_handle_md_t crpc_replymdh; - srpc_bulk_t crpc_bulk; + srpc_msg_t crpc_reqstmsg; + srpc_msg_t crpc_replymsg; + lnet_handle_md_t crpc_reqstmdh; + lnet_handle_md_t crpc_replymdh; + srpc_bulk_t crpc_bulk; } srpc_client_rpc_t; -#define srpc_client_rpc_size(rpc) \ +#define srpc_client_rpc_size(rpc) \ offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov]) -#define srpc_client_rpc_addref(rpc) \ -do { \ - CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n", \ - (rpc), libcfs_id2str((rpc)->crpc_dest), \ - atomic_read(&(rpc)->crpc_refcount)); \ - LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \ - atomic_inc(&(rpc)->crpc_refcount); \ +#define srpc_client_rpc_addref(rpc) \ +do { \ + CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n", \ + (rpc), libcfs_id2str((rpc)->crpc_dest), \ + atomic_read(&(rpc)->crpc_refcount)); \ + LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \ + atomic_inc(&(rpc)->crpc_refcount); \ } while (0) -#define srpc_client_rpc_decref(rpc) \ -do { \ - CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n", \ - (rpc), libcfs_id2str((rpc)->crpc_dest), \ - atomic_read(&(rpc)->crpc_refcount)); \ - LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \ - if (atomic_dec_and_test(&(rpc)->crpc_refcount)) \ - srpc_destroy_client_rpc(rpc); \ +#define srpc_client_rpc_decref(rpc) \ +do { \ + CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n", \ + (rpc), libcfs_id2str((rpc)->crpc_dest), \ + atomic_read(&(rpc)->crpc_refcount)); \ + LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0); \ + if (atomic_dec_and_test(&(rpc)->crpc_refcount)) \ + srpc_destroy_client_rpc(rpc); \ } while (0) -#define srpc_event_pending(rpc) ((rpc)->crpc_bulkev.ev_fired == 0 || \ - (rpc)->crpc_reqstev.ev_fired == 0 || \ +#define srpc_event_pending(rpc) ((rpc)->crpc_bulkev.ev_fired == 0 || \ + (rpc)->crpc_reqstev.ev_fired == 0 || \ (rpc)->crpc_replyev.ev_fired == 0) /* CPU partition data of srpc service */ @@ -268,9 +269,9 @@ struct srpc_service_cd { /** event buffer */ srpc_event_t scd_ev; /** free RPC descriptors */ - struct list_head scd_rpc_free; + struct list_head scd_rpc_free; /** in-flight RPCs */ - struct list_head scd_rpc_active; + struct list_head scd_rpc_active; /** workitem for posting buffer */ swi_workitem_t scd_buf_wi; /** CPT id */ @@ -278,7 +279,7 @@ struct srpc_service_cd { /** error code for scd_buf_wi */ int scd_buf_err; /** timestamp for scd_buf_err */ - unsigned long scd_buf_err_stamp; + unsigned long scd_buf_err_stamp; /** total # request buffers */ int scd_buf_total; /** # posted request buffers */ @@ -290,9 +291,9 @@ struct srpc_service_cd { /** increase/decrease some buffers */ int scd_buf_adjust; /** posted message buffers */ - struct list_head scd_buf_posted; + struct list_head scd_buf_posted; /** blocked for RPC descriptor */ - struct list_head scd_buf_blocked; + struct list_head scd_buf_blocked; }; /* number of server workitems (mini-thread) for testing service */ @@ -318,40 +319,42 @@ typedef struct srpc_service { * - sv_handler: process incoming RPC request * - sv_bulk_ready: notify bulk data */ - int (*sv_handler) (srpc_server_rpc_t *); - int (*sv_bulk_ready) (srpc_server_rpc_t *, int); + int (*sv_handler) (srpc_server_rpc_t *); + int (*sv_bulk_ready) (srpc_server_rpc_t *, int); } srpc_service_t; typedef struct { - struct list_head sn_list; /* chain on fw_zombie_sessions */ - lst_sid_t sn_id; /* unique 
identifier */
- unsigned int sn_timeout; /* # seconds' inactivity to expire */
- int sn_timer_active;
- unsigned int sn_features;
- stt_timer_t sn_timer;
- struct list_head sn_batches; /* list of batches */
- char sn_name[LST_NAME_SIZE];
- atomic_t sn_refcount;
- atomic_t sn_brw_errors;
- atomic_t sn_ping_errors;
- unsigned long sn_started;
+ struct list_head sn_list; /* chain on fw_zombie_sessions */
+ lst_sid_t sn_id; /* unique identifier */
+ unsigned int sn_timeout; /* # seconds' inactivity to expire */
+ int sn_timer_active;
+ unsigned int sn_features;
+ stt_timer_t sn_timer;
+ struct list_head sn_batches; /* list of batches */
+ char sn_name[LST_NAME_SIZE];
+ atomic_t sn_refcount;
+ atomic_t sn_brw_errors;
+ atomic_t sn_ping_errors;
+ unsigned long sn_started;
 } sfw_session_t;
 
 #define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \
 (sid0).ses_stamp == (sid1).ses_stamp)
 
 typedef struct {
- struct list_head bat_list; /* chain on sn_batches */
- lst_bid_t bat_id; /* batch id */
- int bat_error; /* error code of batch */
- sfw_session_t *bat_session; /* batch's session */
- atomic_t bat_nactive; /* # of active tests */
- struct list_head bat_tests; /* test instances */
+ struct list_head bat_list; /* chain on sn_batches */
+ lst_bid_t bat_id; /* batch id */
+ int bat_error; /* error code of batch */
+ sfw_session_t *bat_session; /* batch's session */
+ atomic_t bat_nactive; /* # of active tests */
+ struct list_head bat_tests; /* test instances */
 } sfw_batch_t;
 
 typedef struct {
- int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test client */
- void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test client */
+ int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test
+ * client */
+ void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test
+ * client */
 	int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
 			    lnet_process_id_t dest,
 			    srpc_client_rpc_t **rpc); /* prep a tests rpc */
@@ -360,29 +363,31 @@ typedef struct {
 } sfw_test_client_ops_t;
 
 typedef struct sfw_test_instance {
- struct list_head tsi_list; /* chain on batch */
- int tsi_service; /* test type */
- sfw_batch_t *tsi_batch; /* batch */
- sfw_test_client_ops_t *tsi_ops; /* test client operations */
+ struct list_head tsi_list; /* chain on batch */
+ int tsi_service; /* test type */
+ sfw_batch_t *tsi_batch; /* batch */
+ sfw_test_client_ops_t *tsi_ops; /* test client operations
+ */
 	/* public parameter for all test units */
- unsigned int tsi_is_client:1; /* is test client */
- unsigned int tsi_stoptsu_onerr:1; /* stop tsu on error */
- int tsi_concur; /* concurrency */
- int tsi_loop; /* loop count */
+ unsigned int tsi_is_client:1; /* is test client */
+ unsigned int tsi_stoptsu_onerr:1; /* stop tsu on error */
+ int tsi_concur; /* concurrency */
+ int tsi_loop; /* loop count */
 	/* status of test instance */
- spinlock_t tsi_lock; /* serialize */
- unsigned int tsi_stopping:1; /* test is stopping */
- atomic_t tsi_nactive; /* # of active test unit */
- struct list_head tsi_units; /* test units */
- struct list_head tsi_free_rpcs; /* free rpcs */
- struct list_head tsi_active_rpcs; /* active rpcs */
+ spinlock_t tsi_lock; /* serialize */
+ unsigned int tsi_stopping:1; /* test is stopping */
+ atomic_t tsi_nactive; /* # of active test
+ * unit */
+ struct list_head tsi_units; /* test units */
+ struct list_head tsi_free_rpcs; /* free rpcs */
+ struct list_head tsi_active_rpcs; /* active rpcs */
 	union {
-		test_ping_req_t ping; /* ping parameter */
-		test_bulk_req_t bulk_v0; /* 
bulk parameter */ - test_bulk_req_v1_t bulk_v1; /* bulk v1 parameter */ + test_ping_req_t ping; /* ping parameter */ + test_bulk_req_t bulk_v0; /* bulk parameter */ + test_bulk_req_v1_t bulk_v1; /* bulk v1 parameter */ } tsi_u; } sfw_test_instance_t; @@ -394,18 +399,18 @@ typedef struct sfw_test_instance { #define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE) typedef struct sfw_test_unit { - struct list_head tsu_list; /* chain on lst_test_instance */ - lnet_process_id_t tsu_dest; /* id of dest node */ - int tsu_loop; /* loop count of the test */ - sfw_test_instance_t *tsu_instance; /* pointer to test instance */ - void *tsu_private; /* private data */ - swi_workitem_t tsu_worker; /* workitem of the test unit */ + struct list_head tsu_list; /* chain on lst_test_instance */ + lnet_process_id_t tsu_dest; /* id of dest node */ + int tsu_loop; /* loop count of the test */ + sfw_test_instance_t *tsu_instance; /* pointer to test instance */ + void *tsu_private; /* private data */ + swi_workitem_t tsu_worker; /* workitem of the test unit */ } sfw_test_unit_t; typedef struct sfw_test_case { - struct list_head tsc_list; /* chain on fw_tests */ - srpc_service_t *tsc_srv_service; /* test service */ - sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */ + struct list_head tsc_list; /* chain on fw_tests */ + srpc_service_t *tsc_srv_service; /* test service */ + sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */ } sfw_test_case_t; srpc_client_rpc_t * @@ -501,9 +506,9 @@ void srpc_shutdown(void); static inline void srpc_destroy_client_rpc (srpc_client_rpc_t *rpc) { - LASSERT (rpc != NULL); - LASSERT (!srpc_event_pending(rpc)); - LASSERT (atomic_read(&rpc->crpc_refcount) == 0); + LASSERT(rpc != NULL); + LASSERT(!srpc_event_pending(rpc)); + LASSERT(atomic_read(&rpc->crpc_refcount) == 0); if (rpc->crpc_fini == NULL) { LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc)); @@ -520,7 +525,7 @@ srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer, void (*rpc_done)(srpc_client_rpc_t *), void (*rpc_fini)(srpc_client_rpc_t *), void *priv) { - LASSERT (nbulkiov <= LNET_MAX_IOV); + LASSERT(nbulkiov <= LNET_MAX_IOV); memset(rpc, 0, offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[nbulkiov])); @@ -531,13 +536,13 @@ srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer, spin_lock_init(&rpc->crpc_lock); atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */ - rpc->crpc_dest = peer; - rpc->crpc_priv = priv; + rpc->crpc_dest = peer; + rpc->crpc_priv = priv; rpc->crpc_service = service; rpc->crpc_bulk.bk_len = bulklen; rpc->crpc_bulk.bk_niov = nbulkiov; - rpc->crpc_done = rpc_done; - rpc->crpc_fini = rpc_fini; + rpc->crpc_done = rpc_done; + rpc->crpc_fini = rpc_fini; LNetInvalidateHandle(&rpc->crpc_reqstmdh); LNetInvalidateHandle(&rpc->crpc_replymdh); LNetInvalidateHandle(&rpc->crpc_bulk.bk_mdh); diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c index 441f9472a..6133b54f4 100644 --- a/drivers/staging/lustre/lnet/selftest/timer.c +++ b/drivers/staging/lustre/lnet/selftest/timer.c @@ -50,7 +50,7 @@ * sorted by increasing expiry time. The number of slots is 2**7 (128), * to cover a time period of 1024 seconds into the future before wrapping. 
*/ -#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */ +#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */ #define STTIMER_SLOTTIME (1 << STTIMER_MINPOLL) #define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1)) #define STTIMER_NSLOTS (1 << 7) @@ -58,13 +58,13 @@ (STTIMER_NSLOTS - 1))]) static struct st_timer_data { - spinlock_t stt_lock; - /* start time of the slot processed previously */ - unsigned long stt_prev_slot; - struct list_head stt_hash[STTIMER_NSLOTS]; - int stt_shuttingdown; - wait_queue_head_t stt_waitq; - int stt_nthreads; + spinlock_t stt_lock; + unsigned long stt_prev_slot; /* start time of the slot processed + * previously */ + struct list_head stt_hash[STTIMER_NSLOTS]; + int stt_shuttingdown; + wait_queue_head_t stt_waitq; + int stt_nthreads; } stt_data; void @@ -124,7 +124,7 @@ stt_del_timer(stt_timer_t *timer) static int stt_expire_list(struct list_head *slot, unsigned long now) { - int expired = 0; + int expired = 0; stt_timer_t *timer; while (!list_empty(slot)) { @@ -148,7 +148,7 @@ stt_expire_list(struct list_head *slot, unsigned long now) static int stt_check_timers(unsigned long *last) { - int expired = 0; + int expired = 0; unsigned long now; unsigned long this_slot; diff --git a/drivers/staging/lustre/lnet/selftest/timer.h b/drivers/staging/lustre/lnet/selftest/timer.h index d727c1e2b..2a8803d89 100644 --- a/drivers/staging/lustre/lnet/selftest/timer.h +++ b/drivers/staging/lustre/lnet/selftest/timer.h @@ -39,15 +39,15 @@ #define __SELFTEST_TIMER_H__ typedef struct { - struct list_head stt_list; - unsigned long stt_expires; - void (*stt_func) (void *); - void *stt_data; + struct list_head stt_list; + unsigned long stt_expires; + void (*stt_func) (void *); + void *stt_data; } stt_timer_t; -void stt_add_timer (stt_timer_t *timer); -int stt_del_timer (stt_timer_t *timer); -int stt_startup (void); -void stt_shutdown (void); +void stt_add_timer(stt_timer_t *timer); +int stt_del_timer(stt_timer_t *timer); +int stt_startup(void); +void stt_shutdown(void); #endif /* __SELFTEST_TIMER_H__ */ -- cgit v1.2.3-54-g00ecf
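
Illustration: the rpc.h hunks keep srpc_msg's on-the-wire header (msg_magic, msg_version, msg_type first) guarded by SRPC_MSG_MAGIC (0xeeb0f00d) and SRPC_MSG_VERSION (1). Below is a stand-alone user-space sketch of the kind of check a receiver can perform on such a header, including detection of a byte-swapped ("flipped") peer. struct srpc_hdr and srpc_check_hdr are invented names for the example, and __builtin_bswap32 (a GCC/Clang builtin) stands in for a kernel byte-swap helper; only the two constants are taken from the patch.

	#include <stdint.h>
	#include <stdio.h>

	#define SRPC_MSG_MAGIC   0xeeb0f00d
	#define SRPC_MSG_VERSION 1

	struct srpc_hdr {		/* first fields of the wire message */
		uint32_t msg_magic;
		uint32_t msg_version;
		uint32_t msg_type;
	};

	/* returns 0 (same endianness), 1 (peer is byte-swapped), or -1 (bad) */
	static int srpc_check_hdr(const struct srpc_hdr *h)
	{
		uint32_t version = h->msg_version;
		int flip = 0;

		if (h->msg_magic == __builtin_bswap32(SRPC_MSG_MAGIC)) {
			/* peer has opposite endianness: swab fields before use */
			flip = 1;
			version = __builtin_bswap32(version);
		} else if (h->msg_magic != SRPC_MSG_MAGIC) {
			fprintf(stderr, "bad magic %#x\n", h->msg_magic);
			return -1;
		}

		if (version != SRPC_MSG_VERSION) {
			fprintf(stderr, "bad version %u (flip=%d)\n", version, flip);
			return -1;
		}
		return flip;
	}

	int main(void)
	{
		struct srpc_hdr h = { SRPC_MSG_MAGIC, SRPC_MSG_VERSION, 0 };

		printf("check: %d\n", srpc_check_hdr(&h));
		return 0;
	}

Putting magic and version at fixed offsets ahead of the union body is what makes this check possible before the variable part of the message is interpreted.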
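Illustration: srpc_bulk_t ends in a zero-length array, bk_iovs[0], and the srpc_client_rpc_size() macro sizes the whole allocation with offsetof() up to bk_iovs[bk_niov]. A minimal user-space sketch of the same trailing-array idiom, with invented stand-in types (struct kiov, struct bulk_desc), not the kernel types:

	#include <stddef.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct kiov {			/* stand-in for lnet_kiov_t */
		void *base;
		size_t len;
	};

	struct bulk_desc {		/* stand-in for srpc_bulk_t */
		int bk_len;
		int bk_niov;		/* # iov actually allocated */
		struct kiov bk_iovs[];	/* flexible array member */
	};

	/* same shape as srpc_client_rpc_size(); offsetof() with a runtime
	 * index is a GCC/Clang extension, which the kernel macro relies
	 * on as well */
	#define bulk_desc_size(niov) offsetof(struct bulk_desc, bk_iovs[(niov)])

	int main(void)
	{
		int niov = 4;
		struct bulk_desc *bk = calloc(1, bulk_desc_size(niov));

		if (bk == NULL)
			return 1;
		bk->bk_niov = niov;
		printf("descriptor for %d iovs needs %zu bytes\n",
		       niov, bulk_desc_size(niov));
		free(bk);
		return 0;
	}

One allocation holds both the fixed descriptor and exactly as many iov slots as the RPC needs, which is why the descriptor records bk_niov instead of using a fixed-size array.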
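Illustration: srpc_client_rpc_addref()/srpc_client_rpc_decref() implement the usual last-reference-frees pattern: srpc_init_client_rpc() starts the count at 1 for the caller, every additional holder takes a reference, and whoever drops the count to zero triggers srpc_destroy_client_rpc(). A user-space analogue using C11 atomics; the crpc_* names here are illustrative, not the kernel API:

	#include <assert.h>
	#include <stdatomic.h>
	#include <stdlib.h>

	struct crpc {
		atomic_int refcount;	/* mirrors crpc_refcount */
		/* ... request/reply state would live here ... */
	};

	static void crpc_addref(struct crpc *rpc)
	{
		/* like the LASSERT in the macro: caller must already hold a ref */
		assert(atomic_load(&rpc->refcount) > 0);
		atomic_fetch_add(&rpc->refcount, 1);
	}

	static void crpc_decref(struct crpc *rpc)
	{
		assert(atomic_load(&rpc->refcount) > 0);
		if (atomic_fetch_sub(&rpc->refcount, 1) == 1) {
			/* last reference gone: destroy, as
			 * srpc_destroy_client_rpc() does at zero */
			free(rpc);
		}
	}

	int main(void)
	{
		struct crpc *rpc = calloc(1, sizeof(*rpc));

		if (rpc == NULL)
			return 1;
		atomic_init(&rpc->refcount, 1);	/* 1 ref for the caller */
		crpc_addref(rpc);		/* hand a ref to a second user */
		crpc_decref(rpc);		/* second user done */
		crpc_decref(rpc);		/* caller done: object freed */
		return 0;
	}

This is also why srpc_destroy_client_rpc() asserts crpc_refcount == 0 and that no events are pending: destruction must only happen after every holder, including LNet event handlers, has dropped out.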
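Illustration: the timer.c comment describes a hash wheel of 2**7 = 128 slots of 8 seconds each (STTIMER_MINPOLL = 3), covering 1024 seconds before the wheel wraps, so timers whose expiries are 1024 s apart land in the same slot and each chain must stay sorted by increasing expiry. A small sketch of that slot arithmetic; STTIMER_SLOT_IDX is an assumed name for the index computation that the STTIMER_SLOT macro (elided above) performs on stt_data.stt_hash:

	#include <stdio.h>

	#define STTIMER_MINPOLL	 3			/* log2 min poll interval (8 s) */
	#define STTIMER_SLOTTIME (1 << STTIMER_MINPOLL)
	#define STTIMER_NSLOTS	 (1 << 7)		/* 128 slots */

	/* drop the sub-slot bits, then wrap into the 128-entry table */
	#define STTIMER_SLOT_IDX(t) (((t) >> STTIMER_MINPOLL) & (STTIMER_NSLOTS - 1))

	int main(void)
	{
		unsigned long now = 10000;
		unsigned long wrap = (unsigned long)STTIMER_NSLOTS * STTIMER_SLOTTIME;

		/* 8 s apart: adjacent slots */
		printf("t=%lu -> slot %lu\n", now,
		       (unsigned long)STTIMER_SLOT_IDX(now));
		printf("t=%lu -> slot %lu\n", now + STTIMER_SLOTTIME,
		       (unsigned long)STTIMER_SLOT_IDX(now + STTIMER_SLOTTIME));

		/* 1024 s apart: the wheel has wrapped, same slot again */
		printf("t=%lu -> slot %lu\n", now + wrap,
		       (unsigned long)STTIMER_SLOT_IDX(now + wrap));
		return 0;
	}

Because colliding expiries share a chain, stt_expire_list() can stop scanning a slot at the first entry whose expiry is still in the future, which keeps each poll cheap.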