Diffstat (limited to 'drivers/staging/lustre/lnet/klnds')
-rw-r--r-- | drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c | 474
-rw-r--r-- | drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h | 681
-rw-r--r-- | drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c | 484
-rw-r--r-- | drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c | 48
-rw-r--r-- | drivers/staging/lustre/lnet/klnds/socklnd/Makefile | 2
-rw-r--r-- | drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c | 485
-rw-r--r-- | drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h | 601
-rw-r--r-- | drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c | 402
-rw-r--r-- | drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h | 86
-rw-r--r-- | drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c (renamed from drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c) | 172
-rw-r--r-- | drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c | 33
-rw-r--r-- | drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c | 171
12 files changed, 1827 insertions, 1812 deletions
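Most of the o2iblnd churn below is declaration realignment, but the kiblnd_create_conn() hunk also carries a real verbs API migration: ib_create_cq() no longer takes the CQE count and completion vector as separate trailing arguments, and instead receives them bundled in a struct ib_cq_init_attr passed by pointer. A minimal sketch of the before/after call pattern under that assumption; the handler stubs and wrapper function here are illustrative stand-ins, not the driver's own code:

#include <rdma/ib_verbs.h>

/* Illustrative stubs standing in for kiblnd_cq_completion()/kiblnd_cq_event(). */
static void example_comp_handler(struct ib_cq *cq, void *cq_context)
{
}

static void example_event_handler(struct ib_event *event, void *cq_context)
{
}

static struct ib_cq *example_create_cq(struct ib_device *ibdev, void *ctx,
				       int cqe, int comp_vector)
{
	/*
	 * Old call (what the '-' lines remove): size and vector passed
	 * as the two trailing arguments:
	 *
	 *	cq = ib_create_cq(ibdev, example_comp_handler,
	 *			  example_event_handler, ctx,
	 *			  cqe, comp_vector);
	 */
	struct ib_cq_init_attr cq_attr = {};

	cq_attr.cqe = cqe;			/* # completion queue entries */
	cq_attr.comp_vector = comp_vector;	/* which interrupt vector */

	/* New call (what the '+' lines add): attributes travel by pointer. */
	return ib_create_cq(ibdev, example_comp_handler, example_event_handler,
			    ctx, &cq_attr);
}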
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c index 3bad441de..4eb24a11b 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c @@ -38,25 +38,26 @@ * Author: Eric Barton <eric@bartonsoftware.com> */ -#include "o2iblnd.h" #include <asm/div64.h> +#include <asm/page.h> +#include "o2iblnd.h" static lnd_t the_o2iblnd = { - .lnd_type = O2IBLND, - .lnd_startup = kiblnd_startup, - .lnd_shutdown = kiblnd_shutdown, - .lnd_ctl = kiblnd_ctl, - .lnd_query = kiblnd_query, - .lnd_send = kiblnd_send, - .lnd_recv = kiblnd_recv, + .lnd_type = O2IBLND, + .lnd_startup = kiblnd_startup, + .lnd_shutdown = kiblnd_shutdown, + .lnd_ctl = kiblnd_ctl, + .lnd_query = kiblnd_query, + .lnd_send = kiblnd_send, + .lnd_recv = kiblnd_recv, }; -kib_data_t kiblnd_data; +kib_data_t kiblnd_data; static __u32 kiblnd_cksum(void *ptr, int nob) { - char *c = ptr; - __u32 sum = 0; + char *c = ptr; + __u32 sum = 0; while (nob-- > 0) sum = ((sum << 1) | (sum >> 31)) + *c++; @@ -138,10 +139,10 @@ static int kiblnd_msgtype2size(int type) static int kiblnd_unpack_rd(kib_msg_t *msg, int flip) { - kib_rdma_desc_t *rd; - int nob; - int n; - int i; + kib_rdma_desc_t *rd; + int nob; + int n; + int i; LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ || msg->ibm_type == IBLND_MSG_PUT_ACK); @@ -210,10 +211,10 @@ void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version, int kiblnd_unpack_msg(kib_msg_t *msg, int nob) { const int hdr_size = offsetof(kib_msg_t, ibm_u); - __u32 msg_cksum; - __u16 version; - int msg_nob; - int flip; + __u32 msg_cksum; + __u16 version; + int msg_nob; + int flip; /* 6 bytes are enough to have received magic + version */ if (nob < 6) { @@ -320,10 +321,10 @@ int kiblnd_unpack_msg(kib_msg_t *msg, int nob) int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid) { - kib_peer_t *peer; - kib_net_t *net = ni->ni_data; - int cpt = lnet_cpt_of_nid(nid); - unsigned long flags; + kib_peer_t *peer; + kib_net_t *net = ni->ni_data; + int cpt = lnet_cpt_of_nid(nid); + unsigned long flags; LASSERT(net != NULL); LASSERT(nid != LNET_NID_ANY); @@ -385,9 +386,9 @@ kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid) { /* the caller is responsible for accounting the additional reference * that this creates */ - struct list_head *peer_list = kiblnd_nid2peerlist(nid); - struct list_head *tmp; - kib_peer_t *peer; + struct list_head *peer_list = kiblnd_nid2peerlist(nid); + struct list_head *tmp; + kib_peer_t *peer; list_for_each(tmp, peer_list) { @@ -422,10 +423,10 @@ void kiblnd_unlink_peer_locked(kib_peer_t *peer) static int kiblnd_get_peer_info(lnet_ni_t *ni, int index, lnet_nid_t *nidp, int *count) { - kib_peer_t *peer; - struct list_head *ptmp; - int i; - unsigned long flags; + kib_peer_t *peer; + struct list_head *ptmp; + int i; + unsigned long flags; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -459,9 +460,9 @@ static int kiblnd_get_peer_info(lnet_ni_t *ni, int index, static void kiblnd_del_peer_locked(kib_peer_t *peer) { - struct list_head *ctmp; - struct list_head *cnxt; - kib_conn_t *conn; + struct list_head *ctmp; + struct list_head *cnxt; + kib_conn_t *conn; if (list_empty(&peer->ibp_conns)) { kiblnd_unlink_peer_locked(peer); @@ -480,14 +481,14 @@ static void kiblnd_del_peer_locked(kib_peer_t *peer) static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) { LIST_HEAD(zombies); - struct list_head *ptmp; - struct list_head *pnxt; - kib_peer_t *peer; - int lo; - 
int hi; - int i; - unsigned long flags; - int rc = -ENOENT; + struct list_head *ptmp; + struct list_head *pnxt; + kib_peer_t *peer; + int lo; + int hi; + int i; + unsigned long flags; + int rc = -ENOENT; write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -532,12 +533,12 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid) static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index) { - kib_peer_t *peer; - struct list_head *ptmp; - kib_conn_t *conn; - struct list_head *ctmp; - int i; - unsigned long flags; + kib_peer_t *peer; + struct list_head *ptmp; + kib_conn_t *conn; + struct list_head *ctmp; + int i; + unsigned long flags; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -593,7 +594,7 @@ int kiblnd_translate_mtu(int value) static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid) { - int mtu; + int mtu; /* XXX There is no path record for iWARP, set by netdev->change_mtu? */ if (cmid->route.path_rec == NULL) @@ -607,11 +608,11 @@ static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid) static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt) { - cpumask_t *mask; - int vectors; - int off; - int i; - lnet_nid_t nid = conn->ibc_peer->ibp_nid; + cpumask_t *mask; + int vectors; + int off; + int i; + lnet_nid_t nid = conn->ibc_peer->ibp_nid; vectors = conn->ibc_cmid->device->num_comp_vectors; if (vectors <= 1) @@ -642,17 +643,18 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, * she must dispose of 'cmid'. (Actually I'd block forever if I tried * to destroy 'cmid' here since I'm called from the CM which still has * its ref on 'cmid'). */ - rwlock_t *glock = &kiblnd_data.kib_global_lock; - kib_net_t *net = peer->ibp_ni->ni_data; - kib_dev_t *dev; + rwlock_t *glock = &kiblnd_data.kib_global_lock; + kib_net_t *net = peer->ibp_ni->ni_data; + kib_dev_t *dev; struct ib_qp_init_attr *init_qp_attr; - struct kib_sched_info *sched; - kib_conn_t *conn; - struct ib_cq *cq; - unsigned long flags; - int cpt; - int rc; - int i; + struct kib_sched_info *sched; + struct ib_cq_init_attr cq_attr = {}; + kib_conn_t *conn; + struct ib_cq *cq; + unsigned long flags; + int cpt; + int rc; + int i; LASSERT(net != NULL); LASSERT(!in_interrupt()); @@ -742,10 +744,11 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, kiblnd_map_rx_descs(conn); + cq_attr.cqe = IBLND_CQ_ENTRIES(version); + cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt); cq = ib_create_cq(cmid->device, kiblnd_cq_completion, kiblnd_cq_event, conn, - IBLND_CQ_ENTRIES(version), - kiblnd_get_completion_vector(conn, cpt)); + &cq_attr); if (IS_ERR(cq)) { CERROR("Can't create CQ: %ld, cqe: %d\n", PTR_ERR(cq), IBLND_CQ_ENTRIES(version)); @@ -837,8 +840,8 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid, void kiblnd_destroy_conn(kib_conn_t *conn) { struct rdma_cm_id *cmid = conn->ibc_cmid; - kib_peer_t *peer = conn->ibc_peer; - int rc; + kib_peer_t *peer = conn->ibc_peer; + int rc; LASSERT(!in_interrupt()); LASSERT(atomic_read(&conn->ibc_refcount) == 0); @@ -904,10 +907,10 @@ void kiblnd_destroy_conn(kib_conn_t *conn) int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why) { - kib_conn_t *conn; - struct list_head *ctmp; - struct list_head *cnxt; - int count = 0; + kib_conn_t *conn; + struct list_head *ctmp; + struct list_head *cnxt; + int count = 0; list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { conn = list_entry(ctmp, kib_conn_t, ibc_list); @@ -926,10 +929,10 @@ int kiblnd_close_peer_conns_locked(kib_peer_t 
*peer, int why) int kiblnd_close_stale_conns_locked(kib_peer_t *peer, int version, __u64 incarnation) { - kib_conn_t *conn; - struct list_head *ctmp; - struct list_head *cnxt; - int count = 0; + kib_conn_t *conn; + struct list_head *ctmp; + struct list_head *cnxt; + int count = 0; list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) { conn = list_entry(ctmp, kib_conn_t, ibc_list); @@ -953,14 +956,14 @@ int kiblnd_close_stale_conns_locked(kib_peer_t *peer, static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) { - kib_peer_t *peer; - struct list_head *ptmp; - struct list_head *pnxt; - int lo; - int hi; - int i; - unsigned long flags; - int count = 0; + kib_peer_t *peer; + struct list_head *ptmp; + struct list_head *pnxt; + int lo; + int hi; + int i; + unsigned long flags; + int count = 0; write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -1001,17 +1004,17 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid) int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) { struct libcfs_ioctl_data *data = arg; - int rc = -EINVAL; + int rc = -EINVAL; switch (cmd) { case IOC_LIBCFS_GET_PEER: { - lnet_nid_t nid = 0; - int count = 0; + lnet_nid_t nid = 0; + int count = 0; rc = kiblnd_get_peer_info(ni, data->ioc_count, &nid, &count); - data->ioc_nid = nid; - data->ioc_count = count; + data->ioc_nid = nid; + data->ioc_count = count; break; } @@ -1053,11 +1056,11 @@ int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) { - unsigned long last_alive = 0; - unsigned long now = cfs_time_current(); - rwlock_t *glock = &kiblnd_data.kib_global_lock; - kib_peer_t *peer; - unsigned long flags; + unsigned long last_alive = 0; + unsigned long now = cfs_time_current(); + rwlock_t *glock = &kiblnd_data.kib_global_lock; + kib_peer_t *peer; + unsigned long flags; read_lock_irqsave(glock, flags); @@ -1086,8 +1089,8 @@ void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) void kiblnd_free_pages(kib_pages_t *p) { - int npages = p->ibp_npages; - int i; + int npages = p->ibp_npages; + int i; for (i = 0; i < npages; i++) { if (p->ibp_pages[i] != NULL) @@ -1099,8 +1102,8 @@ void kiblnd_free_pages(kib_pages_t *p) int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) { - kib_pages_t *p; - int i; + kib_pages_t *p; + int i; LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt, offsetof(kib_pages_t, ibp_pages[npages])); @@ -1130,7 +1133,7 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) void kiblnd_unmap_rx_descs(kib_conn_t *conn) { kib_rx_t *rx; - int i; + int i; LASSERT(conn->ibc_rxs != NULL); LASSERT(conn->ibc_hdev != NULL); @@ -1153,14 +1156,13 @@ void kiblnd_unmap_rx_descs(kib_conn_t *conn) void kiblnd_map_rx_descs(kib_conn_t *conn) { - kib_rx_t *rx; - struct page *pg; - int pg_off; - int ipg; - int i; + kib_rx_t *rx; + struct page *pg; + int pg_off; + int ipg; + int i; - for (pg_off = ipg = i = 0; - i < IBLND_RX_MSGS(conn->ibc_version); i++) { + for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) { pg = conn->ibc_rx_pages->ibp_pages[ipg]; rx = &conn->ibc_rxs[i]; @@ -1177,7 +1179,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn) CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n", i, rx->rx_msg, rx->rx_msgaddr, - lnet_page2phys(pg) + pg_off); + (__u64)(page_to_phys(pg) + pg_off)); pg_off += IBLND_MSG_SIZE; LASSERT(pg_off <= PAGE_SIZE); @@ -1192,9 +1194,9 @@ void kiblnd_map_rx_descs(kib_conn_t *conn) static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo) { - kib_hca_dev_t *hdev = 
tpo->tpo_hdev; - kib_tx_t *tx; - int i; + kib_hca_dev_t *hdev = tpo->tpo_hdev; + kib_tx_t *tx; + int i; LASSERT(tpo->tpo_pool.po_allocated == 0); @@ -1216,8 +1218,8 @@ static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo) static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev) { kib_hca_dev_t *hdev; - unsigned long flags; - int i = 0; + unsigned long flags; + int i = 0; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); while (dev->ibd_failover) { @@ -1240,15 +1242,15 @@ static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev) static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) { - kib_pages_t *txpgs = tpo->tpo_tx_pages; - kib_pool_t *pool = &tpo->tpo_pool; - kib_net_t *net = pool->po_owner->ps_net; - kib_dev_t *dev; - struct page *page; - kib_tx_t *tx; - int page_offset; - int ipage; - int i; + kib_pages_t *txpgs = tpo->tpo_tx_pages; + kib_pool_t *pool = &tpo->tpo_pool; + kib_net_t *net = pool->po_owner->ps_net; + kib_dev_t *dev; + struct page *page; + kib_tx_t *tx; + int page_offset; + int ipage; + int i; LASSERT(net != NULL); @@ -1291,7 +1293,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo) struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size) { - __u64 index; + __u64 index; LASSERT(hdev->ibh_mrs[0] != NULL); @@ -1311,7 +1313,7 @@ struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd) { struct ib_mr *prev_mr; struct ib_mr *mr; - int i; + int i; LASSERT(hdev->ibh_mrs[0] != NULL); @@ -1382,18 +1384,18 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t **pp_fpo) { /* FMR pool for RDMA */ - kib_dev_t *dev = fps->fps_net->ibn_dev; - kib_fmr_pool_t *fpo; + kib_dev_t *dev = fps->fps_net->ibn_dev; + kib_fmr_pool_t *fpo; struct ib_fmr_pool_param param = { .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE, - .page_shift = PAGE_SHIFT, - .access = (IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE), - .pool_size = fps->fps_pool_size, + .page_shift = PAGE_SHIFT, + .access = (IB_ACCESS_LOCAL_WRITE | + IB_ACCESS_REMOTE_WRITE), + .pool_size = fps->fps_pool_size, .dirty_watermark = fps->fps_flush_trigger, .flush_function = NULL, - .flush_arg = NULL, - .cache = !!*kiblnd_tunables.kib_fmr_cache}; + .flush_arg = NULL, + .cache = !!*kiblnd_tunables.kib_fmr_cache}; int rc; LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo)); @@ -1454,7 +1456,7 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int flush_trigger) { kib_fmr_pool_t *fpo; - int rc; + int rc; memset(fps, 0, sizeof(kib_fmr_poolset_t)); @@ -1485,11 +1487,11 @@ static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now) void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status) { LIST_HEAD(zombies); - kib_fmr_pool_t *fpo = fmr->fmr_pool; + kib_fmr_pool_t *fpo = fmr->fmr_pool; kib_fmr_poolset_t *fps = fpo->fpo_owner; - unsigned long now = cfs_time_current(); - kib_fmr_pool_t *tmp; - int rc; + unsigned long now = cfs_time_current(); + kib_fmr_pool_t *tmp; + int rc; rc = ib_fmr_pool_unmap(fmr->fmr_pfmr); LASSERT(rc == 0); @@ -1525,9 +1527,9 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages, __u64 iov, kib_fmr_t *fmr) { struct ib_pool_fmr *pfmr; - kib_fmr_pool_t *fpo; - __u64 version; - int rc; + kib_fmr_pool_t *fpo; + __u64 version; + int rc; again: spin_lock(&fps->fps_lock); @@ -1658,13 +1660,13 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt, kib_ps_node_init_t nd_init, kib_ps_node_fini_t nd_fini) { - kib_pool_t *pool; - int rc; + kib_pool_t *pool; + int rc; memset(ps, 0, 
sizeof(kib_poolset_t)); - ps->ps_cpt = cpt; - ps->ps_net = net; + ps->ps_cpt = cpt; + ps->ps_net = net; ps->ps_pool_create = po_create; ps->ps_pool_destroy = po_destroy; ps->ps_node_init = nd_init; @@ -1698,9 +1700,9 @@ static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now) void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node) { LIST_HEAD(zombies); - kib_poolset_t *ps = pool->po_owner; - kib_pool_t *tmp; - unsigned long now = cfs_time_current(); + kib_poolset_t *ps = pool->po_owner; + kib_pool_t *tmp; + unsigned long now = cfs_time_current(); spin_lock(&ps->ps_lock); @@ -1727,9 +1729,9 @@ void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node) struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) { - struct list_head *node; - kib_pool_t *pool; - int rc; + struct list_head *node; + kib_pool_t *pool; + int rc; again: spin_lock(&ps->ps_lock); @@ -1789,8 +1791,8 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps) void kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr) { - kib_pmr_pool_t *ppo = pmr->pmr_pool; - struct ib_mr *mr = pmr->pmr_mr; + kib_pmr_pool_t *ppo = pmr->pmr_pool; + struct ib_mr *mr = pmr->pmr_mr; pmr->pmr_mr = NULL; kiblnd_pool_free_node(&ppo->ppo_pool, &pmr->pmr_list); @@ -1802,9 +1804,9 @@ int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev, kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr) { kib_phys_mr_t *pmr; - struct list_head *node; - int rc; - int i; + struct list_head *node; + int rc; + int i; node = kiblnd_pool_alloc_node(&pps->pps_poolset); if (node == NULL) { @@ -1846,7 +1848,7 @@ int kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev, static void kiblnd_destroy_pmr_pool(kib_pool_t *pool) { kib_pmr_pool_t *ppo = container_of(pool, kib_pmr_pool_t, ppo_pool); - kib_phys_mr_t *pmr; + kib_phys_mr_t *pmr; kib_phys_mr_t *tmp; LASSERT(pool->po_allocated == 0); @@ -1881,10 +1883,10 @@ static inline int kiblnd_pmr_pool_size(int ncpts) static int kiblnd_create_pmr_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po) { - struct kib_pmr_pool *ppo; - struct kib_pool *pool; - kib_phys_mr_t *pmr; - int i; + struct kib_pmr_pool *ppo; + struct kib_pool *pool; + kib_phys_mr_t *pmr; + int i; LIBCFS_CPT_ALLOC(ppo, lnet_cpt_table(), ps->ps_cpt, sizeof(kib_pmr_pool_t)); @@ -1923,8 +1925,8 @@ static int kiblnd_create_pmr_pool(kib_poolset_t *ps, int size, static void kiblnd_destroy_tx_pool(kib_pool_t *pool) { - kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool); - int i; + kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool); + int i; LASSERT(pool->po_allocated == 0); @@ -1979,9 +1981,9 @@ static int kiblnd_tx_pool_size(int ncpts) static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po) { - int i; - int npg; - kib_pool_t *pool; + int i; + int npg; + kib_pool_t *pool; kib_tx_pool_t *tpo; LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo)); @@ -2064,19 +2066,19 @@ static void kiblnd_tx_init(kib_pool_t *pool, struct list_head *node) { kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t, tps_poolset); - kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list); + kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list); tx->tx_cookie = tps->tps_next_tx_cookie++; } static void kiblnd_net_fini_pools(kib_net_t *net) { - int i; + int i; cfs_cpt_for_each(i, lnet_cpt_table()) { - kib_tx_poolset_t *tps; - kib_fmr_poolset_t *fps; - kib_pmr_poolset_t *pps; + kib_tx_poolset_t *tps; + kib_fmr_poolset_t *fps; + kib_pmr_poolset_t *pps; if 
(net->ibn_tx_ps != NULL) { tps = net->ibn_tx_ps[i]; @@ -2112,16 +2114,15 @@ static void kiblnd_net_fini_pools(kib_net_t *net) static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) { - unsigned long flags; - int cpt; - int rc; - int i; + unsigned long flags; + int cpt; + int rc; + int i; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); if (*kiblnd_tunables.kib_map_on_demand == 0 && net->ibn_dev->ibd_hdev->ibh_nmrs == 1) { - read_unlock_irqrestore(&kiblnd_data.kib_global_lock, - flags); + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); goto create_tx_pool; } @@ -2241,7 +2242,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts) static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev) { struct ib_device_attr *attr; - int rc; + int rc; /* It's safe to assume a HCA can handle a page size * matching that of the native system */ @@ -2284,7 +2285,7 @@ static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev) static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev) { - int i; + int i; if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL) return; @@ -2317,12 +2318,11 @@ void kiblnd_hdev_destroy(kib_hca_dev_t *hdev) static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev) { struct ib_mr *mr; - int i; - int rc; - __u64 mm_size; - __u64 mr_size; - int acflags = IB_ACCESS_LOCAL_WRITE | - IB_ACCESS_REMOTE_WRITE; + int i; + int rc; + __u64 mm_size; + __u64 mr_size; + int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE; rc = kiblnd_hdev_get_attr(hdev); if (rc != 0) @@ -2371,11 +2371,11 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev) for (i = 0; i < hdev->ibh_nmrs; i++) { struct ib_phys_buf ipb; - __u64 iova; + __u64 iova; ipb.size = hdev->ibh_mr_size; ipb.addr = i * mr_size; - iova = ipb.addr; + iova = ipb.addr; mr = ib_reg_phys_mr(hdev->ibh_pd, &ipb, 1, acflags, &iova); if (IS_ERR(mr)) { @@ -2406,10 +2406,10 @@ static int kiblnd_dummy_callback(struct rdma_cm_id *cmid, static int kiblnd_dev_need_failover(kib_dev_t *dev) { - struct rdma_cm_id *cmid; - struct sockaddr_in srcaddr; - struct sockaddr_in dstaddr; - int rc; + struct rdma_cm_id *cmid; + struct sockaddr_in srcaddr; + struct sockaddr_in dstaddr; + int rc; if (dev->ibd_hdev == NULL || /* initializing */ dev->ibd_hdev->ibh_cmid == NULL || /* listener is dead */ @@ -2435,7 +2435,7 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev) } memset(&srcaddr, 0, sizeof(srcaddr)); - srcaddr.sin_family = AF_INET; + srcaddr.sin_family = AF_INET; srcaddr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip); memset(&dstaddr, 0, sizeof(dstaddr)); @@ -2464,15 +2464,14 @@ int kiblnd_dev_failover(kib_dev_t *dev) LIST_HEAD(zombie_tpo); LIST_HEAD(zombie_ppo); LIST_HEAD(zombie_fpo); - struct rdma_cm_id *cmid = NULL; - kib_hca_dev_t *hdev = NULL; - kib_hca_dev_t *old; - struct ib_pd *pd; - kib_net_t *net; - struct sockaddr_in addr; - unsigned long flags; - int rc = 0; - int i; + struct rdma_cm_id *cmid = NULL; + kib_hca_dev_t *hdev = NULL; + struct ib_pd *pd; + kib_net_t *net; + struct sockaddr_in addr; + unsigned long flags; + int rc = 0; + int i; LASSERT(*kiblnd_tunables.kib_dev_failover > 1 || dev->ibd_can_failover || @@ -2558,9 +2557,7 @@ int kiblnd_dev_failover(kib_dev_t *dev) write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - old = dev->ibd_hdev; - dev->ibd_hdev = hdev; /* take over the refcount */ - hdev = old; + swap(dev->ibd_hdev, hdev); /* take over the refcount */ list_for_each_entry(net, &dev->ibd_nets, ibn_list) { cfs_cpt_for_each(i, lnet_cpt_table()) { @@ -2614,13 +2611,13 @@ void 
kiblnd_destroy_dev(kib_dev_t *dev) static kib_dev_t *kiblnd_create_dev(char *ifname) { struct net_device *netdev; - kib_dev_t *dev; - __u32 netmask; - __u32 ip; - int up; - int rc; + kib_dev_t *dev; + __u32 netmask; + __u32 ip; + int up; + int rc; - rc = libcfs_ipif_query(ifname, &up, &ip, &netmask); + rc = lnet_ipif_query(ifname, &up, &ip, &netmask); if (rc != 0) { CERROR("Can't query IPoIB interface %s: %d\n", ifname, rc); @@ -2665,8 +2662,8 @@ static kib_dev_t *kiblnd_create_dev(char *ifname) static void kiblnd_base_shutdown(void) { - struct kib_sched_info *sched; - int i; + struct kib_sched_info *sched; + int i; LASSERT(list_empty(&kiblnd_data.kib_devs)); @@ -2732,10 +2729,10 @@ static void kiblnd_base_shutdown(void) void kiblnd_shutdown(lnet_ni_t *ni) { - kib_net_t *net = ni->ni_data; - rwlock_t *g_lock = &kiblnd_data.kib_global_lock; - int i; - unsigned long flags; + kib_net_t *net = ni->ni_data; + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + int i; + unsigned long flags; LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL); @@ -2804,9 +2801,9 @@ out: static int kiblnd_base_startup(void) { - struct kib_sched_info *sched; - int rc; - int i; + struct kib_sched_info *sched; + int rc; + int i; LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING); @@ -2821,8 +2818,7 @@ static int kiblnd_base_startup(void) kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE; LIBCFS_ALLOC(kiblnd_data.kib_peers, - sizeof(struct list_head) * - kiblnd_data.kib_peer_hash_size); + sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size); if (kiblnd_data.kib_peers == NULL) goto failed; for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) @@ -2840,7 +2836,7 @@ static int kiblnd_base_startup(void) goto failed; cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) { - int nthrs; + int nthrs; spin_lock_init(&sched->ibs_lock); INIT_LIST_HEAD(&sched->ibs_conns); @@ -2893,9 +2889,9 @@ static int kiblnd_base_startup(void) static int kiblnd_start_schedulers(struct kib_sched_info *sched) { - int rc = 0; - int nthrs; - int i; + int rc = 0; + int nthrs; + int i; if (sched->ibs_nthreads == 0) { if (*kiblnd_tunables.kib_nscheds > 0) { @@ -2913,8 +2909,8 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched) } for (i = 0; i < nthrs; i++) { - long id; - char name[20]; + long id; + char name[20]; id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i); snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld", @@ -2935,9 +2931,9 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched) static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, int ncpts) { - int cpt; - int rc; - int i; + int cpt; + int rc; + int i; for (i = 0; i < ncpts; i++) { struct kib_sched_info *sched; @@ -2960,10 +2956,10 @@ static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, static kib_dev_t *kiblnd_dev_search(char *ifname) { - kib_dev_t *alias = NULL; - kib_dev_t *dev; - char *colon; - char *colon2; + kib_dev_t *alias = NULL; + kib_dev_t *dev; + char *colon; + char *colon2; colon = strchr(ifname, ':'); list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) { @@ -2992,13 +2988,13 @@ static kib_dev_t *kiblnd_dev_search(char *ifname) int kiblnd_startup(lnet_ni_t *ni) { - char *ifname; - kib_dev_t *ibdev = NULL; - kib_net_t *net; - struct timeval tv; - unsigned long flags; - int rc; - int newdev; + char *ifname; + kib_dev_t *ibdev = NULL; + kib_net_t *net; + struct timeval tv; + unsigned long flags; + int rc; + int newdev; LASSERT(ni->ni_lnd == &the_o2iblnd); @@ -3091,7 +3087,7 @@ static 
void __exit kiblnd_module_fini(void) static int __init kiblnd_module_init(void) { - int rc; + int rc; CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE); CLASSERT(offsetof(kib_msg_t, diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h index cd664d025..f5d1d9f8f 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h @@ -65,7 +65,6 @@ #include "../../../include/linux/libcfs/libcfs.h" #include "../../../include/linux/lnet/lnet.h" #include "../../../include/linux/lnet/lib-lnet.h" -#include "../../../include/linux/lnet/lnet-sysctl.h" #include <rdma/rdma_cm.h> #include <rdma/ib_cm.h> @@ -80,42 +79,47 @@ #define IBLND_N_SCHED_HIGH 4 typedef struct { - int *kib_dev_failover; /* HCA failover */ - unsigned int *kib_service; /* IB service number */ - int *kib_min_reconnect_interval; /* first failed connection retry... */ - int *kib_max_reconnect_interval; /* ...exponentially increasing to this */ - int *kib_cksum; /* checksum kib_msg_t? */ - int *kib_timeout; /* comms timeout (seconds) */ - int *kib_keepalive; /* keepalive timeout (seconds) */ - int *kib_ntx; /* # tx descs */ - int *kib_credits; /* # concurrent sends */ - int *kib_peertxcredits; /* # concurrent sends to 1 peer */ - int *kib_peerrtrcredits; /* # per-peer router buffer credits */ - int *kib_peercredits_hiw; /* # when eagerly to return credits */ - int *kib_peertimeout; /* seconds to consider peer dead */ - char **kib_default_ipif; /* default IPoIB interface */ - int *kib_retry_count; - int *kib_rnr_retry_count; - int *kib_concurrent_sends; /* send work queue sizing */ - int *kib_ib_mtu; /* IB MTU */ - int *kib_map_on_demand; /* map-on-demand if RD has more fragments - * than this value, 0 disable map-on-demand */ - int *kib_pmr_pool_size; /* # physical MR in pool */ - int *kib_fmr_pool_size; /* # FMRs in pool */ - int *kib_fmr_flush_trigger; /* When to trigger FMR flush */ - int *kib_fmr_cache; /* enable FMR pool cache? */ - int *kib_require_priv_port;/* accept only privileged ports */ - int *kib_use_priv_port; /* use privileged port for active connect */ - /* # threads on each CPT */ - int *kib_nscheds; + int *kib_dev_failover; /* HCA failover */ + unsigned int *kib_service; /* IB service number */ + int *kib_min_reconnect_interval; /* first failed connection + * retry... */ + int *kib_max_reconnect_interval; /* ...exponentially increasing + * to this */ + int *kib_cksum; /* checksum kib_msg_t? */ + int *kib_timeout; /* comms timeout (seconds) */ + int *kib_keepalive; /* keepalive timeout (seconds) */ + int *kib_ntx; /* # tx descs */ + int *kib_credits; /* # concurrent sends */ + int *kib_peertxcredits; /* # concurrent sends to 1 peer */ + int *kib_peerrtrcredits; /* # per-peer router buffer + * credits */ + int *kib_peercredits_hiw; /* # when eagerly to return + * credits */ + int *kib_peertimeout; /* seconds to consider peer dead */ + char **kib_default_ipif; /* default IPoIB interface */ + int *kib_retry_count; + int *kib_rnr_retry_count; + int *kib_concurrent_sends; /* send work queue sizing */ + int *kib_ib_mtu; /* IB MTU */ + int *kib_map_on_demand; /* map-on-demand if RD has more + * fragments than this value, 0 + * disable map-on-demand */ + int *kib_pmr_pool_size; /* # physical MR in pool */ + int *kib_fmr_pool_size; /* # FMRs in pool */ + int *kib_fmr_flush_trigger; /* When to trigger FMR flush */ + int *kib_fmr_cache; /* enable FMR pool cache? 
*/ + int *kib_require_priv_port; /* accept only privileged ports */ + int *kib_use_priv_port; /* use privileged port for active + * connect */ + int *kib_nscheds; /* # threads on each CPT */ } kib_tunables_t; extern kib_tunables_t kiblnd_tunables; -#define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */ -#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */ +#define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */ +#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */ -#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */ +#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */ #define IBLND_CREDITS_MAX ((typeof(((kib_msg_t*) 0)->ibm_credits)) - 1) /* Max # of peer credits */ #define IBLND_MSG_QUEUE_SIZE(v) ((v) == IBLND_MSG_VERSION_1 ? \ @@ -186,34 +190,36 @@ struct kib_hca_dev; #endif typedef struct { - struct list_head ibd_list; /* chain on kib_devs */ - struct list_head ibd_fail_list; /* chain on kib_failed_devs */ - __u32 ibd_ifip; /* IPoIB interface IP */ - /** IPoIB interface name */ - char ibd_ifname[KIB_IFNAME_SIZE]; - int ibd_nnets; /* # nets extant */ - - unsigned long ibd_next_failover; - int ibd_failed_failover; /* # failover failures */ - unsigned int ibd_failover; /* failover in progress */ - unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */ - struct list_head ibd_nets; - struct kib_hca_dev *ibd_hdev; + struct list_head ibd_list; /* chain on kib_devs */ + struct list_head ibd_fail_list; /* chain on kib_failed_devs */ + __u32 ibd_ifip; /* IPoIB interface IP */ + + /* IPoIB interface name */ + char ibd_ifname[KIB_IFNAME_SIZE]; + int ibd_nnets; /* # nets extant */ + + unsigned long ibd_next_failover; + int ibd_failed_failover; /* # failover failures */ + unsigned int ibd_failover; /* failover in progress */ + unsigned int ibd_can_failover; /* IPoIB interface is a bonding + * master */ + struct list_head ibd_nets; + struct kib_hca_dev *ibd_hdev; } kib_dev_t; typedef struct kib_hca_dev { - struct rdma_cm_id *ibh_cmid; /* listener cmid */ - struct ib_device *ibh_ibdev; /* IB device */ - int ibh_page_shift; /* page shift of current HCA */ - int ibh_page_size; /* page size of current HCA */ - __u64 ibh_page_mask; /* page mask of current HCA */ - int ibh_mr_shift; /* bits shift of max MR size */ - __u64 ibh_mr_size; /* size of MR */ - int ibh_nmrs; /* # of global MRs */ - struct ib_mr **ibh_mrs; /* global MR */ - struct ib_pd *ibh_pd; /* PD */ - kib_dev_t *ibh_dev; /* owner */ - atomic_t ibh_ref; /* refcount */ + struct rdma_cm_id *ibh_cmid; /* listener cmid */ + struct ib_device *ibh_ibdev; /* IB device */ + int ibh_page_shift; /* page shift of current HCA */ + int ibh_page_size; /* page size of current HCA */ + __u64 ibh_page_mask; /* page mask of current HCA */ + int ibh_mr_shift; /* bits shift of max MR size */ + __u64 ibh_mr_size; /* size of MR */ + int ibh_nmrs; /* # of global MRs */ + struct ib_mr **ibh_mrs; /* global MR */ + struct ib_pd *ibh_pd; /* PD */ + kib_dev_t *ibh_dev; /* owner */ + atomic_t ibh_ref; /* refcount */ } kib_hca_dev_t; /** # of seconds to keep pool alive */ @@ -222,19 +228,19 @@ typedef struct kib_hca_dev { #define IBLND_POOL_RETRY 1 typedef struct { - int ibp_npages; /* # pages */ - struct page *ibp_pages[0]; /* page array */ + int ibp_npages; /* # pages */ + struct page *ibp_pages[0]; /* page array */ } kib_pages_t; struct kib_pmr_pool; typedef struct { - struct list_head pmr_list; /* chain node */ - struct 
ib_phys_buf *pmr_ipb; /* physical buffer */ - struct ib_mr *pmr_mr; /* IB MR */ - struct kib_pmr_pool *pmr_pool; /* owner of this MR */ - __u64 pmr_iova; /* Virtual I/O address */ - int pmr_refcount; /* reference count */ + struct list_head pmr_list; /* chain node */ + struct ib_phys_buf *pmr_ipb; /* physical buffer */ + struct ib_mr *pmr_mr; /* IB MR */ + struct kib_pmr_pool *pmr_pool; /* owner of this MR */ + __u64 pmr_iova; /* Virtual I/O address */ + int pmr_refcount; /* reference count */ } kib_phys_mr_t; struct kib_pool; @@ -251,97 +257,99 @@ struct kib_net; #define IBLND_POOL_NAME_LEN 32 typedef struct kib_poolset { - spinlock_t ps_lock; /* serialize */ - struct kib_net *ps_net; /* network it belongs to */ - char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */ - struct list_head ps_pool_list; /* list of pools */ - struct list_head ps_failed_pool_list; /* failed pool list */ - unsigned long ps_next_retry; /* time stamp for retry if failed to allocate */ - int ps_increasing; /* is allocating new pool */ - int ps_pool_size; /* new pool size */ - int ps_cpt; /* CPT id */ - - kib_ps_pool_create_t ps_pool_create; /* create a new pool */ - kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */ - kib_ps_node_init_t ps_node_init; /* initialize new allocated node */ - kib_ps_node_fini_t ps_node_fini; /* finalize node */ + spinlock_t ps_lock; /* serialize */ + struct kib_net *ps_net; /* network it belongs to */ + char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */ + struct list_head ps_pool_list; /* list of pools */ + struct list_head ps_failed_pool_list;/* failed pool list */ + unsigned long ps_next_retry; /* time stamp for retry if + * failed to allocate */ + int ps_increasing; /* is allocating new pool */ + int ps_pool_size; /* new pool size */ + int ps_cpt; /* CPT id */ + + kib_ps_pool_create_t ps_pool_create; /* create a new pool */ + kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */ + kib_ps_node_init_t ps_node_init; /* initialize new allocated + * node */ + kib_ps_node_fini_t ps_node_fini; /* finalize node */ } kib_poolset_t; typedef struct kib_pool { - struct list_head po_list; /* chain on pool list */ - struct list_head po_free_list; /* pre-allocated node */ - kib_poolset_t *po_owner; /* pool_set of this pool */ - unsigned long po_deadline; /* deadline of this pool */ - int po_allocated; /* # of elements in use */ - int po_failed; /* pool is created on failed HCA */ - int po_size; /* # of pre-allocated elements */ + struct list_head po_list; /* chain on pool list */ + struct list_head po_free_list; /* pre-allocated node */ + kib_poolset_t *po_owner; /* pool_set of this pool */ + unsigned long po_deadline; /* deadline of this pool */ + int po_allocated; /* # of elements in use */ + int po_failed; /* pool is created on failed + * HCA */ + int po_size; /* # of pre-allocated elements */ } kib_pool_t; typedef struct { - kib_poolset_t tps_poolset; /* pool-set */ - __u64 tps_next_tx_cookie; /* cookie of TX */ + kib_poolset_t tps_poolset; /* pool-set */ + __u64 tps_next_tx_cookie; /* cookie of TX */ } kib_tx_poolset_t; typedef struct { - kib_pool_t tpo_pool; /* pool */ - struct kib_hca_dev *tpo_hdev; /* device for this pool */ - struct kib_tx *tpo_tx_descs; /* all the tx descriptors */ - kib_pages_t *tpo_tx_pages; /* premapped tx msg pages */ + kib_pool_t tpo_pool; /* pool */ + struct kib_hca_dev *tpo_hdev; /* device for this pool */ + struct kib_tx *tpo_tx_descs; /* all the tx descriptors */ + kib_pages_t *tpo_tx_pages; /* premapped tx msg pages */ } kib_tx_pool_t; 
typedef struct { - kib_poolset_t pps_poolset; /* pool-set */ + kib_poolset_t pps_poolset; /* pool-set */ } kib_pmr_poolset_t; typedef struct kib_pmr_pool { - struct kib_hca_dev *ppo_hdev; /* device for this pool */ - kib_pool_t ppo_pool; /* pool */ + struct kib_hca_dev *ppo_hdev; /* device for this pool */ + kib_pool_t ppo_pool; /* pool */ } kib_pmr_pool_t; typedef struct { - spinlock_t fps_lock; /* serialize */ - struct kib_net *fps_net; /* IB network */ - struct list_head fps_pool_list; /* FMR pool list */ - struct list_head fps_failed_pool_list; /* FMR pool list */ - __u64 fps_version; /* validity stamp */ - int fps_cpt; /* CPT id */ - int fps_pool_size; - int fps_flush_trigger; - /* is allocating new pool */ - int fps_increasing; - /* time stamp for retry if failed to allocate */ - unsigned long fps_next_retry; + spinlock_t fps_lock; /* serialize */ + struct kib_net *fps_net; /* IB network */ + struct list_head fps_pool_list; /* FMR pool list */ + struct list_head fps_failed_pool_list;/* FMR pool list */ + __u64 fps_version; /* validity stamp */ + int fps_cpt; /* CPT id */ + int fps_pool_size; + int fps_flush_trigger; + int fps_increasing; /* is allocating new pool */ + unsigned long fps_next_retry; /* time stamp for retry if + * failed to allocate */ } kib_fmr_poolset_t; typedef struct { - struct list_head fpo_list; /* chain on pool list */ - struct kib_hca_dev *fpo_hdev; /* device for this pool */ - kib_fmr_poolset_t *fpo_owner; /* owner of this pool */ - struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */ - unsigned long fpo_deadline; /* deadline of this pool */ - int fpo_failed; /* fmr pool is failed */ - int fpo_map_count; /* # of mapped FMR */ + struct list_head fpo_list; /* chain on pool list */ + struct kib_hca_dev *fpo_hdev; /* device for this pool */ + kib_fmr_poolset_t *fpo_owner; /* owner of this pool */ + struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */ + unsigned long fpo_deadline; /* deadline of this pool */ + int fpo_failed; /* fmr pool is failed */ + int fpo_map_count; /* # of mapped FMR */ } kib_fmr_pool_t; typedef struct { - struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */ - kib_fmr_pool_t *fmr_pool; /* pool of FMR */ + struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */ + kib_fmr_pool_t *fmr_pool; /* pool of FMR */ } kib_fmr_t; typedef struct kib_net { - struct list_head ibn_list; /* chain on kib_dev_t::ibd_nets */ - __u64 ibn_incarnation; /* my epoch */ - int ibn_init; /* initialisation state */ - int ibn_shutdown; /* shutting down? */ + struct list_head ibn_list; /* chain on kib_dev_t::ibd_nets */ + __u64 ibn_incarnation;/* my epoch */ + int ibn_init; /* initialisation state */ + int ibn_shutdown; /* shutting down? 
*/ - atomic_t ibn_npeers; /* # peers extant */ - atomic_t ibn_nconns; /* # connections extant */ + atomic_t ibn_npeers; /* # peers extant */ + atomic_t ibn_nconns; /* # connections extant */ - kib_tx_poolset_t **ibn_tx_ps; /* tx pool-set */ - kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */ - kib_pmr_poolset_t **ibn_pmr_ps; /* pmr pool-set */ + kib_tx_poolset_t **ibn_tx_ps; /* tx pool-set */ + kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */ + kib_pmr_poolset_t **ibn_pmr_ps; /* pmr pool-set */ - kib_dev_t *ibn_dev; /* underlying IB device */ + kib_dev_t *ibn_dev; /* underlying IB device */ } kib_net_t; #define KIB_THREAD_SHIFT 16 @@ -350,51 +358,45 @@ typedef struct kib_net { #define KIB_THREAD_TID(id) ((id) & ((1UL << KIB_THREAD_SHIFT) - 1)) struct kib_sched_info { - /* serialise */ - spinlock_t ibs_lock; - /* schedulers sleep here */ - wait_queue_head_t ibs_waitq; - /* conns to check for rx completions */ - struct list_head ibs_conns; - /* number of scheduler threads */ - int ibs_nthreads; - /* max allowed scheduler threads */ - int ibs_nthreads_max; - int ibs_cpt; /* CPT id */ + spinlock_t ibs_lock; /* serialise */ + wait_queue_head_t ibs_waitq; /* schedulers sleep here */ + struct list_head ibs_conns; /* conns to check for rx completions */ + int ibs_nthreads; /* number of scheduler threads */ + int ibs_nthreads_max; /* max allowed scheduler threads */ + int ibs_cpt; /* CPT id */ }; typedef struct { - int kib_init; /* initialisation state */ - int kib_shutdown; /* shut down? */ - struct list_head kib_devs; /* IB devices extant */ - /* list head of failed devices */ - struct list_head kib_failed_devs; - /* schedulers sleep here */ - wait_queue_head_t kib_failover_waitq; - atomic_t kib_nthreads; /* # live threads */ - /* stabilize net/dev/peer/conn ops */ - rwlock_t kib_global_lock; - /* hash table of all my known peers */ - struct list_head *kib_peers; - /* size of kib_peers */ - int kib_peer_hash_size; - /* the connd task (serialisation assertions) */ - void *kib_connd; - /* connections to setup/teardown */ - struct list_head kib_connd_conns; - /* connections with zero refcount */ - struct list_head kib_connd_zombies; - /* connection daemon sleeps here */ - wait_queue_head_t kib_connd_waitq; - spinlock_t kib_connd_lock; /* serialise */ - struct ib_qp_attr kib_error_qpa; /* QP->ERROR */ - /* percpt data for schedulers */ - struct kib_sched_info **kib_scheds; + int kib_init; /* initialisation state */ + int kib_shutdown; /* shut down? 
*/ + struct list_head kib_devs; /* IB devices extant */ + struct list_head kib_failed_devs; /* list head of failed + * devices */ + wait_queue_head_t kib_failover_waitq; /* schedulers sleep here */ + atomic_t kib_nthreads; /* # live threads */ + rwlock_t kib_global_lock; /* stabilize net/dev/peer/conn + * ops */ + struct list_head *kib_peers; /* hash table of all my known + * peers */ + int kib_peer_hash_size; /* size of kib_peers */ + void *kib_connd; /* the connd task + * (serialisation assertions) + */ + struct list_head kib_connd_conns; /* connections to + * setup/teardown */ + struct list_head kib_connd_zombies; /* connections with zero + * refcount */ + wait_queue_head_t kib_connd_waitq; /* connection daemon sleeps + * here */ + spinlock_t kib_connd_lock; /* serialise */ + struct ib_qp_attr kib_error_qpa; /* QP->ERROR */ + struct kib_sched_info **kib_scheds; /* percpt data for schedulers + */ } kib_data_t; -#define IBLND_INIT_NOTHING 0 -#define IBLND_INIT_DATA 1 -#define IBLND_INIT_ALL 2 +#define IBLND_INIT_NOTHING 0 +#define IBLND_INIT_DATA 1 +#define IBLND_INIT_ALL 2 /************************************************************************ * IB Wire message format. @@ -402,228 +404,243 @@ typedef struct { */ typedef struct kib_connparams { - __u16 ibcp_queue_depth; - __u16 ibcp_max_frags; - __u32 ibcp_max_msg_size; + __u16 ibcp_queue_depth; + __u16 ibcp_max_frags; + __u32 ibcp_max_msg_size; } WIRE_ATTR kib_connparams_t; typedef struct { - lnet_hdr_t ibim_hdr; /* portals header */ - char ibim_payload[0]; /* piggy-backed payload */ + lnet_hdr_t ibim_hdr; /* portals header */ + char ibim_payload[0]; /* piggy-backed payload */ } WIRE_ATTR kib_immediate_msg_t; typedef struct { - __u32 rf_nob; /* # bytes this frag */ - __u64 rf_addr; /* CAVEAT EMPTOR: misaligned!! */ + __u32 rf_nob; /* # bytes this frag */ + __u64 rf_addr; /* CAVEAT EMPTOR: misaligned!! 
*/ } WIRE_ATTR kib_rdma_frag_t; typedef struct { - __u32 rd_key; /* local/remote key */ - __u32 rd_nfrags; /* # fragments */ - kib_rdma_frag_t rd_frags[0]; /* buffer frags */ + __u32 rd_key; /* local/remote key */ + __u32 rd_nfrags; /* # fragments */ + kib_rdma_frag_t rd_frags[0]; /* buffer frags */ } WIRE_ATTR kib_rdma_desc_t; typedef struct { - lnet_hdr_t ibprm_hdr; /* portals header */ - __u64 ibprm_cookie; /* opaque completion cookie */ + lnet_hdr_t ibprm_hdr; /* portals header */ + __u64 ibprm_cookie; /* opaque completion cookie */ } WIRE_ATTR kib_putreq_msg_t; typedef struct { - __u64 ibpam_src_cookie; /* reflected completion cookie */ - __u64 ibpam_dst_cookie; /* opaque completion cookie */ - kib_rdma_desc_t ibpam_rd; /* sender's sink buffer */ + __u64 ibpam_src_cookie; /* reflected completion cookie */ + __u64 ibpam_dst_cookie; /* opaque completion cookie */ + kib_rdma_desc_t ibpam_rd; /* sender's sink buffer */ } WIRE_ATTR kib_putack_msg_t; typedef struct { - lnet_hdr_t ibgm_hdr; /* portals header */ - __u64 ibgm_cookie; /* opaque completion cookie */ - kib_rdma_desc_t ibgm_rd; /* rdma descriptor */ + lnet_hdr_t ibgm_hdr; /* portals header */ + __u64 ibgm_cookie; /* opaque completion cookie */ + kib_rdma_desc_t ibgm_rd; /* rdma descriptor */ } WIRE_ATTR kib_get_msg_t; typedef struct { - __u64 ibcm_cookie; /* opaque completion cookie */ - __s32 ibcm_status; /* < 0 failure: >= 0 length */ + __u64 ibcm_cookie; /* opaque completion cookie */ + __s32 ibcm_status; /* < 0 failure: >= 0 length */ } WIRE_ATTR kib_completion_msg_t; typedef struct { /* First 2 fields fixed FOR ALL TIME */ - __u32 ibm_magic; /* I'm an ibnal message */ - __u16 ibm_version; /* this is my version number */ - - __u8 ibm_type; /* msg type */ - __u8 ibm_credits; /* returned credits */ - __u32 ibm_nob; /* # bytes in whole message */ - __u32 ibm_cksum; /* checksum (0 == no checksum) */ - __u64 ibm_srcnid; /* sender's NID */ - __u64 ibm_srcstamp; /* sender's incarnation */ - __u64 ibm_dstnid; /* destination's NID */ - __u64 ibm_dststamp; /* destination's incarnation */ + __u32 ibm_magic; /* I'm an ibnal message */ + __u16 ibm_version; /* this is my version number */ + + __u8 ibm_type; /* msg type */ + __u8 ibm_credits; /* returned credits */ + __u32 ibm_nob; /* # bytes in whole message */ + __u32 ibm_cksum; /* checksum (0 == no checksum) */ + __u64 ibm_srcnid; /* sender's NID */ + __u64 ibm_srcstamp; /* sender's incarnation */ + __u64 ibm_dstnid; /* destination's NID */ + __u64 ibm_dststamp; /* destination's incarnation */ union { - kib_connparams_t connparams; - kib_immediate_msg_t immediate; - kib_putreq_msg_t putreq; - kib_putack_msg_t putack; - kib_get_msg_t get; - kib_completion_msg_t completion; + kib_connparams_t connparams; + kib_immediate_msg_t immediate; + kib_putreq_msg_t putreq; + kib_putack_msg_t putack; + kib_get_msg_t get; + kib_completion_msg_t completion; } WIRE_ATTR ibm_u; } WIRE_ATTR kib_msg_t; -#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC /* unique magic */ +#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC /* unique magic */ -#define IBLND_MSG_VERSION_1 0x11 -#define IBLND_MSG_VERSION_2 0x12 -#define IBLND_MSG_VERSION IBLND_MSG_VERSION_2 +#define IBLND_MSG_VERSION_1 0x11 +#define IBLND_MSG_VERSION_2 0x12 +#define IBLND_MSG_VERSION IBLND_MSG_VERSION_2 -#define IBLND_MSG_CONNREQ 0xc0 /* connection request */ -#define IBLND_MSG_CONNACK 0xc1 /* connection acknowledge */ -#define IBLND_MSG_NOOP 0xd0 /* nothing (just credits) */ -#define IBLND_MSG_IMMEDIATE 0xd1 /* immediate */ -#define IBLND_MSG_PUT_REQ 0xd2 
/* putreq (src->sink) */ -#define IBLND_MSG_PUT_NAK 0xd3 /* completion (sink->src) */ -#define IBLND_MSG_PUT_ACK 0xd4 /* putack (sink->src) */ -#define IBLND_MSG_PUT_DONE 0xd5 /* completion (src->sink) */ -#define IBLND_MSG_GET_REQ 0xd6 /* getreq (sink->src) */ -#define IBLND_MSG_GET_DONE 0xd7 /* completion (src->sink: all OK) */ +#define IBLND_MSG_CONNREQ 0xc0 /* connection request */ +#define IBLND_MSG_CONNACK 0xc1 /* connection acknowledge */ +#define IBLND_MSG_NOOP 0xd0 /* nothing (just credits) */ +#define IBLND_MSG_IMMEDIATE 0xd1 /* immediate */ +#define IBLND_MSG_PUT_REQ 0xd2 /* putreq (src->sink) */ +#define IBLND_MSG_PUT_NAK 0xd3 /* completion (sink->src) */ +#define IBLND_MSG_PUT_ACK 0xd4 /* putack (sink->src) */ +#define IBLND_MSG_PUT_DONE 0xd5 /* completion (src->sink) */ +#define IBLND_MSG_GET_REQ 0xd6 /* getreq (sink->src) */ +#define IBLND_MSG_GET_DONE 0xd7 /* completion (src->sink: all OK) */ typedef struct { - __u32 ibr_magic; /* sender's magic */ - __u16 ibr_version; /* sender's version */ - __u8 ibr_why; /* reject reason */ - __u8 ibr_padding; /* padding */ - __u64 ibr_incarnation; /* incarnation of peer */ - kib_connparams_t ibr_cp; /* connection parameters */ + __u32 ibr_magic; /* sender's magic */ + __u16 ibr_version; /* sender's version */ + __u8 ibr_why; /* reject reason */ + __u8 ibr_padding; /* padding */ + __u64 ibr_incarnation; /* incarnation of peer */ + kib_connparams_t ibr_cp; /* connection parameters */ } WIRE_ATTR kib_rej_t; /* connection rejection reasons */ -#define IBLND_REJECT_CONN_RACE 1 /* You lost connection race */ -#define IBLND_REJECT_NO_RESOURCES 2 /* Out of memory/conns etc */ -#define IBLND_REJECT_FATAL 3 /* Anything else */ - -#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */ -#define IBLND_REJECT_CONN_STALE 5 /* stale peer */ - -#define IBLND_REJECT_RDMA_FRAGS 6 /* Fatal: peer's rdma frags can't match mine */ -#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't match mine */ +#define IBLND_REJECT_CONN_RACE 1 /* You lost connection race */ +#define IBLND_REJECT_NO_RESOURCES 2 /* Out of memory/conns etc */ +#define IBLND_REJECT_FATAL 3 /* Anything else */ +#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */ +#define IBLND_REJECT_CONN_STALE 5 /* stale peer */ +#define IBLND_REJECT_RDMA_FRAGS 6 /* Fatal: peer's rdma frags can't match + * mine */ +#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't + * match mine */ /***********************************************************************/ -typedef struct kib_rx /* receive message */ +typedef struct kib_rx /* receive message */ { - struct list_head rx_list; /* queue for attention */ - struct kib_conn *rx_conn; /* owning conn */ - int rx_nob; /* # bytes received (-1 while posted) */ - enum ib_wc_status rx_status; /* completion status */ - kib_msg_t *rx_msg; /* message buffer (host vaddr) */ - __u64 rx_msgaddr; /* message buffer (I/O addr) */ - DECLARE_PCI_UNMAP_ADDR (rx_msgunmap); /* for dma_unmap_single() */ - struct ib_recv_wr rx_wrq; /* receive work item... 
 */
- struct ib_sge rx_sge; /* ...and its memory */
+ struct list_head rx_list; /* queue for attention */
+ struct kib_conn *rx_conn; /* owning conn */
+ int rx_nob; /* # bytes received (-1 while
+ * posted) */
+ enum ib_wc_status rx_status; /* completion status */
+ kib_msg_t *rx_msg; /* message buffer (host vaddr) */
+ __u64 rx_msgaddr; /* message buffer (I/O addr) */
+ DECLARE_PCI_UNMAP_ADDR (rx_msgunmap); /* for dma_unmap_single() */
+ struct ib_recv_wr rx_wrq; /* receive work item... */
+ struct ib_sge rx_sge; /* ...and its memory */
 } kib_rx_t;

-#define IBLND_POSTRX_DONT_POST 0 /* don't post */
-#define IBLND_POSTRX_NO_CREDIT 1 /* post: no credits */
-#define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer back 1 credit */
-#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give myself back 1 reserved credit */
+#define IBLND_POSTRX_DONT_POST 0 /* don't post */
+#define IBLND_POSTRX_NO_CREDIT 1 /* post: no credits */
+#define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer back 1 credit */
+#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give myself back 1 reserved
+ * credit */

-typedef struct kib_tx /* transmit message */
+typedef struct kib_tx /* transmit message */
 {
- struct list_head tx_list; /* queue on idle_txs ibc_tx_queue etc. */
- kib_tx_pool_t *tx_pool; /* pool I'm from */
- struct kib_conn *tx_conn; /* owning conn */
- short tx_sending; /* # tx callbacks outstanding */
- short tx_queued; /* queued for sending */
- short tx_waiting; /* waiting for peer */
- int tx_status; /* LNET completion status */
- unsigned long tx_deadline; /* completion deadline */
- __u64 tx_cookie; /* completion cookie */
- lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
- kib_msg_t *tx_msg; /* message buffer (host vaddr) */
- __u64 tx_msgaddr; /* message buffer (I/O addr) */
- DECLARE_PCI_UNMAP_ADDR (tx_msgunmap); /* for dma_unmap_single() */
- int tx_nwrq; /* # send work items */
- struct ib_send_wr *tx_wrq; /* send work items... */
- struct ib_sge *tx_sge; /* ...and their memory */
- kib_rdma_desc_t *tx_rd; /* rdma descriptor */
- int tx_nfrags; /* # entries in... */
- struct scatterlist *tx_frags; /* dma_map_sg descriptor */
- __u64 *tx_pages; /* rdma phys page addrs */
+ struct list_head tx_list; /* queue on idle_txs ibc_tx_queue
+ * etc. */
+ kib_tx_pool_t *tx_pool; /* pool I'm from */
+ struct kib_conn *tx_conn; /* owning conn */
+ short tx_sending; /* # tx callbacks outstanding */
+ short tx_queued; /* queued for sending */
+ short tx_waiting; /* waiting for peer */
+ int tx_status; /* LNET completion status */
+ unsigned long tx_deadline; /* completion deadline */
+ __u64 tx_cookie; /* completion cookie */
+ lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on
+ * completion */
+ kib_msg_t *tx_msg; /* message buffer (host vaddr) */
+ __u64 tx_msgaddr; /* message buffer (I/O addr) */
+ DECLARE_PCI_UNMAP_ADDR (tx_msgunmap); /* for dma_unmap_single() */
+ int tx_nwrq; /* # send work items */
+ struct ib_send_wr *tx_wrq; /* send work items... */
+ struct ib_sge *tx_sge; /* ...and their memory */
+ kib_rdma_desc_t *tx_rd; /* rdma descriptor */
+ int tx_nfrags; /* # entries in... */
+ struct scatterlist *tx_frags; /* dma_map_sg descriptor */
+ __u64 *tx_pages; /* rdma phys page addrs */
 union {
- kib_phys_mr_t *pmr; /* MR for physical buffer */
- kib_fmr_t fmr; /* FMR */
- } tx_u;
- int tx_dmadir; /* dma direction */
+ kib_phys_mr_t *pmr; /* MR for physical buffer */
+ kib_fmr_t fmr; /* FMR */
+ } tx_u;
+ int tx_dmadir; /* dma direction */
 } kib_tx_t;

 typedef struct kib_connvars {
- /* connection-in-progress variables */
- kib_msg_t cv_msg;
+ kib_msg_t cv_msg; /* connection-in-progress variables */
 } kib_connvars_t;

 typedef struct kib_conn {
- struct kib_sched_info *ibc_sched; /* scheduler information */
- struct kib_peer *ibc_peer; /* owning peer */
- kib_hca_dev_t *ibc_hdev; /* HCA bound on */
- struct list_head ibc_list; /* stash on peer's conn list */
- struct list_head ibc_sched_list; /* schedule for attention */
- __u16 ibc_version; /* version of connection */
- __u64 ibc_incarnation; /* which instance of the peer */
- atomic_t ibc_refcount; /* # users */
- int ibc_state; /* what's happening */
- int ibc_nsends_posted; /* # uncompleted sends */
- int ibc_noops_posted; /* # uncompleted NOOPs */
- int ibc_credits; /* # credits I have */
- int ibc_outstanding_credits; /* # credits to return */
- int ibc_reserved_credits;/* # ACK/DONE msg credits */
- int ibc_comms_error; /* set on comms error */
- unsigned int ibc_nrx:16; /* receive buffers owned */
- unsigned int ibc_scheduled:1; /* scheduled for attention */
- unsigned int ibc_ready:1; /* CQ callback fired */
- /* time of last send */
- unsigned long ibc_last_send;
- /** link chain for kiblnd_check_conns only */
- struct list_head ibc_connd_list;
- /** rxs completed before ESTABLISHED */
- struct list_head ibc_early_rxs;
- /** IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
- struct list_head ibc_tx_noops;
- struct list_head ibc_tx_queue; /* sends that need a credit */
- struct list_head ibc_tx_queue_nocred;/* sends that don't need a credit */
- struct list_head ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
- struct list_head ibc_active_txs; /* active tx awaiting completion */
- spinlock_t ibc_lock; /* serialise */
- kib_rx_t *ibc_rxs; /* the rx descs */
- kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */
-
- struct rdma_cm_id *ibc_cmid; /* CM id */
- struct ib_cq *ibc_cq; /* completion queue */
-
- kib_connvars_t *ibc_connvars; /* in-progress connection state */
+ struct kib_sched_info *ibc_sched; /* scheduler information */
+ struct kib_peer *ibc_peer; /* owning peer */
+ kib_hca_dev_t *ibc_hdev; /* HCA bound on */
+ struct list_head ibc_list; /* stash on peer's conn
+ * list */
+ struct list_head ibc_sched_list; /* schedule for attention */
+ __u16 ibc_version; /* version of connection */
+ __u64 ibc_incarnation; /* which instance of the
+ * peer */
+ atomic_t ibc_refcount; /* # users */
+ int ibc_state; /* what's happening */
+ int ibc_nsends_posted; /* # uncompleted sends */
+ int ibc_noops_posted; /* # uncompleted NOOPs */
+ int ibc_credits; /* # credits I have */
+ int ibc_outstanding_credits; /* # credits to return */
+ int ibc_reserved_credits; /* # ACK/DONE msg credits */
+ int ibc_comms_error; /* set on comms error */
+ unsigned int ibc_nrx:16; /* receive buffers owned */
+ unsigned int ibc_scheduled:1; /* scheduled for attention
+ */
+ unsigned int ibc_ready:1; /* CQ callback fired */
+ unsigned long ibc_last_send; /* time of last send */
+ struct list_head ibc_connd_list; /* link chain for
+ * kiblnd_check_conns only
+ */
+ struct list_head ibc_early_rxs; /* rxs completed before
+ * ESTABLISHED */
+ struct list_head ibc_tx_noops; /* IBLND_MSG_NOOPs for
+ * IBLND_MSG_VERSION_1 */
+ struct list_head ibc_tx_queue; /* sends that need a credit
+ */
+ struct list_head ibc_tx_queue_nocred; /* sends that don't need a
+ * credit */
+ struct list_head ibc_tx_queue_rsrvd; /* sends that need to
+ * reserve an ACK/DONE msg
+ */
+ struct list_head ibc_active_txs; /* active tx awaiting
+ * completion */
+ spinlock_t ibc_lock; /* serialise */
+ kib_rx_t *ibc_rxs; /* the rx descs */
+ kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */
+
+ struct rdma_cm_id *ibc_cmid; /* CM id */
+ struct ib_cq *ibc_cq; /* completion queue */
+
+ kib_connvars_t *ibc_connvars; /* in-progress connection
+ * state */
 } kib_conn_t;

-#define IBLND_CONN_INIT 0 /* being initialised */
-#define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */
-#define IBLND_CONN_PASSIVE_WAIT 2 /* passive waiting for rtu */
-#define IBLND_CONN_ESTABLISHED 3 /* connection established */
-#define IBLND_CONN_CLOSING 4 /* being closed */
-#define IBLND_CONN_DISCONNECTED 5 /* disconnected */
+#define IBLND_CONN_INIT 0 /* being initialised */
+#define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */
+#define IBLND_CONN_PASSIVE_WAIT 2 /* passive waiting for rtu */
+#define IBLND_CONN_ESTABLISHED 3 /* connection established */
+#define IBLND_CONN_CLOSING 4 /* being closed */
+#define IBLND_CONN_DISCONNECTED 5 /* disconnected */

 typedef struct kib_peer {
- struct list_head ibp_list; /* stash on global peer list */
- lnet_nid_t ibp_nid; /* who's on the other end(s) */
- lnet_ni_t *ibp_ni; /* LNet interface */
- atomic_t ibp_refcount; /* # users */
- struct list_head ibp_conns; /* all active connections */
- struct list_head ibp_tx_queue; /* msgs waiting for a conn */
- __u16 ibp_version; /* version of peer */
- __u64 ibp_incarnation; /* incarnation of peer */
- int ibp_connecting; /* current active connection attempts */
- int ibp_accepting; /* current passive connection attempts */
- int ibp_error; /* errno on closing this peer */
- unsigned long ibp_last_alive; /* when (in jiffies) I was last alive */
+ struct list_head ibp_list; /* stash on global peer list */
+ lnet_nid_t ibp_nid; /* who's on the other end(s) */
+ lnet_ni_t *ibp_ni; /* LNet interface */
+ atomic_t ibp_refcount; /* # users */
+ struct list_head ibp_conns; /* all active connections */
+ struct list_head ibp_tx_queue; /* msgs waiting for a conn */
+ __u16 ibp_version; /* version of peer */
+ __u64 ibp_incarnation; /* incarnation of peer */
+ int ibp_connecting; /* current active connection attempts
+ */
+ int ibp_accepting; /* current passive connection attempts
+ */
+ int ibp_error; /* errno on closing this peer */
+ unsigned long ibp_last_alive; /* when (in jiffies) I was last alive
+ */
 } kib_peer_t;

-extern kib_data_t kiblnd_data;
+extern kib_data_t kiblnd_data;

 extern void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);

@@ -941,8 +958,8 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
  * right because OFED1.2 defines it as const, to use it we have to add
  * (void *) cast to overcome "const" */
-#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
-#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
+#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
+#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)

 struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
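For context before the o2iblnd_cb.c hunks: the IBLND_POSTRX_* values above select what kiblnd_post_rx() does with a receive buffer once its message has been consumed. The sketch below condenses that policy as an illustration only (it is not part of the patch, and the helper name post_rx_policy is invented); the scatter/gather setup, the ib_post_recv() call and all error paths are elided:

	/* illustration only -- condensed from kiblnd_post_rx() */
	static void post_rx_policy(kib_rx_t *rx, int credit)
	{
		kib_conn_t *conn = rx->rx_conn;

		if (credit == IBLND_POSTRX_DONT_POST)
			return;		/* retire the buffer quietly */

		/* ... repost rx->rx_wrq via ib_post_recv() here ... */

		if (credit == IBLND_POSTRX_NO_CREDIT)
			return;		/* repost without returning a credit */

		spin_lock(&conn->ibc_lock);
		if (credit == IBLND_POSTRX_PEER_CREDIT)
			conn->ibc_outstanding_credits++; /* give peer back 1 credit */
		else				/* IBLND_POSTRX_RSRVD_CREDIT */
			conn->ibc_reserved_credits++;	/* reclaim an ACK/DONE credit */
		spin_unlock(&conn->ibc_lock);

		kiblnd_check_sends(conn);	/* a returned credit may unblock a tx */
	}
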
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index dbf374983..477aa8b76 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -44,9 +44,9 @@ static void
 kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
 {
 lnet_msg_t *lntmsg[2];
- kib_net_t *net = ni->ni_data;
- int rc;
- int i;
+ kib_net_t *net = ni->ni_data;
+ int rc;
+ int i;

 LASSERT(net != NULL);
 LASSERT(!in_interrupt());
@@ -102,10 +102,10 @@ kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status)
 static kib_tx_t *
 kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
 {
- kib_net_t *net = (kib_net_t *)ni->ni_data;
- struct list_head *node;
- kib_tx_t *tx;
- kib_tx_poolset_t *tps;
+ kib_net_t *net = (kib_net_t *)ni->ni_data;
+ struct list_head *node;
+ kib_tx_t *tx;
+ kib_tx_poolset_t *tps;

 tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)];
 node = kiblnd_pool_alloc_node(&tps->tps_poolset);
@@ -130,9 +130,9 @@ kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
 static void
 kiblnd_drop_rx(kib_rx_t *rx)
 {
- kib_conn_t *conn = rx->rx_conn;
- struct kib_sched_info *sched = conn->ibc_sched;
- unsigned long flags;
+ kib_conn_t *conn = rx->rx_conn;
+ struct kib_sched_info *sched = conn->ibc_sched;
+ unsigned long flags;

 spin_lock_irqsave(&sched->ibs_lock, flags);
 LASSERT(conn->ibc_nrx > 0);
@@ -145,11 +145,11 @@ kiblnd_drop_rx(kib_rx_t *rx)
 int
 kiblnd_post_rx(kib_rx_t *rx, int credit)
 {
- kib_conn_t *conn = rx->rx_conn;
- kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
- struct ib_recv_wr *bad_wrq = NULL;
- struct ib_mr *mr;
- int rc;
+ kib_conn_t *conn = rx->rx_conn;
+ kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
+ struct ib_recv_wr *bad_wrq = NULL;
+ struct ib_mr *mr;
+ int rc;

 LASSERT(net != NULL);
 LASSERT(!in_interrupt());
@@ -164,10 +164,10 @@ kiblnd_post_rx(kib_rx_t *rx, int credit)
 rx->rx_sge.addr = rx->rx_msgaddr;
 rx->rx_sge.length = IBLND_MSG_SIZE;

- rx->rx_wrq.next = NULL;
+ rx->rx_wrq.next = NULL;
 rx->rx_wrq.sg_list = &rx->rx_sge;
 rx->rx_wrq.num_sge = 1;
- rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);
+ rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);

 LASSERT(conn->ibc_state >= IBLND_CONN_INIT);
 LASSERT(rx->rx_nob >= 0); /* not posted */
@@ -212,7 +212,7 @@ kiblnd_post_rx(kib_rx_t *rx, int credit)
 static kib_tx_t *
 kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
 {
- struct list_head *tmp;
+ struct list_head *tmp;

 list_for_each(tmp, &conn->ibc_active_txs) {
 kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
@@ -237,9 +237,9 @@ kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
 static void
 kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
 {
- kib_tx_t *tx;
- lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
- int idle;
+ kib_tx_t *tx;
+ lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+ int idle;

 spin_lock(&conn->ibc_lock);
@@ -276,8 +276,8 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
 static void
 kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
 {
- lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
- kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
+ lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+ kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);

 if (tx == NULL) {
 CERROR("Can't get tx for completion %x for %s\n",
@@ -295,14 +295,14 @@ kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
 static void
 kiblnd_handle_rx(kib_rx_t *rx)
 {
- kib_msg_t *msg = rx->rx_msg;
- kib_conn_t *conn = rx->rx_conn;
- lnet_ni_t *ni =
conn->ibc_peer->ibp_ni; - int credits = msg->ibm_credits; - kib_tx_t *tx; - int rc = 0; - int rc2; - int post_credit; + kib_msg_t *msg = rx->rx_msg; + kib_conn_t *conn = rx->rx_conn; + lnet_ni_t *ni = conn->ibc_peer->ibp_ni; + int credits = msg->ibm_credits; + kib_tx_t *tx; + int rc = 0; + int rc2; + int post_credit; LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); @@ -456,12 +456,12 @@ kiblnd_handle_rx(kib_rx_t *rx) static void kiblnd_rx_complete(kib_rx_t *rx, int status, int nob) { - kib_msg_t *msg = rx->rx_msg; - kib_conn_t *conn = rx->rx_conn; - lnet_ni_t *ni = conn->ibc_peer->ibp_ni; - kib_net_t *net = ni->ni_data; - int rc; - int err = -EIO; + kib_msg_t *msg = rx->rx_msg; + kib_conn_t *conn = rx->rx_conn; + lnet_ni_t *ni = conn->ibc_peer->ibp_ni; + kib_net_t *net = ni->ni_data; + int rc; + int err = -EIO; LASSERT(net != NULL); LASSERT(rx->rx_nob < 0); /* was posted */ @@ -502,8 +502,8 @@ kiblnd_rx_complete(kib_rx_t *rx, int status, int nob) /* racing with connection establishment/teardown! */ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { - rwlock_t *g_lock = &kiblnd_data.kib_global_lock; - unsigned long flags; + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + unsigned long flags; write_lock_irqsave(g_lock, flags); /* must check holding global lock to eliminate race */ @@ -550,19 +550,19 @@ kiblnd_kvaddr_to_page(unsigned long vaddr) static int kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) { - kib_hca_dev_t *hdev; - __u64 *pages = tx->tx_pages; - kib_fmr_poolset_t *fps; - int npages; - int size; - int cpt; - int rc; - int i; + kib_hca_dev_t *hdev; + __u64 *pages = tx->tx_pages; + kib_fmr_poolset_t *fps; + int npages; + int size; + int cpt; + int rc; + int i; LASSERT(tx->tx_pool != NULL); LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL); - hdev = tx->tx_pool->tpo_hdev; + hdev = tx->tx_pool->tpo_hdev; for (i = 0, npages = 0; i < rd->rd_nfrags; i++) { for (size = 0; size < rd->rd_frags[i].rf_nob; @@ -586,7 +586,7 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) rd->rd_key = (rd != tx->tx_rd) ? 
tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
 tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
 rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
- rd->rd_frags[0].rf_nob = nob;
+ rd->rd_frags[0].rf_nob = nob;
 rd->rd_nfrags = 1;

 return 0;
@@ -595,11 +595,11 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 static int
 kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 {
- kib_hca_dev_t *hdev;
- kib_pmr_poolset_t *pps;
- __u64 iova;
- int cpt;
- int rc;
+ kib_hca_dev_t *hdev;
+ kib_pmr_poolset_t *pps;
+ __u64 iova;
+ int cpt;
+ int rc;

 LASSERT(tx->tx_pool != NULL);
 LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
@@ -623,7 +623,7 @@ kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 tx->tx_u.pmr->pmr_mr->lkey;
 rd->rd_nfrags = 1;
 rd->rd_frags[0].rf_addr = iova;
- rd->rd_frags[0].rf_nob = nob;
+ rd->rd_frags[0].rf_nob = nob;

 return 0;
 }
@@ -631,7 +631,7 @@ kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
 void
 kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
 {
- kib_net_t *net = ni->ni_data;
+ kib_net_t *net = ni->ni_data;

 LASSERT(net != NULL);
@@ -655,20 +655,19 @@ int
 kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
 int nfrags)
 {
- kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
- kib_net_t *net = ni->ni_data;
- struct ib_mr *mr = NULL;
- __u32 nob;
- int i;
+ kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
+ kib_net_t *net = ni->ni_data;
+ struct ib_mr *mr = NULL;
+ __u32 nob;
+ int i;

 /* If rd is not tx_rd, it's going to get sent to a peer and I'm the
 * RDMA sink */
 tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 tx->tx_nfrags = nfrags;

- rd->rd_nfrags =
- kiblnd_dma_map_sg(hdev->ibh_ibdev,
- tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
+ rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
+ tx->tx_nfrags, tx->tx_dmadir);

 for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
 rd->rd_frags[i].rf_nob = kiblnd_sg_dma_len(
@@ -699,12 +698,12 @@ static int
 kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
 unsigned int niov, struct kvec *iov, int offset, int nob)
 {
- kib_net_t *net = ni->ni_data;
- struct page *page;
+ kib_net_t *net = ni->ni_data;
+ struct page *page;
 struct scatterlist *sg;
- unsigned long vaddr;
- int fragnob;
- int page_offset;
+ unsigned long vaddr;
+ int fragnob;
+ int page_offset;

 LASSERT(nob > 0);
 LASSERT(niov > 0);
@@ -752,9 +751,9 @@ static int
 kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
 int nkiov, lnet_kiov_t *kiov, int offset, int nob)
 {
- kib_net_t *net = ni->ni_data;
+ kib_net_t *net = ni->ni_data;
 struct scatterlist *sg;
- int fragnob;
+ int fragnob;

 CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);
@@ -793,11 +792,11 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
 __releases(conn->ibc_lock)
 __acquires(conn->ibc_lock)
 {
- kib_msg_t *msg = tx->tx_msg;
- kib_peer_t *peer = conn->ibc_peer;
- int ver = conn->ibc_version;
- int rc;
- int done;
+ kib_msg_t *msg = tx->tx_msg;
+ kib_peer_t *peer = conn->ibc_peer;
+ int ver = conn->ibc_version;
+ int rc;
+ int done;
 struct ib_send_wr *bad_wrq;

 LASSERT(tx->tx_queued);
@@ -878,8 +877,8 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
 /* close_conn will launch failover */
 rc = -ENETDOWN;
 } else {
- rc = ib_post_send(conn->ibc_cmid->qp,
- tx->tx_wrq, &bad_wrq);
+ rc = ib_post_send(conn->ibc_cmid->qp, tx->tx_wrq, &bad_wrq);
 }

 conn->ibc_last_send = jiffies;
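The @@ -793 and @@ -878 hunks above touch kiblnd_post_tx_locked(), whose core job is the credit gate in front of ib_post_send(). As an aside (an illustration, not the patch's code; the helper name may_post_tx is invented, while IBLND_CONCURRENT_SENDS() and IBLND_OOB_CAPABLE() are existing macros from o2iblnd.h), the gate reduces to roughly this, with NOOP accounting and error paths stripped:

	/* illustration only -- the send gate inside kiblnd_post_tx_locked() */
	static int may_post_tx(kib_conn_t *conn, int credit)
	{
		int ver = conn->ibc_version;

		if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver))
			return 0;	/* send work queue full; tx stays queued */

		if (credit != 0 && conn->ibc_credits == 0)
			return 0;	/* no credits: peer has no rx posted for us */

		if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
		    conn->ibc_credits == 1 &&		/* keep the last credit */
		    conn->ibc_outstanding_credits == 0)	/* for returning credits */
			return 0;

		conn->ibc_nsends_posted++;
		if (credit != 0)
			conn->ibc_credits--;
		return 1;		/* safe to ib_post_send() */
	}
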
@@ -925,9 +923,9 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
 void
 kiblnd_check_sends(kib_conn_t *conn)
 {
- int ver = conn->ibc_version;
+ int ver = conn->ibc_version;
 lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
- kib_tx_t *tx;
+ kib_tx_t *tx;

 /* Don't send anything until after the connection is established */
 if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
@@ -997,9 +995,9 @@ kiblnd_check_sends(kib_conn_t *conn)
 static void
 kiblnd_tx_complete(kib_tx_t *tx, int status)
 {
- int failed = (status != IB_WC_SUCCESS);
- kib_conn_t *conn = tx->tx_conn;
- int idle;
+ int failed = (status != IB_WC_SUCCESS);
+ kib_conn_t *conn = tx->tx_conn;
+ int idle;

 LASSERT(tx->tx_sending > 0);
@@ -1051,11 +1049,11 @@ kiblnd_tx_complete(kib_tx_t *tx, int status)
 void
 kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
 {
- kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
- struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
+ kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
+ struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
 struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
- int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
- struct ib_mr *mr;
+ int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
+ struct ib_mr *mr;

 LASSERT(tx->tx_nwrq >= 0);
 LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
@@ -1086,14 +1084,14 @@ int
 kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
 int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
 {
- kib_msg_t *ibmsg = tx->tx_msg;
- kib_rdma_desc_t *srcrd = tx->tx_rd;
- struct ib_sge *sge = &tx->tx_sge[0];
+ kib_msg_t *ibmsg = tx->tx_msg;
+ kib_rdma_desc_t *srcrd = tx->tx_rd;
+ struct ib_sge *sge = &tx->tx_sge[0];
 struct ib_send_wr *wrq = &tx->tx_wrq[0];
- int rc = resid;
- int srcidx;
- int dstidx;
- int wrknob;
+ int rc = resid;
+ int srcidx;
+ int dstidx;
+ int wrknob;

 LASSERT(!in_interrupt());
 LASSERT(tx->tx_nwrq == 0);
@@ -1144,7 +1142,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
 wrq->send_flags = 0;
 wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
- wrq->wr.rdma.rkey = kiblnd_rd_frag_key(dstrd, dstidx);
+ wrq->wr.rdma.rkey = kiblnd_rd_frag_key(dstrd, dstidx);

 srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
 dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);
@@ -1170,7 +1168,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
 void
 kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
 {
- struct list_head *q;
+ struct list_head *q;

 LASSERT(tx->tx_nwrq > 0); /* work items set up */
 LASSERT(!tx->tx_queued); /* not queued for sending already */
@@ -1271,11 +1269,11 @@ static void
 kiblnd_connect_peer(kib_peer_t *peer)
 {
 struct rdma_cm_id *cmid;
- kib_dev_t *dev;
- kib_net_t *net = peer->ibp_ni->ni_data;
+ kib_dev_t *dev;
+ kib_net_t *net = peer->ibp_ni->ni_data;
 struct sockaddr_in srcaddr;
 struct sockaddr_in dstaddr;
- int rc;
+ int rc;

 LASSERT(net != NULL);
 LASSERT(peer->ibp_connecting > 0);
@@ -1335,12 +1333,12 @@ void
 kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
 {
- kib_peer_t *peer;
- kib_peer_t *peer2;
- kib_conn_t *conn;
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
- unsigned long flags;
- int rc;
+ kib_peer_t *peer;
+ kib_peer_t *peer2;
+ kib_conn_t *conn;
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ unsigned long flags;
+ int rc;

 /* If I get here, I've committed to send, so I complete the tx with
 * failure on any problems */
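The next hunk is in kiblnd_send(), which picks the wire protocol for each outgoing LNet message. Roughly: anything that fits in the preposted IBLND_MSG_SIZE buffer travels as IBLND_MSG_IMMEDIATE; larger PUT/REPLY payloads start an IBLND_MSG_PUT_REQ rendezvous; a GET whose reply would be large sends IBLND_MSG_GET_REQ so the reply comes back by RDMA. An illustrative reduction (not the patch's code; choose_msg_type is an invented helper, and nob stands for the immediate-format size of the payload, or of the expected reply in the GET case):

	/* illustration only -- protocol choice made by kiblnd_send() */
	static int choose_msg_type(int type, int nob, int routing)
	{
		switch (type) {
		case LNET_MSG_GET:
			if (routing || nob <= IBLND_MSG_SIZE)
				return IBLND_MSG_IMMEDIATE;
			return IBLND_MSG_GET_REQ;
		case LNET_MSG_PUT:
		case LNET_MSG_REPLY:
			if (nob <= IBLND_MSG_SIZE)
				return IBLND_MSG_IMMEDIATE;
			return IBLND_MSG_PUT_REQ;
		default:	/* e.g. LNET_MSG_ACK: always fits */
			return IBLND_MSG_IMMEDIATE;
		}
	}
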
@@ -1456,20 +1454,20 @@ int
 kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 {
- lnet_hdr_t *hdr = &lntmsg->msg_hdr;
- int type = lntmsg->msg_type;
+ lnet_hdr_t *hdr = &lntmsg->msg_hdr;
+ int type = lntmsg->msg_type;
 lnet_process_id_t target = lntmsg->msg_target;
- int target_is_router = lntmsg->msg_target_is_router;
- int routing = lntmsg->msg_routing;
- unsigned int payload_niov = lntmsg->msg_niov;
- struct kvec *payload_iov = lntmsg->msg_iov;
- lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
- unsigned int payload_offset = lntmsg->msg_offset;
- unsigned int payload_nob = lntmsg->msg_len;
- kib_msg_t *ibmsg;
- kib_tx_t *tx;
- int nob;
- int rc;
+ int target_is_router = lntmsg->msg_target_is_router;
+ int routing = lntmsg->msg_routing;
+ unsigned int payload_niov = lntmsg->msg_niov;
+ struct kvec *payload_iov = lntmsg->msg_iov;
+ lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
+ unsigned int payload_offset = lntmsg->msg_offset;
+ unsigned int payload_nob = lntmsg->msg_len;
+ kib_msg_t *ibmsg;
+ kib_tx_t *tx;
+ int nob;
+ int rc;

 /* NB 'private' is different depending on what we're sending.... */

@@ -1628,13 +1626,13 @@ static void
 kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
 {
 lnet_process_id_t target = lntmsg->msg_target;
- unsigned int niov = lntmsg->msg_niov;
- struct kvec *iov = lntmsg->msg_iov;
- lnet_kiov_t *kiov = lntmsg->msg_kiov;
- unsigned int offset = lntmsg->msg_offset;
- unsigned int nob = lntmsg->msg_len;
- kib_tx_t *tx;
- int rc;
+ unsigned int niov = lntmsg->msg_niov;
+ struct kvec *iov = lntmsg->msg_iov;
+ lnet_kiov_t *kiov = lntmsg->msg_kiov;
+ unsigned int offset = lntmsg->msg_offset;
+ unsigned int nob = lntmsg->msg_len;
+ kib_tx_t *tx;
+ int rc;

 tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
 if (tx == NULL) {
@@ -1691,14 +1689,14 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
 unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
- kib_rx_t *rx = private;
- kib_msg_t *rxmsg = rx->rx_msg;
- kib_conn_t *conn = rx->rx_conn;
- kib_tx_t *tx;
- kib_msg_t *txmsg;
- int nob;
- int post_credit = IBLND_POSTRX_PEER_CREDIT;
- int rc = 0;
+ kib_rx_t *rx = private;
+ kib_msg_t *rxmsg = rx->rx_msg;
+ kib_conn_t *conn = rx->rx_conn;
+ kib_tx_t *tx;
+ kib_msg_t *txmsg;
+ int nob;
+ int post_credit = IBLND_POSTRX_PEER_CREDIT;
+ int rc = 0;

 LASSERT(mlen <= rlen);
 LASSERT(!in_interrupt());
@@ -1828,8 +1826,8 @@ kiblnd_peer_alive(kib_peer_t *peer)
 static void
 kiblnd_peer_notify(kib_peer_t *peer)
 {
- int error = 0;
- unsigned long last_alive = 0;
+ int error = 0;
+ unsigned long last_alive = 0;
 unsigned long flags;

 read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
@@ -1860,9 +1858,9 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
 * connection to be finished off by the connd. Otherwise the connd is
 * already dealing with it (either to set it up or tear it down.
* Caller holds kib_global_lock exclusively in irq context */ - kib_peer_t *peer = conn->ibc_peer; - kib_dev_t *dev; - unsigned long flags; + kib_peer_t *peer = conn->ibc_peer; + kib_dev_t *dev; + unsigned long flags; LASSERT(error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED); @@ -1934,8 +1932,8 @@ kiblnd_close_conn(kib_conn_t *conn, int error) static void kiblnd_handle_early_rxs(kib_conn_t *conn) { - unsigned long flags; - kib_rx_t *rx; + unsigned long flags; + kib_rx_t *rx; kib_rx_t *tmp; LASSERT(!in_interrupt()); @@ -1957,9 +1955,9 @@ static void kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs) { LIST_HEAD(zombies); - struct list_head *tmp; - struct list_head *nxt; - kib_tx_t *tx; + struct list_head *tmp; + struct list_head *nxt; + kib_tx_t *tx; spin_lock(&conn->ibc_lock); @@ -2018,7 +2016,7 @@ void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error) { LIST_HEAD(zombies); - unsigned long flags; + unsigned long flags; LASSERT(error != 0); LASSERT(!in_interrupt()); @@ -2071,12 +2069,12 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error) void kiblnd_connreq_done(kib_conn_t *conn, int status) { - kib_peer_t *peer = conn->ibc_peer; - kib_tx_t *tx; + kib_peer_t *peer = conn->ibc_peer; + kib_tx_t *tx; kib_tx_t *tmp; - struct list_head txs; - unsigned long flags; - int active; + struct list_head txs; + unsigned long flags; + int active; active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); @@ -2166,7 +2164,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) static void kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej) { - int rc; + int rc; rc = rdma_reject(cmid, rej, sizeof(*rej)); @@ -2177,22 +2175,22 @@ kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej) static int kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) { - rwlock_t *g_lock = &kiblnd_data.kib_global_lock; - kib_msg_t *reqmsg = priv; - kib_msg_t *ackmsg; - kib_dev_t *ibdev; - kib_peer_t *peer; - kib_peer_t *peer2; - kib_conn_t *conn; - lnet_ni_t *ni = NULL; - kib_net_t *net = NULL; - lnet_nid_t nid; + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + kib_msg_t *reqmsg = priv; + kib_msg_t *ackmsg; + kib_dev_t *ibdev; + kib_peer_t *peer; + kib_peer_t *peer2; + kib_conn_t *conn; + lnet_ni_t *ni = NULL; + kib_net_t *net = NULL; + lnet_nid_t nid; struct rdma_conn_param cp; - kib_rej_t rej; - int version = IBLND_MSG_VERSION; - unsigned long flags; - int rc; - struct sockaddr_in *peer_addr; + kib_rej_t rej; + int version = IBLND_MSG_VERSION; + unsigned long flags; + int rc; + struct sockaddr_in *peer_addr; LASSERT(!in_interrupt()); /* cmid inherits 'context' from the corresponding listener id */ @@ -2200,8 +2198,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) LASSERT(ibdev != NULL); memset(&rej, 0, sizeof(rej)); - rej.ibr_magic = IBLND_MSG_MAGIC; - rej.ibr_why = IBLND_REJECT_FATAL; + rej.ibr_magic = IBLND_MSG_MAGIC; + rej.ibr_why = IBLND_REJECT_FATAL; rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE; peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr); @@ -2243,7 +2241,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) } nid = reqmsg->ibm_srcnid; - ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid)); + ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid)); if (ni != NULL) { net = (kib_net_t *)ni->ni_data; @@ -2394,7 +2392,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) * CM callback doesn't destroy cmid. 
*/ conn->ibc_incarnation = reqmsg->ibm_srcstamp; - conn->ibc_credits = IBLND_MSG_QUEUE_SIZE(version); + conn->ibc_credits = IBLND_MSG_QUEUE_SIZE(version); conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version); LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(version)); @@ -2412,12 +2410,12 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) memset(&cp, 0, sizeof(cp)); cp.private_data = ackmsg; - cp.private_data_len = ackmsg->ibm_nob; + cp.private_data_len = ackmsg->ibm_nob; cp.responder_resources = 0; /* No atomic ops or RDMA reads */ - cp.initiator_depth = 0; + cp.initiator_depth = 0; cp.flow_control = 1; - cp.retry_count = *kiblnd_tunables.kib_retry_count; - cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count; + cp.retry_count = *kiblnd_tunables.kib_retry_count; + cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count; CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid)); @@ -2439,7 +2437,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) if (ni != NULL) lnet_ni_decref(ni); - rej.ibr_version = version; + rej.ibr_version = version; rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version); rej.ibr_cp.ibcp_max_frags = IBLND_RDMA_FRAGS(version); kiblnd_reject(cmid, &rej); @@ -2451,10 +2449,10 @@ static void kiblnd_reconnect(kib_conn_t *conn, int version, __u64 incarnation, int why, kib_connparams_t *cp) { - kib_peer_t *peer = conn->ibc_peer; - char *reason; - int retry = 0; - unsigned long flags; + kib_peer_t *peer = conn->ibc_peer; + char *reason; + int retry = 0; + unsigned long flags; LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); LASSERT(peer->ibp_connecting > 0); /* 'conn' at least */ @@ -2513,7 +2511,7 @@ kiblnd_reconnect(kib_conn_t *conn, int version, static void kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob) { - kib_peer_t *peer = conn->ibc_peer; + kib_peer_t *peer = conn->ibc_peer; LASSERT(!in_interrupt()); LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); @@ -2532,10 +2530,10 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob) case IB_CM_REJ_CONSUMER_DEFINED: if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) { - kib_rej_t *rej = priv; - kib_connparams_t *cp = NULL; - int flip = 0; - __u64 incarnation = -1; + kib_rej_t *rej = priv; + kib_connparams_t *cp = NULL; + int flip = 0; + __u64 incarnation = -1; /* NB. default incarnation is -1 because: * a) V1 will ignore dst incarnation in connreq. 
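The kiblnd_reconnect() logic just above decides whether a rejected active connect is worth retrying. Reduced to its decision table (an illustration, not the patch's code; conn_worth_retrying is an invented name, and the real function also checks that no other connect or accept is in flight, picks the retry version from the peer's rejection message, and re-queues the peer's tx list):

	/* illustration only -- retry decision in kiblnd_reconnect() */
	static int conn_worth_retrying(int why, int version)
	{
		switch (why) {
		case IBLND_REJECT_CONN_RACE:	/* both peers dialled at once */
		case IBLND_REJECT_CONN_STALE:	/* peer saw an old incarnation */
			return 1;		/* retry at the same version */
		case IBLND_REJECT_CONN_UNCOMPAT:
			/* peer speaks an older protocol: retry once,
			 * downgraded from IBLND_MSG_VERSION_2 */
			return version == IBLND_MSG_VERSION_2;
		default:
			return 0;		/* fatal; fail the queued txs */
		}
	}
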
@@ -2652,13 +2650,13 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob) static void kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) { - kib_peer_t *peer = conn->ibc_peer; - lnet_ni_t *ni = peer->ibp_ni; - kib_net_t *net = ni->ni_data; - kib_msg_t *msg = priv; - int ver = conn->ibc_version; - int rc = kiblnd_unpack_msg(msg, priv_nob); - unsigned long flags; + kib_peer_t *peer = conn->ibc_peer; + lnet_ni_t *ni = peer->ibp_ni; + kib_net_t *net = ni->ni_data; + kib_msg_t *msg = priv; + int ver = conn->ibc_version; + int rc = kiblnd_unpack_msg(msg, priv_nob); + unsigned long flags; LASSERT(net != NULL); @@ -2726,8 +2724,8 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) goto failed; } - conn->ibc_incarnation = msg->ibm_srcstamp; - conn->ibc_credits = + conn->ibc_incarnation = msg->ibm_srcstamp; + conn->ibc_credits = conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver); LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(ver)); @@ -2749,20 +2747,20 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob) static int kiblnd_active_connect(struct rdma_cm_id *cmid) { - kib_peer_t *peer = (kib_peer_t *)cmid->context; - kib_conn_t *conn; - kib_msg_t *msg; - struct rdma_conn_param cp; - int version; - __u64 incarnation; - unsigned long flags; - int rc; + kib_peer_t *peer = (kib_peer_t *)cmid->context; + kib_conn_t *conn; + kib_msg_t *msg; + struct rdma_conn_param cp; + int version; + __u64 incarnation; + unsigned long flags; + int rc; read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); incarnation = peer->ibp_incarnation; - version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : - peer->ibp_version; + version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : + peer->ibp_version; read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); @@ -2793,8 +2791,8 @@ kiblnd_active_connect(struct rdma_cm_id *cmid) cp.private_data_len = msg->ibm_nob; cp.responder_resources = 0; /* No atomic ops or RDMA reads */ cp.initiator_depth = 0; - cp.flow_control = 1; - cp.retry_count = *kiblnd_tunables.kib_retry_count; + cp.flow_control = 1; + cp.retry_count = *kiblnd_tunables.kib_retry_count; cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count; LASSERT(cmid->context == (void *)conn); @@ -2814,9 +2812,9 @@ kiblnd_active_connect(struct rdma_cm_id *cmid) int kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) { - kib_peer_t *peer; - kib_conn_t *conn; - int rc; + kib_peer_t *peer; + kib_conn_t *conn; + int rc; switch (event->event) { default: @@ -2983,8 +2981,8 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) static int kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs) { - kib_tx_t *tx; - struct list_head *ttmp; + kib_tx_t *tx; + struct list_head *ttmp; list_for_each(ttmp, txs) { tx = list_entry(ttmp, kib_tx_t, tx_list); @@ -3022,13 +3020,13 @@ kiblnd_check_conns(int idx) { LIST_HEAD(closes); LIST_HEAD(checksends); - struct list_head *peers = &kiblnd_data.kib_peers[idx]; - struct list_head *ptmp; - kib_peer_t *peer; - kib_conn_t *conn; + struct list_head *peers = &kiblnd_data.kib_peers[idx]; + struct list_head *ptmp; + kib_peer_t *peer; + kib_conn_t *conn; kib_conn_t *tmp; - struct list_head *ctmp; - unsigned long flags; + struct list_head *ctmp; + unsigned long flags; /* NB. 
We expect to have a look at all the peers and not find any * RDMAs to time out, so we just use a shared lock while we @@ -3114,14 +3112,14 @@ kiblnd_disconnect_conn(kib_conn_t *conn) int kiblnd_connd(void *arg) { - wait_queue_t wait; - unsigned long flags; - kib_conn_t *conn; - int timeout; - int i; - int dropped_lock; - int peer_index = 0; - unsigned long deadline = jiffies; + wait_queue_t wait; + unsigned long flags; + kib_conn_t *conn; + int timeout; + int i; + int dropped_lock; + int peer_index = 0; + unsigned long deadline = jiffies; cfs_block_allsigs(); @@ -3169,7 +3167,7 @@ kiblnd_connd(void *arg) if (timeout <= 0) { const int n = 4; const int p = 1; - int chunk = kiblnd_data.kib_peer_hash_size; + int chunk = kiblnd_data.kib_peer_hash_size; spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); dropped_lock = 1; @@ -3273,9 +3271,9 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg) * consuming my CQ I could be called after all completions have * occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0 * and this CQ is about to be destroyed so I NOOP. */ - kib_conn_t *conn = (kib_conn_t *)arg; - struct kib_sched_info *sched = conn->ibc_sched; - unsigned long flags; + kib_conn_t *conn = (kib_conn_t *)arg; + struct kib_sched_info *sched = conn->ibc_sched; + unsigned long flags; LASSERT(cq == conn->ibc_cq); @@ -3309,15 +3307,15 @@ kiblnd_cq_event(struct ib_event *event, void *arg) int kiblnd_scheduler(void *arg) { - long id = (long)arg; - struct kib_sched_info *sched; - kib_conn_t *conn; - wait_queue_t wait; - unsigned long flags; - struct ib_wc wc; - int did_something; - int busy_loops = 0; - int rc; + long id = (long)arg; + struct kib_sched_info *sched; + kib_conn_t *conn; + wait_queue_t wait; + unsigned long flags; + struct ib_wc wc; + int did_something; + int busy_loops = 0; + int rc; cfs_block_allsigs(); @@ -3432,11 +3430,11 @@ kiblnd_scheduler(void *arg) int kiblnd_failover_thread(void *arg) { - rwlock_t *glock = &kiblnd_data.kib_global_lock; - kib_dev_t *dev; - wait_queue_t wait; - unsigned long flags; - int rc; + rwlock_t *glock = &kiblnd_data.kib_global_lock; + kib_dev_t *dev; + wait_queue_t wait; + unsigned long flags; + int rc; LASSERT(*kiblnd_tunables.kib_dev_failover != 0); @@ -3446,8 +3444,8 @@ kiblnd_failover_thread(void *arg) write_lock_irqsave(glock, flags); while (!kiblnd_data.kib_shutdown) { - int do_failover = 0; - int long_sleep; + int do_failover = 0; + int long_sleep; list_for_each_entry(dev, &kiblnd_data.kib_failed_devs, ibd_fail_list) { diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c index eedf01afd..b0e00361c 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c @@ -150,30 +150,30 @@ module_param(use_privileged_port, int, 0644); MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection"); kib_tunables_t kiblnd_tunables = { - .kib_dev_failover = &dev_failover, - .kib_service = &service, - .kib_cksum = &cksum, - .kib_timeout = &timeout, - .kib_keepalive = &keepalive, - .kib_ntx = &ntx, - .kib_credits = &credits, - .kib_peertxcredits = &peer_credits, - .kib_peercredits_hiw = &peer_credits_hiw, - .kib_peerrtrcredits = &peer_buffer_credits, - .kib_peertimeout = &peer_timeout, - .kib_default_ipif = &ipif_name, - .kib_retry_count = &retry_count, - .kib_rnr_retry_count = &rnr_retry_count, - .kib_concurrent_sends = &concurrent_sends, - .kib_ib_mtu = 
&ib_mtu, - .kib_map_on_demand = &map_on_demand, - .kib_fmr_pool_size = &fmr_pool_size, - .kib_fmr_flush_trigger = &fmr_flush_trigger, - .kib_fmr_cache = &fmr_cache, - .kib_pmr_pool_size = &pmr_pool_size, - .kib_require_priv_port = &require_privileged_port, - .kib_use_priv_port = &use_privileged_port, - .kib_nscheds = &nscheds + .kib_dev_failover = &dev_failover, + .kib_service = &service, + .kib_cksum = &cksum, + .kib_timeout = &timeout, + .kib_keepalive = &keepalive, + .kib_ntx = &ntx, + .kib_credits = &credits, + .kib_peertxcredits = &peer_credits, + .kib_peercredits_hiw = &peer_credits_hiw, + .kib_peerrtrcredits = &peer_buffer_credits, + .kib_peertimeout = &peer_timeout, + .kib_default_ipif = &ipif_name, + .kib_retry_count = &retry_count, + .kib_rnr_retry_count = &rnr_retry_count, + .kib_concurrent_sends = &concurrent_sends, + .kib_ib_mtu = &ib_mtu, + .kib_map_on_demand = &map_on_demand, + .kib_fmr_pool_size = &fmr_pool_size, + .kib_fmr_flush_trigger = &fmr_flush_trigger, + .kib_fmr_cache = &fmr_cache, + .kib_pmr_pool_size = &pmr_pool_size, + .kib_require_priv_port = &require_privileged_port, + .kib_use_priv_port = &use_privileged_port, + .kib_nscheds = &nscheds }; int diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/Makefile b/drivers/staging/lustre/lnet/klnds/socklnd/Makefile index f3fb8778c..c011581d3 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/Makefile +++ b/drivers/staging/lustre/lnet/klnds/socklnd/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_LNET) += ksocklnd.o -ksocklnd-y := socklnd.o socklnd_cb.o socklnd_proto.o socklnd_modparams.o socklnd_lib-linux.o +ksocklnd-y := socklnd.o socklnd_cb.o socklnd_proto.o socklnd_modparams.o socklnd_lib.o diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c index 7586b7e40..4128a9221 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c @@ -49,8 +49,8 @@ ksock_nal_data_t ksocknal_data; static ksock_interface_t * ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip) { - ksock_net_t *net = ni->ni_data; - int i; + ksock_net_t *net = ni->ni_data; + int i; ksock_interface_t *iface; for (i = 0; i < net->ksnn_ninterfaces; i++) { @@ -102,8 +102,8 @@ ksocknal_destroy_route(ksock_route_t *route) static int ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id) { - ksock_net_t *net = ni->ni_data; - ksock_peer_t *peer; + ksock_net_t *net = ni->ni_data; + ksock_peer_t *peer; LASSERT(id.nid != LNET_NID_ANY); LASSERT(id.pid != LNET_PID_ANY); @@ -149,7 +149,7 @@ ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id) void ksocknal_destroy_peer(ksock_peer_t *peer) { - ksock_net_t *net = peer->ksnp_ni->ni_data; + ksock_net_t *net = peer->ksnp_ni->ni_data; CDEBUG(D_NET, "peer %s %p deleted\n", libcfs_id2str(peer->ksnp_id), peer); @@ -175,9 +175,9 @@ ksocknal_destroy_peer(ksock_peer_t *peer) ksock_peer_t * ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id) { - struct list_head *peer_list = ksocknal_nid2peerlist(id.nid); - struct list_head *tmp; - ksock_peer_t *peer; + struct list_head *peer_list = ksocknal_nid2peerlist(id.nid); + struct list_head *tmp; + ksock_peer_t *peer; list_for_each(tmp, peer_list) { @@ -203,7 +203,7 @@ ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id) ksock_peer_t * ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id) { - ksock_peer_t *peer; + ksock_peer_t *peer; read_lock(&ksocknal_data.ksnd_global_lock); peer = 
ksocknal_find_peer_locked(ni, id); @@ -217,8 +217,8 @@ ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id) static void ksocknal_unlink_peer_locked(ksock_peer_t *peer) { - int i; - __u32 ip; + int i; + __u32 ip; ksock_interface_t *iface; for (i = 0; i < peer->ksnp_n_passive_ips; i++) { @@ -249,13 +249,13 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index, lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip, int *port, int *conn_count, int *share_count) { - ksock_peer_t *peer; - struct list_head *ptmp; - ksock_route_t *route; - struct list_head *rtmp; - int i; - int j; - int rc = -ENOENT; + ksock_peer_t *peer; + struct list_head *ptmp; + ksock_route_t *route; + struct list_head *rtmp; + int i; + int j; + int rc = -ENOENT; read_lock(&ksocknal_data.ksnd_global_lock); @@ -322,8 +322,8 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index, static void ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn) { - ksock_peer_t *peer = route->ksnr_peer; - int type = conn->ksnc_type; + ksock_peer_t *peer = route->ksnr_peer; + int type = conn->ksnc_type; ksock_interface_t *iface; conn->ksnc_route = route; @@ -366,9 +366,9 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn) static void ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route) { - struct list_head *tmp; - ksock_conn_t *conn; - ksock_route_t *route2; + struct list_head *tmp; + ksock_conn_t *conn; + ksock_route_t *route2; LASSERT(!peer->ksnp_closing); LASSERT(route->ksnr_peer == NULL); @@ -407,11 +407,11 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route) static void ksocknal_del_route_locked(ksock_route_t *route) { - ksock_peer_t *peer = route->ksnr_peer; + ksock_peer_t *peer = route->ksnr_peer; ksock_interface_t *iface; - ksock_conn_t *conn; - struct list_head *ctmp; - struct list_head *cnxt; + ksock_conn_t *conn; + struct list_head *ctmp; + struct list_head *cnxt; LASSERT(!route->ksnr_deleted); @@ -447,12 +447,12 @@ ksocknal_del_route_locked(ksock_route_t *route) int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) { - struct list_head *tmp; - ksock_peer_t *peer; - ksock_peer_t *peer2; - ksock_route_t *route; - ksock_route_t *route2; - int rc; + struct list_head *tmp; + ksock_peer_t *peer; + ksock_peer_t *peer2; + ksock_route_t *route; + ksock_route_t *route2; + int rc; if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY) @@ -509,11 +509,11 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port) static void ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip) { - ksock_conn_t *conn; - ksock_route_t *route; - struct list_head *tmp; - struct list_head *nxt; - int nshared; + ksock_conn_t *conn; + ksock_route_t *route; + struct list_head *tmp; + struct list_head *nxt; + int nshared; LASSERT(!peer->ksnp_closing); @@ -565,13 +565,13 @@ static int ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) { LIST_HEAD(zombies); - struct list_head *ptmp; - struct list_head *pnxt; - ksock_peer_t *peer; - int lo; - int hi; - int i; - int rc = -ENOENT; + struct list_head *ptmp; + struct list_head *pnxt; + ksock_peer_t *peer; + int lo; + int hi; + int i; + int rc = -ENOENT; write_lock_bh(&ksocknal_data.ksnd_global_lock); @@ -623,11 +623,11 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip) static ksock_conn_t * ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index) { - ksock_peer_t *peer; - struct list_head *ptmp; - ksock_conn_t *conn; - struct list_head *ctmp; - int i; + ksock_peer_t *peer; 
+ struct list_head *ptmp; + ksock_conn_t *conn; + struct list_head *ctmp; + int i; read_lock(&ksocknal_data.ksnd_global_lock); @@ -661,8 +661,8 @@ static ksock_sched_t * ksocknal_choose_scheduler_locked(unsigned int cpt) { struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt]; - ksock_sched_t *sched; - int i; + ksock_sched_t *sched; + int i; LASSERT(info->ksi_nthreads > 0); @@ -683,9 +683,9 @@ ksocknal_choose_scheduler_locked(unsigned int cpt) static int ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs) { - ksock_net_t *net = ni->ni_data; - int i; - int nip; + ksock_net_t *net = ni->ni_data; + int i; + int nip; read_lock(&ksocknal_data.ksnd_global_lock); @@ -711,12 +711,12 @@ ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs) static int ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips) { - int best_netmatch = 0; - int best_xor = 0; - int best = -1; - int this_xor; - int this_netmatch; - int i; + int best_netmatch = 0; + int best_xor = 0; + int best = -1; + int this_xor; + int this_netmatch; + int i; for (i = 0; i < nips; i++) { if (ips[i] == 0) @@ -743,19 +743,19 @@ ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips) static int ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips) { - rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; - ksock_net_t *net = peer->ksnp_ni->ni_data; - ksock_interface_t *iface; - ksock_interface_t *best_iface; - int n_ips; - int i; - int j; - int k; - __u32 ip; - __u32 xor; - int this_netmatch; - int best_netmatch; - int best_npeers; + rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; + ksock_net_t *net = peer->ksnp_ni->ni_data; + ksock_interface_t *iface; + ksock_interface_t *best_iface; + int n_ips; + int i; + int j; + int k; + __u32 ip; + __u32 xor; + int this_netmatch; + int best_netmatch; + int best_npeers; /* CAVEAT EMPTOR: We do all our interface matching with an * exclusive hold of global lock at IRQ priority. We're only @@ -846,19 +846,19 @@ static void ksocknal_create_routes(ksock_peer_t *peer, int port, __u32 *peer_ipaddrs, int npeer_ipaddrs) { - ksock_route_t *newroute = NULL; - rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; - lnet_ni_t *ni = peer->ksnp_ni; - ksock_net_t *net = ni->ni_data; - struct list_head *rtmp; - ksock_route_t *route; - ksock_interface_t *iface; - ksock_interface_t *best_iface; - int best_netmatch; - int this_netmatch; - int best_nroutes; - int i; - int j; + ksock_route_t *newroute = NULL; + rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; + lnet_ni_t *ni = peer->ksnp_ni; + ksock_net_t *net = ni->ni_data; + struct list_head *rtmp; + ksock_route_t *route; + ksock_interface_t *iface; + ksock_interface_t *best_iface; + int best_netmatch; + int this_netmatch; + int best_nroutes; + int i; + int j; /* CAVEAT EMPTOR: We do all our interface matching with an * exclusive hold of global lock at IRQ priority. 
We're only @@ -963,12 +963,12 @@ ksocknal_create_routes(ksock_peer_t *peer, int port, int ksocknal_accept(lnet_ni_t *ni, struct socket *sock) { - ksock_connreq_t *cr; - int rc; - __u32 peer_ip; - int peer_port; + ksock_connreq_t *cr; + int rc; + __u32 peer_ip; + int peer_port; - rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port); + rc = lnet_sock_getaddr(sock, 1, &peer_ip, &peer_port); LASSERT(rc == 0); /* we succeeded before */ LIBCFS_ALLOC(cr, sizeof(*cr)); @@ -994,7 +994,7 @@ ksocknal_accept(lnet_ni_t *ni, struct socket *sock) static int ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr) { - ksock_route_t *route; + ksock_route_t *route; list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) { @@ -1008,23 +1008,23 @@ int ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, struct socket *sock, int type) { - rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; + rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock; LIST_HEAD(zombies); - lnet_process_id_t peerid; - struct list_head *tmp; - __u64 incarnation; - ksock_conn_t *conn; - ksock_conn_t *conn2; - ksock_peer_t *peer = NULL; - ksock_peer_t *peer2; - ksock_sched_t *sched; + lnet_process_id_t peerid; + struct list_head *tmp; + __u64 incarnation; + ksock_conn_t *conn; + ksock_conn_t *conn2; + ksock_peer_t *peer = NULL; + ksock_peer_t *peer2; + ksock_sched_t *sched; ksock_hello_msg_t *hello; - int cpt; - ksock_tx_t *tx; - ksock_tx_t *txtmp; - int rc; - int active; - char *warn = NULL; + int cpt; + ksock_tx_t *tx; + ksock_tx_t *txtmp; + int rc; + int active; + char *warn = NULL; active = (route != NULL); @@ -1378,15 +1378,15 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route, ksocknal_txlist_done(ni, &zombies, 1); ksocknal_peer_decref(peer); - failed_1: +failed_1: if (hello != NULL) LIBCFS_FREE(hello, offsetof(ksock_hello_msg_t, kshm_ips[LNET_MAX_INTERFACES])); LIBCFS_FREE(conn, sizeof(*conn)); - failed_0: - libcfs_sock_release(sock); +failed_0: + sock_release(sock); return rc; } @@ -1396,10 +1396,10 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error) /* This just does the immmediate housekeeping, and queues the * connection for the reaper to terminate. * Caller holds ksnd_global_lock exclusively in irq context */ - ksock_peer_t *peer = conn->ksnc_peer; - ksock_route_t *route; - ksock_conn_t *conn2; - struct list_head *tmp; + ksock_peer_t *peer = conn->ksnc_peer; + ksock_route_t *route; + ksock_conn_t *conn2; + struct list_head *tmp; LASSERT(peer->ksnp_error == 0); LASSERT(!conn->ksnc_closing); @@ -1479,7 +1479,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error) void ksocknal_peer_failed(ksock_peer_t *peer) { - int notify = 0; + int notify = 0; unsigned long last_alive = 0; /* There has been a connection failure or comms error; but I'll only @@ -1506,9 +1506,9 @@ ksocknal_peer_failed(ksock_peer_t *peer) void ksocknal_finalize_zcreq(ksock_conn_t *conn) { - ksock_peer_t *peer = conn->ksnc_peer; - ksock_tx_t *tx; - ksock_tx_t *tmp; + ksock_peer_t *peer = conn->ksnc_peer; + ksock_tx_t *tx; + ksock_tx_t *tmp; LIST_HEAD(zlist); /* NB safe to finalize TXs because closing of socket will @@ -1546,9 +1546,9 @@ ksocknal_terminate_conn(ksock_conn_t *conn) * disengage the socket from its callbacks and close it. * ksnc_refcount will eventually hit zero, and then the reaper will * destroy it. 
*/ - ksock_peer_t *peer = conn->ksnc_peer; - ksock_sched_t *sched = conn->ksnc_scheduler; - int failed = 0; + ksock_peer_t *peer = conn->ksnc_peer; + ksock_sched_t *sched = conn->ksnc_scheduler; + int failed = 0; LASSERT(conn->ksnc_closing); @@ -1617,7 +1617,7 @@ ksocknal_queue_zombie_conn(ksock_conn_t *conn) void ksocknal_destroy_conn(ksock_conn_t *conn) { - unsigned long last_rcv; + unsigned long last_rcv; /* Final coup-de-grace of the reaper */ CDEBUG(D_NET, "connection %p\n", conn); @@ -1677,10 +1677,10 @@ ksocknal_destroy_conn(ksock_conn_t *conn) int ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why) { - ksock_conn_t *conn; - struct list_head *ctmp; - struct list_head *cnxt; - int count = 0; + ksock_conn_t *conn; + struct list_head *ctmp; + struct list_head *cnxt; + int count = 0; list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) { conn = list_entry(ctmp, ksock_conn_t, ksnc_list); @@ -1698,9 +1698,9 @@ ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why) int ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why) { - ksock_peer_t *peer = conn->ksnc_peer; - __u32 ipaddr = conn->ksnc_ipaddr; - int count; + ksock_peer_t *peer = conn->ksnc_peer; + __u32 ipaddr = conn->ksnc_ipaddr; + int count; write_lock_bh(&ksocknal_data.ksnd_global_lock); @@ -1714,13 +1714,13 @@ ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why) int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr) { - ksock_peer_t *peer; - struct list_head *ptmp; - struct list_head *pnxt; - int lo; - int hi; - int i; - int count = 0; + ksock_peer_t *peer; + struct list_head *ptmp; + struct list_head *pnxt; + int lo; + int hi; + int i; + int count = 0; write_lock_bh(&ksocknal_data.ksnd_global_lock); @@ -1762,7 +1762,7 @@ ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive) { /* The router is telling me she's been notified of a change in * gateway state.... 
*/ - lnet_process_id_t id = {0}; + lnet_process_id_t id = {0}; id.nid = gw_nid; id.pid = LNET_PID_ANY; @@ -1783,20 +1783,20 @@ ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive) void ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) { - int connect = 1; - unsigned long last_alive = 0; - unsigned long now = cfs_time_current(); - ksock_peer_t *peer = NULL; - rwlock_t *glock = &ksocknal_data.ksnd_global_lock; - lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID}; + int connect = 1; + unsigned long last_alive = 0; + unsigned long now = cfs_time_current(); + ksock_peer_t *peer = NULL; + rwlock_t *glock = &ksocknal_data.ksnd_global_lock; + lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID}; read_lock(glock); peer = ksocknal_find_peer_locked(ni, id); if (peer != NULL) { - struct list_head *tmp; - ksock_conn_t *conn; - int bufnob; + struct list_head *tmp; + ksock_conn_t *conn; + int bufnob; list_for_each(tmp, &peer->ksnp_conns) { conn = list_entry(tmp, ksock_conn_t, ksnc_list); @@ -1844,10 +1844,10 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when) static void ksocknal_push_peer(ksock_peer_t *peer) { - int index; - int i; - struct list_head *tmp; - ksock_conn_t *conn; + int index; + int i; + struct list_head *tmp; + ksock_conn_t *conn; for (index = 0; ; index++) { read_lock(&ksocknal_data.ksnd_global_lock); @@ -1877,12 +1877,12 @@ ksocknal_push_peer(ksock_peer_t *peer) static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id) { - ksock_peer_t *peer; - struct list_head *tmp; - int index; - int i; - int j; - int rc = -ENOENT; + ksock_peer_t *peer; + struct list_head *tmp; + int index; + int i; + int j; + int rc = -ENOENT; for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) { for (j = 0; ; j++) { @@ -1926,15 +1926,15 @@ ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id) static int ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask) { - ksock_net_t *net = ni->ni_data; + ksock_net_t *net = ni->ni_data; ksock_interface_t *iface; - int rc; - int i; - int j; - struct list_head *ptmp; - ksock_peer_t *peer; - struct list_head *rtmp; - ksock_route_t *route; + int rc; + int i; + int j; + struct list_head *ptmp; + ksock_peer_t *peer; + struct list_head *rtmp; + ksock_route_t *route; if (ipaddress == 0 || netmask == 0) @@ -1988,12 +1988,12 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask) static void ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr) { - struct list_head *tmp; - struct list_head *nxt; - ksock_route_t *route; - ksock_conn_t *conn; - int i; - int j; + struct list_head *tmp; + struct list_head *nxt; + ksock_route_t *route; + ksock_conn_t *conn; + int i; + int j; for (i = 0; i < peer->ksnp_n_passive_ips; i++) if (peer->ksnp_passive_ips[i] == ipaddr) { @@ -2029,14 +2029,14 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr) static int ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress) { - ksock_net_t *net = ni->ni_data; - int rc = -ENOENT; - struct list_head *tmp; - struct list_head *nxt; - ksock_peer_t *peer; - __u32 this_ip; - int i; - int j; + ksock_net_t *net = ni->ni_data; + int rc = -ENOENT; + struct list_head *tmp; + struct list_head *nxt; + ksock_peer_t *peer; + __u32 this_ip; + int i; + int j; write_lock_bh(&ksocknal_data.ksnd_global_lock); @@ -2114,11 +2114,11 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) data->ioc_u32[0]); /* IP address */ case IOC_LIBCFS_GET_PEER: { - __u32 myip = 0; - __u32 ip = 0; - int port = 0; 
- int conn_count = 0; - int share_count = 0; + __u32 myip = 0; + __u32 ip = 0; + int port = 0; + int conn_count = 0; + int share_count = 0; rc = ksocknal_get_peer_info(ni, data->ioc_count, &id, &myip, &ip, &port, @@ -2150,9 +2150,9 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg) data->ioc_u32[0]); /* IP */ case IOC_LIBCFS_GET_CONN: { - int txmem; - int rxmem; - int nagle; + int txmem; + int rxmem; + int nagle; ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count); if (conn == NULL) @@ -2207,8 +2207,8 @@ ksocknal_free_buffers(void) LASSERT(atomic_read(&ksocknal_data.ksnd_nactive_txs) == 0); if (ksocknal_data.ksnd_sched_info != NULL) { - struct ksock_sched_info *info; - int i; + struct ksock_sched_info *info; + int i; cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) { if (info->ksi_scheds != NULL) { @@ -2227,8 +2227,8 @@ ksocknal_free_buffers(void) spin_lock(&ksocknal_data.ksnd_tx_lock); if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) { - struct list_head zlist; - ksock_tx_t *tx; + struct list_head zlist; + ksock_tx_t *tx; list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs); list_del_init(&ksocknal_data.ksnd_idle_noop_txs); @@ -2248,9 +2248,9 @@ static void ksocknal_base_shutdown(void) { struct ksock_sched_info *info; - ksock_sched_t *sched; - int i; - int j; + ksock_sched_t *sched; + int i; + int j; CDEBUG(D_MALLOC, "before NAL cleanup: kmem %d\n", atomic_read(&libcfs_kmemory)); @@ -2351,8 +2351,8 @@ static int ksocknal_base_startup(void) { struct ksock_sched_info *info; - int rc; - int i; + int rc; + int i; LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_NOTHING); LASSERT(ksocknal_data.ksnd_nnets == 0); @@ -2398,8 +2398,8 @@ ksocknal_base_startup(void) goto failed; cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) { - ksock_sched_t *sched; - int nthrs; + ksock_sched_t *sched; + int nthrs; nthrs = cfs_cpt_weight(lnet_cpt_table(), i); if (*ksocknal_tunables.ksnd_nscheds > 0) { @@ -2430,9 +2430,9 @@ ksocknal_base_startup(void) } } - ksocknal_data.ksnd_connd_starting = 0; - ksocknal_data.ksnd_connd_failed_stamp = 0; - ksocknal_data.ksnd_connd_starting_stamp = get_seconds(); + ksocknal_data.ksnd_connd_starting = 0; + ksocknal_data.ksnd_connd_failed_stamp = 0; + ksocknal_data.ksnd_connd_starting_stamp = get_seconds(); /* must have at least 2 connds to remain responsive to accepts while * connecting */ if (*ksocknal_tunables.ksnd_nconnds < SOCKNAL_CONND_RESV + 1) @@ -2482,9 +2482,9 @@ ksocknal_base_startup(void) static void ksocknal_debug_peerhash(lnet_ni_t *ni) { - ksock_peer_t *peer = NULL; - struct list_head *tmp; - int i; + ksock_peer_t *peer = NULL; + struct list_head *tmp; + int i; read_lock(&ksocknal_data.ksnd_global_lock); @@ -2536,12 +2536,12 @@ ksocknal_debug_peerhash(lnet_ni_t *ni) void ksocknal_shutdown(lnet_ni_t *ni) { - ksock_net_t *net = ni->ni_data; - int i; + ksock_net_t *net = ni->ni_data; + int i; lnet_process_id_t anyid = {0}; - anyid.nid = LNET_NID_ANY; - anyid.pid = LNET_PID_ANY; + anyid.nid = LNET_NID_ANY; + anyid.pid = LNET_PID_ANY; LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL); LASSERT(ksocknal_data.ksnd_nnets > 0); @@ -2588,27 +2588,27 @@ ksocknal_shutdown(lnet_ni_t *ni) static int ksocknal_enumerate_interfaces(ksock_net_t *net) { - char **names; - int i; - int j; - int rc; - int n; + char **names; + int i; + int j; + int rc; + int n; - n = libcfs_ipif_enumerate(&names); + n = lnet_ipif_enumerate(&names); if (n <= 0) { CERROR("Can't enumerate interfaces: %d\n", n); return n; } for (i = j = 0; i < n; i++) { - int 
up; - __u32 ip; - __u32 mask; + int up; + __u32 ip; + __u32 mask; if (!strcmp(names[i], "lo")) /* skip the loopback IF */ continue; - rc = libcfs_ipif_query(names[i], &up, &ip, &mask); + rc = lnet_ipif_query(names[i], &up, &ip, &mask); if (rc != 0) { CWARN("Can't get interface %s info: %d\n", names[i], rc); @@ -2634,7 +2634,7 @@ ksocknal_enumerate_interfaces(ksock_net_t *net) j++; } - libcfs_ipif_free_enumeration(names, n); + lnet_ipif_free_enumeration(names, n); if (j == 0) CERROR("Can't find any usable interfaces\n"); @@ -2645,15 +2645,15 @@ ksocknal_enumerate_interfaces(ksock_net_t *net) static int ksocknal_search_new_ipif(ksock_net_t *net) { - int new_ipif = 0; - int i; + int new_ipif = 0; + int i; for (i = 0; i < net->ksnn_ninterfaces; i++) { - char *ifnam = &net->ksnn_interfaces[i].ksni_name[0]; - char *colon = strchr(ifnam, ':'); - int found = 0; - ksock_net_t *tmp; - int j; + char *ifnam = &net->ksnn_interfaces[i].ksni_name[0]; + char *colon = strchr(ifnam, ':'); + int found = 0; + ksock_net_t *tmp; + int j; if (colon != NULL) /* ignore alias device */ *colon = 0; @@ -2687,9 +2687,9 @@ ksocknal_search_new_ipif(ksock_net_t *net) static int ksocknal_start_schedulers(struct ksock_sched_info *info) { - int nthrs; - int rc = 0; - int i; + int nthrs; + int rc = 0; + int i; if (info->ksi_nthreads == 0) { if (*ksocknal_tunables.ksnd_nscheds > 0) { @@ -2708,9 +2708,9 @@ ksocknal_start_schedulers(struct ksock_sched_info *info) } for (i = 0; i < nthrs; i++) { - long id; - char name[20]; - ksock_sched_t *sched; + long id; + char name[20]; + ksock_sched_t *sched; id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i); sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)]; snprintf(name, sizeof(name), "socknal_sd%02d_%02d", @@ -2733,14 +2733,14 @@ ksocknal_start_schedulers(struct ksock_sched_info *info) static int ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts) { - int newif = ksocknal_search_new_ipif(net); - int rc; - int i; + int newif = ksocknal_search_new_ipif(net); + int rc; + int i; LASSERT(ncpts > 0 && ncpts <= cfs_cpt_number(lnet_cpt_table())); for (i = 0; i < ncpts; i++) { - struct ksock_sched_info *info; + struct ksock_sched_info *info; int cpt = (cpts == NULL) ? 
i : cpts[i]; LASSERT(cpt < cfs_cpt_number(lnet_cpt_table())); @@ -2759,9 +2759,9 @@ ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts) int ksocknal_startup(lnet_ni_t *ni) { - ksock_net_t *net; - int rc; - int i; + ksock_net_t *net; + int rc; + int i; LASSERT(ni->ni_lnd == &the_ksocklnd); @@ -2791,13 +2791,12 @@ ksocknal_startup(lnet_ni_t *ni) net->ksnn_ninterfaces = 1; } else { for (i = 0; i < LNET_MAX_INTERFACES; i++) { - int up; + int up; if (ni->ni_interfaces[i] == NULL) break; - rc = libcfs_ipif_query( - ni->ni_interfaces[i], &up, + rc = lnet_ipif_query(ni->ni_interfaces[i], &up, &net->ksnn_interfaces[i].ksni_ipaddr, &net->ksnn_interfaces[i].ksni_netmask); @@ -2851,7 +2850,7 @@ ksocknal_module_fini(void) static int __init ksocknal_module_init(void) { - int rc; + int rc; /* check ksnr_connected/connecting field large enough */ CLASSERT(SOCKLND_CONN_NTYPES <= 4); diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h index c54c99551..8a9d4a0de 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h @@ -25,26 +25,48 @@ * */ +#ifndef _SOCKLND_SOCKLND_H_ +#define _SOCKLND_SOCKLND_H_ + #define DEBUG_PORTAL_ALLOC #define DEBUG_SUBSYSTEM S_LND -#include "socklnd_lib-linux.h" +#include <linux/crc32.h> +#include <linux/errno.h> +#include <linux/if.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/kmod.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/stat.h> +#include <linux/string.h> +#include <linux/syscalls.h> +#include <linux/sysctl.h> +#include <linux/uio.h> +#include <linux/unistd.h> +#include <net/sock.h> +#include <net/tcp.h> #include "../../../include/linux/libcfs/libcfs.h" #include "../../../include/linux/lnet/lnet.h" #include "../../../include/linux/lnet/lib-lnet.h" #include "../../../include/linux/lnet/socklnd.h" -#include "../../../include/linux/lnet/lnet-sysctl.h" -#define SOCKNAL_PEER_HASH_SIZE 101 /* # peer lists */ -#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */ -#define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */ -#define SOCKNAL_ENOMEM_RETRY CFS_TICK /* jiffies between retries */ +/* assume one thread for each connection type */ +#define SOCKNAL_NSCHEDS 3 +#define SOCKNAL_NSCHEDS_HIGH (SOCKNAL_NSCHEDS << 1) + +#define SOCKNAL_PEER_HASH_SIZE 101 /* # peer lists */ +#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */ +#define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */ +#define SOCKNAL_ENOMEM_RETRY CFS_TICK /* jiffies between retries */ -#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */ -#define SOCKNAL_SINGLE_FRAG_RX 0 /* disable multi-fragment receives */ +#define SOCKNAL_SINGLE_FRAG_TX 0 /* disable multi-fragment sends */ +#define SOCKNAL_SINGLE_FRAG_RX 0 /* disable multi-fragment receives */ -#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */ +#define SOCKNAL_VERSION_DEBUG 0 /* enable protocol version debugging */ /* risk kmap deadlock on multi-frag I/O (backs off to single-frag if disabled). * no risk if we're not running on a CONFIG_HIGHMEM platform. 
*/ @@ -58,33 +80,31 @@ struct ksock_sched_info; typedef struct /* per scheduler state */ { - spinlock_t kss_lock; /* serialise */ - struct list_head kss_rx_conns; /* conn waiting to be read */ - /* conn waiting to be written */ - struct list_head kss_tx_conns; - /* zombie noop tx list */ - struct list_head kss_zombie_noop_txs; - wait_queue_head_t kss_waitq; /* where scheduler sleeps */ - /* # connections assigned to this scheduler */ - int kss_nconns; - struct ksock_sched_info *kss_info; /* owner of it */ - struct page *kss_rx_scratch_pgs[LNET_MAX_IOV]; - struct kvec kss_scratch_iov[LNET_MAX_IOV]; + spinlock_t kss_lock; /* serialise */ + struct list_head kss_rx_conns; /* conn waiting to be read */ + struct list_head kss_tx_conns; /* conn waiting to be written */ + struct list_head kss_zombie_noop_txs; /* zombie noop tx list */ + wait_queue_head_t kss_waitq; /* where scheduler sleeps */ + int kss_nconns; /* # connections assigned to + * this scheduler */ + struct ksock_sched_info *kss_info; /* owner of it */ + struct page *kss_rx_scratch_pgs[LNET_MAX_IOV]; + struct kvec kss_scratch_iov[LNET_MAX_IOV]; } ksock_sched_t; struct ksock_sched_info { - int ksi_nthreads_max; /* max allowed threads */ - int ksi_nthreads; /* number of threads */ - int ksi_cpt; /* CPT id */ - ksock_sched_t *ksi_scheds; /* array of schedulers */ + int ksi_nthreads_max; /* max allowed threads */ + int ksi_nthreads; /* number of threads */ + int ksi_cpt; /* CPT id */ + ksock_sched_t *ksi_scheds; /* array of schedulers */ }; -#define KSOCK_CPT_SHIFT 16 -#define KSOCK_THREAD_ID(cpt, sid) (((cpt) << KSOCK_CPT_SHIFT) | (sid)) -#define KSOCK_THREAD_CPT(id) ((id) >> KSOCK_CPT_SHIFT) -#define KSOCK_THREAD_SID(id) ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1)) +#define KSOCK_CPT_SHIFT 16 +#define KSOCK_THREAD_ID(cpt, sid) (((cpt) << KSOCK_CPT_SHIFT) | (sid)) +#define KSOCK_THREAD_CPT(id) ((id) >> KSOCK_CPT_SHIFT) +#define KSOCK_THREAD_SID(id) ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1)) -typedef struct /* in-use interface */ +typedef struct /* in-use interface */ { __u32 ksni_ipaddr; /* interface's IP address */ __u32 ksni_netmask; /* interface's network mask */ @@ -94,35 +114,48 @@ typedef struct /* in-use interface */ } ksock_interface_t; typedef struct { - /* "stuck" socket timeout (seconds) */ - int *ksnd_timeout; - /* # scheduler threads in each pool while starting */ - int *ksnd_nscheds; - int *ksnd_nconnds; /* # connection daemons */ - int *ksnd_nconnds_max; /* max # connection daemons */ - int *ksnd_min_reconnectms; /* first connection retry after (ms)... */ - int *ksnd_max_reconnectms; /* ...exponentially increasing to this */ - int *ksnd_eager_ack; /* make TCP ack eagerly? */ - int *ksnd_typed_conns; /* drive sockets by type? */ - int *ksnd_min_bulk; /* smallest "large" message */ - int *ksnd_tx_buffer_size; /* socket tx buffer size */ - int *ksnd_rx_buffer_size; /* socket rx buffer size */ - int *ksnd_nagle; /* enable NAGLE? 
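The KSOCK_THREAD_ID()/KSOCK_THREAD_CPT()/KSOCK_THREAD_SID() macros above pack a scheduler thread's CPT number into the high bits of a long and its slot within that CPT's scheduler array into the low 16 bits, so one opaque value can be handed to the thread at start-up. A minimal stand-alone sketch of the round trip; the macro bodies are copied from the patch, and main() exists only for illustration:

	#include <stdio.h>

	#define KSOCK_CPT_SHIFT		16
	#define KSOCK_THREAD_ID(cpt, sid) (((cpt) << KSOCK_CPT_SHIFT) | (sid))
	#define KSOCK_THREAD_CPT(id)	((id) >> KSOCK_CPT_SHIFT)
	#define KSOCK_THREAD_SID(id)	((id) & ((1UL << KSOCK_CPT_SHIFT) - 1))

	int main(void)
	{
		long id = KSOCK_THREAD_ID(3L, 7L); /* CPT 3, scheduler slot 7 */

		/* the fields cannot collide: a slot index is always < 2^16 */
		printf("cpt=%ld sid=%ld\n", (long)KSOCK_THREAD_CPT(id),
		       (long)KSOCK_THREAD_SID(id));
		return 0;
	}

This is what lets ksocknal_scheduler() recover both its ksock_sched_info and its ksock_sched_t slot from the single (long)arg it was started with.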
*/ - int *ksnd_round_robin; /* round robin for multiple interfaces */ - int *ksnd_keepalive; /* # secs for sending keepalive NOOP */ - int *ksnd_keepalive_idle; /* # idle secs before 1st probe */ - int *ksnd_keepalive_count; /* # probes */ - int *ksnd_keepalive_intvl; /* time between probes */ - int *ksnd_credits; /* # concurrent sends */ - int *ksnd_peertxcredits; /* # concurrent sends to 1 peer */ - int *ksnd_peerrtrcredits; /* # per-peer router buffer credits */ - int *ksnd_peertimeout; /* seconds to consider peer dead */ - int *ksnd_enable_csum; /* enable check sum */ - int *ksnd_inject_csum_error; /* set non-zero to inject checksum error */ - int *ksnd_nonblk_zcack; /* always send zc-ack on non-blocking connection */ - unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload size */ - int *ksnd_zc_recv; /* enable ZC receive (for Chelsio TOE) */ - int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to enable ZC receive */ + int *ksnd_timeout; /* "stuck" socket timeout + * (seconds) */ + int *ksnd_nscheds; /* # scheduler threads in each + * pool while starting */ + int *ksnd_nconnds; /* # connection daemons */ + int *ksnd_nconnds_max; /* max # connection daemons */ + int *ksnd_min_reconnectms; /* first connection retry after + * (ms)... */ + int *ksnd_max_reconnectms; /* ...exponentially increasing to + * this */ + int *ksnd_eager_ack; /* make TCP ack eagerly? */ + int *ksnd_typed_conns; /* drive sockets by type? */ + int *ksnd_min_bulk; /* smallest "large" message */ + int *ksnd_tx_buffer_size; /* socket tx buffer size */ + int *ksnd_rx_buffer_size; /* socket rx buffer size */ + int *ksnd_nagle; /* enable NAGLE? */ + int *ksnd_round_robin; /* round robin for multiple + * interfaces */ + int *ksnd_keepalive; /* # secs for sending keepalive + * NOOP */ + int *ksnd_keepalive_idle; /* # idle secs before 1st probe + */ + int *ksnd_keepalive_count; /* # probes */ + int *ksnd_keepalive_intvl; /* time between probes */ + int *ksnd_credits; /* # concurrent sends */ + int *ksnd_peertxcredits; /* # concurrent sends to 1 peer + */ + int *ksnd_peerrtrcredits; /* # per-peer router buffer + * credits */ + int *ksnd_peertimeout; /* seconds to consider peer dead + */ + int *ksnd_enable_csum; /* enable check sum */ + int *ksnd_inject_csum_error; /* set non-zero to inject + * checksum error */ + int *ksnd_nonblk_zcack; /* always send zc-ack on + * non-blocking connection */ + unsigned int *ksnd_zc_min_payload; /* minimum zero copy payload + * size */ + int *ksnd_zc_recv; /* enable ZC receive (for + * Chelsio TOE) */ + int *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to + * enable ZC receive */ } ksock_tunables_t; typedef struct { @@ -141,55 +174,67 @@ typedef struct { #define SOCKNAL_CONND_RESV 1 typedef struct { - int ksnd_init; /* initialisation state */ - int ksnd_nnets; /* # networks set up */ - struct list_head ksnd_nets; /* list of nets */ - /* stabilize peer/conn ops */ - rwlock_t ksnd_global_lock; - /* hash table of all my known peers */ - struct list_head *ksnd_peers; - int ksnd_peer_hash_size; /* size of ksnd_peers */ - - int ksnd_nthreads; /* # live threads */ - int ksnd_shuttingdown; /* tell threads to exit */ - /* schedulers information */ - struct ksock_sched_info **ksnd_sched_info; - - atomic_t ksnd_nactive_txs; /* #active txs */ - - struct list_head ksnd_deathrow_conns; /* conns to close: reaper_lock*/ - struct list_head ksnd_zombie_conns; /* conns to free: reaper_lock */ - struct list_head ksnd_enomem_conns; /* conns to retry: reaper_lock*/ - wait_queue_head_t 
ksnd_reaper_waitq; /* reaper sleeps here */ - unsigned long ksnd_reaper_waketime;/* when reaper will wake */ - spinlock_t ksnd_reaper_lock; /* serialise */ - - int ksnd_enomem_tx; /* test ENOMEM sender */ - int ksnd_stall_tx; /* test sluggish sender */ - int ksnd_stall_rx; /* test sluggish receiver */ - - struct list_head ksnd_connd_connreqs; /* incoming connection requests */ - struct list_head ksnd_connd_routes; /* routes waiting to be connected */ - wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */ - int ksnd_connd_connecting;/* # connds connecting */ - /** time stamp of the last failed connecting attempt */ - long ksnd_connd_failed_stamp; - /** # starting connd */ - unsigned ksnd_connd_starting; - /** time stamp of the last starting connd */ - long ksnd_connd_starting_stamp; - /** # running connd */ - unsigned ksnd_connd_running; - spinlock_t ksnd_connd_lock; /* serialise */ - - struct list_head ksnd_idle_noop_txs; /* list head for freed noop tx */ - spinlock_t ksnd_tx_lock; /* serialise, g_lock unsafe */ + int ksnd_init; /* initialisation state + */ + int ksnd_nnets; /* # networks set up */ + struct list_head ksnd_nets; /* list of nets */ + rwlock_t ksnd_global_lock; /* stabilize peer/conn + * ops */ + struct list_head *ksnd_peers; /* hash table of all my + * known peers */ + int ksnd_peer_hash_size; /* size of ksnd_peers */ + + int ksnd_nthreads; /* # live threads */ + int ksnd_shuttingdown; /* tell threads to exit + */ + struct ksock_sched_info **ksnd_sched_info; /* schedulers info */ + + atomic_t ksnd_nactive_txs; /* #active txs */ + + struct list_head ksnd_deathrow_conns; /* conns to close: + * reaper_lock*/ + struct list_head ksnd_zombie_conns; /* conns to free: + * reaper_lock */ + struct list_head ksnd_enomem_conns; /* conns to retry: + * reaper_lock*/ + wait_queue_head_t ksnd_reaper_waitq; /* reaper sleeps here */ + unsigned long ksnd_reaper_waketime; /* when reaper will wake + */ + spinlock_t ksnd_reaper_lock; /* serialise */ + + int ksnd_enomem_tx; /* test ENOMEM sender */ + int ksnd_stall_tx; /* test sluggish sender + */ + int ksnd_stall_rx; /* test sluggish + * receiver */ + + struct list_head ksnd_connd_connreqs; /* incoming connection + * requests */ + struct list_head ksnd_connd_routes; /* routes waiting to be + * connected */ + wait_queue_head_t ksnd_connd_waitq; /* connds sleep here */ + int ksnd_connd_connecting; /* # connds connecting + */ + long ksnd_connd_failed_stamp;/* time stamp of the + * last failed + * connecting attempt */ + unsigned ksnd_connd_starting; /* # starting connd */ + long ksnd_connd_starting_stamp;/* time stamp of the + * last starting connd + */ + unsigned ksnd_connd_running; /* # running connd */ + spinlock_t ksnd_connd_lock; /* serialise */ + + struct list_head ksnd_idle_noop_txs; /* list head for freed + * noop tx */ + spinlock_t ksnd_tx_lock; /* serialise, g_lock + * unsafe */ } ksock_nal_data_t; -#define SOCKNAL_INIT_NOTHING 0 -#define SOCKNAL_INIT_DATA 1 -#define SOCKNAL_INIT_ALL 2 +#define SOCKNAL_INIT_NOTHING 0 +#define SOCKNAL_INIT_DATA 1 +#define SOCKNAL_INIT_ALL 2 /* A packet just assembled for transmission is represented by 1 or more * struct iovec fragments (the first frag contains the portals header), @@ -200,43 +245,45 @@ typedef struct { * received into either struct iovec or lnet_kiov_t fragments, depending on * what the header matched or whether the message needs forwarding. 
*/ -struct ksock_conn; /* forward ref */ -struct ksock_peer; /* forward ref */ -struct ksock_route; /* forward ref */ -struct ksock_proto; /* forward ref */ +struct ksock_conn; /* forward ref */ +struct ksock_peer; /* forward ref */ +struct ksock_route; /* forward ref */ +struct ksock_proto; /* forward ref */ -typedef struct /* transmit packet */ +typedef struct /* transmit packet */ { - struct list_head tx_list; /* queue on conn for transmission etc */ - struct list_head tx_zc_list; /* queue on peer for ZC request */ - atomic_t tx_refcount; /* tx reference count */ - int tx_nob; /* # packet bytes */ - int tx_resid; /* residual bytes */ - int tx_niov; /* # packet iovec frags */ - struct kvec *tx_iov; /* packet iovec frags */ - int tx_nkiov; /* # packet page frags */ - unsigned short tx_zc_aborted; /* aborted ZC request */ - unsigned short tx_zc_capable:1; /* payload is large enough for ZC */ - unsigned short tx_zc_checked:1; /* Have I checked if I should ZC? */ - unsigned short tx_nonblk:1; /* it's a non-blocking ACK */ - lnet_kiov_t *tx_kiov; /* packet page frags */ - struct ksock_conn *tx_conn; /* owning conn */ - lnet_msg_t *tx_lnetmsg; /* lnet message for lnet_finalize() */ - unsigned long tx_deadline; /* when (in jiffies) tx times out */ - ksock_msg_t tx_msg; /* socklnd message buffer */ - int tx_desc_size; /* size of this descriptor */ + struct list_head tx_list; /* queue on conn for transmission etc + */ + struct list_head tx_zc_list; /* queue on peer for ZC request */ + atomic_t tx_refcount; /* tx reference count */ + int tx_nob; /* # packet bytes */ + int tx_resid; /* residual bytes */ + int tx_niov; /* # packet iovec frags */ + struct kvec *tx_iov; /* packet iovec frags */ + int tx_nkiov; /* # packet page frags */ + unsigned short tx_zc_aborted; /* aborted ZC request */ + unsigned short tx_zc_capable:1; /* payload is large enough for ZC */ + unsigned short tx_zc_checked:1; /* Have I checked if I should ZC? 
*/ + unsigned short tx_nonblk:1; /* it's a non-blocking ACK */ + lnet_kiov_t *tx_kiov; /* packet page frags */ + struct ksock_conn *tx_conn; /* owning conn */ + lnet_msg_t *tx_lnetmsg; /* lnet message for lnet_finalize() + */ + unsigned long tx_deadline; /* when (in jiffies) tx times out */ + ksock_msg_t tx_msg; /* socklnd message buffer */ + int tx_desc_size; /* size of this descriptor */ union { struct { - struct kvec iov; /* virt hdr */ - lnet_kiov_t kiov[0]; /* paged payload */ - } paged; + struct kvec iov; /* virt hdr */ + lnet_kiov_t kiov[0]; /* paged payload */ + } paged; struct { - struct kvec iov[1]; /* virt hdr + payload */ - } virt; - } tx_frags; + struct kvec iov[1]; /* virt hdr + payload */ + } virt; + } tx_frags; } ksock_tx_t; -#define KSOCK_NOOP_TX_SIZE ((int)offsetof(ksock_tx_t, tx_frags.paged.kiov[0])) +#define KSOCK_NOOP_TX_SIZE ((int)offsetof(ksock_tx_t, tx_frags.paged.kiov[0])) /* network zero copy callback descriptor embedded in ksock_tx_t */ @@ -247,153 +294,205 @@ typedef union { lnet_kiov_t kiov[LNET_MAX_IOV]; } ksock_rxiovspace_t; -#define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */ -#define SOCKNAL_RX_LNET_HEADER 2 /* reading lnet message header */ -#define SOCKNAL_RX_PARSE 3 /* Calling lnet_parse() */ -#define SOCKNAL_RX_PARSE_WAIT 4 /* waiting to be told to read the body */ -#define SOCKNAL_RX_LNET_PAYLOAD 5 /* reading lnet payload (to deliver here) */ -#define SOCKNAL_RX_SLOP 6 /* skipping body */ +#define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */ +#define SOCKNAL_RX_LNET_HEADER 2 /* reading lnet message header */ +#define SOCKNAL_RX_PARSE 3 /* Calling lnet_parse() */ +#define SOCKNAL_RX_PARSE_WAIT 4 /* waiting to be told to read the body */ +#define SOCKNAL_RX_LNET_PAYLOAD 5 /* reading lnet payload (to deliver here) */ +#define SOCKNAL_RX_SLOP 6 /* skipping body */ typedef struct ksock_conn { - struct ksock_peer *ksnc_peer; /* owning peer */ - struct ksock_route *ksnc_route; /* owning route */ - struct list_head ksnc_list; /* stash on peer's conn list */ - struct socket *ksnc_sock; /* actual socket */ - void *ksnc_saved_data_ready; /* socket's original data_ready() callback */ - void *ksnc_saved_write_space; /* socket's original write_space() callback */ - atomic_t ksnc_conn_refcount; /* conn refcount */ - atomic_t ksnc_sock_refcount; /* sock refcount */ - ksock_sched_t *ksnc_scheduler; /* who schedules this connection */ - __u32 ksnc_myipaddr; /* my IP */ - __u32 ksnc_ipaddr; /* peer's IP */ - int ksnc_port; /* peer's port */ - signed int ksnc_type:3; /* type of connection, - * should be signed value */ - unsigned int ksnc_closing:1; /* being shut down */ - unsigned int ksnc_flip:1; /* flip or not, only for V2.x */ - unsigned int ksnc_zc_capable:1; /* enable to ZC */ - struct ksock_proto *ksnc_proto; /* protocol for the connection */ + struct ksock_peer *ksnc_peer; /* owning peer */ + struct ksock_route *ksnc_route; /* owning route */ + struct list_head ksnc_list; /* stash on peer's conn list */ + struct socket *ksnc_sock; /* actual socket */ + void *ksnc_saved_data_ready; /* socket's original + * data_ready() callback */ + void *ksnc_saved_write_space; /* socket's original + * write_space() callback */ + atomic_t ksnc_conn_refcount;/* conn refcount */ + atomic_t ksnc_sock_refcount;/* sock refcount */ + ksock_sched_t *ksnc_scheduler; /* who schedules this connection + */ + __u32 ksnc_myipaddr; /* my IP */ + __u32 ksnc_ipaddr; /* peer's IP */ + int ksnc_port; /* peer's port */ + signed int ksnc_type:3; /* type of connection, 
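KSOCK_NOOP_TX_SIZE above works because tx_frags is the last member of ksock_tx_t: offsetof(ksock_tx_t, tx_frags.paged.kiov[0]) measures the descriptor up to, but not including, the per-page fragment array, which is all a NOOP tx (it carries no payload pages) needs to allocate. A reduced stand-alone sketch of the idiom; the struct members are invented for brevity, and the C99 flexible array member is used where the kernel spells the same thing as kiov[0]:

	#include <stdio.h>
	#include <stddef.h>

	struct frag {
		void		*page;
		unsigned int	 len;
	};

	struct tx {
		int		nob;	/* fixed header ... */
		struct frag	kiov[];	/* ... then payload fragments */
	};

	int main(void)
	{
		/* a NOOP descriptor: header only, no fragment slots */
		size_t noop_size = offsetof(struct tx, kiov);

		/* a payload descriptor carrying 4 page fragments */
		size_t data_size = offsetof(struct tx, kiov) +
				   4 * sizeof(struct frag);

		printf("noop=%zu data=%zu\n", noop_size, data_size);
		return 0;
	}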
should be + * signed value */ + unsigned int ksnc_closing:1; /* being shut down */ + unsigned int ksnc_flip:1; /* flip or not, only for V2.x */ + unsigned int ksnc_zc_capable:1; /* enable to ZC */ + struct ksock_proto *ksnc_proto; /* protocol for the connection */ /* reader */ - struct list_head ksnc_rx_list; /* where I enq waiting input or a forwarding descriptor */ - unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times out */ - __u8 ksnc_rx_started; /* started receiving a message */ - __u8 ksnc_rx_ready; /* data ready to read */ - __u8 ksnc_rx_scheduled;/* being progressed */ - __u8 ksnc_rx_state; /* what is being read */ - int ksnc_rx_nob_left; /* # bytes to next hdr/body */ - int ksnc_rx_nob_wanted; /* bytes actually wanted */ - int ksnc_rx_niov; /* # iovec frags */ - struct kvec *ksnc_rx_iov; /* the iovec frags */ - int ksnc_rx_nkiov; /* # page frags */ - lnet_kiov_t *ksnc_rx_kiov; /* the page frags */ - ksock_rxiovspace_t ksnc_rx_iov_space;/* space for frag descriptors */ - __u32 ksnc_rx_csum; /* partial checksum for incoming data */ - void *ksnc_cookie; /* rx lnet_finalize passthru arg */ - ksock_msg_t ksnc_msg; /* incoming message buffer: - * V2.x message takes the - * whole struct - * V1.x message is a bare - * lnet_hdr_t, it's stored in - * ksnc_msg.ksm_u.lnetmsg */ + struct list_head ksnc_rx_list; /* where I enq waiting input or a + * forwarding descriptor */ + unsigned long ksnc_rx_deadline; /* when (in jiffies) receive times + * out */ + __u8 ksnc_rx_started; /* started receiving a message */ + __u8 ksnc_rx_ready; /* data ready to read */ + __u8 ksnc_rx_scheduled; /* being progressed */ + __u8 ksnc_rx_state; /* what is being read */ + int ksnc_rx_nob_left; /* # bytes to next hdr/body */ + int ksnc_rx_nob_wanted;/* bytes actually wanted */ + int ksnc_rx_niov; /* # iovec frags */ + struct kvec *ksnc_rx_iov; /* the iovec frags */ + int ksnc_rx_nkiov; /* # page frags */ + lnet_kiov_t *ksnc_rx_kiov; /* the page frags */ + ksock_rxiovspace_t ksnc_rx_iov_space; /* space for frag descriptors */ + __u32 ksnc_rx_csum; /* partial checksum for incoming + * data */ + void *ksnc_cookie; /* rx lnet_finalize passthru arg + */ + ksock_msg_t ksnc_msg; /* incoming message buffer: + * V2.x message takes the + * whole struct + * V1.x message is a bare + * lnet_hdr_t, it's stored in + * ksnc_msg.ksm_u.lnetmsg */ /* WRITER */ - struct list_head ksnc_tx_list; /* where I enq waiting for output space */ - struct list_head ksnc_tx_queue; /* packets waiting to be sent */ - ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet message or ZC-ACK */ - unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out */ - int ksnc_tx_bufnob; /* send buffer marker */ - atomic_t ksnc_tx_nob; /* # bytes queued */ - int ksnc_tx_ready; /* write space */ - int ksnc_tx_scheduled; /* being progressed */ - unsigned long ksnc_tx_last_post; /* time stamp of the last posted TX */ + struct list_head ksnc_tx_list; /* where I enq waiting for output + * space */ + struct list_head ksnc_tx_queue; /* packets waiting to be sent */ + ksock_tx_t *ksnc_tx_carrier; /* next TX that can carry a LNet + * message or ZC-ACK */ + unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out + */ + int ksnc_tx_bufnob; /* send buffer marker */ + atomic_t ksnc_tx_nob; /* # bytes queued */ + int ksnc_tx_ready; /* write space */ + int ksnc_tx_scheduled; /* being progressed */ + unsigned long ksnc_tx_last_post; /* time stamp of the last posted + * TX */ } ksock_conn_t; typedef struct ksock_route { - struct list_head 
ksnr_list; /* chain on peer route list */ - struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */ - struct ksock_peer *ksnr_peer; /* owning peer */ - atomic_t ksnr_refcount; /* # users */ - unsigned long ksnr_timeout; /* when (in jiffies) reconnection can happen next */ - long ksnr_retry_interval; /* how long between retries */ - __u32 ksnr_myipaddr; /* my IP */ - __u32 ksnr_ipaddr; /* IP address to connect to */ - int ksnr_port; /* port to connect to */ - unsigned int ksnr_scheduled:1; /* scheduled for attention */ - unsigned int ksnr_connecting:1;/* connection establishment in progress */ - unsigned int ksnr_connected:4; /* connections established by type */ - unsigned int ksnr_deleted:1; /* been removed from peer? */ - unsigned int ksnr_share_count; /* created explicitly? */ - int ksnr_conn_count; /* # conns established by this route */ + struct list_head ksnr_list; /* chain on peer route list */ + struct list_head ksnr_connd_list; /* chain on ksnr_connd_routes */ + struct ksock_peer *ksnr_peer; /* owning peer */ + atomic_t ksnr_refcount; /* # users */ + unsigned long ksnr_timeout; /* when (in jiffies) reconnection + * can happen next */ + long ksnr_retry_interval; /* how long between retries */ + __u32 ksnr_myipaddr; /* my IP */ + __u32 ksnr_ipaddr; /* IP address to connect to */ + int ksnr_port; /* port to connect to */ + unsigned int ksnr_scheduled:1; /* scheduled for attention */ + unsigned int ksnr_connecting:1; /* connection establishment in + * progress */ + unsigned int ksnr_connected:4; /* connections established by + * type */ + unsigned int ksnr_deleted:1; /* been removed from peer? */ + unsigned int ksnr_share_count; /* created explicitly? */ + int ksnr_conn_count; /* # conns established by this + * route */ } ksock_route_t; -#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */ +#define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */ typedef struct ksock_peer { - struct list_head ksnp_list; /* stash on global peer list */ - unsigned long ksnp_last_alive; /* when (in jiffies) I was last alive */ - lnet_process_id_t ksnp_id; /* who's on the other end(s) */ - atomic_t ksnp_refcount; /* # users */ - int ksnp_sharecount; /* lconf usage counter */ - int ksnp_closing; /* being closed */ - int ksnp_accepting;/* # passive connections pending */ - int ksnp_error; /* errno on closing last conn */ - __u64 ksnp_zc_next_cookie;/* ZC completion cookie */ - __u64 ksnp_incarnation; /* latest known peer incarnation */ - struct ksock_proto *ksnp_proto; /* latest known peer protocol */ - struct list_head ksnp_conns; /* all active connections */ - struct list_head ksnp_routes; /* routes */ - struct list_head ksnp_tx_queue; /* waiting packets */ - spinlock_t ksnp_lock; /* serialize, g_lock unsafe */ - struct list_head ksnp_zc_req_list; /* zero copy requests wait for ACK */ - unsigned long ksnp_send_keepalive; /* time to send keepalive */ - lnet_ni_t *ksnp_ni; /* which network */ - int ksnp_n_passive_ips; /* # of... 
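The 4-bit ksnr_connected field above records which connection types have been established on a route as a bitmask, so deciding whether a route still needs work is a single mask test, the same shape as the LASSERT in ksocknal_launch_connection_locked(): (ksocknal_route_mask() & ~route->ksnr_connected) != 0. A sketch of that test with invented type bits; the real values come from the SOCKLND_CONN_* constants:

	#include <stdio.h>

	/* hypothetical stand-ins for the SOCKLND_CONN_* type bits */
	#define CONN_CONTROL	(1 << 0)
	#define CONN_BULK_IN	(1 << 1)
	#define CONN_BULK_OUT	(1 << 2)

	#define WANTED_MASK	(CONN_CONTROL | CONN_BULK_IN | CONN_BULK_OUT)

	int main(void)
	{
		/* two of the three wanted connection types exist so far */
		unsigned int connected = CONN_CONTROL | CONN_BULK_IN;

		if ((WANTED_MASK & ~connected) != 0)
			printf("route still needs a connection type\n");
		return 0;
	}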
*/ - __u32 ksnp_passive_ips[LNET_MAX_INTERFACES]; /* preferred local interfaces */ + struct list_head ksnp_list; /* stash on global peer list */ + unsigned long ksnp_last_alive; /* when (in jiffies) I was last + * alive */ + lnet_process_id_t ksnp_id; /* who's on the other end(s) */ + atomic_t ksnp_refcount; /* # users */ + int ksnp_sharecount; /* lconf usage counter */ + int ksnp_closing; /* being closed */ + int ksnp_accepting; /* # passive connections pending + */ + int ksnp_error; /* errno on closing last conn */ + __u64 ksnp_zc_next_cookie; /* ZC completion cookie */ + __u64 ksnp_incarnation; /* latest known peer incarnation + */ + struct ksock_proto *ksnp_proto; /* latest known peer protocol */ + struct list_head ksnp_conns; /* all active connections */ + struct list_head ksnp_routes; /* routes */ + struct list_head ksnp_tx_queue; /* waiting packets */ + spinlock_t ksnp_lock; /* serialize, g_lock unsafe */ + struct list_head ksnp_zc_req_list; /* zero copy requests wait for + * ACK */ + unsigned long ksnp_send_keepalive; /* time to send keepalive */ + lnet_ni_t *ksnp_ni; /* which network */ + int ksnp_n_passive_ips; /* # of... */ + + /* preferred local interfaces */ + __u32 ksnp_passive_ips[LNET_MAX_INTERFACES]; } ksock_peer_t; typedef struct ksock_connreq { - struct list_head ksncr_list; /* stash on ksnd_connd_connreqs */ - lnet_ni_t *ksncr_ni; /* chosen NI */ - struct socket *ksncr_sock; /* accepted socket */ + struct list_head ksncr_list; /* stash on ksnd_connd_connreqs */ + lnet_ni_t *ksncr_ni; /* chosen NI */ + struct socket *ksncr_sock; /* accepted socket */ } ksock_connreq_t; extern ksock_nal_data_t ksocknal_data; extern ksock_tunables_t ksocknal_tunables; -#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */ -#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */ -#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not preferred */ +#define SOCKNAL_MATCH_NO 0 /* TX can't match type of connection */ +#define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */ +#define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not + * preferred */ typedef struct ksock_proto { - int pro_version; /* version number of protocol */ - int (*pro_send_hello)(ksock_conn_t *, ksock_hello_msg_t *); /* handshake function */ - int (*pro_recv_hello)(ksock_conn_t *, ksock_hello_msg_t *, int);/* handshake function */ - void (*pro_pack)(ksock_tx_t *); /* message pack */ - void (*pro_unpack)(ksock_msg_t *); /* message unpack */ - ksock_tx_t *(*pro_queue_tx_msg)(ksock_conn_t *, ksock_tx_t *); /* queue tx on the connection */ - int (*pro_queue_tx_zcack)(ksock_conn_t *, ksock_tx_t *, __u64); /* queue ZC ack on the connection */ - int (*pro_handle_zcreq)(ksock_conn_t *, __u64, int); /* handle ZC request */ - int (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64); /* handle ZC ACK */ - int (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int); /* msg type matches the connection type: - * return value: - * return MATCH_NO : no - * return MATCH_YES : matching type - * return MATCH_MAY : can be backup */ + /* version number of protocol */ + int pro_version; + + /* handshake function */ + int (*pro_send_hello)(ksock_conn_t *, ksock_hello_msg_t *); + + /* handshake function */ + int (*pro_recv_hello)(ksock_conn_t *, ksock_hello_msg_t *, int); + + /* message pack */ + void (*pro_pack)(ksock_tx_t *); + + /* message unpack */ + void (*pro_unpack)(ksock_msg_t *); + + /* queue tx on the connection */ + ksock_tx_t *(*pro_queue_tx_msg)(ksock_conn_t *, ksock_tx_t *); + + 
/* queue ZC ack on the connection */ + int (*pro_queue_tx_zcack)(ksock_conn_t *, ksock_tx_t *, __u64); + + /* handle ZC request */ + int (*pro_handle_zcreq)(ksock_conn_t *, __u64, int); + + /* handle ZC ACK */ + int (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64); + + /* msg type matches the connection type: + * return value: + * return MATCH_NO : no + * return MATCH_YES : matching type + * return MATCH_MAY : can be backup */ + int (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int); } ksock_proto_t; extern ksock_proto_t ksocknal_protocol_v1x; extern ksock_proto_t ksocknal_protocol_v2x; extern ksock_proto_t ksocknal_protocol_v3x; -#define KSOCK_PROTO_V1_MAJOR LNET_PROTO_TCP_VERSION_MAJOR -#define KSOCK_PROTO_V1_MINOR LNET_PROTO_TCP_VERSION_MINOR -#define KSOCK_PROTO_V1 KSOCK_PROTO_V1_MAJOR +#define KSOCK_PROTO_V1_MAJOR LNET_PROTO_TCP_VERSION_MAJOR +#define KSOCK_PROTO_V1_MINOR LNET_PROTO_TCP_VERSION_MINOR +#define KSOCK_PROTO_V1 KSOCK_PROTO_V1_MAJOR #ifndef CPU_MASK_NONE #define CPU_MASK_NONE 0UL #endif +static inline __u32 ksocknal_csum(__u32 crc, unsigned char const *p, size_t len) +{ +#if 1 + return crc32_le(crc, p, len); +#else + while (len-- > 0) + crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff) ; + return crc; +#endif +} + static inline int ksocknal_route_mask(void) { @@ -434,7 +533,7 @@ ksocknal_conn_decref(ksock_conn_t *conn) static inline int ksocknal_connsock_addref(ksock_conn_t *conn) { - int rc = -ESHUTDOWN; + int rc = -ESHUTDOWN; read_lock(&ksocknal_data.ksnd_global_lock); if (!conn->ksnc_closing) { @@ -453,7 +552,7 @@ ksocknal_connsock_decref(ksock_conn_t *conn) LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0); if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) { LASSERT(conn->ksnc_closing); - libcfs_sock_release(conn->ksnc_sock); + sock_release(conn->ksnc_sock); conn->ksnc_sock = NULL; ksocknal_finalize_zcreq(conn); } @@ -586,3 +685,5 @@ extern void ksocknal_lib_csum_tx(ksock_tx_t *tx); extern int ksocknal_lib_memory_pressure(ksock_conn_t *conn); extern int ksocknal_lib_bind_thread_to_cpu(int id); + +#endif /* _SOCKLND_SOCKLND_H_ */ diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c index fa7ad883b..fe2a83a54 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c @@ -75,13 +75,13 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk) return NULL; } - tx->tx_conn = NULL; - tx->tx_lnetmsg = NULL; - tx->tx_kiov = NULL; - tx->tx_nkiov = 0; - tx->tx_iov = tx->tx_frags.virt.iov; - tx->tx_niov = 1; - tx->tx_nonblk = nonblk; + tx->tx_conn = NULL; + tx->tx_lnetmsg = NULL; + tx->tx_kiov = NULL; + tx->tx_nkiov = 0; + tx->tx_iov = tx->tx_frags.virt.iov; + tx->tx_niov = 1; + tx->tx_nonblk = nonblk; socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP); tx->tx_msg.ksm_zc_cookies[1] = cookie; @@ -110,11 +110,11 @@ ksocknal_free_tx (ksock_tx_t *tx) static int ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) { - struct kvec *iov = tx->tx_iov; - int nob; - int rc; + struct kvec *iov = tx->tx_iov; + int nob; + int rc; - LASSERT (tx->tx_niov > 0); + LASSERT(tx->tx_niov > 0); /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */ rc = ksocknal_lib_send_iov(conn, tx); @@ -128,7 +128,7 @@ ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) /* "consume" iov */ do { - LASSERT (tx->tx_niov > 0); + LASSERT(tx->tx_niov > 0); if (nob < (int) iov->iov_len) { iov->iov_base = (void *)((char *)iov->iov_base + nob); @@ -147,12 +147,12 @@ 
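The ksocknal_csum() helper moved into this header is a per-byte fold (normally delegated to crc32_le()), which is the property that lets the rx path accumulate ksnc_rx_csum one iov/kiov fragment at a time: checksumming a buffer in one call gives the same answer as feeding it in as consecutive pieces. A stand-alone sketch using the function's fallback branch; the seed and the split point are arbitrary choices for the demonstration:

	#include <stdio.h>
	#include <stddef.h>

	/* the fallback branch of ksocknal_csum(), as in the patch */
	static unsigned int ksocknal_csum(unsigned int crc,
					  unsigned char const *p, size_t len)
	{
		while (len-- > 0)
			crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff);
		return crc;
	}

	int main(void)
	{
		unsigned char buf[] = "socklnd payload bytes";
		unsigned int whole, part;

		/* whole buffer in one call ... */
		whole = ksocknal_csum(~0U, buf, sizeof(buf));

		/* ... equals the same bytes fed in as two fragments */
		part = ksocknal_csum(~0U, buf, 7);
		part = ksocknal_csum(part, buf + 7, sizeof(buf) - 7);

		printf("%s\n", whole == part ? "match" : "mismatch");
		return 0;
	}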
ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) static int ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) { - lnet_kiov_t *kiov = tx->tx_kiov; - int nob; - int rc; + lnet_kiov_t *kiov = tx->tx_kiov; + int nob; + int rc; - LASSERT (tx->tx_niov == 0); - LASSERT (tx->tx_nkiov > 0); + LASSERT(tx->tx_niov == 0); + LASSERT(tx->tx_nkiov > 0); /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */ rc = ksocknal_lib_send_kiov(conn, tx); @@ -185,15 +185,15 @@ ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) static int ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) { - int rc; - int bufnob; + int rc; + int bufnob; if (ksocknal_data.ksnd_stall_tx != 0) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx)); } - LASSERT (tx->tx_resid != 0); + LASSERT(tx->tx_resid != 0); rc = ksocknal_connsock_addref(conn); if (rc != 0) { @@ -252,10 +252,10 @@ static int ksocknal_recv_iov (ksock_conn_t *conn) { struct kvec *iov = conn->ksnc_rx_iov; - int nob; - int rc; + int nob; + int rc; - LASSERT (conn->ksnc_rx_niov > 0); + LASSERT(conn->ksnc_rx_niov > 0); /* Never touch conn->ksnc_rx_iov or change connection * status inside ksocknal_lib_recv_iov */ @@ -277,7 +277,7 @@ ksocknal_recv_iov (ksock_conn_t *conn) conn->ksnc_rx_nob_left -= nob; do { - LASSERT (conn->ksnc_rx_niov > 0); + LASSERT(conn->ksnc_rx_niov > 0); if (nob < (int)iov->iov_len) { iov->iov_len -= nob; @@ -296,10 +296,10 @@ ksocknal_recv_iov (ksock_conn_t *conn) static int ksocknal_recv_kiov (ksock_conn_t *conn) { - lnet_kiov_t *kiov = conn->ksnc_rx_kiov; - int nob; - int rc; - LASSERT (conn->ksnc_rx_nkiov > 0); + lnet_kiov_t *kiov = conn->ksnc_rx_kiov; + int nob; + int rc; + LASSERT(conn->ksnc_rx_nkiov > 0); /* Never touch conn->ksnc_rx_kiov or change connection * status inside ksocknal_lib_recv_iov */ @@ -321,7 +321,7 @@ ksocknal_recv_kiov (ksock_conn_t *conn) conn->ksnc_rx_nob_left -= nob; do { - LASSERT (conn->ksnc_rx_nkiov > 0); + LASSERT(conn->ksnc_rx_nkiov > 0); if (nob < (int) kiov->kiov_len) { kiov->kiov_offset += nob; @@ -343,7 +343,7 @@ ksocknal_receive (ksock_conn_t *conn) /* Return 1 on success, 0 on EOF, < 0 on error. * Caller checks ksnc_rx_nob_wanted to determine * progress/completion. */ - int rc; + int rc; if (ksocknal_data.ksnd_stall_rx != 0) { set_current_state(TASK_UNINTERRUPTIBLE); @@ -388,8 +388,8 @@ ksocknal_receive (ksock_conn_t *conn) void ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx) { - lnet_msg_t *lnetmsg = tx->tx_lnetmsg; - int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO; + lnet_msg_t *lnetmsg = tx->tx_lnetmsg; + int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 
0 : -EIO; LASSERT(ni != NULL || tx->tx_conn != NULL); @@ -410,7 +410,7 @@ ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error) ksock_tx_t *tx; while (!list_empty (txlist)) { - tx = list_entry (txlist->next, ksock_tx_t, tx_list); + tx = list_entry(txlist->next, ksock_tx_t, tx_list); if (error && tx->tx_lnetmsg != NULL) { CNETERR("Deleting packet type %d len %d %s->%s\n", @@ -422,18 +422,18 @@ ksocknal_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int error) CNETERR("Deleting noop packet\n"); } - list_del (&tx->tx_list); + list_del(&tx->tx_list); - LASSERT (atomic_read(&tx->tx_refcount) == 1); - ksocknal_tx_done (ni, tx); + LASSERT(atomic_read(&tx->tx_refcount) == 1); + ksocknal_tx_done(ni, tx); } } static void ksocknal_check_zc_req(ksock_tx_t *tx) { - ksock_conn_t *conn = tx->tx_conn; - ksock_peer_t *peer = conn->ksnc_peer; + ksock_conn_t *conn = tx->tx_conn; + ksock_peer_t *peer = conn->ksnc_peer; /* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx * to ksnp_zc_req_list if some fragment of this message should be sent @@ -441,8 +441,8 @@ ksocknal_check_zc_req(ksock_tx_t *tx) * she has received this message to tell us we can signal completion. * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on * ksnp_zc_req_list. */ - LASSERT (tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); - LASSERT (tx->tx_zc_capable); + LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); + LASSERT(tx->tx_zc_capable); tx->tx_zc_checked = 1; @@ -461,7 +461,7 @@ ksocknal_check_zc_req(ksock_tx_t *tx) tx->tx_deadline = cfs_time_shift(*ksocknal_tunables.ksnd_timeout); - LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0); + LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0); tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++; @@ -476,7 +476,7 @@ ksocknal_check_zc_req(ksock_tx_t *tx) static void ksocknal_uncheck_zc_req(ksock_tx_t *tx) { - ksock_peer_t *peer = tx->tx_conn->ksnc_peer; + ksock_peer_t *peer = tx->tx_conn->ksnc_peer; LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); LASSERT(tx->tx_zc_capable); @@ -502,14 +502,14 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx) static int ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx) { - int rc; + int rc; if (tx->tx_zc_capable && !tx->tx_zc_checked) ksocknal_check_zc_req(tx); rc = ksocknal_transmit (conn, tx); - CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc); + CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc); if (tx->tx_resid == 0) { /* Sent everything OK */ @@ -546,7 +546,7 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx) } /* Actual error */ - LASSERT (rc < 0); + LASSERT(rc < 0); if (!conn->ksnc_closing) { switch (rc) { @@ -582,9 +582,9 @@ ksocknal_launch_connection_locked (ksock_route_t *route) /* called holding write lock on ksnd_global_lock */ - LASSERT (!route->ksnr_scheduled); - LASSERT (!route->ksnr_connecting); - LASSERT ((ksocknal_route_mask() & ~route->ksnr_connected) != 0); + LASSERT(!route->ksnr_scheduled); + LASSERT(!route->ksnr_connecting); + LASSERT((ksocknal_route_mask() & ~route->ksnr_connected) != 0); route->ksnr_scheduled = 1; /* scheduling conn for connd */ ksocknal_route_addref(route); /* extra ref for connd */ @@ -617,22 +617,22 @@ ksocknal_launch_all_connections_locked (ksock_peer_t *peer) ksock_conn_t * ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) { - struct list_head *tmp; - ksock_conn_t *conn; - ksock_conn_t *typed = NULL; - ksock_conn_t *fallback = NULL; - int tnob = 0; - int fnob = 0; + struct list_head *tmp; + ksock_conn_t *conn; + ksock_conn_t *typed = NULL; + 
ksock_conn_t *fallback = NULL; + int tnob = 0; + int fnob = 0; list_for_each (tmp, &peer->ksnp_conns) { ksock_conn_t *c = list_entry(tmp, ksock_conn_t, ksnc_list); - int nob = atomic_read(&c->ksnc_tx_nob) + - c->ksnc_sock->sk->sk_wmem_queued; - int rc; + int nob = atomic_read(&c->ksnc_tx_nob) + + c->ksnc_sock->sk->sk_wmem_queued; + int rc; - LASSERT (!c->ksnc_closing); - LASSERT (c->ksnc_proto != NULL && - c->ksnc_proto->pro_match_tx != NULL); + LASSERT(!c->ksnc_closing); + LASSERT(c->ksnc_proto != NULL && + c->ksnc_proto->pro_match_tx != NULL); rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk); @@ -656,7 +656,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) (fnob == nob && *ksocknal_tunables.ksnd_round_robin && cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) { fallback = c; - fnob = nob; + fnob = nob; } break; } @@ -685,9 +685,9 @@ void ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) { ksock_sched_t *sched = conn->ksnc_scheduler; - ksock_msg_t *msg = &tx->tx_msg; - ksock_tx_t *ztx = NULL; - int bufnob = 0; + ksock_msg_t *msg = &tx->tx_msg; + ksock_tx_t *ztx = NULL; + int bufnob = 0; /* called holding global lock (read or irq-write) and caller may * not have dropped this lock between finding conn and calling me, @@ -708,11 +708,11 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) * * We always expect at least 1 mapped fragment containing the * complete ksocknal message header. */ - LASSERT (lnet_iov_nob (tx->tx_niov, tx->tx_iov) + - lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) == - (unsigned int)tx->tx_nob); - LASSERT (tx->tx_niov >= 1); - LASSERT (tx->tx_resid == tx->tx_nob); + LASSERT(lnet_iov_nob (tx->tx_niov, tx->tx_iov) + + lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) == + (unsigned int)tx->tx_nob); + LASSERT(tx->tx_niov >= 1); + LASSERT(tx->tx_resid == tx->tx_nob); CDEBUG (D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n", tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type: @@ -739,8 +739,8 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) if (msg->ksm_type == KSOCK_MSG_NOOP) { /* The packet is noop ZC ACK, try to piggyback the ack_cookie * on a normal packet so I don't need to send it */ - LASSERT (msg->ksm_zc_cookies[1] != 0); - LASSERT (conn->ksnc_proto->pro_queue_tx_zcack != NULL); + LASSERT(msg->ksm_zc_cookies[1] != 0); + LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL); if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0)) ztx = tx; /* ZC ACK piggybacked on ztx release tx later */ @@ -748,8 +748,8 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) } else { /* It's a normal packet - can it piggback a noop zc-ack that * has been queued already? 
*/ - LASSERT (msg->ksm_zc_cookies[1] == 0); - LASSERT (conn->ksnc_proto->pro_queue_tx_msg != NULL); + LASSERT(msg->ksm_zc_cookies[1] == 0); + LASSERT(conn->ksnc_proto->pro_queue_tx_msg != NULL); ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx); /* ztx will be released later */ @@ -777,14 +777,14 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) ksock_route_t * ksocknal_find_connectable_route_locked (ksock_peer_t *peer) { - unsigned long now = cfs_time_current(); - struct list_head *tmp; + unsigned long now = cfs_time_current(); + struct list_head *tmp; ksock_route_t *route; list_for_each (tmp, &peer->ksnp_routes) { route = list_entry (tmp, ksock_route_t, ksnr_list); - LASSERT (!route->ksnr_connecting || route->ksnr_scheduled); + LASSERT(!route->ksnr_connecting || route->ksnr_scheduled); if (route->ksnr_scheduled) /* connections being established */ continue; @@ -813,13 +813,13 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer) ksock_route_t * ksocknal_find_connecting_route_locked (ksock_peer_t *peer) { - struct list_head *tmp; - ksock_route_t *route; + struct list_head *tmp; + ksock_route_t *route; list_for_each (tmp, &peer->ksnp_routes) { route = list_entry (tmp, ksock_route_t, ksnr_list); - LASSERT (!route->ksnr_connecting || route->ksnr_scheduled); + LASSERT(!route->ksnr_connecting || route->ksnr_scheduled); if (route->ksnr_scheduled) return route; @@ -831,13 +831,13 @@ ksocknal_find_connecting_route_locked (ksock_peer_t *peer) int ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) { - ksock_peer_t *peer; - ksock_conn_t *conn; - rwlock_t *g_lock; - int retry; - int rc; + ksock_peer_t *peer; + ksock_conn_t *conn; + rwlock_t *g_lock; + int retry; + int rc; - LASSERT (tx->tx_conn == NULL); + LASSERT(tx->tx_conn == NULL); g_lock = &ksocknal_data.ksnd_global_lock; @@ -922,17 +922,17 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) int ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) { - int mpflag = 1; - int type = lntmsg->msg_type; + int mpflag = 1; + int type = lntmsg->msg_type; lnet_process_id_t target = lntmsg->msg_target; - unsigned int payload_niov = lntmsg->msg_niov; - struct kvec *payload_iov = lntmsg->msg_iov; - lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; - unsigned int payload_offset = lntmsg->msg_offset; - unsigned int payload_nob = lntmsg->msg_len; - ksock_tx_t *tx; - int desc_size; - int rc; + unsigned int payload_niov = lntmsg->msg_niov; + struct kvec *payload_iov = lntmsg->msg_iov; + lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; + unsigned int payload_offset = lntmsg->msg_offset; + unsigned int payload_nob = lntmsg->msg_len; + ksock_tx_t *tx; + int desc_size; + int rc; /* NB 'private' is different depending on what we're sending. * Just ignore it... 
*/ @@ -940,8 +940,8 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n", payload_nob, payload_niov, libcfs_id2str(target)); - LASSERT (payload_nob == 0 || payload_niov > 0); - LASSERT (payload_niov <= LNET_MAX_IOV); + LASSERT(payload_nob == 0 || payload_niov > 0); + LASSERT(payload_niov <= LNET_MAX_IOV); /* payload is either all vaddrs or all pages */ LASSERT (!(payload_kiov != NULL && payload_iov != NULL)); LASSERT (!in_interrupt ()); @@ -1028,9 +1028,9 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) { static char ksocknal_slop_buffer[4096]; - int nob; - unsigned int niov; - int skipped; + int nob; + unsigned int niov; + int skipped; LASSERT(conn->ksnc_proto != NULL); @@ -1063,7 +1063,7 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space; conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg; - conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t); + conn->ksnc_rx_iov[0].iov_len = sizeof (lnet_hdr_t); break; default: @@ -1108,18 +1108,18 @@ ksocknal_new_packet (ksock_conn_t *conn, int nob_to_skip) static int ksocknal_process_receive (ksock_conn_t *conn) { - lnet_hdr_t *lhdr; + lnet_hdr_t *lhdr; lnet_process_id_t *id; - int rc; + int rc; LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0); /* NB: sched lock NOT held */ /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */ - LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER || - conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD || - conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER || - conn->ksnc_rx_state == SOCKNAL_RX_SLOP); + LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER || + conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD || + conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER || + conn->ksnc_rx_state == SOCKNAL_RX_SLOP); again: if (conn->ksnc_rx_nob_wanted != 0) { rc = ksocknal_receive(conn); @@ -1229,7 +1229,7 @@ ksocknal_process_receive (ksock_conn_t *conn) if ((conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) != 0) { /* Userspace peer */ lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr; - id = &conn->ksnc_peer->ksnp_id; + id = &conn->ksnc_peer->ksnp_id; /* Substitute process ID assigned at connection time */ lhdr->src_pid = cpu_to_le32(id->pid); @@ -1277,7 +1277,7 @@ ksocknal_process_receive (ksock_conn_t *conn) LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x); lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr; - id = &conn->ksnc_peer->ksnp_id; + id = &conn->ksnc_peer->ksnp_id; rc = conn->ksnc_proto->pro_handle_zcreq(conn, conn->ksnc_msg.ksm_zc_cookies[0], @@ -1305,7 +1305,7 @@ ksocknal_process_receive (ksock_conn_t *conn) } /* Not Reached */ - LBUG (); + LBUG(); return -EINVAL; /* keep gcc happy */ } @@ -1314,15 +1314,15 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen, unsigned int rlen) { - ksock_conn_t *conn = (ksock_conn_t *)private; + ksock_conn_t *conn = (ksock_conn_t *)private; ksock_sched_t *sched = conn->ksnc_scheduler; - LASSERT (mlen <= rlen); - LASSERT (niov <= LNET_MAX_IOV); + LASSERT(mlen <= rlen); + LASSERT(niov <= LNET_MAX_IOV); conn->ksnc_cookie = msg; conn->ksnc_rx_nob_wanted = mlen; - conn->ksnc_rx_nob_left = rlen; + conn->ksnc_rx_nob_left = rlen; if (mlen == 0 || iov != NULL) { conn->ksnc_rx_nkiov = 0; @@ -1333,18 +1333,18 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, niov, iov, offset, mlen); } else 
{ conn->ksnc_rx_niov = 0; - conn->ksnc_rx_iov = NULL; + conn->ksnc_rx_iov = NULL; conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov; conn->ksnc_rx_nkiov = lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov, niov, kiov, offset, mlen); } - LASSERT (mlen == - lnet_iov_nob (conn->ksnc_rx_niov, conn->ksnc_rx_iov) + - lnet_kiov_nob (conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov)); + LASSERT(mlen == + lnet_iov_nob(conn->ksnc_rx_niov, conn->ksnc_rx_iov) + + lnet_kiov_nob(conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov)); - LASSERT (conn->ksnc_rx_scheduled); + LASSERT(conn->ksnc_rx_scheduled); spin_lock_bh(&sched->kss_lock); @@ -1370,7 +1370,7 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed, static inline int ksocknal_sched_cansleep(ksock_sched_t *sched) { - int rc; + int rc; spin_lock_bh(&sched->kss_lock); @@ -1384,13 +1384,13 @@ ksocknal_sched_cansleep(ksock_sched_t *sched) int ksocknal_scheduler(void *arg) { - struct ksock_sched_info *info; - ksock_sched_t *sched; - ksock_conn_t *conn; - ksock_tx_t *tx; - int rc; - int nloops = 0; - long id = (long)arg; + struct ksock_sched_info *info; + ksock_sched_t *sched; + ksock_conn_t *conn; + ksock_tx_t *tx; + int rc; + int nloops = 0; + long id = (long)arg; info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)]; sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)]; @@ -1455,7 +1455,7 @@ int ksocknal_scheduler(void *arg) } if (!list_empty (&sched->kss_tx_conns)) { - LIST_HEAD (zlist); + LIST_HEAD(zlist); if (!list_empty(&sched->kss_zombie_noop_txs)) { list_add(&zlist, @@ -1513,9 +1513,9 @@ int ksocknal_scheduler(void *arg) /* Do nothing; after a short timeout, this * conn will be reposted on kss_tx_conns. */ } else if (conn->ksnc_tx_ready && - !list_empty (&conn->ksnc_tx_queue)) { + !list_empty(&conn->ksnc_tx_queue)) { /* reschedule for tx */ - list_add_tail (&conn->ksnc_tx_list, + list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns); } else { conn->ksnc_tx_scheduled = 0; @@ -1606,7 +1606,7 @@ void ksocknal_write_callback (ksock_conn_t *conn) static ksock_proto_t * ksocknal_parse_proto_version (ksock_hello_msg_t *hello) { - __u32 version = 0; + __u32 version = 0; if (hello->kshm_magic == LNET_PROTO_MAGIC) version = hello->kshm_version; @@ -1634,8 +1634,8 @@ ksocknal_parse_proto_version (ksock_hello_msg_t *hello) if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) { lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello; - CLASSERT (sizeof (lnet_magicversion_t) == - offsetof (ksock_hello_msg_t, kshm_src_nid)); + CLASSERT(sizeof (lnet_magicversion_t) == + offsetof (ksock_hello_msg_t, kshm_src_nid)); if (hmv->version_major == cpu_to_le16 (KSOCK_PROTO_V1_MAJOR) && hmv->version_minor == cpu_to_le16 (KSOCK_PROTO_V1_MINOR)) @@ -1650,19 +1650,19 @@ ksocknal_send_hello (lnet_ni_t *ni, ksock_conn_t *conn, lnet_nid_t peer_nid, ksock_hello_msg_t *hello) { /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */ - ksock_net_t *net = (ksock_net_t *)ni->ni_data; + ksock_net_t *net = (ksock_net_t *)ni->ni_data; - LASSERT (hello->kshm_nips <= LNET_MAX_INTERFACES); + LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES); /* rely on caller to hold a ref on socket so it wouldn't disappear */ - LASSERT (conn->ksnc_proto != NULL); + LASSERT(conn->ksnc_proto != NULL); - hello->kshm_src_nid = ni->ni_nid; - hello->kshm_dst_nid = peer_nid; - hello->kshm_src_pid = the_lnet.ln_pid; + hello->kshm_src_nid = ni->ni_nid; + hello->kshm_dst_nid = peer_nid; + hello->kshm_src_pid = the_lnet.ln_pid; hello->kshm_src_incarnation = net->ksnn_incarnation; - hello->kshm_ctype = 
conn->ksnc_type; + hello->kshm_ctype = conn->ksnc_type; return conn->ksnc_proto->pro_send_hello(conn, hello); } @@ -1693,21 +1693,21 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, * EALREADY lost connection race * EPROTO protocol version mismatch */ - struct socket *sock = conn->ksnc_sock; - int active = (conn->ksnc_proto != NULL); - int timeout; - int proto_match; - int rc; - ksock_proto_t *proto; - lnet_process_id_t recv_id; + struct socket *sock = conn->ksnc_sock; + int active = (conn->ksnc_proto != NULL); + int timeout; + int proto_match; + int rc; + ksock_proto_t *proto; + lnet_process_id_t recv_id; /* socket type set on active connections - not set on passive */ - LASSERT (!active == !(conn->ksnc_type != SOCKLND_CONN_NONE)); + LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE)); timeout = active ? *ksocknal_tunables.ksnd_timeout : lnet_acceptor_timeout(); - rc = libcfs_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout); + rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout); if (rc != 0) { CERROR("Error %d reading HELLO from %pI4h\n", rc, &conn->ksnc_ipaddr); @@ -1726,12 +1726,12 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, return -EPROTO; } - rc = libcfs_sock_read(sock, &hello->kshm_version, - sizeof(hello->kshm_version), timeout); + rc = lnet_sock_read(sock, &hello->kshm_version, + sizeof(hello->kshm_version), timeout); if (rc != 0) { CERROR("Error %d reading HELLO from %pI4h\n", rc, &conn->ksnc_ipaddr); - LASSERT (rc < 0); + LASSERT(rc < 0); return rc; } @@ -1765,7 +1765,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, if (rc != 0) { CERROR("Error %d reading or checking hello from from %pI4h\n", rc, &conn->ksnc_ipaddr); - LASSERT (rc < 0); + LASSERT(rc < 0); return rc; } @@ -1830,22 +1830,22 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn, static int ksocknal_connect (ksock_route_t *route) { - LIST_HEAD (zombies); - ksock_peer_t *peer = route->ksnr_peer; - int type; - int wanted; - struct socket *sock; - unsigned long deadline; - int retry_later = 0; - int rc = 0; + LIST_HEAD(zombies); + ksock_peer_t *peer = route->ksnr_peer; + int type; + int wanted; + struct socket *sock; + unsigned long deadline; + int retry_later = 0; + int rc = 0; deadline = cfs_time_add(cfs_time_current(), cfs_time_seconds(*ksocknal_tunables.ksnd_timeout)); write_lock_bh(&ksocknal_data.ksnd_global_lock); - LASSERT (route->ksnr_scheduled); - LASSERT (!route->ksnr_connecting); + LASSERT(route->ksnr_scheduled); + LASSERT(!route->ksnr_connecting); route->ksnr_connecting = 1; @@ -2101,7 +2101,7 @@ static ksock_route_t * ksocknal_connd_get_route_locked(signed long *timeout_p) { ksock_route_t *route; - unsigned long now; + unsigned long now; now = cfs_time_current(); @@ -2124,13 +2124,13 @@ ksocknal_connd_get_route_locked(signed long *timeout_p) int ksocknal_connd (void *arg) { - spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock; - ksock_connreq_t *cr; - wait_queue_t wait; - int nloops = 0; - int cons_retry = 0; + spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock; + ksock_connreq_t *cr; + wait_queue_t wait; + int nloops = 0; + int cons_retry = 0; - cfs_block_allsigs (); + cfs_block_allsigs(); init_waitqueue_entry(&wait, current); @@ -2144,7 +2144,7 @@ ksocknal_connd (void *arg) ksock_route_t *route = NULL; long sec = get_seconds(); long timeout = MAX_SCHEDULE_TIMEOUT; - int dropped_lock = 0; + int dropped_lock = 0; if (ksocknal_connd_check_stop(sec, &timeout)) { /* wakeup another one to check stop 
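ksocknal_recv_hello() above works out the peer's byte order from the first word on the wire: the magic either matches outright, matches after a byte swap (in which case every later HELLO field must be flipped, which is what ksnc_flip records), or the connection is not an LNet peer at all. A minimal user-space sketch of that decision; the magic value is LNET_PROTO_TCP_MAGIC as defined in the lnet headers, and bswap32() stands in for the kernel's swabbing helpers:

	#include <stdio.h>
	#include <stdint.h>

	#define LNET_PROTO_TCP_MAGIC 0xeebc0dedU

	static uint32_t bswap32(uint32_t v)
	{
		return (v >> 24) | ((v >> 8) & 0x0000ff00U) |
		       ((v << 8) & 0x00ff0000U) | (v << 24);
	}

	int main(void)
	{
		/* pretend the peer has the opposite endianness to ours */
		uint32_t wire = bswap32(LNET_PROTO_TCP_MAGIC);

		if (wire == LNET_PROTO_TCP_MAGIC)
			printf("same byte order: read fields as-is\n");
		else if (bswap32(wire) == LNET_PROTO_TCP_MAGIC)
			printf("swapped magic: flip every later field\n");
		else
			printf("not an LNet connection attempt\n");
		return 0;
	}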
*/ @@ -2236,15 +2236,15 @@ static ksock_conn_t * ksocknal_find_timed_out_conn (ksock_peer_t *peer) { /* We're called with a shared lock on ksnd_global_lock */ - ksock_conn_t *conn; - struct list_head *ctmp; + ksock_conn_t *conn; + struct list_head *ctmp; list_for_each (ctmp, &peer->ksnp_conns) { - int error; + int error; conn = list_entry (ctmp, ksock_conn_t, ksnc_list); /* Don't need the {get,put}connsock dance to deref ksnc_sock */ - LASSERT (!conn->ksnc_closing); + LASSERT(!conn->ksnc_closing); /* SOCK_ERROR will reset error code of socket in * some platform (like Darwin8.x) */ @@ -2313,8 +2313,8 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer) static inline void ksocknal_flush_stale_txs(ksock_peer_t *peer) { - ksock_tx_t *tx; - LIST_HEAD (stale_txs); + ksock_tx_t *tx; + LIST_HEAD(stale_txs); write_lock_bh(&ksocknal_data.ksnd_global_lock); @@ -2338,9 +2338,9 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer) static int ksocknal_send_keepalive_locked(ksock_peer_t *peer) { - ksock_sched_t *sched; - ksock_conn_t *conn; - ksock_tx_t *tx; + ksock_sched_t *sched; + ksock_conn_t *conn; + ksock_tx_t *tx; if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */ return 0; @@ -2399,10 +2399,10 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer) static void ksocknal_check_peer_timeouts (int idx) { - struct list_head *peers = &ksocknal_data.ksnd_peers[idx]; - ksock_peer_t *peer; - ksock_conn_t *conn; - ksock_tx_t *tx; + struct list_head *peers = &ksocknal_data.ksnd_peers[idx]; + ksock_peer_t *peer; + ksock_conn_t *conn; + ksock_tx_t *tx; again: /* NB. We expect to have a look at all the peers and not find any @@ -2411,9 +2411,9 @@ ksocknal_check_peer_timeouts (int idx) read_lock(&ksocknal_data.ksnd_global_lock); list_for_each_entry(peer, peers, ksnp_list) { - unsigned long deadline = 0; - int resid = 0; - int n = 0; + unsigned long deadline = 0; + int resid = 0; + int n = 0; if (ksocknal_send_keepalive_locked(peer) != 0) { read_unlock(&ksocknal_data.ksnd_global_lock); @@ -2476,8 +2476,8 @@ ksocknal_check_peer_timeouts (int idx) tx = list_entry(peer->ksnp_zc_req_list.next, ksock_tx_t, tx_zc_list); deadline = tx->tx_deadline; - resid = tx->tx_resid; - conn = tx->tx_conn; + resid = tx->tx_resid; + conn = tx->tx_conn; ksocknal_conn_addref(conn); spin_unlock(&peer->ksnp_lock); @@ -2499,17 +2499,17 @@ ksocknal_check_peer_timeouts (int idx) int ksocknal_reaper (void *arg) { - wait_queue_t wait; - ksock_conn_t *conn; - ksock_sched_t *sched; - struct list_head enomem_conns; - int nenomem_conns; - long timeout; - int i; - int peer_index = 0; - unsigned long deadline = cfs_time_current(); - - cfs_block_allsigs (); + wait_queue_t wait; + ksock_conn_t *conn; + ksock_sched_t *sched; + struct list_head enomem_conns; + int nenomem_conns; + long timeout; + int i; + int peer_index = 0; + unsigned long deadline = cfs_time_current(); + + cfs_block_allsigs(); INIT_LIST_HEAD(&enomem_conns); init_waitqueue_entry(&wait, current); @@ -2580,7 +2580,7 @@ ksocknal_reaper (void *arg) cfs_time_current())) <= 0) { const int n = 4; const int p = 1; - int chunk = ksocknal_data.ksnd_peer_hash_size; + int chunk = ksocknal_data.ksnd_peer_hash_size; /* Time to check for timeouts on a few more peers: I do * checks every 'p' seconds on a proportion of the peer diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h deleted file mode 100644 index f5563881b..000000000 --- 
a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h +++ /dev/null @@ -1,86 +0,0 @@ -/* - * GPL HEADER START - * - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 only, - * as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License version 2 for more details (a copy is included - * in the LICENSE file that accompanied this code). - * - * You should have received a copy of the GNU General Public License - * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. - * - * GPL HEADER END - */ -/* - * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. - * Use is subject to license terms. - * - * Copyright (c) 2012, Intel Corporation. - */ -/* - * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. - */ - -#define DEBUG_PORTAL_ALLOC - -#ifndef __LINUX_SOCKNAL_LIB_H__ -#define __LINUX_SOCKNAL_LIB_H__ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/string.h> -#include <linux/stat.h> -#include <linux/errno.h> -#include <linux/unistd.h> -#include <net/sock.h> -#include <net/tcp.h> -#include <linux/uio.h> -#include <linux/if.h> -#include <linux/uaccess.h> - -#include <asm/irq.h> - -#include <linux/fs.h> -#include <linux/file.h> -#include <linux/list.h> -#include <linux/kmod.h> -#include <linux/sysctl.h> -#include <asm/div64.h> -#include <linux/syscalls.h> - -#include "../../../include/linux/libcfs/libcfs.h" - -#include <linux/crc32.h> -static inline __u32 ksocknal_csum(__u32 crc, unsigned char const *p, size_t len) -{ -#if 1 - return crc32_le(crc, p, len); -#else - while (len-- > 0) - crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff) ; - return crc; -#endif -} - -#define SOCKNAL_WSPACE(sk) sk_stream_wspace(sk) -#define SOCKNAL_MIN_WSPACE(sk) sk_stream_min_wspace(sk) - -/* assume one thread for each connection type */ -#define SOCKNAL_NSCHEDS 3 -#define SOCKNAL_NSCHEDS_HIGH (SOCKNAL_NSCHEDS << 1) - -#endif diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c index f5e8ab060..340706110 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c @@ -39,9 +39,8 @@ int ksocknal_lib_get_conn_addrs(ksock_conn_t *conn) { - int rc = libcfs_sock_getaddr(conn->ksnc_sock, 1, - &conn->ksnc_ipaddr, - &conn->ksnc_port); + int rc = lnet_sock_getaddr(conn->ksnc_sock, 1, &conn->ksnc_ipaddr, + &conn->ksnc_port); /* Didn't need the {get,put}connsock dance to deref ksnc_sock... 
*/ LASSERT(!conn->ksnc_closing); @@ -51,8 +50,7 @@ ksocknal_lib_get_conn_addrs(ksock_conn_t *conn) return rc; } - rc = libcfs_sock_getaddr(conn->ksnc_sock, 0, - &conn->ksnc_myipaddr, NULL); + rc = lnet_sock_getaddr(conn->ksnc_sock, 0, &conn->ksnc_myipaddr, NULL); if (rc != 0) { CERROR("Error %d getting sock local IP\n", rc); return rc; @@ -64,7 +62,7 @@ ksocknal_lib_get_conn_addrs(ksock_conn_t *conn) int ksocknal_lib_zc_capable(ksock_conn_t *conn) { - int caps = conn->ksnc_sock->sk->sk_route_caps; + int caps = conn->ksnc_sock->sk->sk_route_caps; if (conn->ksnc_proto == &ksocknal_protocol_v1x) return 0; @@ -78,8 +76,8 @@ int ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx) { struct socket *sock = conn->ksnc_sock; - int nob; - int rc; + int nob; + int rc; if (*ksocknal_tunables.ksnd_enable_csum && /* checksum enabled */ conn->ksnc_proto == &ksocknal_protocol_v2x && /* V2.x connection */ @@ -92,15 +90,15 @@ ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx) { #if SOCKNAL_SINGLE_FRAG_TX - struct kvec scratch; - struct kvec *scratchiov = &scratch; - unsigned int niov = 1; + struct kvec scratch; + struct kvec *scratchiov = &scratch; + unsigned int niov = 1; #else - struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov; - unsigned int niov = tx->tx_niov; + struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov; + unsigned int niov = tx->tx_niov; #endif struct msghdr msg = {.msg_flags = MSG_DONTWAIT}; - int i; + int i; for (nob = i = 0; i < niov; i++) { scratchiov[i] = tx->tx_iov[i]; @@ -120,9 +118,9 @@ int ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) { struct socket *sock = conn->ksnc_sock; - lnet_kiov_t *kiov = tx->tx_kiov; - int rc; - int nob; + lnet_kiov_t *kiov = tx->tx_kiov; + int rc; + int nob; /* Not NOOP message */ LASSERT(tx->tx_lnetmsg != NULL); @@ -131,11 +129,11 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) * or leave them alone. */ if (tx->tx_msg.ksm_zc_cookies[0] != 0) { /* Zero copy is enabled */ - struct sock *sk = sock->sk; - struct page *page = kiov->kiov_page; - int offset = kiov->kiov_offset; - int fragsize = kiov->kiov_len; - int msgflg = MSG_DONTWAIT; + struct sock *sk = sock->sk; + struct page *page = kiov->kiov_page; + int offset = kiov->kiov_offset; + int fragsize = kiov->kiov_len; + int msgflg = MSG_DONTWAIT; CDEBUG(D_NET, "page %p + offset %x for %d\n", page, offset, kiov->kiov_len); @@ -148,23 +146,22 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) rc = sk->sk_prot->sendpage(sk, page, offset, fragsize, msgflg); } else { - rc = cfs_tcp_sendpage(sk, page, offset, fragsize, - msgflg); + rc = tcp_sendpage(sk, page, offset, fragsize, msgflg); } } else { #if SOCKNAL_SINGLE_FRAG_TX || !SOCKNAL_RISK_KMAP_DEADLOCK - struct kvec scratch; + struct kvec scratch; struct kvec *scratchiov = &scratch; - unsigned int niov = 1; + unsigned int niov = 1; #else #ifdef CONFIG_HIGHMEM #warning "XXX risk of kmap deadlock on multiple frags..." #endif struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov; - unsigned int niov = tx->tx_nkiov; + unsigned int niov = tx->tx_nkiov; #endif struct msghdr msg = {.msg_flags = MSG_DONTWAIT}; - int i; + int i; for (nob = i = 0; i < niov; i++) { scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + @@ -187,7 +184,7 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) void ksocknal_lib_eager_ack(ksock_conn_t *conn) { - int opt = 1; + int opt = 1; struct socket *sock = conn->ksnc_sock; /* Remind the socket to ACK eagerly. 
If I don't, the socket might @@ -203,23 +200,23 @@ int ksocknal_lib_recv_iov(ksock_conn_t *conn) { #if SOCKNAL_SINGLE_FRAG_RX - struct kvec scratch; + struct kvec scratch; struct kvec *scratchiov = &scratch; - unsigned int niov = 1; + unsigned int niov = 1; #else struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov; - unsigned int niov = conn->ksnc_rx_niov; + unsigned int niov = conn->ksnc_rx_niov; #endif struct kvec *iov = conn->ksnc_rx_iov; struct msghdr msg = { - .msg_flags = 0 + .msg_flags = 0 }; - int nob; - int i; - int rc; - int fragnob; - int sum; - __u32 saved_csum; + int nob; + int i; + int rc; + int fragnob; + int sum; + __u32 saved_csum; /* NB we can't trust socket ops to either consume our iovs * or leave them alone. */ @@ -271,9 +268,9 @@ static void * ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov, struct kvec *iov, struct page **pages) { - void *addr; - int nob; - int i; + void *addr; + int nob; + int i; if (!*ksocknal_tunables.ksnd_zc_recv || pages == NULL) return NULL; @@ -307,29 +304,29 @@ int ksocknal_lib_recv_kiov(ksock_conn_t *conn) { #if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK - struct kvec scratch; - struct kvec *scratchiov = &scratch; - struct page **pages = NULL; - unsigned int niov = 1; + struct kvec scratch; + struct kvec *scratchiov = &scratch; + struct page **pages = NULL; + unsigned int niov = 1; #else #ifdef CONFIG_HIGHMEM #warning "XXX risk of kmap deadlock on multiple frags..." #endif - struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov; - struct page **pages = conn->ksnc_scheduler->kss_rx_scratch_pgs; - unsigned int niov = conn->ksnc_rx_nkiov; + struct kvec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov; + struct page **pages = conn->ksnc_scheduler->kss_rx_scratch_pgs; + unsigned int niov = conn->ksnc_rx_nkiov; #endif lnet_kiov_t *kiov = conn->ksnc_rx_kiov; struct msghdr msg = { - .msg_flags = 0 + .msg_flags = 0 }; - int nob; - int i; - int rc; - void *base; - void *addr; - int sum; - int fragnob; + int nob; + int i; + int rc; + void *base; + void *addr; + int sum; + int fragnob; int n; /* NB we can't trust socket ops to either consume our iovs @@ -357,10 +354,10 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn) for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) { LASSERT(i < niov); - /* Dang! have to kmap again because I have nowhere to stash the - * mapped address. But by doing it while the page is still - * mapped, the kernel just bumps the map count and returns me - * the address it stashed. */ + /* Dang! have to kmap again because I have nowhere to + * stash the mapped address. But by doing it while the + * page is still mapped, the kernel just bumps the map + * count and returns me the address it stashed. 
*/ base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset; fragnob = kiov[i].kiov_len; if (fragnob > sum) @@ -386,9 +383,9 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn) void ksocknal_lib_csum_tx(ksock_tx_t *tx) { - int i; - __u32 csum; - void *base; + int i; + __u32 csum; + void *base; LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg); LASSERT(tx->tx_conn != NULL); @@ -426,8 +423,8 @@ int ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle) { struct socket *sock = conn->ksnc_sock; - int len; - int rc; + int len; + int rc; rc = ksocknal_connsock_addref(conn); if (rc != 0) { @@ -436,7 +433,7 @@ ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int * return -ESHUTDOWN; } - rc = libcfs_sock_getbuf(sock, txmem, rxmem); + rc = lnet_sock_getbuf(sock, txmem, rxmem); if (rc == 0) { len = sizeof(*nagle); rc = kernel_getsockopt(sock, SOL_TCP, TCP_NODELAY, @@ -456,13 +453,13 @@ ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int * int ksocknal_lib_setup_sock(struct socket *sock) { - int rc; - int option; - int keep_idle; - int keep_intvl; - int keep_count; - int do_keepalive; - struct linger linger; + int rc; + int option; + int keep_idle; + int keep_intvl; + int keep_count; + int do_keepalive; + struct linger linger; sock->sk->sk_allocation = GFP_NOFS; @@ -498,9 +495,8 @@ ksocknal_lib_setup_sock(struct socket *sock) } } - rc = libcfs_sock_setbuf(sock, - *ksocknal_tunables.ksnd_tx_buffer_size, - *ksocknal_tunables.ksnd_rx_buffer_size); + rc = lnet_sock_setbuf(sock, *ksocknal_tunables.ksnd_tx_buffer_size, + *ksocknal_tunables.ksnd_rx_buffer_size); if (rc != 0) { CERROR("Can't set buffer tx %d, rx %d buffers: %d\n", *ksocknal_tunables.ksnd_tx_buffer_size, @@ -555,11 +551,11 @@ ksocknal_lib_setup_sock(struct socket *sock) void ksocknal_lib_push_conn(ksock_conn_t *conn) { - struct sock *sk; + struct sock *sk; struct tcp_sock *tp; - int nonagle; - int val = 1; - int rc; + int nonagle; + int val = 1; + int rc; rc = ksocknal_connsock_addref(conn); if (rc != 0) /* being shut down */ @@ -592,7 +588,7 @@ extern void ksocknal_write_callback(ksock_conn_t *conn); static void ksocknal_data_ready(struct sock *sk) { - ksock_conn_t *conn; + ksock_conn_t *conn; /* interleave correctly with closing sockets... */ LASSERT(!in_irq()); @@ -611,17 +607,17 @@ ksocknal_data_ready(struct sock *sk) static void ksocknal_write_space(struct sock *sk) { - ksock_conn_t *conn; - int wspace; - int min_wpace; + ksock_conn_t *conn; + int wspace; + int min_wpace; /* interleave correctly with closing sockets... 
*/ LASSERT(!in_irq()); read_lock(&ksocknal_data.ksnd_global_lock); conn = sk->sk_user_data; - wspace = SOCKNAL_WSPACE(sk); - min_wpace = SOCKNAL_MIN_WSPACE(sk); + wspace = sk_stream_wspace(sk); + min_wpace = sk_stream_min_wspace(sk); CDEBUG(D_NET, "sk %p wspace %d low water %d conn %p%s%s%s\n", sk, wspace, min_wpace, conn, @@ -689,7 +685,7 @@ ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn) int ksocknal_lib_memory_pressure(ksock_conn_t *conn) { - int rc = 0; + int rc = 0; ksock_sched_t *sched; sched = conn->ksnc_scheduler; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c index 86b88db1c..c3ac67698 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c @@ -145,40 +145,37 @@ ksock_tunables_t ksocknal_tunables; int ksocknal_tunables_init(void) { - /* initialize ksocknal_tunables structure */ - ksocknal_tunables.ksnd_timeout = &sock_timeout; - ksocknal_tunables.ksnd_nscheds = &nscheds; - ksocknal_tunables.ksnd_nconnds = &nconnds; - ksocknal_tunables.ksnd_nconnds_max = &nconnds_max; + ksocknal_tunables.ksnd_timeout = &sock_timeout; + ksocknal_tunables.ksnd_nscheds = &nscheds; + ksocknal_tunables.ksnd_nconnds = &nconnds; + ksocknal_tunables.ksnd_nconnds_max = &nconnds_max; ksocknal_tunables.ksnd_min_reconnectms = &min_reconnectms; ksocknal_tunables.ksnd_max_reconnectms = &max_reconnectms; - ksocknal_tunables.ksnd_eager_ack = &eager_ack; - ksocknal_tunables.ksnd_typed_conns = &typed_conns; - ksocknal_tunables.ksnd_min_bulk = &min_bulk; + ksocknal_tunables.ksnd_eager_ack = &eager_ack; + ksocknal_tunables.ksnd_typed_conns = &typed_conns; + ksocknal_tunables.ksnd_min_bulk = &min_bulk; ksocknal_tunables.ksnd_tx_buffer_size = &tx_buffer_size; ksocknal_tunables.ksnd_rx_buffer_size = &rx_buffer_size; - ksocknal_tunables.ksnd_nagle = &nagle; - ksocknal_tunables.ksnd_round_robin = &round_robin; - ksocknal_tunables.ksnd_keepalive = &keepalive; + ksocknal_tunables.ksnd_nagle = &nagle; + ksocknal_tunables.ksnd_round_robin = &round_robin; + ksocknal_tunables.ksnd_keepalive = &keepalive; ksocknal_tunables.ksnd_keepalive_idle = &keepalive_idle; ksocknal_tunables.ksnd_keepalive_count = &keepalive_count; ksocknal_tunables.ksnd_keepalive_intvl = &keepalive_intvl; - ksocknal_tunables.ksnd_credits = &credits; + ksocknal_tunables.ksnd_credits = &credits; ksocknal_tunables.ksnd_peertxcredits = &peer_credits; ksocknal_tunables.ksnd_peerrtrcredits = &peer_buffer_credits; - ksocknal_tunables.ksnd_peertimeout = &peer_timeout; - ksocknal_tunables.ksnd_enable_csum = &enable_csum; + ksocknal_tunables.ksnd_peertimeout = &peer_timeout; + ksocknal_tunables.ksnd_enable_csum = &enable_csum; ksocknal_tunables.ksnd_inject_csum_error = &inject_csum_error; ksocknal_tunables.ksnd_nonblk_zcack = &nonblk_zcack; ksocknal_tunables.ksnd_zc_min_payload = &zc_min_payload; - ksocknal_tunables.ksnd_zc_recv = &zc_recv; + ksocknal_tunables.ksnd_zc_recv = &zc_recv; ksocknal_tunables.ksnd_zc_recv_min_nfrags = &zc_recv_min_nfrags; - - #if SOCKNAL_VERSION_DEBUG - ksocknal_tunables.ksnd_protocol = &protocol; + ksocknal_tunables.ksnd_protocol = &protocol; #endif if (*ksocknal_tunables.ksnd_zc_min_payload < (2 << 10)) diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c index 8596581f5..986bce4c9 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c +++ 
b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c @@ -52,7 +52,7 @@ ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg) void ksocknal_next_tx_carrier(ksock_conn_t *conn) { - ksock_tx_t *tx = conn->ksnc_tx_carrier; + ksock_tx_t *tx = conn->ksnc_tx_carrier; /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */ LASSERT(!list_empty(&conn->ksnc_tx_queue)); @@ -119,7 +119,7 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn, static ksock_tx_t * ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg) { - ksock_tx_t *tx = conn->ksnc_tx_carrier; + ksock_tx_t *tx = conn->ksnc_tx_carrier; /* * Enqueue tx_msg: @@ -361,10 +361,10 @@ ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk) static int ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote) { - ksock_peer_t *peer = c->ksnc_peer; - ksock_conn_t *conn; - ksock_tx_t *tx; - int rc; + ksock_peer_t *peer = c->ksnc_peer; + ksock_conn_t *conn; + ksock_tx_t *tx; + int rc; read_lock(&ksocknal_data.ksnd_global_lock); @@ -405,11 +405,11 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote) static int ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) { - ksock_peer_t *peer = conn->ksnc_peer; - ksock_tx_t *tx; - ksock_tx_t *tmp; + ksock_peer_t *peer = conn->ksnc_peer; + ksock_tx_t *tx; + ksock_tx_t *tmp; LIST_HEAD(zlist); - int count; + int count; if (cookie1 == 0) cookie1 = cookie2; @@ -452,11 +452,11 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2) static int ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello) { - struct socket *sock = conn->ksnc_sock; - lnet_hdr_t *hdr; + struct socket *sock = conn->ksnc_sock; + lnet_hdr_t *hdr; lnet_magicversion_t *hmv; - int rc; - int i; + int rc; + int i; CLASSERT(sizeof(lnet_magicversion_t) == offsetof(lnet_hdr_t, src_nid)); @@ -470,7 +470,7 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello) /* Re-organize V2.x message header to V1.x (lnet_hdr_t) * header and send out */ - hmv->magic = cpu_to_le32 (LNET_PROTO_TCP_MAGIC); + hmv->magic = cpu_to_le32 (LNET_PROTO_TCP_MAGIC); hmv->version_major = cpu_to_le16 (KSOCK_PROTO_V1_MAJOR); hmv->version_minor = cpu_to_le16 (KSOCK_PROTO_V1_MINOR); @@ -488,16 +488,14 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello) LNET_UNLOCK(); } - hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid); - hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid); - hdr->type = cpu_to_le32 (LNET_MSG_HELLO); + hdr->src_nid = cpu_to_le64 (hello->kshm_src_nid); + hdr->src_pid = cpu_to_le32 (hello->kshm_src_pid); + hdr->type = cpu_to_le32 (LNET_MSG_HELLO); hdr->payload_length = cpu_to_le32 (hello->kshm_nips * sizeof(__u32)); hdr->msg.hello.type = cpu_to_le32 (hello->kshm_ctype); hdr->msg.hello.incarnation = cpu_to_le64 (hello->kshm_src_incarnation); - rc = libcfs_sock_write(sock, hdr, sizeof(*hdr), - lnet_acceptor_timeout()); - + rc = lnet_sock_write(sock, hdr, sizeof(*hdr), lnet_acceptor_timeout()); if (rc != 0) { CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n", rc, &conn->ksnc_ipaddr, conn->ksnc_port); @@ -511,9 +509,9 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello) hello->kshm_ips[i] = __cpu_to_le32 (hello->kshm_ips[i]); } - rc = libcfs_sock_write(sock, hello->kshm_ips, - hello->kshm_nips * sizeof(__u32), - lnet_acceptor_timeout()); + rc = lnet_sock_write(sock, hello->kshm_ips, + hello->kshm_nips * sizeof(__u32), + lnet_acceptor_timeout()); if (rc != 0) { CNETERR("Error %d sending HELLO payload (%d) to 
%pI4h/%d\n", rc, hello->kshm_nips, @@ -529,7 +527,7 @@ static int ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello) { struct socket *sock = conn->ksnc_sock; - int rc; + int rc; hello->kshm_magic = LNET_PROTO_MAGIC; hello->kshm_version = conn->ksnc_proto->pro_version; @@ -544,9 +542,8 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello) LNET_UNLOCK(); } - rc = libcfs_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips), - lnet_acceptor_timeout()); - + rc = lnet_sock_write(sock, hello, offsetof(ksock_hello_msg_t, kshm_ips), + lnet_acceptor_timeout()); if (rc != 0) { CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n", rc, &conn->ksnc_ipaddr, conn->ksnc_port); @@ -556,9 +553,9 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello) if (hello->kshm_nips == 0) return 0; - rc = libcfs_sock_write(sock, hello->kshm_ips, - hello->kshm_nips * sizeof(__u32), - lnet_acceptor_timeout()); + rc = lnet_sock_write(sock, hello->kshm_ips, + hello->kshm_nips * sizeof(__u32), + lnet_acceptor_timeout()); if (rc != 0) { CNETERR("Error %d sending HELLO payload (%d) to %pI4h/%d\n", rc, hello->kshm_nips, @@ -572,10 +569,10 @@ static int ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout) { - struct socket *sock = conn->ksnc_sock; - lnet_hdr_t *hdr; - int rc; - int i; + struct socket *sock = conn->ksnc_sock; + lnet_hdr_t *hdr; + int rc; + int i; LIBCFS_ALLOC(hdr, sizeof(*hdr)); if (hdr == NULL) { @@ -583,9 +580,9 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, return -ENOMEM; } - rc = libcfs_sock_read(sock, &hdr->src_nid, - sizeof(*hdr) - offsetof(lnet_hdr_t, src_nid), - timeout); + rc = lnet_sock_read(sock, &hdr->src_nid, + sizeof(*hdr) - offsetof(lnet_hdr_t, src_nid), + timeout); if (rc != 0) { CERROR("Error %d reading rest of HELLO hdr from %pI4h\n", rc, &conn->ksnc_ipaddr); @@ -602,12 +599,12 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, goto out; } - hello->kshm_src_nid = le64_to_cpu(hdr->src_nid); - hello->kshm_src_pid = le32_to_cpu(hdr->src_pid); + hello->kshm_src_nid = le64_to_cpu(hdr->src_nid); + hello->kshm_src_pid = le32_to_cpu(hdr->src_pid); hello->kshm_src_incarnation = le64_to_cpu(hdr->msg.hello.incarnation); - hello->kshm_ctype = le32_to_cpu(hdr->msg.hello.type); - hello->kshm_nips = le32_to_cpu(hdr->payload_length) / - sizeof(__u32); + hello->kshm_ctype = le32_to_cpu(hdr->msg.hello.type); + hello->kshm_nips = le32_to_cpu(hdr->payload_length) / + sizeof(__u32); if (hello->kshm_nips > LNET_MAX_INTERFACES) { CERROR("Bad nips %d from ip %pI4h\n", @@ -619,8 +616,8 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello, if (hello->kshm_nips == 0) goto out; - rc = libcfs_sock_read(sock, hello->kshm_ips, - hello->kshm_nips * sizeof(__u32), timeout); + rc = lnet_sock_read(sock, hello->kshm_ips, + hello->kshm_nips * sizeof(__u32), timeout); if (rc != 0) { CERROR("Error %d reading IPs from ip %pI4h\n", rc, &conn->ksnc_ipaddr); @@ -647,19 +644,19 @@ out: static int ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout) { - struct socket *sock = conn->ksnc_sock; - int rc; - int i; + struct socket *sock = conn->ksnc_sock; + int rc; + int i; if (hello->kshm_magic == LNET_PROTO_MAGIC) conn->ksnc_flip = 0; else conn->ksnc_flip = 1; - rc = libcfs_sock_read(sock, &hello->kshm_src_nid, - offsetof(ksock_hello_msg_t, kshm_ips) - - offsetof(ksock_hello_msg_t, kshm_src_nid), - timeout); + rc = lnet_sock_read(sock, &hello->kshm_src_nid, + 
offsetof(ksock_hello_msg_t, kshm_ips) - + offsetof(ksock_hello_msg_t, kshm_src_nid), + timeout); if (rc != 0) { CERROR("Error %d reading HELLO from %pI4h\n", rc, &conn->ksnc_ipaddr); @@ -687,8 +684,8 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout if (hello->kshm_nips == 0) return 0; - rc = libcfs_sock_read(sock, hello->kshm_ips, - hello->kshm_nips * sizeof(__u32), timeout); + rc = lnet_sock_read(sock, hello->kshm_ips, + hello->kshm_nips * sizeof(__u32), timeout); if (rc != 0) { CERROR("Error %d reading IPs from ip %pI4h\n", rc, &conn->ksnc_ipaddr); @@ -746,9 +743,9 @@ ksocknal_pack_msg_v2(ksock_tx_t *tx) static void ksocknal_unpack_msg_v1(ksock_msg_t *msg) { - msg->ksm_csum = 0; - msg->ksm_type = KSOCK_MSG_LNET; - msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0; + msg->ksm_csum = 0; + msg->ksm_type = KSOCK_MSG_LNET; + msg->ksm_zc_cookies[0] = msg->ksm_zc_cookies[1] = 0; } static void @@ -758,40 +755,40 @@ ksocknal_unpack_msg_v2(ksock_msg_t *msg) } ksock_proto_t ksocknal_protocol_v1x = { - .pro_version = KSOCK_PROTO_V1, - .pro_send_hello = ksocknal_send_hello_v1, - .pro_recv_hello = ksocknal_recv_hello_v1, - .pro_pack = ksocknal_pack_msg_v1, - .pro_unpack = ksocknal_unpack_msg_v1, - .pro_queue_tx_msg = ksocknal_queue_tx_msg_v1, - .pro_handle_zcreq = NULL, - .pro_handle_zcack = NULL, - .pro_queue_tx_zcack = NULL, - .pro_match_tx = ksocknal_match_tx + .pro_version = KSOCK_PROTO_V1, + .pro_send_hello = ksocknal_send_hello_v1, + .pro_recv_hello = ksocknal_recv_hello_v1, + .pro_pack = ksocknal_pack_msg_v1, + .pro_unpack = ksocknal_unpack_msg_v1, + .pro_queue_tx_msg = ksocknal_queue_tx_msg_v1, + .pro_handle_zcreq = NULL, + .pro_handle_zcack = NULL, + .pro_queue_tx_zcack = NULL, + .pro_match_tx = ksocknal_match_tx }; ksock_proto_t ksocknal_protocol_v2x = { - .pro_version = KSOCK_PROTO_V2, - .pro_send_hello = ksocknal_send_hello_v2, - .pro_recv_hello = ksocknal_recv_hello_v2, - .pro_pack = ksocknal_pack_msg_v2, - .pro_unpack = ksocknal_unpack_msg_v2, - .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2, - .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v2, - .pro_handle_zcreq = ksocknal_handle_zcreq, - .pro_handle_zcack = ksocknal_handle_zcack, - .pro_match_tx = ksocknal_match_tx + .pro_version = KSOCK_PROTO_V2, + .pro_send_hello = ksocknal_send_hello_v2, + .pro_recv_hello = ksocknal_recv_hello_v2, + .pro_pack = ksocknal_pack_msg_v2, + .pro_unpack = ksocknal_unpack_msg_v2, + .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2, + .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v2, + .pro_handle_zcreq = ksocknal_handle_zcreq, + .pro_handle_zcack = ksocknal_handle_zcack, + .pro_match_tx = ksocknal_match_tx }; ksock_proto_t ksocknal_protocol_v3x = { - .pro_version = KSOCK_PROTO_V3, - .pro_send_hello = ksocknal_send_hello_v2, - .pro_recv_hello = ksocknal_recv_hello_v2, - .pro_pack = ksocknal_pack_msg_v2, - .pro_unpack = ksocknal_unpack_msg_v2, - .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2, - .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v3, - .pro_handle_zcreq = ksocknal_handle_zcreq, - .pro_handle_zcack = ksocknal_handle_zcack, - .pro_match_tx = ksocknal_match_tx_v3 + .pro_version = KSOCK_PROTO_V3, + .pro_send_hello = ksocknal_send_hello_v2, + .pro_recv_hello = ksocknal_recv_hello_v2, + .pro_pack = ksocknal_pack_msg_v2, + .pro_unpack = ksocknal_unpack_msg_v2, + .pro_queue_tx_msg = ksocknal_queue_tx_msg_v2, + .pro_queue_tx_zcack = ksocknal_queue_tx_zcack_v3, + .pro_handle_zcreq = ksocknal_handle_zcreq, + .pro_handle_zcack = ksocknal_handle_zcack, + .pro_match_tx = 
ksocknal_match_tx_v3 };
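
A note on the reaper's peer-timeout scan in the socklnd_cb.c hunk above: every 'p' seconds it walks a slice of the peer hash table sized so that each peer gets looked at roughly 'n' times per timeout interval. The hunk shows n = 4, p = 1 and chunk initialized to ksnd_peer_hash_size, but the scaling expression itself is elided; the standalone sketch below assumes chunk = hash_size * n * p / timeout, floored at 1, purely to illustrate the proportional-scan idea, and the hash size and timeout are made-up values.

#include <stdio.h>

int main(void)
{
	const int hash_size = 101;	/* hypothetical ksnd_peer_hash_size */
	const int timeout = 50;		/* hypothetical socket timeout, seconds */
	const int n = 4, p = 1;		/* constants from the hunk above */
	int peer_index = 0;
	int chunk = hash_size * n * p / timeout;
	int pass, i;

	if (chunk == 0)			/* always make some progress */
		chunk = 1;

	/* two simulated reaper wakeups, 'p' seconds apart */
	for (pass = 0; pass < 2; pass++)
		for (i = 0; i < chunk; i++) {
			printf("pass %d: check hash bucket %d\n",
			       pass, peer_index);
			peer_index = (peer_index + 1) % hash_size;
		}
	return 0;
}

With these numbers each wakeup covers 8 of 101 buckets, so the whole table is revisited about four times per timeout interval without ever holding the global lock for a full sweep.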
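The deleted socklnd_lib-linux.h carried ksocknal_csum(), which defaults to crc32_le() but keeps a byte-wise checksum in a disabled branch: it counts bytes in the upper 24 bits while accumulating each byte (mod 256) in the low 8. The sketch below lifts that fallback verbatim into user space; the ~0 seed and the test string are illustrative choices, not taken from the hunks shown here.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Fallback from the deleted header: bump the upper 24 bits once per
 * byte, fold the byte itself into the low 8 bits. */
static uint32_t ksocknal_csum_fallback(uint32_t crc,
				       const unsigned char *p, size_t len)
{
	while (len-- > 0)
		crc = ((crc + 0x100) & ~0xff) | ((crc + *p++) & 0xff);
	return crc;
}

int main(void)
{
	const char *msg = "HELLO";

	printf("csum = 0x%08x\n",
	       (unsigned)ksocknal_csum_fallback(~0u,
						(const unsigned char *)msg,
						strlen(msg)));
	return 0;
}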
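In ksocknal_lib_send_kiov() above, the non-zero-copy path kmap()s each page fragment into a scratch kvec and pushes the lot out with a single sendmsg(); only when a zero-copy cookie is set does it go through tcp_sendpage(). Below is a user-space analogue of the gather path, with plain buffers standing in for kmap()ed pages and writev() for kernel sendmsg(); the frag struct is a made-up stand-in for lnet_kiov_t.

#include <stddef.h>
#include <sys/uio.h>
#include <unistd.h>

struct frag {			/* hypothetical stand-in for lnet_kiov_t */
	const char *page;	/* would be a kmap()ed struct page */
	size_t offset;
	size_t len;
};

int main(void)
{
	const char page0[] = "HELLO ";
	const char page1[] = "WORLD\n";
	struct frag kiov[2] = {
		{ page0, 0, 6 },
		{ page1, 0, 6 },
	};
	struct iovec scratchiov[2];
	size_t nob = 0;
	int i;

	/* the gather loop: map each fragment into the scratch iovec */
	for (i = 0; i < 2; i++) {
		scratchiov[i].iov_base = (void *)(kiov[i].page +
						  kiov[i].offset);
		scratchiov[i].iov_len = kiov[i].len;
		nob += kiov[i].len;
	}

	/* one gathered write, standing in for kernel sendmsg() */
	return writev(STDOUT_FILENO, scratchiov, 2) == (ssize_t)nob ? 0 : 1;
}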
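ksocknal_recv_hello_v2() above decides byte order from the HELLO magic: a value matching LNET_PROTO_MAGIC means the peer shares our endianness, anything else sets ksnc_flip so every later field is byte-swapped on receipt. A minimal sketch of that probe follows; the magic value and the swab32() helper are placeholders, not the real LNET definitions.

#include <stdio.h>
#include <stdint.h>

#define PROTO_MAGIC 0x50524f54u		/* placeholder, not LNET's value */

static uint32_t swab32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0xff00) |
	       ((v << 8) & 0xff0000) | (v << 24);
}

int main(void)
{
	/* pretend the peer has the opposite byte order */
	uint32_t wire_magic = swab32(PROTO_MAGIC);
	int flip = (wire_magic != PROTO_MAGIC);	/* ksnc_flip */

	uint32_t wire_nips = swab32(3);		/* kshm_nips as received */
	uint32_t nips = flip ? swab32(wire_nips) : wire_nips;

	printf("flip=%d nips=%u\n", flip, (unsigned)nips);
	return 0;
}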
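Finally, the ksock_proto_t tables that close the diff are plain function-pointer dispatch: each wire version fills in its handlers, and hooks a version lacks (the zero-copy ones in v1x) stay NULL, which callers are expected to test before the call. A compact user-space sketch of the same pattern, with hypothetical stand-in types and handlers rather than the kernel ones:

#include <stdio.h>

struct proto {				/* hypothetical mini ksock_proto_t */
	int version;
	void (*send_hello)(void);
	int (*handle_zcack)(unsigned long long c1, unsigned long long c2);
};

static void send_hello_v1(void) { puts("v1 HELLO"); }
static void send_hello_v2(void) { puts("v2 HELLO"); }

static int handle_zcack_v2(unsigned long long c1, unsigned long long c2)
{
	printf("ack cookies %llu..%llu\n", c1, c2);
	return 0;
}

static const struct proto proto_v1 = {
	.version	= 1,
	.send_hello	= send_hello_v1,
	.handle_zcack	= NULL,		/* no zero-copy acks in v1 */
};

static const struct proto proto_v2 = {
	.version	= 2,
	.send_hello	= send_hello_v2,
	.handle_zcack	= handle_zcack_v2,
};

static void run(const struct proto *p)
{
	p->send_hello();
	if (p->handle_zcack != NULL)	/* optional hook, as in v1x */
		p->handle_zcack(1, 1);
}

int main(void)
{
	run(&proto_v1);
	run(&proto_v2);
	return 0;
}

This is why v3x above can reuse nearly all of the v2 handlers and swap in only pro_queue_tx_zcack and pro_match_tx: the table is the unit of variation, not the call sites.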