author     André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-01-20 14:01:31 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-01-20 14:01:31 -0300
commit     b4b7ff4b08e691656c9d77c758fc355833128ac0
tree       82fcb00e6b918026dc9f2d1f05ed8eee83874cc0
parent     35acfa0fc609f2a2cd95cef4a6a9c3a5c38f1778
Linux-libre 4.4-gnupck-4.4-gnu
Diffstat (limited to 'drivers/staging/lustre/lnet/klnds/o2iblnd')
-rw-r--r--  drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c              44
-rw-r--r--  drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h             293
-rw-r--r--  drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c          100
-rw-r--r--  drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c     1
4 files changed, 205 insertions(+), 233 deletions(-)
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index c29d2ced2..7c730e3f7 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -2154,23 +2154,23 @@ static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
if (rc != 0)
return rc;
- LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs));
- if (hdev->ibh_mrs == NULL) {
- CERROR("Failed to allocate MRs table\n");
- return -ENOMEM;
- }
+ LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs));
+ if (hdev->ibh_mrs == NULL) {
+ CERROR("Failed to allocate MRs table\n");
+ return -ENOMEM;
+ }
- hdev->ibh_mrs[0] = NULL;
- hdev->ibh_nmrs = 1;
+ hdev->ibh_mrs[0] = NULL;
+ hdev->ibh_nmrs = 1;
- mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
- if (IS_ERR(mr)) {
- CERROR("Failed ib_get_dma_mr : %ld\n", PTR_ERR(mr));
- kiblnd_hdev_cleanup_mrs(hdev);
- return PTR_ERR(mr);
- }
+ mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
+ if (IS_ERR(mr)) {
+ CERROR("Failed ib_get_dma_mr : %ld\n", PTR_ERR(mr));
+ kiblnd_hdev_cleanup_mrs(hdev);
+ return PTR_ERR(mr);
+ }
- hdev->ibh_mrs[0] = mr;
+ hdev->ibh_mrs[0] = mr;
return 0;
}
@@ -2228,13 +2228,10 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev)
return rc;
}
- if (dev->ibd_hdev->ibh_ibdev == cmid->device) {
- /* don't need device failover */
- rdma_destroy_id(cmid);
- return 0;
- }
+ rc = dev->ibd_hdev->ibh_ibdev != cmid->device; /* true for failover */
+ rdma_destroy_id(cmid);
- return 1;
+ return rc;
}
int kiblnd_dev_failover(kib_dev_t *dev)
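Note: the kiblnd_dev_need_failover() hunk above folds the early-return branch
into a boolean computed before the cleanup that both paths share. A hedged
sketch of the resulting control flow:

    /* Sketch only: compute the verdict, then do the common cleanup.
     * Nonzero means the bound HCA no longer matches and failover is needed. */
    rc = dev->ibd_hdev->ibh_ibdev != cmid->device;
    rdma_destroy_id(cmid);      /* now destroyed on every path */
    return rc;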
@@ -2752,7 +2749,7 @@ int kiblnd_startup(lnet_ni_t *ni)
char *ifname;
kib_dev_t *ibdev = NULL;
kib_net_t *net;
- struct timeval tv;
+ struct timespec64 tv;
unsigned long flags;
int rc;
int newdev;
@@ -2770,8 +2767,9 @@ int kiblnd_startup(lnet_ni_t *ni)
if (net == NULL)
goto net_failed;
- do_gettimeofday(&tv);
- net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+ ktime_get_real_ts64(&tv);
+ net->ibn_incarnation = tv.tv_sec * USEC_PER_SEC +
+ tv.tv_nsec / NSEC_PER_USEC;
ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout;
ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits;
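Note: the hunk above replaces the y2038-unsafe do_gettimeofday() with
ktime_get_real_ts64(); struct timeval carries a 32-bit time_t on 32-bit
targets, while timespec64 does not overflow. A minimal sketch of the same
microsecond-resolution incarnation stamp (the helper name is illustrative;
the constants come from <linux/time64.h>):

    #include <linux/ktime.h>
    #include <linux/time64.h>

    static u64 kiblnd_incarnation_usec(void)   /* hypothetical helper */
    {
            struct timespec64 ts;

            ktime_get_real_ts64(&ts);
            return ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC;
    }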
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index f4b6c33ac..263db37de 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -79,38 +79,33 @@
#define IBLND_N_SCHED_HIGH 4
typedef struct {
- int *kib_dev_failover; /* HCA failover */
- unsigned int *kib_service; /* IB service number */
- int *kib_min_reconnect_interval; /* first failed connection
- * retry... */
- int *kib_max_reconnect_interval; /* ...exponentially increasing
- * to this */
- int *kib_cksum; /* checksum kib_msg_t? */
- int *kib_timeout; /* comms timeout (seconds) */
- int *kib_keepalive; /* keepalive timeout (seconds) */
- int *kib_ntx; /* # tx descs */
- int *kib_credits; /* # concurrent sends */
- int *kib_peertxcredits; /* # concurrent sends to 1 peer */
- int *kib_peerrtrcredits; /* # per-peer router buffer
- * credits */
- int *kib_peercredits_hiw; /* # when eagerly to return
- * credits */
- int *kib_peertimeout; /* seconds to consider peer dead */
- char **kib_default_ipif; /* default IPoIB interface */
- int *kib_retry_count;
- int *kib_rnr_retry_count;
- int *kib_concurrent_sends; /* send work queue sizing */
- int *kib_ib_mtu; /* IB MTU */
- int *kib_map_on_demand; /* map-on-demand if RD has more
- * fragments than this value, 0
- * disable map-on-demand */
- int *kib_fmr_pool_size; /* # FMRs in pool */
- int *kib_fmr_flush_trigger; /* When to trigger FMR flush */
- int *kib_fmr_cache; /* enable FMR pool cache? */
- int *kib_require_priv_port; /* accept only privileged ports */
- int *kib_use_priv_port; /* use privileged port for active
- * connect */
- int *kib_nscheds; /* # threads on each CPT */
+ int *kib_dev_failover; /* HCA failover */
+ unsigned int *kib_service; /* IB service number */
+ int *kib_min_reconnect_interval; /* first failed connection retry... */
+ int *kib_max_reconnect_interval; /* exponentially increasing to this */
+ int *kib_cksum; /* checksum kib_msg_t? */
+ int *kib_timeout; /* comms timeout (seconds) */
+ int *kib_keepalive; /* keepalive timeout (seconds) */
+ int *kib_ntx; /* # tx descs */
+ int *kib_credits; /* # concurrent sends */
+ int *kib_peertxcredits; /* # concurrent sends to 1 peer */
+ int *kib_peerrtrcredits; /* # per-peer router buffer credits */
+ int *kib_peercredits_hiw; /* # when eagerly to return credits */
+ int *kib_peertimeout; /* seconds to consider peer dead */
+ char **kib_default_ipif; /* default IPoIB interface */
+ int *kib_retry_count;
+ int *kib_rnr_retry_count;
+ int *kib_concurrent_sends; /* send work queue sizing */
+ int *kib_ib_mtu; /* IB MTU */
+ int *kib_map_on_demand; /* map-on-demand if RD has more */
+ /* fragments than this value, 0 */
+ /* disable map-on-demand */
+ int *kib_fmr_pool_size; /* # FMRs in pool */
+ int *kib_fmr_flush_trigger; /* When to trigger FMR flush */
+ int *kib_fmr_cache; /* enable FMR pool cache? */
+ int *kib_require_priv_port; /* accept only privileged ports */
+ int *kib_use_priv_port; /* use privileged port for active connect */
+ int *kib_nscheds; /* # threads on each CPT */
} kib_tunables_t;
extern kib_tunables_t kiblnd_tunables;
@@ -128,7 +123,9 @@ extern kib_tunables_t kiblnd_tunables;
IBLND_CREDIT_HIGHWATER_V1 : \
*kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */
-#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(cb, dev, ps, qpt)
+#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(&init_net, \
+ cb, dev, \
+ ps, qpt)
static inline int
kiblnd_concurrent_sends_v1(void)
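Note: rdma_create_id() gained a struct net * first parameter in Linux 4.4,
and the wrapper above pins &init_net so existing callers stay unchanged. A
hedged usage sketch, with the callback and flags this driver uses elsewhere:

    struct rdma_cm_id *cmid;

    cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP,
                                 IB_QPT_RC);
    if (IS_ERR(cmid))
            return PTR_ERR(cmid);   /* namespace argument added by the macro */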
@@ -199,8 +196,7 @@ typedef struct {
unsigned long ibd_next_failover;
int ibd_failed_failover; /* # failover failures */
unsigned int ibd_failover; /* failover in progress */
- unsigned int ibd_can_failover; /* IPoIB interface is a bonding
- * master */
+ unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */
struct list_head ibd_nets;
struct kib_hca_dev *ibd_hdev;
} kib_dev_t;
@@ -249,28 +245,26 @@ typedef struct kib_poolset {
char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
struct list_head ps_pool_list; /* list of pools */
struct list_head ps_failed_pool_list;/* failed pool list */
- unsigned long ps_next_retry; /* time stamp for retry if
- * failed to allocate */
+ unsigned long ps_next_retry; /* time stamp for retry if */
+ /* failed to allocate */
int ps_increasing; /* is allocating new pool */
int ps_pool_size; /* new pool size */
int ps_cpt; /* CPT id */
kib_ps_pool_create_t ps_pool_create; /* create a new pool */
kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */
- kib_ps_node_init_t ps_node_init; /* initialize new allocated
- * node */
+ kib_ps_node_init_t ps_node_init; /* initialize new allocated node */
kib_ps_node_fini_t ps_node_fini; /* finalize node */
} kib_poolset_t;
typedef struct kib_pool {
- struct list_head po_list; /* chain on pool list */
- struct list_head po_free_list; /* pre-allocated node */
- kib_poolset_t *po_owner; /* pool_set of this pool */
- unsigned long po_deadline; /* deadline of this pool */
- int po_allocated; /* # of elements in use */
- int po_failed; /* pool is created on failed
- * HCA */
- int po_size; /* # of pre-allocated elements */
+ struct list_head po_list; /* chain on pool list */
+ struct list_head po_free_list; /* pre-allocated node */
+ kib_poolset_t *po_owner; /* pool_set of this pool */
+ unsigned long po_deadline; /* deadline of this pool */
+ int po_allocated; /* # of elements in use */
+ int po_failed; /* pool is created on failed HCA */
+ int po_size; /* # of pre-allocated elements */
} kib_pool_t;
typedef struct {
@@ -295,8 +289,8 @@ typedef struct {
int fps_pool_size;
int fps_flush_trigger;
int fps_increasing; /* is allocating new pool */
- unsigned long fps_next_retry; /* time stamp for retry if
- * failed to allocate */
+ unsigned long fps_next_retry; /* time stamp for retry if*/
+ /* failed to allocate */
} kib_fmr_poolset_t;
typedef struct {
@@ -344,31 +338,22 @@ struct kib_sched_info {
};
typedef struct {
- int kib_init; /* initialisation state */
- int kib_shutdown; /* shut down? */
- struct list_head kib_devs; /* IB devices extant */
- struct list_head kib_failed_devs; /* list head of failed
- * devices */
- wait_queue_head_t kib_failover_waitq; /* schedulers sleep here */
- atomic_t kib_nthreads; /* # live threads */
- rwlock_t kib_global_lock; /* stabilize net/dev/peer/conn
- * ops */
- struct list_head *kib_peers; /* hash table of all my known
- * peers */
- int kib_peer_hash_size; /* size of kib_peers */
- void *kib_connd; /* the connd task
- * (serialisation assertions)
- */
- struct list_head kib_connd_conns; /* connections to
- * setup/teardown */
- struct list_head kib_connd_zombies; /* connections with zero
- * refcount */
- wait_queue_head_t kib_connd_waitq; /* connection daemon sleeps
- * here */
- spinlock_t kib_connd_lock; /* serialise */
- struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
- struct kib_sched_info **kib_scheds; /* percpt data for schedulers
- */
+ int kib_init; /* initialisation state */
+ int kib_shutdown; /* shut down? */
+ struct list_head kib_devs; /* IB devices extant */
+ struct list_head kib_failed_devs; /* list head of failed devices */
+ wait_queue_head_t kib_failover_waitq; /* schedulers sleep here */
+ atomic_t kib_nthreads; /* # live threads */
+ rwlock_t kib_global_lock; /* stabilize net/dev/peer/conn ops */
+ struct list_head *kib_peers; /* hash table of all my known peers */
+ int kib_peer_hash_size; /* size of kib_peers */
+ void *kib_connd; /* the connd task (serialisation assertions) */
+ struct list_head kib_connd_conns; /* connections to setup/teardown */
+ struct list_head kib_connd_zombies; /* connections with zero refcount */
+ wait_queue_head_t kib_connd_waitq; /* connection daemon sleeps here */
+ spinlock_t kib_connd_lock; /* serialise */
+ struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
+ struct kib_sched_info **kib_scheds; /* percpt data for schedulers */
} kib_data_t;
#define IBLND_INIT_NOTHING 0
@@ -480,10 +465,10 @@ typedef struct {
#define IBLND_REJECT_FATAL 3 /* Anything else */
#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */
#define IBLND_REJECT_CONN_STALE 5 /* stale peer */
-#define IBLND_REJECT_RDMA_FRAGS 6 /* Fatal: peer's rdma frags can't match
- * mine */
-#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't
- * match mine */
+#define IBLND_REJECT_RDMA_FRAGS 6 /* Fatal: peer's rdma frags can't match */
+ /* mine */
+#define IBLND_REJECT_MSG_QUEUE_SIZE 7 /* Fatal: peer's msg queue size can't */
+ /* match mine */
/***********************************************************************/
@@ -491,8 +476,7 @@ typedef struct kib_rx /* receive message */
{
struct list_head rx_list; /* queue for attention */
struct kib_conn *rx_conn; /* owning conn */
- int rx_nob; /* # bytes received (-1 while
- * posted) */
+ int rx_nob; /* # bytes received (-1 while posted) */
enum ib_wc_status rx_status; /* completion status */
kib_msg_t *rx_msg; /* message buffer (host vaddr) */
__u64 rx_msgaddr; /* message buffer (I/O addr) */
@@ -501,38 +485,35 @@ typedef struct kib_rx /* receive message */
struct ib_sge rx_sge; /* ...and its memory */
} kib_rx_t;
-#define IBLND_POSTRX_DONT_POST 0 /* don't post */
-#define IBLND_POSTRX_NO_CREDIT 1 /* post: no credits */
-#define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer back 1 credit */
-#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give myself back 1 reserved
- * credit */
+#define IBLND_POSTRX_DONT_POST 0 /* don't post */
+#define IBLND_POSTRX_NO_CREDIT 1 /* post: no credits */
+#define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer back 1 credit */
+#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give self back 1 reserved credit */
typedef struct kib_tx /* transmit message */
{
- struct list_head tx_list; /* queue on idle_txs ibc_tx_queue
- * etc. */
- kib_tx_pool_t *tx_pool; /* pool I'm from */
- struct kib_conn *tx_conn; /* owning conn */
- short tx_sending; /* # tx callbacks outstanding */
- short tx_queued; /* queued for sending */
- short tx_waiting; /* waiting for peer */
- int tx_status; /* LNET completion status */
- unsigned long tx_deadline; /* completion deadline */
- __u64 tx_cookie; /* completion cookie */
- lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on
- * completion */
- kib_msg_t *tx_msg; /* message buffer (host vaddr) */
- __u64 tx_msgaddr; /* message buffer (I/O addr) */
+ struct list_head tx_list; /* queue on idle_txs ibc_tx_queue etc. */
+ kib_tx_pool_t *tx_pool; /* pool I'm from */
+ struct kib_conn *tx_conn; /* owning conn */
+ short tx_sending; /* # tx callbacks outstanding */
+ short tx_queued; /* queued for sending */
+ short tx_waiting; /* waiting for peer */
+ int tx_status; /* LNET completion status */
+ unsigned long tx_deadline; /* completion deadline */
+ __u64 tx_cookie; /* completion cookie */
+ lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
+ kib_msg_t *tx_msg; /* message buffer (host vaddr) */
+ __u64 tx_msgaddr; /* message buffer (I/O addr) */
DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); /* for dma_unmap_single() */
- int tx_nwrq; /* # send work items */
- struct ib_send_wr *tx_wrq; /* send work items... */
- struct ib_sge *tx_sge; /* ...and their memory */
- kib_rdma_desc_t *tx_rd; /* rdma descriptor */
- int tx_nfrags; /* # entries in... */
- struct scatterlist *tx_frags; /* dma_map_sg descriptor */
- __u64 *tx_pages; /* rdma phys page addrs */
- kib_fmr_t fmr; /* FMR */
- int tx_dmadir; /* dma direction */
+ int tx_nwrq; /* # send work items */
+ struct ib_rdma_wr *tx_wrq; /* send work items... */
+ struct ib_sge *tx_sge; /* ...and their memory */
+ kib_rdma_desc_t *tx_rd; /* rdma descriptor */
+ int tx_nfrags; /* # entries in... */
+ struct scatterlist *tx_frags; /* dma_map_sg descriptor */
+ __u64 *tx_pages; /* rdma phys page addrs */
+ kib_fmr_t fmr; /* FMR */
+ int tx_dmadir; /* dma direction */
} kib_tx_t;
typedef struct kib_connvars {
@@ -540,53 +521,44 @@ typedef struct kib_connvars {
} kib_connvars_t;
typedef struct kib_conn {
- struct kib_sched_info *ibc_sched; /* scheduler information */
- struct kib_peer *ibc_peer; /* owning peer */
- kib_hca_dev_t *ibc_hdev; /* HCA bound on */
- struct list_head ibc_list; /* stash on peer's conn
- * list */
- struct list_head ibc_sched_list; /* schedule for attention */
- __u16 ibc_version; /* version of connection */
- __u64 ibc_incarnation; /* which instance of the
- * peer */
- atomic_t ibc_refcount; /* # users */
- int ibc_state; /* what's happening */
- int ibc_nsends_posted; /* # uncompleted sends */
- int ibc_noops_posted; /* # uncompleted NOOPs */
- int ibc_credits; /* # credits I have */
+ struct kib_sched_info *ibc_sched; /* scheduler information */
+ struct kib_peer *ibc_peer; /* owning peer */
+ kib_hca_dev_t *ibc_hdev; /* HCA bound on */
+ struct list_head ibc_list; /* stash on peer's conn list */
+ struct list_head ibc_sched_list; /* schedule for attention */
+ __u16 ibc_version; /* version of connection */
+ __u64 ibc_incarnation; /* which instance of the peer */
+ atomic_t ibc_refcount; /* # users */
+ int ibc_state; /* what's happening */
+ int ibc_nsends_posted; /* # uncompleted sends */
+ int ibc_noops_posted; /* # uncompleted NOOPs */
+ int ibc_credits; /* # credits I have */
int ibc_outstanding_credits; /* # credits to return */
int ibc_reserved_credits; /* # ACK/DONE msg credits */
- int ibc_comms_error; /* set on comms error */
- unsigned int ibc_nrx:16; /* receive buffers owned */
- unsigned int ibc_scheduled:1; /* scheduled for attention
- */
- unsigned int ibc_ready:1; /* CQ callback fired */
- unsigned long ibc_last_send; /* time of last send */
- struct list_head ibc_connd_list; /* link chain for
- * kiblnd_check_conns only
- */
- struct list_head ibc_early_rxs; /* rxs completed before
- * ESTABLISHED */
- struct list_head ibc_tx_noops; /* IBLND_MSG_NOOPs for
- * IBLND_MSG_VERSION_1 */
- struct list_head ibc_tx_queue; /* sends that need a credit
- */
- struct list_head ibc_tx_queue_nocred; /* sends that don't need a
- * credit */
- struct list_head ibc_tx_queue_rsrvd; /* sends that need to
- * reserve an ACK/DONE msg
- */
- struct list_head ibc_active_txs; /* active tx awaiting
- * completion */
- spinlock_t ibc_lock; /* serialise */
- kib_rx_t *ibc_rxs; /* the rx descs */
- kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */
-
- struct rdma_cm_id *ibc_cmid; /* CM id */
- struct ib_cq *ibc_cq; /* completion queue */
-
- kib_connvars_t *ibc_connvars; /* in-progress connection
- * state */
+ int ibc_comms_error; /* set on comms error */
+ unsigned int ibc_nrx:16; /* receive buffers owned */
+ unsigned int ibc_scheduled:1; /* scheduled for attention */
+ unsigned int ibc_ready:1; /* CQ callback fired */
+ unsigned long ibc_last_send; /* time of last send */
+ struct list_head ibc_connd_list; /* link chain for */
+ /* kiblnd_check_conns only */
+ struct list_head ibc_early_rxs; /* rxs completed before ESTABLISHED */
+ struct list_head ibc_tx_noops; /* IBLND_MSG_NOOPs for */
+ /* IBLND_MSG_VERSION_1 */
+ struct list_head ibc_tx_queue; /* sends that need a credit */
+ struct list_head ibc_tx_queue_nocred; /* sends that don't need a */
+ /* credit */
+ struct list_head ibc_tx_queue_rsrvd; /* sends that need to */
+ /* reserve an ACK/DONE msg */
+ struct list_head ibc_active_txs; /* active tx awaiting completion */
+ spinlock_t ibc_lock; /* serialise */
+ kib_rx_t *ibc_rxs; /* the rx descs */
+ kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */
+
+ struct rdma_cm_id *ibc_cmid; /* CM id */
+ struct ib_cq *ibc_cq; /* completion queue */
+
+ kib_connvars_t *ibc_connvars; /* in-progress connection state */
} kib_conn_t;
#define IBLND_CONN_INIT 0 /* being initialised */
@@ -780,8 +752,8 @@ kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
return NULL;
}
-/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the
- * lowest bits of the work request id to stash the work item type. */
+/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the */
+/* lowest bits of the work request id to stash the work item type. */
#define IBLND_WID_TX 0
#define IBLND_WID_RDMA 1
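Note: a hedged sketch of the bit-stashing the comment above describes; the
driver's real helpers are kiblnd_ptr2wreqid()/kiblnd_wreqid2ptr(), shown here
in spirit. Descriptor alignment guarantees the low bits of the pointer are
zero, so they are free to carry the work-item type:

    #define IBLND_WID_MASK  3UL     /* low bits reserved for the type */

    static inline __u64 ptr2wreqid(void *ptr, int type)   /* illustrative */
    {
            unsigned long lptr = (unsigned long)ptr;

            return (__u64)(lptr | type);    /* low bits known to be clear */
    }

    static inline void *wreqid2ptr(__u64 wreqid)          /* illustrative */
    {
            return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
    }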
@@ -878,7 +850,6 @@ kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]);
}
-
static inline __u64
kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
@@ -928,23 +899,19 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
return ib_sg_dma_len(dev, sg);
}
-/* XXX We use KIBLND_CONN_PARAM(e) as writable buffer, it's not strictly
- * right because OFED1.2 defines it as const, to use it we have to add
- * (void *) cast to overcome "const" */
+/* XXX We use KIBLND_CONN_PARAM(e) as writable buffer, it's not strictly */
+/* right because OFED1.2 defines it as const, to use it we have to add */
+/* (void *) cast to overcome "const" */
#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
-
struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
kib_rdma_desc_t *rd);
struct ib_mr *kiblnd_find_dma_mr(kib_hca_dev_t *hdev,
__u64 addr, __u64 size);
void kiblnd_map_rx_descs(kib_conn_t *conn);
void kiblnd_unmap_rx_descs(kib_conn_t *conn);
-int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
- kib_rdma_desc_t *rd, int nfrags);
-void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index a23a6d956..260750354 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -40,6 +40,8 @@
#include "o2iblnd.h"
+static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
+
static void
kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
{
@@ -178,24 +180,28 @@ kiblnd_post_rx(kib_rx_t *rx, int credit)
rx->rx_nob = -1; /* flag posted */
+ /* NB: need an extra reference after ib_post_recv because we don't
+ * own this rx (and rx::rx_conn) anymore, LU-5678.
+ */
+ kiblnd_conn_addref(conn);
rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
- if (rc != 0) {
+ if (unlikely(rc != 0)) {
CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
rx->rx_nob = 0;
}
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
- return rc;
+ goto out;
- if (rc != 0) {
+ if (unlikely(rc != 0)) {
kiblnd_close_conn(conn, rc);
kiblnd_drop_rx(rx); /* No more posts for this rx */
- return rc;
+ goto out;
}
if (credit == IBLND_POSTRX_NO_CREDIT)
- return 0;
+ goto out;
spin_lock(&conn->ibc_lock);
if (credit == IBLND_POSTRX_PEER_CREDIT)
@@ -205,7 +211,9 @@ kiblnd_post_rx(kib_rx_t *rx, int credit)
spin_unlock(&conn->ibc_lock);
kiblnd_check_sends(conn);
- return 0;
+out:
+ kiblnd_conn_decref(conn);
+ return rc;
}
static kib_tx_t *
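Note: the kiblnd_post_rx() hunks above carry the LU-5678 fix. Once
ib_post_recv() succeeds, the rx (and through it the conn) can complete and be
released at any moment, so the function takes its own connection reference
for the remaining bookkeeping and drops it on every exit path. The shape, as
a sketch:

    kiblnd_conn_addref(conn);   /* pin: rx is not ours after the post */
    rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
    /* ... error handling and credit bookkeeping, all exiting via out: ... */
    out:
            kiblnd_conn_decref(conn);   /* matches the addref above */
            return rc;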
@@ -253,11 +261,10 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
}
if (tx->tx_status == 0) { /* success so far */
- if (status < 0) { /* failed? */
+ if (status < 0) /* failed? */
tx->tx_status = status;
- } else if (txtype == IBLND_MSG_GET_REQ) {
+ else if (txtype == IBLND_MSG_GET_REQ)
lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
- }
}
tx->tx_waiting = 0;
@@ -591,8 +598,7 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
return 0;
}
-void
-kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
+static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
{
kib_net_t *net = ni->ni_data;
@@ -610,9 +616,8 @@ kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
}
}
-int
-kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
- kib_rdma_desc_t *rd, int nfrags)
+static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
+ int nfrags)
{
kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
kib_net_t *net = ni->ni_data;
@@ -650,7 +655,6 @@ kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
return -EINVAL;
}
-
static int
kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
unsigned int niov, struct kvec *iov, int offset, int nob)
@@ -834,7 +838,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
/* close_conn will launch failover */
rc = -ENETDOWN;
} else {
- rc = ib_post_send(conn->ibc_cmid->qp, tx->tx_wrq, &bad_wrq);
+ rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &bad_wrq);
}
conn->ibc_last_send = jiffies;
@@ -1008,7 +1012,7 @@ kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
{
kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
- struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
+ struct ib_rdma_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
struct ib_mr *mr;
@@ -1027,12 +1031,12 @@ kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
memset(wrq, 0, sizeof(*wrq));
- wrq->next = NULL;
- wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
- wrq->sg_list = sge;
- wrq->num_sge = 1;
- wrq->opcode = IB_WR_SEND;
- wrq->send_flags = IB_SEND_SIGNALED;
+ wrq->wr.next = NULL;
+ wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
+ wrq->wr.sg_list = sge;
+ wrq->wr.num_sge = 1;
+ wrq->wr.opcode = IB_WR_SEND;
+ wrq->wr.send_flags = IB_SEND_SIGNALED;
tx->tx_nwrq++;
}
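Note: the tx-path hunks reflect the 4.4 "split struct ib_send_wr" rework:
RDMA-specific fields moved into struct ib_rdma_wr, which embeds the generic
WR as its first member, so the former wr.rdma.remote_addr/rkey accesses
become direct members and posting goes through the embedded part. Layout
sketch:

    struct ib_rdma_wr {
            struct ib_send_wr wr;   /* generic part: opcode, wr_id, sg_list, next, ... */
            u64 remote_addr;        /* peer address for RDMA READ/WRITE */
            u32 rkey;               /* remote memory key */
    };

    /* hence: */
    rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &bad_wrq);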
@@ -1044,7 +1048,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
kib_msg_t *ibmsg = tx->tx_msg;
kib_rdma_desc_t *srcrd = tx->tx_rd;
struct ib_sge *sge = &tx->tx_sge[0];
- struct ib_send_wr *wrq = &tx->tx_wrq[0];
+ struct ib_rdma_wr *wrq = &tx->tx_wrq[0], *next;
int rc = resid;
int srcidx;
int dstidx;
@@ -1090,16 +1094,17 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
sge->length = wrknob;
wrq = &tx->tx_wrq[tx->tx_nwrq];
+ next = wrq + 1;
- wrq->next = wrq + 1;
- wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
- wrq->sg_list = sge;
- wrq->num_sge = 1;
- wrq->opcode = IB_WR_RDMA_WRITE;
- wrq->send_flags = 0;
+ wrq->wr.next = &next->wr;
+ wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
+ wrq->wr.sg_list = sge;
+ wrq->wr.num_sge = 1;
+ wrq->wr.opcode = IB_WR_RDMA_WRITE;
+ wrq->wr.send_flags = 0;
- wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
- wrq->wr.rdma.rkey = kiblnd_rd_frag_key(dstrd, dstidx);
+ wrq->remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
+ wrq->rkey = kiblnd_rd_frag_key(dstrd, dstidx);
srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);
@@ -1422,6 +1427,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
kib_msg_t *ibmsg;
+ kib_rdma_desc_t *rd;
kib_tx_t *tx;
int nob;
int rc;
@@ -1465,16 +1471,14 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
}
ibmsg = tx->tx_msg;
-
+ rd = &ibmsg->ibm_u.get.ibgm_rd;
if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
- rc = kiblnd_setup_rd_iov(ni, tx,
- &ibmsg->ibm_u.get.ibgm_rd,
+ rc = kiblnd_setup_rd_iov(ni, tx, rd,
lntmsg->msg_md->md_niov,
lntmsg->msg_md->md_iov.iov,
0, lntmsg->msg_md->md_length);
else
- rc = kiblnd_setup_rd_kiov(ni, tx,
- &ibmsg->ibm_u.get.ibgm_rd,
+ rc = kiblnd_setup_rd_kiov(ni, tx, rd,
lntmsg->msg_md->md_niov,
lntmsg->msg_md->md_iov.kiov,
0, lntmsg->msg_md->md_length);
@@ -1485,7 +1489,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
return -EIO;
}
- nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[tx->tx_nfrags]);
+ nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[rd->rd_nfrags]);
ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
ibmsg->ibm_u.get.ibgm_hdr = *hdr;
@@ -1650,7 +1654,6 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
kib_msg_t *rxmsg = rx->rx_msg;
kib_conn_t *conn = rx->rx_conn;
kib_tx_t *tx;
- kib_msg_t *txmsg;
int nob;
int post_credit = IBLND_POSTRX_PEER_CREDIT;
int rc = 0;
@@ -1687,7 +1690,10 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
lnet_finalize(ni, lntmsg, 0);
break;
- case IBLND_MSG_PUT_REQ:
+ case IBLND_MSG_PUT_REQ: {
+ kib_msg_t *txmsg;
+ kib_rdma_desc_t *rd;
+
if (mlen == 0) {
lnet_finalize(ni, lntmsg, 0);
kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
@@ -1705,13 +1711,12 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
}
txmsg = tx->tx_msg;
+ rd = &txmsg->ibm_u.putack.ibpam_rd;
if (kiov == NULL)
- rc = kiblnd_setup_rd_iov(ni, tx,
- &txmsg->ibm_u.putack.ibpam_rd,
+ rc = kiblnd_setup_rd_iov(ni, tx, rd,
niov, iov, offset, mlen);
else
- rc = kiblnd_setup_rd_kiov(ni, tx,
- &txmsg->ibm_u.putack.ibpam_rd,
+ rc = kiblnd_setup_rd_kiov(ni, tx, rd,
niov, kiov, offset, mlen);
if (rc != 0) {
CERROR("Can't setup PUT sink for %s: %d\n",
@@ -1723,7 +1728,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
break;
}
- nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[tx->tx_nfrags]);
+ nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[rd->rd_nfrags]);
txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
@@ -1736,6 +1741,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
/* reposted buffer reserved for PUT_DONE */
post_credit = IBLND_POSTRX_NO_CREDIT;
break;
+ }
case IBLND_MSG_GET_REQ:
if (lntmsg != NULL) {
@@ -2148,6 +2154,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
unsigned long flags;
int rc;
struct sockaddr_in *peer_addr;
+
LASSERT(!in_interrupt());
/* cmid inherits 'context' from the corresponding listener id */
@@ -2163,6 +2170,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
if (*kiblnd_tunables.kib_require_priv_port &&
ntohs(peer_addr->sin_port) >= PROT_SOCK) {
__u32 ip = ntohl(peer_addr->sin_addr.s_addr);
+
CERROR("Peer's port (%pI4h:%hu) is not privileged\n",
&ip, ntohs(peer_addr->sin_port));
goto failed;
@@ -3227,7 +3235,7 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg)
* consuming my CQ I could be called after all completions have
* occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
* and this CQ is about to be destroyed so I NOOP. */
- kib_conn_t *conn = (kib_conn_t *)arg;
+ kib_conn_t *conn = arg;
struct kib_sched_info *sched = conn->ibc_sched;
unsigned long flags;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index b3d1b5d62..1d4e7efb5 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -135,7 +135,6 @@ static int dev_failover;
module_param(dev_failover, int, 0444);
MODULE_PARM_DESC(dev_failover, "HCA failover for bonding (0 off, 1 on, other values reserved)");
-
static int require_privileged_port;
module_param(require_privileged_port, int, 0644);
MODULE_PARM_DESC(require_privileged_port, "require privileged port when accepting connection");