author     André Fabian Silva Delgado <emulatorman@parabola.nu>   2016-10-20 00:10:27 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>   2016-10-20 00:10:27 -0300
commit     d0b2f91bede3bd5e3d24dd6803e56eee959c1797
tree       7fee4ab0509879c373c4f2cbd5b8a5be5b4041ee /include/rdma
parent     e914f8eb445e8f74b00303c19c2ffceaedd16a05
Linux-libre 4.8.2-gnupck-4.8.2-gnu
Diffstat (limited to 'include/rdma')
-rw-r--r--  include/rdma/ib_sa.h           13
-rw-r--r--  include/rdma/ib_verbs.h       130
-rw-r--r--  include/rdma/opa_port_info.h   16
-rw-r--r--  include/rdma/rdma_cm.h          4
-rw-r--r--  include/rdma/rdma_vt.h          7
-rw-r--r--  include/rdma/rdmavt_mr.h        1
-rw-r--r--  include/rdma/rdmavt_qp.h       92
7 files changed, 219 insertions, 44 deletions
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 384041669..5ee7aab95 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -94,6 +94,19 @@ enum ib_sa_selector {
 	IB_SA_BEST = 3
 };
 
+/*
+ * There are 4 types of join states:
+ * FullMember, NonMember, SendOnlyNonMember, SendOnlyFullMember.
+ * The order corresponds to JoinState bits in MCMemberRecord.
+ */
+enum ib_sa_mc_join_states {
+	FULLMEMBER_JOIN,
+	NONMEMBER_JOIN,
+	SENDONLY_NONMEBER_JOIN,
+	SENDONLY_FULLMEMBER_JOIN,
+	NUM_JOIN_MEMBERSHIP_TYPES,
+};
+
 #define IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT	BIT(12)
 
 /*
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e694f02d4..e1f96737c 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -562,6 +562,7 @@ enum ib_event_type {
 	IB_EVENT_QP_LAST_WQE_REACHED,
 	IB_EVENT_CLIENT_REREGISTER,
 	IB_EVENT_GID_CHANGE,
+	IB_EVENT_WQ_FATAL,
 };
 
 const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
@@ -572,6 +573,7 @@ struct ib_event {
 		struct ib_cq	*cq;
 		struct ib_qp	*qp;
 		struct ib_srq	*srq;
+		struct ib_wq	*wq;
 		u8		port_num;
 	} element;
 	enum ib_event_type	event;
@@ -1015,6 +1017,7 @@ struct ib_qp_init_attr {
 	 * Only needed for special QP types, or when using the RW API.
 	 */
 	u8			port_num;
+	struct ib_rwq_ind_table *rwq_ind_tbl;
 };
 
 struct ib_qp_open_attr {
@@ -1323,6 +1326,8 @@ struct ib_ucontext {
 	struct list_head	ah_list;
 	struct list_head	xrcd_list;
 	struct list_head	rule_list;
+	struct list_head	wq_list;
+	struct list_head	rwq_ind_tbl_list;
 	int			closing;
 
 	struct pid		*tgid;
@@ -1428,6 +1433,63 @@ struct ib_srq {
 	} ext;
 };
 
+enum ib_wq_type {
+	IB_WQT_RQ
+};
+
+enum ib_wq_state {
+	IB_WQS_RESET,
+	IB_WQS_RDY,
+	IB_WQS_ERR
+};
+
+struct ib_wq {
+	struct ib_device	*device;
+	struct ib_uobject	*uobject;
+	void			*wq_context;
+	void			(*event_handler)(struct ib_event *, void *);
+	struct ib_pd		*pd;
+	struct ib_cq		*cq;
+	u32			wq_num;
+	enum ib_wq_state	state;
+	enum ib_wq_type		wq_type;
+	atomic_t		usecnt;
+};
+
+struct ib_wq_init_attr {
+	void			*wq_context;
+	enum ib_wq_type		wq_type;
+	u32			max_wr;
+	u32			max_sge;
+	struct ib_cq		*cq;
+	void			(*event_handler)(struct ib_event *, void *);
+};
+
+enum ib_wq_attr_mask {
+	IB_WQ_STATE	= 1 << 0,
+	IB_WQ_CUR_STATE	= 1 << 1,
+};
+
+struct ib_wq_attr {
+	enum ib_wq_state	wq_state;
+	enum ib_wq_state	curr_wq_state;
+};
+
+struct ib_rwq_ind_table {
+	struct ib_device	*device;
+	struct ib_uobject	*uobject;
+	atomic_t		usecnt;
+	u32			ind_tbl_num;
+	u32			log_ind_tbl_size;
+	struct ib_wq		**ind_tbl;
+};
+
+struct ib_rwq_ind_table_init_attr {
+	u32			log_ind_tbl_size;
+	/* Each entry is a pointer to Receive Work Queue */
+	struct ib_wq		**ind_tbl;
+};
+
 /*
  * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
  * @max_read_sge:  Maximum SGE elements per RDMA READ request.
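A consumer's-eye view of the WQ objects above: the matching verbs (ib_create_wq(), ib_modify_wq(), ib_create_rwq_ind_table()) are declared in a later hunk of this file. The following is a minimal sketch, assuming a caller-provided PD and two CQs, with IS_ERR() unwinding trimmed for brevity; the helper name is illustrative only.

    /* Sketch only: create two receive WQs on separate CQs, move them
     * from their initial RESET state to RDY, and group them into a
     * two-entry RSS indirection table.
     */
    static struct ib_rwq_ind_table *rss_setup(struct ib_pd *pd,
                                              struct ib_cq *cq0,
                                              struct ib_cq *cq1)
    {
            struct ib_wq_init_attr wq_attr = {
                    .wq_type = IB_WQT_RQ,
                    .max_wr  = 256,
                    .max_sge = 1,
                    .cq      = cq0,
            };
            struct ib_wq_attr rdy = { .wq_state = IB_WQS_RDY };
            struct ib_rwq_ind_table_init_attr ind_attr;
            struct ib_wq **wqs;

            /* the array lives on in the returned table's ind_tbl */
            wqs = kcalloc(2, sizeof(*wqs), GFP_KERNEL);
            if (!wqs)
                    return ERR_PTR(-ENOMEM);

            wqs[0] = ib_create_wq(pd, &wq_attr);
            wq_attr.cq = cq1;
            wqs[1] = ib_create_wq(pd, &wq_attr);

            ib_modify_wq(wqs[0], &rdy, IB_WQ_STATE);
            ib_modify_wq(wqs[1], &rdy, IB_WQ_STATE);

            ind_attr.log_ind_tbl_size = 1;  /* 1 << 1 == 2 entries */
            ind_attr.ind_tbl = wqs;
            return ib_create_rwq_ind_table(pd->device, &ind_attr);
    }

The resulting table would then be plugged into the new ib_qp_init_attr.rwq_ind_tbl field when creating an RSS-capable QP.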
@@ -1456,6 +1518,7 @@ struct ib_qp {
 	u32			max_write_sge;
 	u32			max_read_sge;
 	enum ib_qp_type		qp_type;
+	struct ib_rwq_ind_table *rwq_ind_tbl;
 };
 
 struct ib_mr {
@@ -1512,6 +1575,7 @@ enum ib_flow_spec_type {
 	IB_FLOW_SPEC_IB		= 0x22,
 	/* L3 header*/
 	IB_FLOW_SPEC_IPV4	= 0x30,
+	IB_FLOW_SPEC_IPV6	= 0x31,
 	/* L4 headers*/
 	IB_FLOW_SPEC_TCP	= 0x40,
 	IB_FLOW_SPEC_UDP	= 0x41
@@ -1573,6 +1637,18 @@ struct ib_flow_spec_ipv4 {
 	struct ib_flow_ipv4_filter mask;
 };
 
+struct ib_flow_ipv6_filter {
+	u8	src_ip[16];
+	u8	dst_ip[16];
+};
+
+struct ib_flow_spec_ipv6 {
+	enum ib_flow_spec_type	type;
+	u16			size;
+	struct ib_flow_ipv6_filter val;
+	struct ib_flow_ipv6_filter mask;
+};
+
 struct ib_flow_tcp_udp_filter {
 	__be16	dst_port;
 	__be16	src_port;
@@ -1594,6 +1670,7 @@ union ib_flow_spec {
 	struct ib_flow_spec_ib		ib;
 	struct ib_flow_spec_ipv4	ipv4;
 	struct ib_flow_spec_tcp_udp	tcp_udp;
+	struct ib_flow_spec_ipv6	ipv6;
 };
 
 struct ib_flow_attr {
@@ -1927,7 +2004,18 @@ struct ib_device {
 						  struct ifla_vf_stats *stats);
 	int			   (*set_vf_guid)(struct ib_device *device,
 						  int vf, u8 port, u64 guid,
 						  int type);
-
+	struct ib_wq *		   (*create_wq)(struct ib_pd *pd,
+						struct ib_wq_init_attr *init_attr,
+						struct ib_udata *udata);
+	int			   (*destroy_wq)(struct ib_wq *wq);
+	int			   (*modify_wq)(struct ib_wq *wq,
+						struct ib_wq_attr *attr,
+						u32 wq_attr_mask,
+						struct ib_udata *udata);
+	struct ib_rwq_ind_table *  (*create_rwq_ind_table)(struct ib_device *device,
+							    struct ib_rwq_ind_table_init_attr *init_attr,
+							    struct ib_udata *udata);
+	int			   (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
 	struct ib_dma_mapping_ops   *dma_ops;
 
 	struct module               *owner;
@@ -1962,6 +2050,7 @@ struct ib_device {
 	 * in fast paths.
 	 */
 	int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *);
+	void (*get_dev_fw_str)(struct ib_device *, char *str, size_t str_len);
 };
 
 struct ib_client {
@@ -1997,6 +2086,8 @@ struct ib_client {
 struct ib_device *ib_alloc_device(size_t size);
 void ib_dealloc_device(struct ib_device *device);
 
+void ib_get_device_fw_str(struct ib_device *device, char *str, size_t str_len);
+
 int ib_register_device(struct ib_device *device,
 		       int (*port_callback)(struct ib_device *,
 					    u8, struct kobject *));
@@ -2024,22 +2115,17 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata,
 				       size_t len)
 {
 	const void __user *p = udata->inbuf + offset;
-	bool ret = false;
+	bool ret;
 	u8 *buf;
 
 	if (len > USHRT_MAX)
 		return false;
 
-	buf = kmalloc(len, GFP_KERNEL);
-	if (!buf)
+	buf = memdup_user(p, len);
+	if (IS_ERR(buf))
 		return false;
 
-	if (copy_from_user(buf, p, len))
-		goto free;
-
 	ret = !memchr_inv(buf, 0, len);
-
-free:
 	kfree(buf);
 	return ret;
 }
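A usage sketch for the new IPv6 flow spec added in the hunks above: like the existing IPv4 and TCP/UDP specs, it rides in-line directly after struct ib_flow_attr and is handed to the existing ib_create_flow() verb. The helper below is hypothetical; it assumes a steering-capable QP on port 1 and matches on the destination address only.

    /* Sketch only: steer packets destined to one IPv6 address (16 raw
     * bytes) to the given QP.  The attr + spec pair is kept contiguous,
     * matching how the core walks the spec list.
     */
    static struct ib_flow *steer_ipv6_dst(struct ib_qp *qp, const u8 *dst_ip)
    {
            struct {
                    struct ib_flow_attr      attr;
                    struct ib_flow_spec_ipv6 ipv6;
            } *flow;
            struct ib_flow *handle;

            flow = kzalloc(sizeof(*flow), GFP_KERNEL);
            if (!flow)
                    return ERR_PTR(-ENOMEM);

            flow->attr.type         = IB_FLOW_ATTR_NORMAL;
            flow->attr.size         = sizeof(*flow);
            flow->attr.num_of_specs = 1;
            flow->attr.port         = 1;

            flow->ipv6.type = IB_FLOW_SPEC_IPV6;
            flow->ipv6.size = sizeof(flow->ipv6);
            memcpy(flow->ipv6.val.dst_ip, dst_ip, 16);
            memset(flow->ipv6.mask.dst_ip, 0xff, 16);  /* exact match */

            handle = ib_create_flow(qp, &flow->attr, IB_FLOW_DOMAIN_USER);
            kfree(flow);  /* drivers copy what they need during create */
            return handle;
    }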
@@ -2825,19 +2911,19 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
 static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
 					  void *cpu_addr, size_t size,
 					  enum dma_data_direction direction,
-					  struct dma_attrs *attrs)
+					  unsigned long dma_attrs)
 {
 	return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
-				    direction, attrs);
+				    direction, dma_attrs);
 }
 
 static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
 					     u64 addr, size_t size,
 					     enum dma_data_direction direction,
-					     struct dma_attrs *attrs)
+					     unsigned long dma_attrs)
 {
 	return dma_unmap_single_attrs(dev->dma_device, addr, size,
-				      direction, attrs);
+				      direction, dma_attrs);
 }
 
 /**
@@ -2912,17 +2998,18 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
 static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
 				      struct scatterlist *sg, int nents,
 				      enum dma_data_direction direction,
-				      struct dma_attrs *attrs)
+				      unsigned long dma_attrs)
 {
-	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+				dma_attrs);
 }
 
 static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
 					 struct scatterlist *sg, int nents,
 					 enum dma_data_direction direction,
-					 struct dma_attrs *attrs)
+					 unsigned long dma_attrs)
 {
-	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+	dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
 }
 
 /**
  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
@@ -3173,6 +3260,15 @@ int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
 struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port,
 					    u16 pkey, const union ib_gid *gid,
 					    const struct sockaddr *addr);
+struct ib_wq *ib_create_wq(struct ib_pd *pd,
+			   struct ib_wq_init_attr *init_attr);
+int ib_destroy_wq(struct ib_wq *wq);
+int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
+		 u32 wq_attr_mask);
+struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
+						 struct ib_rwq_ind_table_init_attr *
+						 wq_ind_table_init_attr);
+int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
 
 int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
 		 unsigned int *sg_offset, unsigned int page_size);
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
index 2b95c2c33..9303e0e4f 100644
--- a/include/rdma/opa_port_info.h
+++ b/include/rdma/opa_port_info.h
@@ -33,11 +33,6 @@
 #if !defined(OPA_PORT_INFO_H)
 #define OPA_PORT_INFO_H
 
-/* Temporary until HFI driver is updated */
-#ifndef USE_PI_LED_ENABLE
-#define USE_PI_LED_ENABLE 0
-#endif
-
 #define OPA_PORT_LINK_MODE_NOP	0		/* No change */
 #define OPA_PORT_LINK_MODE_OPA	4		/* Port mode is OPA */
 
@@ -274,23 +269,12 @@ enum port_info_field_masks {
 	OPA_PI_MASK_MTU_CAP                       = 0x0F,
 };
 
-#if USE_PI_LED_ENABLE
 struct opa_port_states {
 	u8     reserved;
 	u8     ledenable_offlinereason;	/* 1 res, 1 bit, 6 bits */
 	u8     reserved2;
 	u8     portphysstate_portstate;	/* 4 bits, 4 bits */
 };
 #define PI_LED_ENABLE_SUP 1
-#else
-struct opa_port_states {
-	u8     reserved;
-	u8     offline_reason;		/* 2 res, 6 bits */
-	u8     reserved2;
-	u8     portphysstate_portstate;	/* 4 bits, 4 bits */
-};
-#define PI_LED_ENABLE_SUP 0
-#endif
 
 struct opa_port_state_info {
 	struct opa_port_states port_states;
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index afe44fde7..81fb1d15e 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -333,11 +333,13 @@ int rdma_disconnect(struct rdma_cm_id *id);
  *   address.
  * @id: Communication identifier associated with the request.
  * @addr: Multicast address identifying the group to join.
+ * @join_state: Multicast JoinState bitmap requested by port.
+ *   Bitmap is based on IB_SA_MCMEMBER_REC_JOIN_STATE bits.
  * @context: User-defined context associated with the join request, returned
  * to the user through the private_data pointer in multicast events.
  */
 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
-			void *context);
+			u8 join_state, void *context);
 
 /**
  * rdma_leave_multicast - Leave the multicast group specified by the given
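Note that the extra rdma_join_multicast() argument is a JoinState bitmap rather than an enum value: callers pass BIT() of one of the ib_sa_mc_join_states values from the ib_sa.h hunk above. A sketch of a send-only full-member join (which lets datagram senders avoid allocating multicast receive resources), assuming an already-bound cm_id:

    /* Sketch only: join as a send-only full member.  A traditional
     * receive-capable join would pass BIT(FULLMEMBER_JOIN) instead.
     */
    static int join_sendonly(struct rdma_cm_id *id, struct sockaddr *mcast,
                             void *context)
    {
            return rdma_join_multicast(id, mcast,
                                       BIT(SENDONLY_FULLMEMBER_JOIN), context);
    }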
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 9c9a27d42..e31502107 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -158,6 +158,7 @@ struct rvt_driver_params {
 	u32 max_mad_size;
 	u8 qos_shift;
 	u8 max_rdma_atomic;
+	u8 reserved_operations;
 };
 
 /* Protection domain */
@@ -351,6 +352,9 @@ struct rvt_dev_info {
 	/* Driver specific properties */
 	struct rvt_driver_params dparms;
 
+	/* post send table */
+	const struct rvt_operation_params *post_parms;
+
 	struct rvt_mregion __rcu *dma_mr;
 	struct rvt_lkey_table lkey_table;
 
@@ -484,6 +488,9 @@ void rvt_unregister_device(struct rvt_dev_info *rvd);
 int rvt_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
 int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
 		  int port_index, u16 *pkey_table);
+int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
+		    int access);
+int rvt_invalidate_rkey(struct rvt_qp *qp, u32 rkey);
 int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
 		u32 len, u64 vaddr, u32 rkey, int acc);
 int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h
index 5edffdca8..6b3c6c8b6 100644
--- a/include/rdma/rdmavt_mr.h
+++ b/include/rdma/rdmavt_mr.h
@@ -81,6 +81,7 @@ struct rvt_mregion {
 	u32 mapsz;		/* size of the map array */
 	u8  page_shift;		/* 0 - non unform/non powerof2 sizes */
 	u8  lkey_published;	/* in global table */
+	atomic_t lkey_invalid;	/* true if current lkey is invalid */
 	struct completion comp;	/* complete when refcount goes to zero */
 	atomic_t refcount;
 	struct rvt_segarray *map[0];	/* the segments */
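The new dparms.reserved_operations and post_parms fields let a rdmavt driver describe its post-send behavior as data, per work-request opcode (struct rvt_operation_params itself is defined in the rdmavt_qp.h hunk that follows). A sketch of what a driver's table might contain; the opcode selection below is illustrative, not any particular driver's:

    /* Sketch only: a table-driven post send description, assigned to
     * rdi->post_parms at driver registration time.  Indexed by ib_wr_opcode.
     */
    static const struct rvt_operation_params sketch_post_parms[RVT_OPERATION_MAX] = {
            [IB_WR_RDMA_WRITE] = {
                    .length = sizeof(struct ib_rdma_wr),
                    .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
            },
            [IB_WR_ATOMIC_CMP_AND_SWP] = {
                    .length = sizeof(struct ib_atomic_wr),
                    .qpt_support = BIT(IB_QPT_RC),
                    .flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
            },
            [IB_WR_REG_MR] = {
                    .length = sizeof(struct ib_reg_wr),
                    .qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
                    .flags = RVT_OPERATION_LOCAL,  /* handled without the send engine */
            },
    };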
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 6d23b8794..bd34d0b56 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -145,6 +145,12 @@
 	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
 
 /*
+ * Internal send flags
+ */
+#define RVT_SEND_RESERVE_USED		IB_SEND_RESERVED_START
+#define RVT_SEND_COMPLETION_ONLY	(IB_SEND_RESERVED_START << 1)
+
+/*
  * Send work request queue entry.
  * The size of the sg_list is determined when the QP is created and stored
  * in qp->s_max_sge.
@@ -216,23 +222,43 @@ struct rvt_mmap_info {
  * to send a RDMA read response or atomic operation.
  */
 struct rvt_ack_entry {
-	u8 opcode;
-	u8 sent;
+	struct rvt_sge rdma_sge;
+	u64 atomic_data;
 	u32 psn;
 	u32 lpsn;
-	union {
-		struct rvt_sge rdma_sge;
-		u64 atomic_data;
-	};
+	u8 opcode;
+	u8 sent;
 };
 
 #define	RC_QP_SCALING_INTERVAL	5
 
-/*
- * Variables prefixed with s_ are for the requester (sender).
- * Variables prefixed with r_ are for the responder (receiver).
- * Variables prefixed with ack_ are for responder replies.
+#define RVT_OPERATION_PRIV        0x00000001
+#define RVT_OPERATION_ATOMIC      0x00000002
+#define RVT_OPERATION_ATOMIC_SGE  0x00000004
+#define RVT_OPERATION_LOCAL       0x00000008
+#define RVT_OPERATION_USE_RESERVE 0x00000010
+
+#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)
+
+/**
+ * rvt_operation_params - op table entry
+ * @length - the length to copy into the swqe entry
+ * @qpt_support - a bit mask indicating QP type support
+ * @flags - RVT_OPERATION flags (see above)
  *
+ * This supports table driven post send so that
+ * the driver can have differing and potentially
+ * different sets of operations.
+ *
+ **/
+
+struct rvt_operation_params {
+	size_t length;
+	u32 qpt_support;
+	u32 flags;
+};
+
+/*
  * Common variables are protected by both r_rq.lock and s_lock in that order
  * which only happens in modify_qp() or changing the QP 'state'.
  */
@@ -307,6 +333,7 @@ struct rvt_qp {
 	u32 s_next_psn;         /* PSN for next request */
 	u32 s_avail;            /* number of entries avail */
 	u32 s_ssn;              /* SSN of tail entry */
+	atomic_t s_reserved_used; /* reserved entries in use */
 
 	spinlock_t s_lock ____cacheline_aligned_in_smp;
 	u32 s_flags;
@@ -343,6 +370,8 @@ struct rvt_qp {
 	struct rvt_sge_state s_ack_rdma_sge;
 	struct timer_list s_timer;
 
+	atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */
+
 	/*
 	 * This sge list MUST be last. Do not add anything below here.
 	 */
@@ -436,6 +465,49 @@ static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
 		  rq->max_sge * sizeof(struct ib_sge)) * n);
 }
 
+/**
+ * rvt_qp_wqe_reserve - reserve operation
+ * @qp - the rvt qp
+ * @wqe - the send wqe
+ *
+ * This routine is used in post send to record
+ * a wqe relative reserved operation use.
+ */
+static inline void rvt_qp_wqe_reserve(
+	struct rvt_qp *qp,
+	struct rvt_swqe *wqe)
+{
+	wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
+	atomic_inc(&qp->s_reserved_used);
+}
+
+/**
+ * rvt_qp_wqe_unreserve - clean reserved operation
+ * @qp - the rvt qp
+ * @wqe - the send wqe
+ *
+ * This decrements the reserve use count.
+ *
+ * This call MUST precede the change to
+ * s_last to ensure that post send sees a stable
+ * s_avail.
+ *
+ * An smp_mb__after_atomic() is used to ensure
+ * the compiler does not juggle the order of the s_last
+ * ring index and the decrementing of s_reserved_used.
+ */
+static inline void rvt_qp_wqe_unreserve(
+	struct rvt_qp *qp,
+	struct rvt_swqe *wqe)
+{
+	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
+		wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
+		atomic_dec(&qp->s_reserved_used);
+		/* ensure no compiler re-order up to s_last change */
+		smp_mb__after_atomic();
+	}
+}
+
 extern const int ib_rvt_state_ops[];
 
 struct rvt_dev_info;
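A sketch of how the reserve accounting above is intended to flow through a driver's send path: post send consumes a reserved slot when the operation's table entry carries RVT_OPERATION_USE_RESERVE, and the completion path releases it before s_last advances (the ordering the unreserve helper's barrier protects). Function names here are hypothetical:

    /* Sketch only: reserved-operation bracketing in a rdmavt driver. */
    static void sketch_post_one(struct rvt_qp *qp, struct rvt_swqe *wqe,
                                const struct rvt_operation_params *op)
    {
            if (op->flags & RVT_OPERATION_USE_RESERVE)
                    rvt_qp_wqe_reserve(qp, wqe);  /* consume a reserved entry */
            /* ... hand the wqe to the send engine ... */
    }

    static void sketch_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe)
    {
            rvt_qp_wqe_unreserve(qp, wqe);  /* no-op unless the wqe was reserved */
            /* ... only now advance qp->s_last and generate the completion ... */
    }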