Skip to content

Commit 5574ef9

Browse files
committed
libxscale: Add support for qp management
This patch adds support for the following QP management verbs: 1. create_qp 2. query_qp 3. modify_qp 4. destroy_qp Signed-off-by: Tian Xin <[email protected]> Signed-off-by: Wei Honggang <[email protected]> Signed-off-by: Zhao Qianwei <[email protected]> Signed-off-by: Li Qiang <[email protected]> Signed-off-by: Yan Lei <[email protected]>
1 parent 3448bf3 commit 5574ef9

File tree

5 files changed

+767
-10
lines changed

5 files changed

+767
-10
lines changed

providers/xscale/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@ rdma_provider(xscale
22
xscale.c
33
verbs.c
44
cq.c
5+
qp.c
56
xsc_hsi.c
67
buf.c
78
)

providers/xscale/cq.c

Lines changed: 62 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -76,16 +76,6 @@ static inline u8 xsc_get_cqe_opcode(struct xsc_context *ctx,
7676
return xsc_msg_opcode[msg_opcode][cqe->type][cqe->with_immdt];
7777
}
7878

79-
struct xsc_qp *xsc_find_qp(struct xsc_context *ctx, u32 qpn)
80-
{
81-
int tind = qpn >> XSC_QP_TABLE_SHIFT;
82-
83-
if (ctx->qp_table[tind].refcnt)
84-
return ctx->qp_table[tind].table[qpn & XSC_QP_TABLE_MASK];
85-
else
86-
return NULL;
87-
}
88-
8979
static inline int get_qp_ctx(struct xsc_context *xctx,
9080
struct xsc_resource **cur_rsc,
9181
u32 qpn) ALWAYS_INLINE;
@@ -519,3 +509,65 @@ void xsc_free_cq_buf(struct xsc_context *ctx, struct xsc_buf *buf)
519509
{
520510
return xsc_free_buf(buf);
521511
}
512+
513+
/*
 * Remove all CQEs belonging to QP @qpn from @cq by compacting the CQ ring.
 * Caller must hold cq->lock (see xsc_cq_clean() for the locked wrapper).
 * No-op for a NULL cq or a CQ owned by the DV (direct-verbs) path.
 */
void __xsc_cq_clean(struct xsc_cq *cq, u32 qpn)
{
	u32 prod_index;
	int nfreed = 0; /* number of CQEs dropped so far during the sweep */
	void *cqe, *dest;

	if (!cq || cq->flags & XSC_CQ_FLAGS_DV_OWNED)
		return;
	xsc_dbg(to_xctx(cq->verbs_cq.cq_ex.context)->dbg_fp, XSC_DBG_CQ, "\n");

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from. It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->cons_index; get_sw_cqe(cq, prod_index);
	     ++prod_index)
		/* stop after at most one full ring of entries */
		if (prod_index == cq->cons_index + cq->verbs_cq.cq_ex.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int)(--prod_index) - (int)cq->cons_index >= 0) {
		u16 qp_id_combined;
		u32 qp_id;

		/* ring size is a power of two, so mask == index wrap */
		cqe = get_cqe(cq, prod_index & (cq->verbs_cq.cq_ex.cqe - 1));
		/*
		 * NOTE(review): reads a little-endian u16 at byte offset 1
		 * of the CQE and masks to 15 bits to get the owning QP id.
		 * This assumes the HW CQE layout places the qp id there
		 * (unaligned access) — TODO confirm against xsc_hsi.h.
		 */
		qp_id_combined = __le16_to_cpu(*(u16 *)((void *)cqe + 1));
		qp_id = qp_id_combined & 0x7fff;
		if (qpn == qp_id) {
			/* entry belongs to the dying QP: drop it */
			++nfreed;
		} else if (nfreed) {
			/* slide surviving entry forward over the freed gap */
			dest = get_cqe(cq,
				       (prod_index + nfreed) &
				       (cq->verbs_cq.cq_ex.cqe - 1));
			memcpy(dest, cqe, cq->cqe_sz);
		}
	}

	if (nfreed) {
		cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		udma_to_device_barrier();
		update_cons_index(cq);
	}
}
566+
567+
/*
 * Locked front-end for __xsc_cq_clean(): scrub every CQE that belongs
 * to QP @qpn from @cq while holding the CQ spinlock.
 */
void xsc_cq_clean(struct xsc_cq *cq, uint32_t qpn)
{
	xsc_spin_lock(&cq->lock);

	__xsc_cq_clean(cq, qpn);

	xsc_spin_unlock(&cq->lock);
}
573+

providers/xscale/qp.c

Lines changed: 110 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,110 @@
1+
// SPDX-License-Identifier: GPL-2.0
2+
/*
3+
* Copyright (c) 2021 - 2022, Shanghai Yunsilicon Technology Co., Ltd.
4+
* All rights reserved.
5+
*/
6+
7+
#include <config.h>
8+
9+
#include <stdlib.h>
10+
#include <pthread.h>
11+
#include <string.h>
12+
#include <errno.h>
13+
#include <stdio.h>
14+
#include <util/compiler.h>
15+
16+
#include "xscale.h"
17+
#include "xsc_hsi.h"
18+
19+
/*
 * Look up the xsc_qp registered under QP number @qpn in the context's
 * two-level QP table. Returns NULL when the containing table segment
 * has never been populated (refcnt == 0).
 */
struct xsc_qp *xsc_find_qp(struct xsc_context *ctx, uint32_t qpn)
{
	int tind = qpn >> XSC_QP_TABLE_SHIFT;

	/* Unpopulated segment: nothing stored under this qpn. */
	if (!ctx->qp_table[tind].refcnt)
		return NULL;

	return ctx->qp_table[tind].table[qpn & XSC_QP_TABLE_MASK];
}
28+
29+
/*
 * Register @qp under QP number @qpn in the context's two-level QP table,
 * allocating the second-level segment on first use.
 *
 * Returns 0 on success, -1 when the segment allocation fails (table
 * state is left unchanged in that case).
 */
int xsc_store_qp(struct xsc_context *ctx, uint32_t qpn, struct xsc_qp *qp)
{
	int tind = qpn >> XSC_QP_TABLE_SHIFT;
	struct xsc_qp **tbl = ctx->qp_table[tind].table;

	/* Lazily allocate the segment the first time it is referenced. */
	if (!ctx->qp_table[tind].refcnt) {
		tbl = calloc(XSC_QP_TABLE_MASK + 1, sizeof(*tbl));
		if (!tbl)
			return -1;
		ctx->qp_table[tind].table = tbl;
	}

	ctx->qp_table[tind].refcnt++;
	tbl[qpn & XSC_QP_TABLE_MASK] = qp;
	return 0;
}
44+
45+
/*
 * Drop the registration of QP number @qpn from the context's two-level
 * QP table. When the last reference in the segment goes away, the
 * second-level table is freed.
 *
 * Fix: also reset the freed table pointer to NULL. The original left a
 * dangling pointer in ctx->qp_table[tind].table after free(); clearing
 * it defends against use-after-free should the slot be read before
 * xsc_store_qp() repopulates it.
 */
void xsc_clear_qp(struct xsc_context *ctx, uint32_t qpn)
{
	int tind = qpn >> XSC_QP_TABLE_SHIFT;

	if (!--ctx->qp_table[tind].refcnt) {
		/* Last user of this segment: release it and clear the slot. */
		free(ctx->qp_table[tind].table);
		ctx->qp_table[tind].table = NULL;
	} else {
		ctx->qp_table[tind].table[qpn & XSC_QP_TABLE_MASK] = NULL;
	}
}
54+
55+
/*
 * Maintain the per-CQ error-state QP bookkeeping across a QP state change.
 *
 * Each CQ keeps an err_state_qp_list of QPs currently in the ERR state.
 * Transitions handled:
 *  - ERR -> non-ERR: remove this QP's nodes from both the recv and send
 *    CQ lists (freeing them) and return immediately.
 *  - non-ERR -> ERR: allocate and append one node per attached CQ
 *    (recv first, then send), tagging each with the QP id and direction.
 *
 * Returns 0 on success, ENOMEM (positive) if a node allocation fails.
 * NOTE(review): on the non-ERR -> ERR path, if the send-CQ allocation
 * fails after the recv-CQ node was already queued, the recv node is left
 * on its list — presumably harmless since the QP really is in ERR, but
 * worth confirming.
 */
int xsc_err_state_qp(struct ibv_qp *qp, enum ibv_qp_state cur_state,
		     enum ibv_qp_state state)
{
	struct xsc_err_state_qp_node *tmp, *err_rq_node, *err_sq_node;
	struct xsc_qp *xqp = to_xqp(qp);
	int ret = 0;

	xsc_dbg(to_xctx(qp->context)->dbg_fp, XSC_DBG_QP,
		"modify qp: qpid %d, cur_qp_state %d, qp_state %d\n",
		xqp->rsc.rsn, cur_state, state);
	if (cur_state == IBV_QPS_ERR && state != IBV_QPS_ERR) {
		/* Leaving ERR: purge this QP's nodes from the recv CQ list. */
		if (qp->recv_cq) {
			list_for_each_safe(&to_xcq(qp->recv_cq)->err_state_qp_list,
					   err_rq_node, tmp, entry) {
				if (err_rq_node->qp_id == xqp->rsc.rsn) {
					list_del(&err_rq_node->entry);
					free(err_rq_node);
				}
			}
		}

		/* ... and from the send CQ list. */
		if (qp->send_cq) {
			list_for_each_safe(&to_xcq(qp->send_cq)->err_state_qp_list,
					   err_sq_node, tmp, entry) {
				if (err_sq_node->qp_id == xqp->rsc.rsn) {
					list_del(&err_sq_node->entry);
					free(err_sq_node);
				}
			}
		}
		return ret;
	}

	if (cur_state != IBV_QPS_ERR && state == IBV_QPS_ERR) {
		/* Entering ERR: enqueue a tracking node on the recv CQ. */
		if (qp->recv_cq) {
			err_rq_node = calloc(1, sizeof(*err_rq_node));
			if (!err_rq_node)
				return ENOMEM;
			err_rq_node->qp_id = xqp->rsc.rsn;
			err_rq_node->is_sq = false;
			list_add_tail(&to_xcq(qp->recv_cq)->err_state_qp_list,
				      &err_rq_node->entry);
		}

		/* ... and one on the send CQ, marked as SQ-side. */
		if (qp->send_cq) {
			err_sq_node = calloc(1, sizeof(*err_sq_node));
			if (!err_sq_node)
				return ENOMEM;
			err_sq_node->qp_id = xqp->rsc.rsn;
			err_sq_node->is_sq = true;
			list_add_tail(&to_xcq(qp->send_cq)->err_state_qp_list,
				      &err_sq_node->entry);
		}
	}
	return ret;
}

0 commit comments

Comments
 (0)