/* Get user space parameters */
uar = &to_hr_ucontext(context)->uar;
} else {
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
+ ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
+ if (ret)
+ goto err_cq;
+
+ hr_cq->set_ci_db = hr_cq->db.db_record;
+ *hr_cq->set_ci_db = 0;
+ }
+
/* Init mtt table and write buffer address to mtt table */
ret = hns_roce_ib_alloc_cq_buf(hr_dev, &hr_cq->hr_buf,
cq_entries);
if (ret) {
dev_err(dev, "Failed to alloc_cq_buf.\n");
- goto err_cq;
+ goto err_db;
}
uar = &hr_dev->priv_uar;
hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
hr_cq->ib_cq.cqe);
+err_db:
+ if (!context && (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
+ hns_roce_free_db(hr_dev, &hr_cq->db);
+
err_cq:
kfree(hr_cq);
return ERR_PTR(ret);
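
The record doorbell turns the consumer-index update into a plain 32-bit store into DMA-coherent memory that the device reads, rather than an MMIO doorbell write. Below is a minimal, illustrative sketch of that allocation pattern; demo_db and demo_alloc_db are hypothetical names, and the real hns_roce_alloc_db() additionally manages page directories and per-page slot bookkeeping instead of handing a whole page to one CQ.

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct demo_db {
	__le32 *db_record;	/* CPU pointer the driver writes, e.g. hr_cq->set_ci_db */
	dma_addr_t dma;		/* bus address the hardware is told to read */
};

static int demo_alloc_db(struct device *dev, struct demo_db *db)
{
	void *page;

	/* One coherent page can hold many 4-byte doorbell records. */
	page = dma_alloc_coherent(dev, PAGE_SIZE, &db->dma, GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	db->db_record = page;	/* hand the first slot to this CQ */
	*db->db_record = 0;	/* mirrors *hr_cq->set_ci_db = 0 above */
	return 0;
}
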
/* Free the buffer of the CQ allocated in kernel space */
hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf,
ib_cq->cqe);
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
+ hns_roce_free_db(hr_dev, &hr_cq->db);
}
kfree(hr_cq);
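
Teardown mirrors the allocation. Continuing the hypothetical sketch above, the counterpart would simply return the coherent page; the real hns_roce_free_db() instead releases only this record's slot and keeps the shared page around for the other doorbell records in it.

static void demo_free_db(struct device *dev, struct demo_db *db)
{
	/* Give the whole page back; see the caveat about slot reuse above. */
	dma_free_coherent(dev, PAGE_SIZE, db->db_record, db->dma);
}
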
static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
- struct hns_roce_v2_cq_db cq_db;
-
- cq_db.byte_4 = 0;
- cq_db.parameter = 0;
-
- roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_TAG_M,
- V2_CQ_DB_BYTE_4_TAG_S, hr_cq->cqn);
- roce_set_field(cq_db.byte_4, V2_CQ_DB_BYTE_4_CMD_M,
- V2_CQ_DB_BYTE_4_CMD_S, HNS_ROCE_V2_CQ_DB_PTR);
-
- roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CONS_IDX_M,
- V2_CQ_DB_PARAMETER_CONS_IDX_S,
- cons_index & ((hr_cq->cq_depth << 1) - 1));
- roce_set_field(cq_db.parameter, V2_CQ_DB_PARAMETER_CMD_SN_M,
- V2_CQ_DB_PARAMETER_CMD_SN_S, 1);
-
- hns_roce_write64_k((__be32 *)&cq_db, hr_cq->cq_db_l);
-
+ /* Record doorbell: publish the low 24 bits of the consumer index in
+  * memory instead of ringing the doorbell register.
+  */
+ *hr_cq->set_ci_db = cons_index & 0xffffff;
}
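
With the record doorbell in place, publishing the consumer index is just the masked store above, and the device picks it up from memory. The following is a hedged sketch of a caller, loosely modelled on the v2 poll path; demo_poll_one() is a hypothetical stand-in for the driver's per-CQE parsing (hns_roce_v2_poll_one() in the real code), and locking and error handling are omitted.

#include <rdma/ib_verbs.h>
#include "hns_roce_device.h"	/* struct hns_roce_cq and its cons_index field */

/* Hypothetical helper: consume one CQE into @wc; returns 0 on success or
 * -EAGAIN when the CQ is empty.
 */
static int demo_poll_one(struct hns_roce_cq *hr_cq, struct ib_wc *wc);

static int demo_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
			struct ib_wc *wc)
{
	int npolled;

	for (npolled = 0; npolled < num_entries; ++npolled) {
		if (demo_poll_one(hr_cq, wc + npolled))
			break;
		++hr_cq->cons_index;
	}

	/* Publish the new consumer index through the record doorbell. */
	if (npolled)
		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);

	return npolled;
}
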
static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,