ib_mthca-y := mthca_main.o mthca_cmd.o mthca_profile.o mthca_reset.o \
mthca_allocator.o mthca_eq.o mthca_pd.o mthca_cq.o \
mthca_mr.o mthca_qp.o mthca_av.o mthca_mcg.o mthca_mad.o \
- mthca_provider.o mthca_memfree.o mthca_uar.o
+ mthca_provider.o mthca_memfree.o mthca_uar.o mthca_srq.o
CMD_SW2HW_SRQ = 0x35,
CMD_HW2SW_SRQ = 0x36,
CMD_QUERY_SRQ = 0x37,
+ CMD_ARM_SRQ = 0x40,
/* QP/EE commands */
CMD_RST2INIT_QPEE = 0x19,
mthca_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz);
+ mthca_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
+ dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz);
mthca_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz);
mthca_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n",
CMD_TIME_CLASS_A, status);
}
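+
+/*
+ * SW2HW_SRQ passes ownership of an SRQ context from the driver to
+ * the HCA; HW2SW_SRQ takes ownership back before the SRQ is freed.
+ */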
+int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+ int srq_num, u8 *status)
+{
+ return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ,
+ CMD_TIME_CLASS_A, status);
+}
+
+int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+ int srq_num, u8 *status)
+{
+ return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0,
+ CMD_HW2SW_SRQ,
+ CMD_TIME_CLASS_A, status);
+}
+
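+/*
+ * ARM_SRQ sets the SRQ limit watermark: when the number of unconsumed
+ * WQEs on the SRQ drops below this limit, the HCA generates an SRQ
+ * limit event.
+ */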
+int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
+{
+ return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
+ CMD_TIME_CLASS_B, status);
+}
+
int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
u8 *status)
int cq_num, u8 *status);
int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
int cq_num, u8 *status);
+int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+ int srq_num, u8 *status);
+int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
+ int srq_num, u8 *status);
+int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num,
int is_ee, struct mthca_mailbox *mailbox, u32 optmask,
u8 *status);
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn)
+void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+ struct mthca_srq *srq)
{
struct mthca_cq *cq;
struct mthca_cqe *cqe;
*/
while (prod_index > cq->cons_index) {
cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe);
- if (cqe->my_qpn == cpu_to_be32(qpn))
+ if (cqe->my_qpn == cpu_to_be32(qpn)) {
+ if (srq)
+ mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
++nfreed;
- else if (nfreed)
+ } else if (nfreed)
memcpy(get_cqe(cq, (prod_index - 1 + nfreed) &
cq->ibcq.cqe),
>> wq->wqe_shift);
entry->wr_id = (*cur_qp)->wrid[wqe_index +
(*cur_qp)->rq.max];
+ } else if ((*cur_qp)->ibqp.srq) {
+ struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
+ u32 wqe = be32_to_cpu(cqe->wqe);
+ wq = NULL;
+ wqe_index = wqe >> srq->wqe_shift;
+ entry->wr_id = srq->wrid[wqe_index];
+ mthca_free_srq_wqe(srq, wqe);
} else {
wq = &(*cur_qp)->rq;
wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
entry->wr_id = (*cur_qp)->wrid[wqe_index];
}
- if (wq->last_comp < wqe_index)
- wq->tail += wqe_index - wq->last_comp;
- else
- wq->tail += wqe_index + wq->max - wq->last_comp;
-
- wq->last_comp = wqe_index;
+ if (wq) {
+ if (wq->last_comp < wqe_index)
+ wq->tail += wqe_index - wq->last_comp;
+ else
+ wq->tail += wqe_index + wq->max - wq->last_comp;
- if (0)
- mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n",
- is_send ? "Send" : "Receive",
- (*cur_qp)->qpn, wqe_index, wq->max);
+ wq->last_comp = wqe_index;
+ }
if (is_error) {
err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
struct mthca_icm_table *table;
};
+struct mthca_srq_table {
+ struct mthca_alloc alloc;
+ spinlock_t lock;
+ struct mthca_array srq;
+ struct mthca_icm_table *table;
+};
+
struct mthca_qp_table {
struct mthca_alloc alloc;
u32 rdb_base;
struct mthca_mr_table mr_table;
struct mthca_eq_table eq_table;
struct mthca_cq_table cq_table;
+ struct mthca_srq_table srq_table;
struct mthca_qp_table qp_table;
struct mthca_av_table av_table;
struct mthca_mcg_table mcg_table;
int mthca_init_mr_table(struct mthca_dev *dev);
int mthca_init_eq_table(struct mthca_dev *dev);
int mthca_init_cq_table(struct mthca_dev *dev);
+int mthca_init_srq_table(struct mthca_dev *dev);
int mthca_init_qp_table(struct mthca_dev *dev);
int mthca_init_av_table(struct mthca_dev *dev);
int mthca_init_mcg_table(struct mthca_dev *dev);
void mthca_cleanup_mr_table(struct mthca_dev *dev);
void mthca_cleanup_eq_table(struct mthca_dev *dev);
void mthca_cleanup_cq_table(struct mthca_dev *dev);
+void mthca_cleanup_srq_table(struct mthca_dev *dev);
void mthca_cleanup_qp_table(struct mthca_dev *dev);
void mthca_cleanup_av_table(struct mthca_dev *dev);
void mthca_cleanup_mcg_table(struct mthca_dev *dev);
void mthca_free_cq(struct mthca_dev *dev,
struct mthca_cq *cq);
void mthca_cq_event(struct mthca_dev *dev, u32 cqn);
-void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn);
+void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
+ struct mthca_srq *srq);
+
+int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
+ struct ib_srq_attr *attr, struct mthca_srq *srq);
+void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
+void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
+ enum ib_event_type event_type);
+void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
+int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
+int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr);
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
enum ib_event_type event_type);
profile = default_profile;
profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
profile.uarc_size = 0;
+ if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+ profile.num_srq = dev_lim.max_srqs;
err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
if (err < 0)
}
mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
- dev_lim->cqc_entry_sz,
- mdev->limits.num_cqs,
- mdev->limits.reserved_cqs, 0);
+ dev_lim->cqc_entry_sz,
+ mdev->limits.num_cqs,
+ mdev->limits.reserved_cqs, 0);
if (!mdev->cq_table.table) {
mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
err = -ENOMEM;
goto err_unmap_rdb;
}
+ if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
+ mdev->srq_table.table =
+ mthca_alloc_icm_table(mdev, init_hca->srqc_base,
+ dev_lim->srq_entry_sz,
+ mdev->limits.num_srqs,
+ mdev->limits.reserved_srqs, 0);
+ if (!mdev->srq_table.table) {
+ mthca_err(mdev, "Failed to map SRQ context memory, "
+ "aborting.\n");
+ err = -ENOMEM;
+ goto err_unmap_cq;
+ }
+ }
+
/*
* It's not strictly required, but for simplicity just map the
* whole multicast group table now. The table isn't very big
if (!mdev->mcg_table.table) {
mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
err = -ENOMEM;
- goto err_unmap_cq;
+ goto err_unmap_srq;
}
return 0;
+err_unmap_srq:
+ if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+ mthca_free_icm_table(mdev, mdev->srq_table.table);
+
err_unmap_cq:
mthca_free_icm_table(mdev, mdev->cq_table.table);
profile = default_profile;
profile.num_uar = dev_lim.uar_size / PAGE_SIZE;
profile.num_udav = 0;
+ if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+ profile.num_srq = dev_lim.max_srqs;
icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
if ((int) icm_size < 0) {
return 0;
err_free_icm:
+ if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+ mthca_free_icm_table(mdev, mdev->srq_table.table);
mthca_free_icm_table(mdev, mdev->cq_table.table);
mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
mthca_CLOSE_HCA(mdev, 0, &status);
if (mthca_is_memfree(mdev)) {
+ if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
+ mthca_free_icm_table(mdev, mdev->srq_table.table);
mthca_free_icm_table(mdev, mdev->cq_table.table);
mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
goto err_cmd_poll;
}
+ err = mthca_init_srq_table(dev);
+ if (err) {
+ mthca_err(dev, "Failed to initialize "
+ "shared receive queue table, aborting.\n");
+ goto err_cq_table_free;
+ }
+
err = mthca_init_qp_table(dev);
if (err) {
mthca_err(dev, "Failed to initialize "
"queue pair table, aborting.\n");
- goto err_cq_table_free;
+ goto err_srq_table_free;
}
err = mthca_init_av_table(dev);
err_qp_table_free:
mthca_cleanup_qp_table(dev);
+err_srq_table_free:
+ mthca_cleanup_srq_table(dev);
+
err_cq_table_free:
mthca_cleanup_cq_table(dev);
mthca_cleanup_mcg_table(mdev);
mthca_cleanup_av_table(mdev);
mthca_cleanup_qp_table(mdev);
+ mthca_cleanup_srq_table(mdev);
mthca_cleanup_cq_table(mdev);
mthca_cmd_use_polling(mdev);
mthca_cleanup_eq_table(mdev);
mthca_cleanup_mcg_table(mdev);
mthca_cleanup_av_table(mdev);
mthca_cleanup_qp_table(mdev);
+ mthca_cleanup_srq_table(mdev);
mthca_cleanup_cq_table(mdev);
mthca_cmd_use_polling(mdev);
mthca_cleanup_eq_table(mdev);
profile[MTHCA_RES_UARC].size = request->uarc_size;
profile[MTHCA_RES_QP].num = request->num_qp;
+ profile[MTHCA_RES_SRQ].num = request->num_srq;
profile[MTHCA_RES_EQP].num = request->num_qp;
profile[MTHCA_RES_RDB].num = request->num_qp * request->rdb_per_qp;
profile[MTHCA_RES_CQ].num = request->num_cq;
struct mthca_profile {
int num_qp;
int rdb_per_qp;
+ int num_srq;
int num_cq;
int num_mcg;
int num_mpt;
return 0;
}
+static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mthca_create_srq ucmd;
+ struct mthca_ucontext *context = NULL;
+ struct mthca_srq *srq;
+ int err;
+
+ srq = kmalloc(sizeof *srq, GFP_KERNEL);
+ if (!srq)
+ return ERR_PTR(-ENOMEM);
+
+ if (pd->uobject) {
+ context = to_mucontext(pd->uobject->context);
+
+ if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+ context->db_tab, ucmd.db_index,
+ ucmd.db_page);
+
+ if (err)
+ goto err_free;
+
+ srq->mr.ibmr.lkey = ucmd.lkey;
+ srq->db_index = ucmd.db_index;
+ }
+
+ err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
+ &init_attr->attr, srq);
+
+ if (err && pd->uobject)
+ mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
+ context->db_tab, ucmd.db_index);
+
+ if (err)
+ goto err_free;
+
+ if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
+ mthca_free_srq(to_mdev(pd->device), srq);
+ mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
+ context->db_tab, ucmd.db_index);
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ return &srq->ibsrq;
+
+err_free:
+ kfree(srq);
+
+ return ERR_PTR(err);
+}
+
+static int mthca_destroy_srq(struct ib_srq *srq)
+{
+ struct mthca_ucontext *context;
+
+ if (srq->uobject) {
+ context = to_mucontext(srq->uobject->context);
+
+ mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
+ context->db_tab, to_msrq(srq)->db_index);
+ }
+
+ mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
+ kfree(srq);
+
+ return 0;
+}
+
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
dev->ib_dev.dealloc_pd = mthca_dealloc_pd;
dev->ib_dev.create_ah = mthca_ah_create;
dev->ib_dev.destroy_ah = mthca_ah_destroy;
+
+ if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
+ dev->ib_dev.create_srq = mthca_create_srq;
+ dev->ib_dev.destroy_srq = mthca_destroy_srq;
+
+ if (mthca_is_memfree(dev))
+ dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
+ else
+ dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
+ }
+
dev->ib_dev.create_qp = mthca_create_qp;
dev->ib_dev.modify_qp = mthca_modify_qp;
dev->ib_dev.destroy_qp = mthca_destroy_qp;
wait_queue_head_t wait;
};
+struct mthca_srq {
+ struct ib_srq ibsrq;
+ spinlock_t lock;
+ atomic_t refcount;
+ int srqn;
+ int max;
+ int max_gs;
+ int wqe_shift;
+ int first_free;
+ int last_free;
+ u16 counter; /* Arbel only */
+ int db_index; /* Arbel only */
+ __be32 *db; /* Arbel only */
+ void *last;
+
+ int is_direct;
+ u64 *wrid;
+ union mthca_buf queue;
+ struct mthca_mr mr;
+
+ wait_queue_head_t wait;
+};
+
struct mthca_wq {
spinlock_t lock;
int max;
return container_of(ibcq, struct mthca_cq, ibcq);
}
+static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct mthca_srq, ibsrq);
+}
+
static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct mthca_qp, ibqp);
qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
if (mthca_is_memfree(dev)) {
- qp_context->rq_size_stride =
- ((ffs(qp->rq.max) - 1) << 3) | (qp->rq.wqe_shift - 4);
- qp_context->sq_size_stride =
- ((ffs(qp->sq.max) - 1) << 3) | (qp->sq.wqe_shift - 4);
+ if (qp->rq.max)
+ qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
+ qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;
+
+ if (qp->sq.max)
+ qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
+ qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
}
/* leave arbel_sched_queue as 0 */
qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
+ if (ibqp->srq)
+ qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);
+
if (attr_mask & IB_QP_MIN_RNR_TIMER) {
qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
}
+ if (ibqp->srq)
+ qp_context->srqn = cpu_to_be32(1 << 24 |
+ to_msrq(ibqp->srq)->srqn);
+
err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
qp->qpn, 0, mailbox, 0, &status);
if (status) {
* unref the mem-free tables and free the QPN in our table.
*/
if (!qp->ibqp.uobject) {
- mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn);
+ mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+ qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
- mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn);
+ mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+ qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
mthca_free_memfree(dev, qp);
mthca_free_wqe_buf(dev, qp);
{
struct mthca_next_seg *next;
+ /*
+ * For SRQs, all WQEs generate a CQE, so we're always at the
+ * end of the doorbell chain.
+ */
+ if (qp->ibqp.srq) {
+ *new_wqe = 0;
+ return 0;
+ }
+
if (is_send)
next = get_send_wqe(qp, index);
else
--- /dev/null
+/*
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id: mthca_srq.c 3047 2005-08-10 03:59:35Z roland $
+ */
+
+#include "mthca_dev.h"
+#include "mthca_cmd.h"
+#include "mthca_memfree.h"
+#include "mthca_wqe.h"
+
+enum {
+ MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
+};
+
+struct mthca_tavor_srq_context {
+ __be64 wqe_base_ds; /* low 6 bits is descriptor size */
+ __be32 state_pd;
+ __be32 lkey;
+ __be32 uar;
+ __be32 wqe_cnt;
+ u32 reserved[2];
+};
+
+struct mthca_arbel_srq_context {
+ __be32 state_logsize_srqn;
+ __be32 lkey;
+ __be32 db_index;
+ __be32 logstride_usrpage;
+ __be64 wqe_base;
+ __be32 eq_pd;
+ __be16 limit_watermark;
+ __be16 wqe_cnt;
+ u16 reserved1;
+ __be16 wqe_counter;
+ u32 reserved2[3];
+};
+
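+/*
+ * Return a pointer to WQE number n, whether the SRQ buffer is one
+ * contiguous chunk or a list of page-sized fragments.
+ */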
+static void *get_wqe(struct mthca_srq *srq, int n)
+{
+ if (srq->is_direct)
+ return srq->queue.direct.buf + (n << srq->wqe_shift);
+ else
+ return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
+ ((n << srq->wqe_shift) & (PAGE_SIZE - 1));
+}
+
+/*
+ * Return a pointer to the location within a WQE that we're using as a
+ * link when the WQE is in the free list. We use an offset of 4
+ * because in the Tavor case, posting a WQE may overwrite the first
+ * four bytes of the previous WQE. The offset avoids corrupting our
+ * free list if the WQE has already completed and been put on the free
+ * list when we post the next WQE.
+ */
+static inline int *wqe_to_link(void *wqe)
+{
+ return (int *) (wqe + 4);
+}
+
+static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
+ struct mthca_pd *pd,
+ struct mthca_srq *srq,
+ struct mthca_tavor_srq_context *context)
+{
+ memset(context, 0, sizeof *context);
+
+ context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
+ context->state_pd = cpu_to_be32(pd->pd_num);
+ context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
+
+ if (pd->ibpd.uobject)
+ context->uar =
+ cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
+ else
+ context->uar = cpu_to_be32(dev->driver_uar.index);
+}
+
+static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
+ struct mthca_pd *pd,
+ struct mthca_srq *srq,
+ struct mthca_arbel_srq_context *context)
+{
+ int logsize;
+
+ memset(context, 0, sizeof *context);
+
+ logsize = long_log2(srq->max) + srq->wqe_shift;
+ context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
+ context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
+ context->db_index = cpu_to_be32(srq->db_index);
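+ /* log2 of the WQE stride, minus 4, goes in the top 3 bits */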
+ context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
+ if (pd->ibpd.uobject)
+ context->logstride_usrpage |=
+ cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
+ else
+ context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
+ context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
+}
+
+static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
+{
+ mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
+ srq->is_direct, &srq->mr);
+ kfree(srq->wrid);
+}
+
+static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
+ struct mthca_srq *srq)
+{
+ struct mthca_data_seg *scatter;
+ void *wqe;
+ int err;
+ int i;
+
+ if (pd->ibpd.uobject)
+ return 0;
+
+ srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
+ if (!srq->wrid)
+ return -ENOMEM;
+
+ err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
+ MTHCA_MAX_DIRECT_SRQ_SIZE,
+ &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
+ if (err) {
+ kfree(srq->wrid);
+ return err;
+ }
+
+ /*
+ * Now initialize the SRQ buffer so that all of the WQEs are
+ * linked into the list of free WQEs. In addition, set the
+ * scatter list L_Keys to the sentry value of 0x100.
+ */
+ for (i = 0; i < srq->max; ++i) {
+ wqe = get_wqe(srq, i);
+
+ *wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1;
+
+ for (scatter = wqe + sizeof (struct mthca_next_seg);
+ (void *) scatter < wqe + (1 << srq->wqe_shift);
+ ++scatter)
+ scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
+ }
+
+ return 0;
+}
+
+int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
+ struct ib_srq_attr *attr, struct mthca_srq *srq)
+{
+ struct mthca_mailbox *mailbox;
+ u8 status;
+ int ds;
+ int err;
+
+ /* Sanity check SRQ size before proceeding */
+ if (attr->max_wr > 16 << 20 || attr->max_sge > 64)
+ return -EINVAL;
+
+ srq->max = attr->max_wr;
+ srq->max_gs = attr->max_sge;
+ srq->last = NULL;
+ srq->counter = 0;
+
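+ /*
+ * Mem-free HCAs take the queue size as a log2, so round up to the
+ * next power of 2; the extra entry keeps headroom so that max_wr
+ * WQEs can always be outstanding.
+ */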
+ if (mthca_is_memfree(dev))
+ srq->max = roundup_pow_of_two(srq->max + 1);
+
+ ds = min(64UL,
+ roundup_pow_of_two(sizeof (struct mthca_next_seg) +
+ srq->max_gs * sizeof (struct mthca_data_seg)));
+ srq->wqe_shift = long_log2(ds);
+
+ srq->srqn = mthca_alloc(&dev->srq_table.alloc);
+ if (srq->srqn == -1)
+ return -ENOMEM;
+
+ if (mthca_is_memfree(dev)) {
+ err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
+ if (err)
+ goto err_out;
+
+ if (!pd->ibpd.uobject) {
+ srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
+ srq->srqn, &srq->db);
+ if (srq->db_index < 0) {
+ err = -ENOMEM;
+ goto err_out_icm;
+ }
+ }
+ }
+
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ goto err_out_db;
+ }
+
+ err = mthca_alloc_srq_buf(dev, pd, srq);
+ if (err)
+ goto err_out_mailbox;
+
+ spin_lock_init(&srq->lock);
+ atomic_set(&srq->refcount, 1);
+ init_waitqueue_head(&srq->wait);
+
+ if (mthca_is_memfree(dev))
+ mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
+ else
+ mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
+
+ err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);
+
+ if (err) {
+ mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
+ goto err_out_free_buf;
+ }
+ if (status) {
+ mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
+ status);
+ err = -EINVAL;
+ goto err_out_free_buf;
+ }
+
+ spin_lock_irq(&dev->srq_table.lock);
+ err = mthca_array_set(&dev->srq_table.srq,
+ srq->srqn & (dev->limits.num_srqs - 1),
+ srq);
+ if (err) {
+ spin_unlock_irq(&dev->srq_table.lock);
+ goto err_out_free_srq;
+ }
+ spin_unlock_irq(&dev->srq_table.lock);
+
+ mthca_free_mailbox(dev, mailbox);
+
+ srq->first_free = 0;
+ srq->last_free = srq->max - 1;
+
+ return 0;
+
+err_out_free_srq:
+ if (mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status))
+ mthca_warn(dev, "HW2SW_SRQ cleanup failed\n");
+ else if (status)
+ mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
+
+err_out_free_buf:
+ if (!pd->ibpd.uobject)
+ mthca_free_srq_buf(dev, srq);
+
+err_out_mailbox:
+ mthca_free_mailbox(dev, mailbox);
+
+err_out_db:
+ if (!pd->ibpd.uobject && mthca_is_memfree(dev))
+ mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
+
+err_out_icm:
+ mthca_table_put(dev, dev->srq_table.table, srq->srqn);
+
+err_out:
+ mthca_free(&dev->srq_table.alloc, srq->srqn);
+
+ return err;
+}
+
+void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
+{
+ struct mthca_mailbox *mailbox;
+ int err;
+ u8 status;
+
+ mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ if (IS_ERR(mailbox)) {
+ mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
+ return;
+ }
+
+ err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
+ if (err)
+ mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
+ else if (status)
+ mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
+
+ spin_lock_irq(&dev->srq_table.lock);
+ mthca_array_clear(&dev->srq_table.srq,
+ srq->srqn & (dev->limits.num_srqs - 1));
+ spin_unlock_irq(&dev->srq_table.lock);
+
+ atomic_dec(&srq->refcount);
+ wait_event(srq->wait, !atomic_read(&srq->refcount));
+
+ if (!srq->ibsrq.uobject) {
+ mthca_free_srq_buf(dev, srq);
+ if (mthca_is_memfree(dev))
+ mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
+ }
+
+ mthca_table_put(dev, dev->srq_table.table, srq->srqn);
+ mthca_free(&dev->srq_table.alloc, srq->srqn);
+ mthca_free_mailbox(dev, mailbox);
+}
+
+void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
+ enum ib_event_type event_type)
+{
+ struct mthca_srq *srq;
+ struct ib_event event;
+
+ spin_lock(&dev->srq_table.lock);
+ srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
+ if (srq)
+ atomic_inc(&srq->refcount);
+ spin_unlock(&dev->srq_table.lock);
+
+ if (!srq) {
+ mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
+ return;
+ }
+
+ if (!srq->ibsrq.event_handler)
+ goto out;
+
+ event.device = &dev->ib_dev;
+ event.event = event_type;
+ event.element.srq = &srq->ibsrq;
+ srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
+
+out:
+ if (atomic_dec_and_test(&srq->refcount))
+ wake_up(&srq->wait);
+}
+
+/*
+ * This function must be called with IRQs disabled, since it is
+ * called from the CQ poll and clean paths with the CQ lock held.
+ */
+void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
+{
+ int ind;
+
+ ind = wqe_addr >> srq->wqe_shift;
+
+ spin_lock(&srq->lock);
+
+ if (likely(srq->first_free >= 0))
+ *wqe_to_link(get_wqe(srq, srq->last_free)) = ind;
+ else
+ srq->first_free = ind;
+
+ *wqe_to_link(get_wqe(srq, ind)) = -1;
+ srq->last_free = ind;
+
+ spin_unlock(&srq->lock);
+}
+
+int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct mthca_dev *dev = to_mdev(ibsrq->device);
+ struct mthca_srq *srq = to_msrq(ibsrq);
+ unsigned long flags;
+ int err = 0;
+ int first_ind;
+ int ind;
+ int next_ind;
+ int nreq;
+ int i;
+ void *wqe;
+ void *prev_wqe;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+ first_ind = srq->first_free;
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ ind = srq->first_free;
+
+ if (ind < 0) {
+ mthca_err(dev, "SRQ %06x full\n", srq->srqn);
+ err = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+
+ wqe = get_wqe(srq, ind);
+ next_ind = *wqe_to_link(wqe);
+ prev_wqe = srq->last;
+ srq->last = wqe;
+
+ ((struct mthca_next_seg *) wqe)->nda_op = 0;
+ ((struct mthca_next_seg *) wqe)->ee_nds = 0;
+ /* flags field will always remain 0 */
+
+ wqe += sizeof (struct mthca_next_seg);
+
+ if (unlikely(wr->num_sge > srq->max_gs)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ srq->last = prev_wqe;
+ break;
+ }
+
+ for (i = 0; i < wr->num_sge; ++i) {
+ ((struct mthca_data_seg *) wqe)->byte_count =
+ cpu_to_be32(wr->sg_list[i].length);
+ ((struct mthca_data_seg *) wqe)->lkey =
+ cpu_to_be32(wr->sg_list[i].lkey);
+ ((struct mthca_data_seg *) wqe)->addr =
+ cpu_to_be64(wr->sg_list[i].addr);
+ wqe += sizeof (struct mthca_data_seg);
+ }
+
+ if (i < srq->max_gs) {
+ ((struct mthca_data_seg *) wqe)->byte_count = 0;
+ ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
+ ((struct mthca_data_seg *) wqe)->addr = 0;
+ }
+
+ if (likely(prev_wqe)) {
+ ((struct mthca_next_seg *) prev_wqe)->nda_op =
+ cpu_to_be32((ind << srq->wqe_shift) | 1);
+ wmb();
+ ((struct mthca_next_seg *) prev_wqe)->ee_nds =
+ cpu_to_be32(MTHCA_NEXT_DBD);
+ }
+
+ srq->wrid[ind] = wr->wr_id;
+ srq->first_free = next_ind;
+ }
+
+
+ if (likely(nreq)) {
+ __be32 doorbell[2];
+
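+ /*
+ * Tavor receive doorbell: the first word is the offset of the
+ * first new WQE in the queue, the second is the SRQ number and
+ * the count of WQEs being posted.
+ */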
+ doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
+ doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);
+
+ /*
+ * Make sure that descriptors are written before
+ * doorbell is rung.
+ */
+ wmb();
+
+ mthca_write64(doorbell,
+ dev->kar + MTHCA_RECEIVE_DOORBELL,
+ MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+ }
+
+ spin_unlock_irqrestore(&srq->lock, flags);
+ return err;
+}
+
+int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
+ struct ib_recv_wr **bad_wr)
+{
+ struct mthca_dev *dev = to_mdev(ibsrq->device);
+ struct mthca_srq *srq = to_msrq(ibsrq);
+ unsigned long flags;
+ int err = 0;
+ int ind;
+ int next_ind;
+ int nreq;
+ int i;
+ void *wqe;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+ for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ ind = srq->first_free;
+
+ if (ind < 0) {
+ mthca_err(dev, "SRQ %06x full\n", srq->srqn);
+ err = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+
+ wqe = get_wqe(srq, ind);
+ next_ind = *wqe_to_link(wqe);
+
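+ /*
+ * Mem-free HCAs follow the nda_op chain themselves, so link
+ * this WQE to the next free entry up front.
+ */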
+ ((struct mthca_next_seg *) wqe)->nda_op =
+ cpu_to_be32((next_ind << srq->wqe_shift) | 1);
+ ((struct mthca_next_seg *) wqe)->ee_nds = 0;
+ /* flags field will always remain 0 */
+
+ wqe += sizeof (struct mthca_next_seg);
+
+ if (unlikely(wr->num_sge > srq->max_gs)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+ for (i = 0; i < wr->num_sge; ++i) {
+ ((struct mthca_data_seg *) wqe)->byte_count =
+ cpu_to_be32(wr->sg_list[i].length);
+ ((struct mthca_data_seg *) wqe)->lkey =
+ cpu_to_be32(wr->sg_list[i].lkey);
+ ((struct mthca_data_seg *) wqe)->addr =
+ cpu_to_be64(wr->sg_list[i].addr);
+ wqe += sizeof (struct mthca_data_seg);
+ }
+
+ if (i < srq->max_gs) {
+ ((struct mthca_data_seg *) wqe)->byte_count = 0;
+ ((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
+ ((struct mthca_data_seg *) wqe)->addr = 0;
+ }
+
+ srq->wrid[ind] = wr->wr_id;
+ srq->first_free = next_ind;
+ }
+
+ if (likely(nreq)) {
+ srq->counter += nreq;
+
+ /*
+ * Make sure that descriptors are written before
+ * we write doorbell record.
+ */
+ wmb();
+ *srq->db = cpu_to_be32(srq->counter);
+ }
+
+ spin_unlock_irqrestore(&srq->lock, flags);
+ return err;
+}
+
+int __devinit mthca_init_srq_table(struct mthca_dev *dev)
+{
+ int err;
+
+ if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
+ return 0;
+
+ spin_lock_init(&dev->srq_table.lock);
+
+ err = mthca_alloc_init(&dev->srq_table.alloc,
+ dev->limits.num_srqs,
+ dev->limits.num_srqs - 1,
+ dev->limits.reserved_srqs);
+ if (err)
+ return err;
+
+ err = mthca_array_init(&dev->srq_table.srq,
+ dev->limits.num_srqs);
+ if (err)
+ mthca_alloc_cleanup(&dev->srq_table.alloc);
+
+ return err;
+}
+
+void __devexit mthca_cleanup_srq_table(struct mthca_dev *dev)
+{
+ if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
+ return;
+
+ mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
+ mthca_alloc_cleanup(&dev->srq_table.alloc);
+}
__u32 reserved;
};
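+/*
+ * Arguments passed in from userspace when creating an SRQ, and the
+ * response returned to it; these structs are part of the kernel/user
+ * ABI.
+ */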
+struct mthca_create_srq {
+ __u32 lkey;
+ __u32 db_index;
+ __u64 db_page;
+};
+
+struct mthca_create_srq_resp {
+ __u32 srqn;
+ __u32 reserved;
+};
+
struct mthca_create_qp {
__u32 lkey;
__u32 reserved;