Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_reqs.c')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c  | 100
1 file changed, 65 insertions(+), 35 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index caec9dee09e1..94eed70fedf5 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -3,8 +3,9 @@
*
* post_send/recv, poll_cq, req_notify
*
- * Authors: Waleri Fomin <fomin@de.ibm.com>
- * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ * Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ * Waleri Fomin <fomin@de.ibm.com>
+ * Joachim Fenkes <fenkes@de.ibm.com>
* Reinhard Ernst <rernst@de.ibm.com>
*
* Copyright (c) 2005 IBM Corporation
@@ -78,7 +79,8 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
}
if (ehca_debug_level) {
- ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", ipz_rqueue);
+ ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
+ ipz_rqueue);
ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
}
@@ -98,7 +100,7 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr;
struct ib_sge *sge = send_wr->sg_list;
ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x "
- "send_flags=%x opcode=%x",idx, send_wr->wr_id,
+ "send_flags=%x opcode=%x", idx, send_wr->wr_id,
send_wr->num_sge, send_wr->send_flags,
send_wr->opcode);
if (mad_hdr) {
@@ -115,7 +117,7 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
mad_hdr->attr_mod);
}
for (j = 0; j < send_wr->num_sge; j++) {
- u8 *data = (u8 *) abs_to_virt(sge->addr);
+ u8 *data = (u8 *)abs_to_virt(sge->addr);
ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x "
"lkey=%x",
idx, j, data, sge->length, sge->lkey);
@@ -362,10 +364,10 @@ int ehca_post_send(struct ib_qp *qp,
struct ehca_wqe *wqe_p;
int wqe_cnt = 0;
int ret = 0;
- unsigned long spl_flags;
+ unsigned long flags;
/* LOCK the QUEUE */
- spin_lock_irqsave(&my_qp->spinlock_s, spl_flags);
+ spin_lock_irqsave(&my_qp->spinlock_s, flags);
/* loop processes list of send reqs */
for (cur_send_wr = send_wr; cur_send_wr != NULL;
@@ -406,26 +408,31 @@ int ehca_post_send(struct ib_qp *qp,
} /* eof for cur_send_wr */
post_send_exit0:
- /* UNLOCK the QUEUE */
- spin_unlock_irqrestore(&my_qp->spinlock_s, spl_flags);
iosync(); /* serialize GAL register access */
hipz_update_sqa(my_qp, wqe_cnt);
+ spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
return ret;
}
-int ehca_post_recv(struct ib_qp *qp,
- struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr)
+static int internal_post_recv(struct ehca_qp *my_qp,
+ struct ib_device *dev,
+ struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr)
{
- struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
struct ib_recv_wr *cur_recv_wr;
struct ehca_wqe *wqe_p;
int wqe_cnt = 0;
int ret = 0;
- unsigned long spl_flags;
+ unsigned long flags;
+
+ if (unlikely(!HAS_RQ(my_qp))) {
+ ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
+ my_qp, my_qp->real_qp_num, my_qp->ext_type);
+ return -ENODEV;
+ }
/* LOCK the QUEUE */
- spin_lock_irqsave(&my_qp->spinlock_r, spl_flags);
+ spin_lock_irqsave(&my_qp->spinlock_r, flags);
/* loop processes list of send reqs */
for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
@@ -439,8 +446,8 @@ int ehca_post_recv(struct ib_qp *qp,
*bad_recv_wr = cur_recv_wr;
if (wqe_cnt == 0) {
ret = -ENOMEM;
- ehca_err(qp->device, "Too many posted WQEs "
- "qp_num=%x", qp->qp_num);
+ ehca_err(dev, "Too many posted WQEs "
+ "qp_num=%x", my_qp->real_qp_num);
}
goto post_recv_exit0;
}
@@ -455,23 +462,39 @@ int ehca_post_recv(struct ib_qp *qp,
*bad_recv_wr = cur_recv_wr;
if (wqe_cnt == 0) {
ret = -EINVAL;
- ehca_err(qp->device, "Could not write WQE "
- "qp_num=%x", qp->qp_num);
+ ehca_err(dev, "Could not write WQE "
+ "qp_num=%x", my_qp->real_qp_num);
}
goto post_recv_exit0;
}
wqe_cnt++;
- ehca_gen_dbg("ehca_qp=%p qp_num=%x wqe_cnt=%d",
- my_qp, qp->qp_num, wqe_cnt);
+ ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
+ my_qp, my_qp->real_qp_num, wqe_cnt);
} /* eof for cur_recv_wr */
post_recv_exit0:
- spin_unlock_irqrestore(&my_qp->spinlock_r, spl_flags);
iosync(); /* serialize GAL register access */
hipz_update_rqa(my_qp, wqe_cnt);
+ spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
return ret;
}
+int ehca_post_recv(struct ib_qp *qp,
+ struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr)
+{
+ return internal_post_recv(container_of(qp, struct ehca_qp, ib_qp),
+ qp->device, recv_wr, bad_recv_wr);
+}
+
+int ehca_post_srq_recv(struct ib_srq *srq,
+ struct ib_recv_wr *recv_wr,
+ struct ib_recv_wr **bad_recv_wr)
+{
+ return internal_post_recv(container_of(srq, struct ehca_qp, ib_srq),
+ srq->device, recv_wr, bad_recv_wr);
+}
+
/*
* ib_wc_opcode table converts ehca wc opcode to ib
* Since we use zero to indicate invalid opcode, the actual ib opcode must
@@ -494,6 +517,7 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
int ret = 0;
struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
struct ehca_cqe *cqe;
+ struct ehca_qp *my_qp;
int cqe_count = 0;
poll_cq_one_read_cqe:
@@ -511,9 +535,11 @@ poll_cq_one_read_cqe:
cqe_count++;
if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
- struct ehca_qp *qp=ehca_cq_get_qp(my_cq, cqe->local_qp_number);
+ struct ehca_qp *qp;
int purgeflag;
- unsigned long spl_flags;
+ unsigned long flags;
+
+ qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
if (!qp) {
ehca_err(cq->device, "cq_num=%x qp_num=%x "
"could not find qp -> ignore cqe",
@@ -523,13 +549,13 @@ poll_cq_one_read_cqe:
/* ignore this purged cqe */
goto poll_cq_one_read_cqe;
}
- spin_lock_irqsave(&qp->spinlock_s, spl_flags);
+ spin_lock_irqsave(&qp->spinlock_s, flags);
purgeflag = qp->sqerr_purgeflag;
- spin_unlock_irqrestore(&qp->spinlock_s, spl_flags);
+ spin_unlock_irqrestore(&qp->spinlock_s, flags);
if (purgeflag) {
- ehca_dbg(cq->device, "Got CQE with purged bit qp_num=%x "
- "src_qp=%x",
+ ehca_dbg(cq->device,
+ "Got CQE with purged bit qp_num=%x src_qp=%x",
cqe->local_qp_number, cqe->remote_qp_number);
if (ehca_debug_level)
ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
@@ -545,7 +571,7 @@ poll_cq_one_read_cqe:
}
/* tracing cqe */
- if (ehca_debug_level) {
+ if (unlikely(ehca_debug_level)) {
ehca_dbg(cq->device,
"Received COMPLETION ehca_cq=%p cq_num=%x -----",
my_cq, my_cq->cq_number);
@@ -579,7 +605,11 @@ poll_cq_one_read_cqe:
} else
wc->status = IB_WC_SUCCESS;
- wc->qp = NULL;
+ read_lock(&ehca_qp_idr_lock);
+ my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
+ wc->qp = &my_qp->ib_qp;
+ read_unlock(&ehca_qp_idr_lock);
+
wc->byte_len = cqe->nr_bytes_transferred;
wc->pkey_index = cqe->pkey_index;
wc->slid = cqe->rlid;
@@ -589,7 +619,7 @@ poll_cq_one_read_cqe:
wc->imm_data = cpu_to_be32(cqe->immediate_data);
wc->sl = cqe->service_level;
- if (wc->status != IB_WC_SUCCESS)
+ if (unlikely(wc->status != IB_WC_SUCCESS))
ehca_dbg(cq->device,
"ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
"OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
@@ -610,7 +640,7 @@ int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
int nr;
struct ib_wc *current_wc = wc;
int ret = 0;
- unsigned long spl_flags;
+ unsigned long flags;
if (num_entries < 1) {
ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
@@ -619,14 +649,14 @@ int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
goto poll_cq_exit0;
}
- spin_lock_irqsave(&my_cq->spinlock, spl_flags);
+ spin_lock_irqsave(&my_cq->spinlock, flags);
for (nr = 0; nr < num_entries; nr++) {
ret = ehca_poll_cq_one(cq, current_wc);
if (ret)
break;
current_wc++;
} /* eof for nr */
- spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
+ spin_unlock_irqrestore(&my_cq->spinlock, flags);
if (ret == -EAGAIN || !ret)
ret = nr;
@@ -637,7 +667,6 @@ poll_cq_exit0:
int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
{
struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
- unsigned long spl_flags;
int ret = 0;
switch (notify_flags & IB_CQ_SOLICITED_MASK) {
@@ -652,6 +681,7 @@ int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
}
if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
+ unsigned long spl_flags;
spin_lock_irqsave(&my_cq->spinlock, spl_flags);
ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);