| author | David Dillow <dillowda@ornl.gov> | 2010-10-08 18:48:14 (GMT) |
|---|---|---|
| committer | Roland Dreier <rolandd@cisco.com> | 2010-10-25 05:14:08 (GMT) |
| commit | 05a1d7504f836ee67e27f2488cb5b8126b51dbd4 (patch) | |
| tree | 029c9e682a7070e1323bf71e0ca6977e54f5b64b /drivers/infiniband/ulp/srp | |
| parent | bb12588a38e6db85e01dceadff7bc161fc92e7d2 (diff) | |
| download | linux-fsl-qoriq-05a1d7504f836ee67e27f2488cb5b8126b51dbd4.tar.xz | |
IB/srp: Eliminate two forward declarations
Signed-off-by: David Dillow <dillowda@ornl.gov>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/ulp/srp')
-rw-r--r-- | drivers/infiniband/ulp/srp/ib_srp.c | 142
1 files changed, 69 insertions, 73 deletions
```diff
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index a54eee9..d4c08d6 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -83,10 +83,6 @@ static void srp_remove_one(struct ib_device *device);
 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
 static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
-static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
-				      enum srp_iu_type iu_type);
-static int __srp_post_send(struct srp_target_port *target,
-			   struct srp_iu *iu, int len);
 
 static struct scsi_transport_template *ib_srp_transport_template;
 
@@ -815,6 +811,75 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 	return len;
 }
 
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head.  Lock cannot be dropped between call here and
+ * call to __srp_post_send().
+ *
+ * Note:
+ * An upper limit for the number of allocated information units for each
+ * request type is:
+ * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
+ *   more than Scsi_Host.can_queue requests.
+ * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
+ * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
+ *   one unanswered SRP request to an initiator.
+ */
+static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
+				      enum srp_iu_type iu_type)
+{
+	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
+	struct srp_iu *iu;
+
+	srp_send_completion(target->send_cq, target);
+
+	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+		return NULL;
+
+	/* Initiator responses to target requests do not consume credits */
+	if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
+		++target->zero_req_lim;
+		return NULL;
+	}
+
+	iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
+	iu->type = iu_type;
+	return iu;
+}
+
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head.
+ */
+static int __srp_post_send(struct srp_target_port *target,
+			   struct srp_iu *iu, int len)
+{
+	struct ib_sge list;
+	struct ib_send_wr wr, *bad_wr;
+	int ret = 0;
+
+	list.addr   = iu->dma;
+	list.length = len;
+	list.lkey   = target->srp_host->srp_dev->mr->lkey;
+
+	wr.next       = NULL;
+	wr.wr_id      = target->tx_head & SRP_SQ_MASK;
+	wr.sg_list    = &list;
+	wr.num_sge    = 1;
+	wr.opcode     = IB_WR_SEND;
+	wr.send_flags = IB_SEND_SIGNALED;
+
+	ret = ib_post_send(target->qp, &wr, &bad_wr);
+
+	if (!ret) {
+		++target->tx_head;
+		if (iu->type != SRP_IU_RSP)
+			--target->req_lim;
+	}
+
+	return ret;
+}
+
 static int srp_post_recv(struct srp_target_port *target)
 {
 	unsigned long flags;
@@ -1058,75 +1123,6 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
 	}
 }
 
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.  Lock cannot be dropped between call here and
- * call to __srp_post_send().
- *
- * Note:
- * An upper limit for the number of allocated information units for each
- * request type is:
- * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
- *   more than Scsi_Host.can_queue requests.
- * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
- * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
- *   one unanswered SRP request to an initiator.
- */
-static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
-				      enum srp_iu_type iu_type)
-{
-	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
-	struct srp_iu *iu;
-
-	srp_send_completion(target->send_cq, target);
-
-	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
-		return NULL;
-
-	/* Initiator responses to target requests do not consume credits */
-	if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
-		++target->zero_req_lim;
-		return NULL;
-	}
-
-	iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
-	iu->type = iu_type;
-	return iu;
-}
-
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
- */
-static int __srp_post_send(struct srp_target_port *target,
-			   struct srp_iu *iu, int len)
-{
-	struct ib_sge list;
-	struct ib_send_wr wr, *bad_wr;
-	int ret = 0;
-
-	list.addr   = iu->dma;
-	list.length = len;
-	list.lkey   = target->srp_host->srp_dev->mr->lkey;
-
-	wr.next       = NULL;
-	wr.wr_id      = target->tx_head & SRP_SQ_MASK;
-	wr.sg_list    = &list;
-	wr.num_sge    = 1;
-	wr.opcode     = IB_WR_SEND;
-	wr.send_flags = IB_SEND_SIGNALED;
-
-	ret = ib_post_send(target->qp, &wr, &bad_wr);
-
-	if (!ret) {
-		++target->tx_head;
-		if (iu->type != SRP_IU_RSP)
-			--target->req_lim;
-	}
-
-	return ret;
-}
-
 static int srp_queuecommand(struct scsi_cmnd *scmnd,
 			    void (*done)(struct scsi_cmnd *))
 {
```
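The change is purely structural: `__srp_get_tx_iu()` and `__srp_post_send()` are moved above their first callers in ib_srp.c, which makes the two forward declarations at the top of the file redundant. The sketch below is a minimal standalone illustration of that C rule, assuming nothing beyond standard C; the names `helper()` and `caller()` are invented for the example and do not appear in ib_srp.c.

```c
#include <stdio.h>

/*
 * Hypothetical example, not from ib_srp.c.  If helper() were defined
 * below caller(), a forward declaration such as
 *     static int helper(int x);
 * would be required before caller().  Defining helper() first removes
 * that need, which is the same reordering this commit performs on
 * __srp_get_tx_iu() and __srp_post_send().
 */
static int helper(int x)
{
	return x * 2;
}

static int caller(int x)
{
	return helper(x) + 1;	/* helper() is already in scope here */
}

int main(void)
{
	printf("%d\n", caller(20));	/* prints 41 */
	return 0;
}
```

Moving the two definitions to an earlier position in the file (before their users, as the second hunk shows) satisfies the same ordering constraint inside ib_srp.c without changing any behavior.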