Diffstat (limited to 'drivers/infiniband/hw/mlx5')
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c    | 102
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h |   8
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c      |  25
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c      |  20
4 files changed, 120 insertions(+), 35 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 6ad0489..1b04df7 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -38,6 +38,9 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
+#if defined(CONFIG_X86)
+#include <asm/pat.h>
+#endif
#include <linux/sched.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
@@ -517,6 +520,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_UD_TSO;
}
+ if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(dev->mdev, scatter_fcs))
+ props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
+
props->vendor_part_id = mdev->pdev->device;
props->hw_ver = mdev->pdev->revision;
@@ -1068,38 +1075,89 @@ static int get_index(unsigned long offset)
return get_arg(offset);
}
+static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
+{
+ switch (cmd) {
+ case MLX5_IB_MMAP_WC_PAGE:
+ return "WC";
+ case MLX5_IB_MMAP_REGULAR_PAGE:
+ return "best effort WC";
+ case MLX5_IB_MMAP_NC_PAGE:
+ return "NC";
+ default:
+ return NULL;
+ }
+}
+
+static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
+ struct vm_area_struct *vma, struct mlx5_uuar_info *uuari)
+{
+ int err;
+ unsigned long idx;
+ phys_addr_t pfn, pa;
+ pgprot_t prot;
+
+ switch (cmd) {
+ case MLX5_IB_MMAP_WC_PAGE:
+/* Some architectures don't support WC memory */
+#if defined(CONFIG_X86)
+ if (!pat_enabled())
+ return -EPERM;
+#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
+ return -EPERM;
+#endif
+ /* fall through */
+ case MLX5_IB_MMAP_REGULAR_PAGE:
+ /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
+ prot = pgprot_writecombine(vma->vm_page_prot);
+ break;
+ case MLX5_IB_MMAP_NC_PAGE:
+ prot = pgprot_noncached(vma->vm_page_prot);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ idx = get_index(vma->vm_pgoff);
+ if (idx >= uuari->num_uars)
+ return -EINVAL;
+
+ pfn = uar_index2pfn(dev, uuari->uars[idx].index);
+ mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
+
+ vma->vm_page_prot = prot;
+ err = io_remap_pfn_range(vma, vma->vm_start, pfn,
+ PAGE_SIZE, vma->vm_page_prot);
+ if (err) {
+ mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%lx, pfn=%pa, mmap_cmd=%s\n",
+ err, vma->vm_start, &pfn, mmap_cmd2str(cmd));
+ return -EAGAIN;
+ }
+
+ pa = pfn << PAGE_SHIFT;
+ mlx5_ib_dbg(dev, "mapped %s at 0x%lx, PA %pa\n", mmap_cmd2str(cmd),
+ vma->vm_start, &pa);
+
+ return 0;
+}
+
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
struct mlx5_uuar_info *uuari = &context->uuari;
unsigned long command;
- unsigned long idx;
phys_addr_t pfn;
command = get_command(vma->vm_pgoff);
switch (command) {
+ case MLX5_IB_MMAP_WC_PAGE:
+ case MLX5_IB_MMAP_NC_PAGE:
case MLX5_IB_MMAP_REGULAR_PAGE:
- if (vma->vm_end - vma->vm_start != PAGE_SIZE)
- return -EINVAL;
-
- idx = get_index(vma->vm_pgoff);
- if (idx >= uuari->num_uars)
- return -EINVAL;
-
- pfn = uar_index2pfn(dev, uuari->uars[idx].index);
- mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
- (unsigned long long)pfn);
-
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- if (io_remap_pfn_range(vma, vma->vm_start, pfn,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
-
- mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n",
- vma->vm_start,
- (unsigned long long)pfn << PAGE_SHIFT);
- break;
+ return uar_mmap(dev, command, vma, uuari);
case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
return -ENOSYS;
@@ -1108,7 +1166,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
- if (vma->vm_flags & (VM_WRITE | VM_EXEC))
+ if (vma->vm_flags & VM_WRITE)
return -EPERM;
/* Don't expose to user-space information it shouldn't have */
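
The three UAR mapping commands handled by uar_mmap() are selected by userspace through the mmap offset: get_command() and get_index() decode the command from the high bits of vma->vm_pgoff and the UAR index from the low bits. Below is a minimal userspace sketch of that encoding, assuming the driver's 8-bit MLX5_IB_MMAP_CMD_SHIFT split; cmd_fd, page_size and uar_idx are placeholder inputs supplied by the caller.

#include <sys/types.h>
#include <sys/mman.h>

/* Mirrors the driver's command/index split of vm_pgoff (assumed 8-bit shift). */
#define MMAP_CMD_SHIFT	8
#define MMAP_WC_PAGE	2	/* MLX5_IB_MMAP_WC_PAGE */
#define MMAP_NC_PAGE	3	/* MLX5_IB_MMAP_NC_PAGE */

static void *map_uar_page(int cmd_fd, long page_size, int cmd, int uar_idx)
{
	/* The kernel sees offset / page_size as vm_pgoff. */
	off_t offset = (off_t)((cmd << MMAP_CMD_SHIFT) | uar_idx) * page_size;

	return mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, cmd_fd, offset);
}

With MLX5_IB_MMAP_WC_PAGE the kernel now fails with -EPERM when the architecture cannot provide write-combining (for example x86 without PAT), while MLX5_IB_MMAP_REGULAR_PAGE keeps the historical best-effort WC behavior and MLX5_IB_MMAP_NC_PAGE maps the page non-cached.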
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index b46c255..c4a9825 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -70,6 +70,8 @@ enum {
enum mlx5_ib_mmap_cmd {
MLX5_IB_MMAP_REGULAR_PAGE = 0,
MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES = 1,
+ MLX5_IB_MMAP_WC_PAGE = 2,
+ MLX5_IB_MMAP_NC_PAGE = 3,
/* 5 is chosen in order to be compatible with old versions of libmlx5 */
MLX5_IB_MMAP_CORE_CLOCK = 5,
};
@@ -356,6 +358,7 @@ enum mlx5_ib_qp_flags {
MLX5_IB_QP_SIGNATURE_HANDLING = 1 << 5,
/* QP uses 1 as its source QP number */
MLX5_IB_QP_SQPN_QP1 = 1 << 6,
+ MLX5_IB_QP_CAP_SCATTER_FCS = 1 << 7,
};
struct mlx5_umr_wr {
@@ -712,9 +715,8 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
enum ib_mr_type mr_type,
u32 max_num_sg);
-int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
- struct scatterlist *sg,
- int sg_nents);
+int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
const struct ib_wc *in_wc, const struct ib_grh *in_grh,
const struct ib_mad_hdr *in, size_t in_mad_size,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 4d5bff1..8cf2ce5 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1751,26 +1751,33 @@ done:
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
struct scatterlist *sgl,
- unsigned short sg_nents)
+ unsigned short sg_nents,
+ unsigned int *sg_offset_p)
{
struct scatterlist *sg = sgl;
struct mlx5_klm *klms = mr->descs;
+ unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
u32 lkey = mr->ibmr.pd->local_dma_lkey;
int i;
- mr->ibmr.iova = sg_dma_address(sg);
+ mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
mr->ibmr.length = 0;
mr->ndescs = sg_nents;
for_each_sg(sgl, sg, sg_nents, i) {
if (unlikely(i > mr->max_descs))
break;
- klms[i].va = cpu_to_be64(sg_dma_address(sg));
- klms[i].bcount = cpu_to_be32(sg_dma_len(sg));
+ klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
+ klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
klms[i].key = cpu_to_be32(lkey);
mr->ibmr.length += sg_dma_len(sg);
+
+ sg_offset = 0;
}
+ if (sg_offset_p)
+ *sg_offset_p = sg_offset;
+
return i;
}
@@ -1788,9 +1795,8 @@ static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
return 0;
}
-int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
- struct scatterlist *sg,
- int sg_nents)
+int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
{
struct mlx5_ib_mr *mr = to_mmr(ibmr);
int n;
@@ -1802,9 +1808,10 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
DMA_TO_DEVICE);
if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
- n = mlx5_ib_sg_to_klms(mr, sg, sg_nents);
+ n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset);
else
- n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page);
+ n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
+ mlx5_set_page);
ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
mr->desc_size * mr->max_descs,
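
The sg_offset plumbing above lets a caller start the mapping partway into the first scatterlist element: mlx5_ib_sg_to_klms() adds the offset to the first KLM's virtual address and shrinks its byte count, then clears the offset for the remaining entries, while the non-KLM path hands it down to ib_sg_to_pages(). A caller-side sketch follows, assuming the post-series ib_map_mr_sg() signature that passes sg_offset by pointer ahead of the page size; map_with_offset() is a hypothetical helper, not part of this patch.

#include <rdma/ib_verbs.h>

/*
 * Hypothetical ULP helper: map 'nents' SG entries, skipping 'offset'
 * bytes of the first one. The core updates 'offset' as it consumes
 * the list, so it reads zero once the first element has been mapped.
 */
static int map_with_offset(struct ib_mr *mr, struct scatterlist *sg,
			   int nents, unsigned int offset)
{
	int n = ib_map_mr_sg(mr, sg, nents, &offset, PAGE_SIZE);

	return n < nents ? -EINVAL : 0;
}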
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 8dee8bc..5041176 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1028,6 +1028,7 @@ static int get_rq_pas_size(void *qpc)
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, void *qpin)
{
+ struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
__be64 *pas;
__be64 *qp_pas;
void *in;
@@ -1051,6 +1052,9 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));
+ if (mqp->flags & MLX5_IB_QP_CAP_SCATTER_FCS)
+ MLX5_SET(rqc, rqc, scatter_fcs, 1);
+
wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, end_padding_mode,
@@ -1136,11 +1140,12 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
if (qp->rq.wqe_cnt) {
+ rq->base.container_mibqp = qp;
+
err = create_raw_packet_qp_rq(dev, rq, in);
if (err)
goto err_destroy_sq;
- rq->base.container_mibqp = qp;
err = create_raw_packet_qp_tir(dev, rq, tdn);
if (err)
@@ -1252,6 +1257,19 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return -EOPNOTSUPP;
}
+ if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ mlx5_ib_dbg(dev, "Scatter FCS is supported only for Raw Packet QPs");
+ return -EOPNOTSUPP;
+ }
+ if (!MLX5_CAP_GEN(dev->mdev, eth_net_offloads) ||
+ !MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
+ mlx5_ib_dbg(dev, "Scatter FCS isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+ qp->flags |= MLX5_IB_QP_CAP_SCATTER_FCS;
+ }
+
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
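
Scatter FCS ties the three pieces of this series together: mlx5_ib_query_device() advertises IB_DEVICE_RAW_SCATTER_FCS when the HCA reports eth_net_offloads and scatter_fcs, create_qp_common() accepts IB_QP_CREATE_SCATTER_FCS only for raw packet QPs on such devices, and create_raw_packet_qp_rq() then sets the scatter_fcs bit in the RQ context. A sketch of a kernel consumer, with pd, scq and rcq as placeholders:

#include <rdma/ib_verbs.h>

/* Request FCS scatter on a raw packet QP, checking the cap flag first. */
static struct ib_qp *create_fcs_qp(struct ib_pd *pd, struct ib_cq *scq,
				   struct ib_cq *rcq)
{
	struct ib_qp_init_attr attr = {
		.qp_type      = IB_QPT_RAW_PACKET,
		.create_flags = IB_QP_CREATE_SCATTER_FCS,
		.send_cq      = scq,
		.recv_cq      = rcq,
		.cap = {
			.max_send_wr  = 1,
			.max_recv_wr  = 64,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
	};

	if (!(pd->device->attrs.device_cap_flags & IB_DEVICE_RAW_SCATTER_FCS))
		return ERR_PTR(-EOPNOTSUPP);

	return ib_create_qp(pd, &attr);
}

With the flag set, received Ethernet frames are scattered to the receive buffers including their 4-byte frame check sequence.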